Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
Synced 2026-01-12 00:28:31 -05:00

Compare commits (7 commits): ...swiftyos/l
| Author | SHA1 | Date |
|---|---|---|
|  | 428963151c |  |
|  | 2a843cac2d |  |
|  | 8707bdc7fb |  |
|  | 838121e716 |  |
|  | 5bb4e1d9c0 |  |
|  | a56235a7db |  |
|  | ee2be28505 |  |
@@ -23,18 +23,6 @@
# Frontend
!frontend/build/web/

# rnd
!rnd/

# Explicitly re-ignore some folders
.*
**/__pycache__
# rnd
rnd/autogpt_builder/.next/
rnd/autogpt_builder/node_modules
rnd/autogpt_builder/.env.example
rnd/autogpt_builder/.env.local
rnd/autogpt_server/.env
rnd/autogpt_server/.venv/

rnd/market/.env
.gitattributes (vendored, 5 changed lines)

@@ -3,8 +3,3 @@ frontend/build/** linguist-generated
**/poetry.lock linguist-generated

docs/_javascript/** linguist-vendored

# Exclude VCR cassettes from stats
forge/tests/vcr_cassettes/**/**.y*ml linguist-generated

* text=auto
.github/CODEOWNERS (vendored, 12 changed lines)

@@ -1,7 +1,5 @@
* @Significant-Gravitas/maintainers
.github/workflows/ @Significant-Gravitas/devops
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
rnd/infra @Significant-Gravitas/devops
.github/CODEOWNERS @Significant-Gravitas/admins
.github/workflows/ @Significant-Gravitas/devops
autogpt/ @Significant-Gravitas/maintainers
forge/ @Significant-Gravitas/forge-maintainers
benchmark/ @Significant-Gravitas/benchmark-maintainers
frontend/ @Significant-Gravitas/frontend-maintainers
.github/ISSUE_TEMPLATE/1.bug.yml (vendored, 17 changed lines)

@@ -88,16 +88,14 @@ body:

  - type: dropdown
    attributes:
      label: What LLM Provider do you use?
      label: Do you use OpenAI GPT-3 or GPT-4?
      description: >
        If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
        the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
      options:
        - Azure
        - Groq
        - Anthropic
        - Llamafile
        - Other (detail in issue)
        - GPT-3.5
        - GPT-4
        - GPT-4(32k)
    validations:
      required: true

@@ -128,13 +126,6 @@ body:
      label: Specify the area
      description: Please specify the area you think is best related to the issue.

  - type: input
    attributes:
      label: What commit or version are you using?
      description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
    validations:
      required: true

  - type: textarea
    attributes:
      label: Describe your issue.
.github/labeler.yml (vendored, 8 changed lines)

@@ -17,11 +17,3 @@ Frontend:
documentation:
- changed-files:
  - any-glob-to-any-file: docs/**

Builder:
- changed-files:
  - any-glob-to-any-file: rnd/autogpt_builder/**

Server:
- changed-files:
  - any-glob-to-any-file: rnd/autogpt_server/**
.github/workflows/autogpt-builder-ci.yml (vendored, 41 changed lines; file deleted)

@@ -1,41 +0,0 @@
name: AutoGPT Builder CI

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/autogpt-builder-ci.yml'
      - 'rnd/autogpt_builder/**'
  pull_request:
    paths:
      - '.github/workflows/autogpt-builder-ci.yml'
      - 'rnd/autogpt_builder/**'

defaults:
  run:
    shell: bash
    working-directory: rnd/autogpt_builder

jobs:

  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '21'

      - name: Install dependencies
        run: |
          npm install

      - name: Check formatting with Prettier
        run: |
          npx prettier --check .

      - name: Run lint
        run: |
          npm run lint
.github/workflows/autogpt-ci.yml (vendored, 107 changed lines)

@@ -6,11 +6,13 @@ on:
    paths:
      - '.github/workflows/autogpt-ci.yml'
      - 'autogpt/**'
      - '!autogpt/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/autogpt-ci.yml'
      - 'autogpt/**'
      - '!autogpt/tests/vcr_cassettes'

concurrency:
  group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -71,6 +73,37 @@ jobs:
          git config --global user.name "Auto-GPT-Bot"
          git config --global user.email "github-bot@agpt.co"

      - name: Checkout cassettes
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        env:
          PR_BASE: ${{ github.event.pull_request.base.ref }}
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          cassette_base_branch="${PR_BASE}"
          cd tests/vcr_cassettes

          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
            cassette_base_branch="master"
          fi

          if git ls-remote --exit-code --heads origin $cassette_branch ; then
            git fetch origin $cassette_branch
            git fetch origin $cassette_base_branch

            git checkout $cassette_branch

            # Pick non-conflicting cassette updates from the base branch
            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
            echo "Using cassettes from mirror branch '$cassette_branch'," \
              "synced to upstream branch '$cassette_base_branch'."
          else
            git checkout -b $cassette_branch
            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
              "Using cassettes from '$cassette_base_branch'."
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:

@@ -130,6 +163,80 @@ jobs:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: autogpt-agent,${{ runner.os }}

      - id: setup_git_auth
        name: Set up git token authentication
        # Cassettes may be pushed even when tests fail
        if: success() || failure()
        run: |
          config_key="http.${{ github.server_url }}/.extraheader"
          if [ "${{ runner.os }}" = 'macOS' ]; then
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
          else
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
          fi

          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          cd tests/vcr_cassettes
          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
        env:
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
            is_pull_request=true
            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          else
            cassette_branch="${{ github.ref_name }}"
          fi

          cd tests/vcr_cassettes
          # Commit & push changes to cassettes if any
          if ! git diff --quiet; then
            git add .
            git commit -m "Auto-update cassettes"
            git push origin HEAD:$cassette_branch
            if [ ! $is_pull_request ]; then
              cd ../..
              git add tests/vcr_cassettes
              git commit -m "Update cassette submodule"
              git push origin HEAD:$cassette_branch
            fi
            echo "updated=true" >> $GITHUB_OUTPUT
          else
            echo "updated=false" >> $GITHUB_OUTPUT
            echo "No cassette changes to commit"
          fi

      - name: Post Set up git token auth
        if: steps.setup_git_auth.outcome == 'success'
        run: |
          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

      - name: Apply "behaviour change" label and comment on PR
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          PR_NUMBER="${{ github.event.pull_request.number }}"
          TOKEN="${{ secrets.PAT_REVIEW }}"
          REPO="${{ github.repository }}"

          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
            echo "Adding label and comment..."
            echo $TOKEN | gh auth login --with-token
            gh issue edit $PR_NUMBER --add-label "behaviour change"
            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
          fi

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
.github/workflows/autogpt-docker-ci.yml (vendored, 2 changed lines)

@@ -6,11 +6,13 @@ on:
    paths:
      - '.github/workflows/autogpt-docker-ci.yml'
      - 'autogpt/**'
      - '!autogpt/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/autogpt-docker-ci.yml'
      - 'autogpt/**'
      - '!autogpt/tests/vcr_cassettes'

concurrency:
  group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
.github/workflows/autogpt-infra-ci.yml (vendored, 56 changed lines; file deleted)

@@ -1,56 +0,0 @@
name: AutoGPT Builder Infra

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/autogpt-infra-ci.yml'
      - 'rnd/infra/**'
  pull_request:
    paths:
      - '.github/workflows/autogpt-infra-ci.yml'
      - 'rnd/infra/**'

defaults:
  run:
    shell: bash
    working-directory: rnd/infra

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: TFLint
        uses: pauloconnor/tflint-action@v0.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tflint_path: terraform/
          tflint_recurse: true
          tflint_changed_only: false

      - name: Set up Helm
        uses: azure/setup-helm@v4.2.0
        with:
          version: v3.14.4

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Run chart-testing (lint)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct lint --target-branch ${{ github.event.repository.default_branch }}
.github/workflows/autogpt-server-ci.yml (vendored, 179 changed lines)

@@ -6,11 +6,13 @@ on:
    paths:
      - ".github/workflows/autogpt-server-ci.yml"
      - "rnd/autogpt_server/**"
      - "!autogpt/tests/vcr_cassettes"
  pull_request:
    branches: [master, development, release-*]
    paths:
      - ".github/workflows/autogpt-server-ci.yml"
      - "rnd/autogpt_server/**"
      - "!autogpt/tests/vcr_cassettes"

concurrency:
  group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -34,15 +36,6 @@ jobs:
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      - name: Setup PostgreSQL
        uses: ikalnytskyi/action-setup-postgres@v6
        with:
          username: ${{ secrets.DB_USER || 'postgres' }}
          password: ${{ secrets.DB_PASS || 'postgres' }}
          database: postgres
          port: 5432
        id: postgres

      # Quite slow on macOS (2~4 minutes to set up Docker)
      # - name: Set up Docker (macOS)
      #   if: runner.os == 'macOS'

@@ -119,37 +112,157 @@ jobs:

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      - name: Run pytest with coverage
        run: |
          if [[ "${{ runner.debug }}" == "1" ]]; then
            poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
          else
            poetry run pytest -vv test
          fi
        if: success() || (failure() && steps.lint.outcome == 'failure')
          poetry run pytest -vv \
            test
        env:
          LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
        env:
          CI: true
          PLAIN_OUTPUT: True
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          DB_USER: ${{ secrets.DB_USER || 'postgres' }}
          DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
          DB_NAME: postgres
          DB_PORT: 5432
          RUN_ENV: local
          PORT: 8080
          DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}
          CI: true
          PLAIN_OUTPUT: True

      # - name: Upload coverage reports to Codecov
      #   uses: codecov/codecov-action@v4
      #   with:
      #     token: ${{ secrets.CODECOV_TOKEN }}
      #     flags: autogpt-server,${{ runner.os }}

  build:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
        platform-os: [ubuntu, macos, macos-arm64, windows]
    runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        # On Windows, unpacking cached dependencies takes longer than just installing them
        if: runner.os != 'Windows'
        uses: actions/cache@v4
        with:
          path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
          key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}

      - name: Install Poetry (Unix)
        if: runner.os != 'Windows'
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Poetry (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -

          $env:PATH += ";$env:APPDATA\Python\Scripts"
          echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates

      - name: install rpm
        if: matrix.platform-os == 'ubuntu'
        run: sudo apt-get install -y alien fakeroot rpm

      - name: Build distribution
        run: |
          case "${{ matrix.platform-os }}" in
            "macos" | "macos-arm64")
              ${MAC_COMMAND}
              ;;
            "windows")
              ${WINDOWS_COMMAND}
              ;;
            *)
              ${LINUX_COMMAND}
              ;;
          esac
        env:
          MAC_COMMAND: "poetry run poe dist_dmg"
          WINDOWS_COMMAND: "poetry run poe dist_msi"
          LINUX_COMMAND: "poetry run poe dist_appimage"

      # break this into seperate steps each with their own name that matches the file
      - name: Upload App artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-app-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.app

      - name: Upload dmg artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-dmg-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/AutoGPTServer.dmg

      - name: Upload msi artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-msi-${{ matrix.platform-os }}
          path: D:\a\AutoGPT\AutoGPT\rnd\autogpt_server\dist\*.msi

      - name: Upload deb artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-deb-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.deb

      - name: Upload rpm artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-rpm-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.rpm

      - name: Upload tar.gz artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-tar.gz-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.tar.gz

      - name: Upload zip artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-zip-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.zip

      - name: Upload pkg artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-pkg-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.pkg

      - name: Upload AppImage artifact
        uses: actions/upload-artifact@v4
        with:
          name: autogptserver-AppImage-${{ matrix.platform-os }}
          path: /Users/runner/work/AutoGPT/AutoGPT/rnd/autogpt_server/build/*.AppImage
.github/workflows/forge-ci.yml (vendored, 107 changed lines)

@@ -6,13 +6,11 @@ on:
    paths:
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
      - '!forge/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:
      - '.github/workflows/forge-ci.yml'
      - 'forge/**'
      - '!forge/tests/vcr_cassettes'

concurrency:
  group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}

@@ -68,37 +66,6 @@ jobs:
          fetch-depth: 0
          submodules: true

      - name: Checkout cassettes
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        env:
          PR_BASE: ${{ github.event.pull_request.base.ref }}
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          cassette_base_branch="${PR_BASE}"
          cd tests/vcr_cassettes

          if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
            cassette_base_branch="master"
          fi

          if git ls-remote --exit-code --heads origin $cassette_branch ; then
            git fetch origin $cassette_branch
            git fetch origin $cassette_base_branch

            git checkout $cassette_branch

            # Pick non-conflicting cassette updates from the base branch
            git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
            echo "Using cassettes from mirror branch '$cassette_branch'," \
              "synced to upstream branch '$cassette_base_branch'."
          else
            git checkout -b $cassette_branch
            echo "Branch '$cassette_branch' does not exist in cassette submodule." \
              "Using cassettes from '$cassette_base_branch'."
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:

@@ -154,80 +121,6 @@ jobs:
          token: ${{ secrets.CODECOV_TOKEN }}
          flags: forge,${{ runner.os }}

      - id: setup_git_auth
        name: Set up git token authentication
        # Cassettes may be pushed even when tests fail
        if: success() || failure()
        run: |
          config_key="http.${{ github.server_url }}/.extraheader"
          if [ "${{ runner.os }}" = 'macOS' ]; then
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
          else
            base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
          fi

          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          cd tests/vcr_cassettes
          git config "$config_key" \
            "Authorization: Basic $base64_pat"

          echo "config_key=$config_key" >> $GITHUB_OUTPUT

      - id: push_cassettes
        name: Push updated cassettes
        # For pull requests, push updated cassettes even when tests fail
        if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
        env:
          PR_BRANCH: ${{ github.event.pull_request.head.ref }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
        run: |
          if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
            is_pull_request=true
            cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
          else
            cassette_branch="${{ github.ref_name }}"
          fi

          cd tests/vcr_cassettes
          # Commit & push changes to cassettes if any
          if ! git diff --quiet; then
            git add .
            git commit -m "Auto-update cassettes"
            git push origin HEAD:$cassette_branch
            if [ ! $is_pull_request ]; then
              cd ../..
              git add tests/vcr_cassettes
              git commit -m "Update cassette submodule"
              git push origin HEAD:$cassette_branch
            fi
            echo "updated=true" >> $GITHUB_OUTPUT
          else
            echo "updated=false" >> $GITHUB_OUTPUT
            echo "No cassette changes to commit"
          fi

      - name: Post Set up git token auth
        if: steps.setup_git_auth.outcome == 'success'
        run: |
          git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
          git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'

      - name: Apply "behaviour change" label and comment on PR
        if: ${{ startsWith(github.event_name, 'pull_request') }}
        run: |
          PR_NUMBER="${{ github.event.pull_request.number }}"
          TOKEN="${{ secrets.PAT_REVIEW }}"
          REPO="${{ github.repository }}"

          if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
            echo "Adding label and comment..."
            echo $TOKEN | gh auth login --with-token
            gh issue edit $PR_NUMBER --add-label "behaviour change"
            gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
          fi

      - name: Upload logs to artifact
        if: always()
        uses: actions/upload-artifact@v4
.github/workflows/pr-label.yml (vendored, 2 changed lines)

@@ -5,7 +5,7 @@ on:
  push:
    branches: [ master, development, release-* ]
    paths-ignore:
      - 'forge/tests/vcr_cassettes'
      - 'autogpt/tests/vcr_cassettes'
      - 'benchmark/reports/**'
  # So that the `dirtyLabel` is removed if conflicts are resolve
  # We recommend `pull_request_target` so that github secrets are available.
.github/workflows/python-checks.yml (vendored, 4 changed lines)

@@ -9,7 +9,7 @@ on:
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!forge/tests/vcr_cassettes'
      - '!autogpt/tests/vcr_cassettes'
  pull_request:
    branches: [ master, development, release-* ]
    paths:

@@ -18,7 +18,7 @@ on:
      - 'forge/**'
      - 'benchmark/**'
      - '**.py'
      - '!forge/tests/vcr_cassettes'
      - '!autogpt/tests/vcr_cassettes'

concurrency:
  group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
.github/workflows/workflow-checker.yml (vendored, 31 changed lines; file deleted)

@@ -1,31 +0,0 @@
name: PR Status Checker
on:
  pull_request:
    types: [opened, synchronize, reopened]

jobs:
  status-check:
    name: Check PR Status
    runs-on: ubuntu-latest
    steps:
      # - name: Wait some time for all actions to start
      #   run: sleep 30
      - uses: actions/checkout@v4
        # with:
        #   fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install requests
      - name: Check PR Status
        run: |
          echo "Current directory before running Python script:"
          pwd
          echo "Attempting to run Python script:"
          python check_actions_status.py
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
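The deleted workflow above installs `requests` and then runs `check_actions_status.py`, whose contents are not part of this diff. As a rough sketch of what a script like that could look like against the standard GitHub REST API (a hypothetical reconstruction, not the repository's actual code):

```python
import os
import sys

import requests

# Hypothetical sketch of a check_actions_status.py-style script: list the
# check runs for the current commit and fail if any completed run did not pass.
GITHUB_API = "https://api.github.com"


def main() -> None:
    token = os.environ["GITHUB_TOKEN"]
    repo = os.environ["GITHUB_REPOSITORY"]  # e.g. "Significant-Gravitas/AutoGPT"
    sha = os.environ["GITHUB_SHA"]

    resp = requests.get(
        f"{GITHUB_API}/repos/{repo}/commits/{sha}/check-runs",
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        },
        timeout=30,
    )
    resp.raise_for_status()

    failed = [
        run["name"]
        for run in resp.json()["check_runs"]
        if run["status"] == "completed"
        and run["conclusion"] not in ("success", "neutral", "skipped")
    ]
    if failed:
        print("Failing checks:", ", ".join(failed))
        sys.exit(1)
    print("All completed checks passed.")


if __name__ == "__main__":
    main()
```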
.gitignore (vendored, 5 changed lines)

@@ -32,6 +32,7 @@ dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/

@@ -161,7 +162,7 @@ agbenchmark/reports/

# Nodejs
package-lock.json

package.json

# Allow for locally private items
# private

@@ -169,5 +170,3 @@ pri*
# ignore
ig*
.github_access_token
LICENSE.rtf
rnd/autogpt_server/settings.py
.gitmodules (vendored, 4 changed lines)

@@ -1,3 +1,3 @@
[submodule "forge/tests/vcr_cassettes"]
    path = forge/tests/vcr_cassettes
[submodule "autogpt/tests/vcr_cassettes"]
    path = autogpt/tests/vcr_cassettes
    url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
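For context, the `vcr_cassettes` submodules above hold recorded HTTP interactions that the test suites replay instead of calling live APIs. A minimal sketch of the general pattern using the `vcrpy` package (an illustrative test, not code from this repository):

```python
import requests
import vcr

# The first run records real HTTP traffic into a YAML cassette; subsequent
# runs replay it, so tests are fast, deterministic, and offline-friendly.
my_vcr = vcr.VCR(
    cassette_library_dir="tests/vcr_cassettes",
    record_mode="once",                # record if missing, replay otherwise
    filter_headers=["authorization"],  # keep API keys out of committed cassettes
)


@my_vcr.use_cassette("fetch_example.yaml")
def test_fetch_example():
    resp = requests.get("https://api.github.com/repos/Significant-Gravitas/AutoGPT")
    assert resp.status_code == 200
```

Keeping cassettes in a dedicated submodule, as done here, keeps bulky recordings out of the main repository's history while the CI workflows above sync them per branch.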
@@ -97,7 +97,7 @@ repos:
      alias: pyright-benchmark
      entry: poetry -C benchmark run pyright
      args: [-p, benchmark, benchmark]
      files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
      files: ^benchmark/(agbenchmark|tests)/
      types: [file]
      language: system
      pass_filenames: false
.vscode/all-projects.code-workspace (vendored, 61 changed lines; file deleted)

@@ -1,61 +0,0 @@
{
  "folders": [
    { "name": "autogpt", "path": "../autogpt" },
    { "name": "benchmark", "path": "../benchmark" },
    { "name": "docs", "path": "../docs" },
    { "name": "forge", "path": "../forge" },
    { "name": "frontend", "path": "../frontend" },
    { "name": "autogpt_server", "path": "../rnd/autogpt_server" },
    { "name": "autogpt_builder", "path": "../rnd/autogpt_builder" },
    { "name": "market", "path": "../rnd/market" },
    { "name": "lib", "path": "../rnd/autogpt_libs" },
    { "name": "infra", "path": "../rnd/infra" },
    { "name": "[root]", "path": ".." }
  ],
  "settings": {
    "python.analysis.typeCheckingMode": "basic"
  },
  "extensions": {
    "recommendations": [
      "charliermarsh.ruff",
      "dart-code.flutter",
      "ms-python.black-formatter",
      "ms-python.vscode-pylance",
      "prisma.prisma",
      "qwtel.sqlite-viewer"
    ]
  }
}
@@ -1,5 +1,5 @@
# AutoGPT Contribution Guide
If you are reading this, you are probably looking for the full **[contribution guide]**,
If you are reading this, you are probably looking for our **[contribution guide]**,
which is part of our [wiki].

Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.

@@ -15,17 +15,15 @@ Also check out our [🚀 Roadmap][roadmap] for information about our priorities
2. We encourage you to collaborate with fellow community members on some of our bigger
   [todo's][roadmap]!
   * We highly recommend to post your idea and discuss it in the [dev channel].
3. Create a draft PR when starting work on bigger changes.
4. Adhere to the [Code Guidelines]
4. Create a draft PR when starting work on bigger changes.
3. Please also consider contributing something other than code; see the
   [contribution guide] for options.
5. Clearly explain your changes when submitting a PR.
6. Don't submit broken code: test/validate your changes.
7. Avoid making unnecessary changes, especially if they're purely based on your personal
   preferences. Doing so is the maintainers' job. ;-)
8. Please also consider contributing something other than code; see the
   [contribution guide] for options.

[dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
[code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines

If you wish to involve with the project (beyond just contributing PRs), please read the
wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).
@@ -2,11 +2,11 @@

> For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here

Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
Welcome to the Quickstart Guide! This guide will walk you through the process of setting up and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the necessary steps to jumpstart your journey in the world of AI development with AutoGPT.

## System Requirements

This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
This project supports Linux (Debian based), Mac, and Windows Subsystem for Linux (WSL). If you are using a Windows system, you will need to install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).

## Getting Setup

@@ -18,11 +18,11 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
   - In the top-right corner of the page, click Fork.

   

   - On the next page, select your GitHub account to create the fork.
   - On the next page, select your GitHub account to create the fork under.
   - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.

2. **Clone the Repository**
   To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
   To clone the repository, you need to have Git installed on your system. If you don't have Git installed, you can download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
   - Open your terminal.
   - Navigate to the directory where you want to clone the repository.
   - Run the git clone command for the fork you just created

@@ -34,11 +34,11 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
   

4. **Setup the Project**
   Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
   Next we need to setup the required dependencies. We have a tool for helping you do all the tasks you need to on the repo.
   It can be accessed by running the `run` command by typing `./run` in the terminal.

   The first command you need to use is `./run setup.` This will guide you through setting up your system.
   Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:
   The first command you need to use is `./run setup` This will guide you through the process of setting up your system.
   Initially you will get instructions for installing flutter, chrome and setting up your github access token like the following image:

   

@@ -47,7 +47,7 @@ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux
If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.

#### Update WSL
Run the following command in Powershell or Command Prompt:
Run the following command in Powershell or Command Prompt to:
1. Enable the optional WSL and Virtual Machine Platform components.
2. Download and install the latest Linux kernel.
3. Set WSL 2 as the default.

@@ -73,7 +73,7 @@ dos2unix ./run
After executing the above commands, running `./run setup` should work successfully.

#### Store Project Files within the WSL File System
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids issues related to path translations and permissions and provides a more consistent development environment.

You can keep running the command to get feedback on where you are up to with your setup.
When setup has been completed, the command will return an output like this:

@@ -83,7 +83,7 @@ When setup has been completed, the command will return an output like this:
## Creating Your Agent

After completing the setup, the next step is to create your agent template.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with a name of your choosing.

Tips for naming your agent:
* Give it its own unique name, or name it after yourself

@@ -101,21 +101,21 @@ This starts the agent on the URL: `http://localhost:8000/`



The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
The frontend can be accessed from `http://localhost:8000/`, you will first need to login using either a google account or your github account.



Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
Upon logging in you will get a page that looks something like this. With your task history down the left hand side of the page and the 'chat' window to send tasks to your agent.



When you have finished with your agent or just need to restart it, use Ctl-C to end the session. Then, you can re-run the start command.
When you have finished with your agent, or if you just need to restart it, use Ctl-C to end the session then you can re-run the start command.

If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
If you are having issues and want to ensure the agent has been stopped there is a `./run agent stop` command which will kill the process using port 8000, which should be the agent.

## Benchmarking your Agent

The benchmarking system can also be accessed using the CLI too:
The benchmarking system can also be accessed using the cli too:

```bash
agpt % ./run benchmark
```

@@ -163,7 +163,7 @@ The benchmark has been split into different categories of skills you can test yo



Finally, you can run the benchmark with
Finally you can run the benchmark with

```bash
./run benchmark start YOUR_AGENT_NAME
```
README.md (46 changed lines)

@@ -1,43 +1,17 @@
# AutoGPT: Build & Use AI Agents
# AutoGPT: build & use AI agents

[](https://discord.gg/autogpt)
[](https://twitter.com/Auto_GPT)
[](https://opensource.org/licenses/MIT)

**AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
**AutoGPT** is a generalist LLM based AI agent that can autonomously accomplish minor tasks.

## How to Get Started
**Examples**:

https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
- Look up and summarize this research paper
- Write a marketing for food supplements
- Write a blog post detailing the news in AI

### 🧱 AutoGPT Builder

The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!

[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.

### 💽 AutoGPT Server

The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.

### 🐙 Example Agents

Here are two examples of what you can do with AutoGPT:

1. **Reddit Marketing Agent**
   - This agent reads comments on Reddit.
   - It looks for people asking about your product.
   - It then automatically responds to them.

2. **YouTube Content Repurposing Agent**
   - This agent subscribes to your YouTube channel.
   - When you post a new video, it transcribes it.
   - It uses AI to write a search engine optimized blog post.
   - Then, it publishes this blog post to your Medium account.

These examples show just a glimpse of what you can achieve with AutoGPT!

---
Our mission is to provide the tools, so that you can focus on what matters:

- 🏗️ **Building** - Lay the foundation for something amazing.

@@ -49,13 +23,11 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
**📖 [Documentation](https://docs.agpt.co)** | **🚀 [Contributing](CONTRIBUTING.md)**
**🛠️ [Build your own Agent - Quickstart](QUICKSTART.md)**

## 🧱 Building blocks

---
## 🤖 AutoGPT Classic
> Below is information about the classic version of AutoGPT.

**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
### 🏗️ Forge

**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
Binary file not shown (before: 49 KiB).
@@ -11,15 +11,12 @@
## GROQ_API_KEY - Groq API Key (Example: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)
# GROQ_API_KEY=

## LLAMAFILE_API_BASE - Llamafile API base URL
# LLAMAFILE_API_BASE=http://localhost:8080/v1

## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true

## COMPONENT_CONFIG_FILE - Path to the json config file (Default: None)
# COMPONENT_CONFIG_FILE=
## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False

### Workspace ###

@@ -47,6 +44,9 @@

### Miscellaneous ###

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AUTHORISE COMMAND KEY - Key to authorise commands
# AUTHORISE_COMMAND_KEY=y

@@ -96,21 +96,59 @@
## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-3-small

################################################################################
### SHELL EXECUTION
################################################################################

## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
# SHELL_COMMAND_CONTROL=denylist

## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by AutoGPT (Default: sudo,su)
# SHELL_DENYLIST=sudo,su

## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by AutoGPT (Default: None)
# SHELL_ALLOWLIST=

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### Common

## IMAGE_PROVIDER - Image provider (Default: dalle)
# IMAGE_PROVIDER=dalle

## IMAGE_SIZE - Image size (Default: 256)
# IMAGE_SIZE=256

### Huggingface (IMAGE_PROVIDER=huggingface)

## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4

## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
# HUGGINGFACE_API_TOKEN=

### Stable Diffusion (IMAGE_PROVIDER=sdwebui)

## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
# SD_WEBUI_AUTH=

## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
# SD_WEBUI_URL=http://localhost:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
# AUDIO_TO_TEXT_PROVIDER=huggingface

## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4

################################################################################
### GITHUB
################################################################################

@@ -125,6 +163,18 @@
### WEB BROWSING
################################################################################

## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
# HEADLESS_BROWSER=True

## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
# USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (Default: 3000)
# BROWSE_CHUNK_MAX_LENGTH=3000

## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. (Default: en_core_web_sm)
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

## GOOGLE_API_KEY - Google API key (Default: None)
# GOOGLE_API_KEY=

@@ -148,6 +198,13 @@
## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Example: None)
# ELEVENLABS_VOICE_ID=

################################################################################
### CHAT MESSAGES
################################################################################

## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
# CHAT_MESSAGES_ENABLED=False

################################################################################
### LOGGING
################################################################################
autogpt/.gitattributes (vendored, new file, 5 changed lines)

@@ -0,0 +1,5 @@
# Exclude VCR cassettes from stats
tests/vcr_cassettes/**/**.y*ml linguist-generated

# Mark documentation as such
docs/**.md linguist-documentation
autogpt/.vscode/settings.json (vendored, 3 changed lines; file deleted)

@@ -1,3 +0,0 @@
{
  "python.analysis.typeCheckingMode": "basic",
}
@@ -68,6 +68,10 @@ Options:
                                  continuous mode
  --speak                         Enable Speak Mode
  --debug                         Enable Debug Mode
  -b, --browser-name TEXT         Specifies which web-browser to use when
                                  using selenium to scrape the web.
  --allow-downloads               Dangerous: Allows AutoGPT to download files
                                  natively.
  --skip-news                     Specifies whether to suppress the output of
                                  latest news on startup.
  --install-plugin-deps           Installs external dependencies for 3rd party

@@ -86,7 +90,6 @@ Options:
  --override-directives           If specified, --constraint, --resource and
                                  --best-practice will override the AI's
                                  directives instead of being appended to them
  --component-config-file TEXT    Path to the json configuration file.
  --help                          Show this message and exit.
```
</details>

@@ -108,6 +111,10 @@ Usage: python -m autogpt serve [OPTIONS]

Options:
  --debug                  Enable Debug Mode
  -b, --browser-name TEXT  Specifies which web-browser to use when using
                           selenium to scrape the web.
  --allow-downloads        Dangerous: Allows AutoGPT to download files
                           natively.
  --install-plugin-deps    Installs external dependencies for 3rd party
                           plugins.
  --help                   Show this message and exit.
@@ -2,17 +2,17 @@ from typing import Optional

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.file_storage.base import FileStorage
from forge.llm.providers import MultiProvider

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.config import AppConfig


def create_agent(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    app_config: Config,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
    ai_profile: Optional[AIProfile] = None,

@@ -38,7 +38,7 @@

def configure_agent_with_state(
    state: AgentSettings,
    app_config: AppConfig,
    app_config: Config,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:

@@ -51,7 +51,7 @@


def _configure_agent(
    app_config: AppConfig,
    app_config: Config,
    llm_provider: MultiProvider,
    file_storage: FileStorage,
    agent_id: str = "",

@@ -80,7 +80,7 @@
        settings=agent_state,
        llm_provider=llm_provider,
        file_storage=file_storage,
        app_config=app_config,
        legacy_config=app_config,
    )


@@ -89,7 +89,7 @@ def create_agent_state(
    task: str,
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
    app_config: Config,
) -> AgentSettings:
    return AgentSettings(
        agent_id=agent_id,

@@ -104,5 +104,5 @@
        allow_fs_access=not app_config.restrict_to_workspace,
        use_functions_api=app_config.openai_functions,
        ),
        history=Agent.default_settings.history.model_copy(deep=True),
        history=Agent.default_settings.history.copy(deep=True),
    )
@@ -6,7 +6,7 @@ from forge.file_storage.base import FileStorage

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
    from autogpt.app.config import AppConfig
    from forge.config.config import Config
    from forge.llm.providers import MultiProvider

from .configurators import _configure_agent

@@ -16,7 +16,7 @@ from .profile_generator import generate_agent_profile_for_task
async def generate_agent_for_task(
    agent_id: str,
    task: str,
    app_config: AppConfig,
    app_config: Config,
    file_storage: FileStorage,
    llm_provider: MultiProvider,
) -> Agent:
@@ -3,6 +3,7 @@ import logging

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.llm.prompting import ChatPrompt, LanguageModelClassification, PromptStrategy
from forge.llm.providers import MultiProvider
from forge.llm.providers.schema import (

@@ -13,13 +14,11 @@ from forge.llm.providers.schema import (
from forge.models.config import SystemConfiguration, UserConfigurable
from forge.models.json_schema import JSONSchema

from autogpt.app.config import AppConfig

logger = logging.getLogger(__name__)


class AgentProfileGeneratorConfiguration(SystemConfiguration):
    llm_classification: LanguageModelClassification = UserConfigurable(
    model_classification: LanguageModelClassification = UserConfigurable(
        default=LanguageModelClassification.SMART_MODEL
    )
    _example_call: object = {

@@ -137,7 +136,7 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration):
                required=True,
            ),
        },
    ).model_dump()
    ).dict()
    )


@@ -148,21 +147,21 @@ class AgentProfileGenerator(PromptStrategy):

    def __init__(
        self,
        llm_classification: LanguageModelClassification,
        model_classification: LanguageModelClassification,
        system_prompt: str,
        user_prompt_template: str,
        create_agent_function: dict,
    ):
        self._llm_classification = llm_classification
        self._model_classification = model_classification
        self._system_prompt_message = system_prompt
        self._user_prompt_template = user_prompt_template
        self._create_agent_function = CompletionModelFunction.model_validate(
        self._create_agent_function = CompletionModelFunction.parse_obj(
            create_agent_function
        )

    @property
    def llm_classification(self) -> LanguageModelClassification:
        return self._llm_classification
    def model_classification(self) -> LanguageModelClassification:
        return self._model_classification

    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
        system_message = ChatMessage.system(self._system_prompt_message)

@@ -213,7 +212,7 @@ class AgentProfileGenerator(PromptStrategy):

async def generate_agent_profile_for_task(
    task: str,
    app_config: AppConfig,
    app_config: Config,
    llm_provider: MultiProvider,
) -> tuple[AIProfile, AIDirectives]:
    """Generates an AIConfig object from the given string.

@@ -222,7 +221,7 @@
        AIConfig: The AIConfig object tailored to the user's input
    """
    agent_profile_generator = AgentProfileGenerator(
        **AgentProfileGenerator.default_configuration.model_dump()  # HACK
        **AgentProfileGenerator.default_configuration.dict()  # HACK
    )

    prompt = agent_profile_generator.build_prompt(task)
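A recurring pattern in these Python hunks is the toggle between Pydantic v2 method names (`model_dump`, `model_copy`, `model_validate`) and their v1 equivalents (`dict`, `copy`, `parse_obj`). A minimal sketch of the correspondence, assuming Pydantic v2 and a made-up model:

```python
from pydantic import BaseModel


class Profile(BaseModel):
    """Hypothetical stand-in for the configuration models touched above."""

    name: str = "AutoGPT"
    smart_llm: str = "gpt-4"


p = Profile()

data = p.model_dump()                 # v1: p.dict()
clone = p.model_copy(deep=True)       # v1: p.copy(deep=True)
again = Profile.model_validate(data)  # v1: Profile.parse_obj(data)

assert again == p
```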
@@ -26,10 +26,10 @@ class MyAgent(Agent):
        settings: AgentSettings,
        llm_provider: MultiProvider
        file_storage: FileStorage,
        app_config: AppConfig,
        legacy_config: Config,
    ):
        # Call the parent constructor to bring in the default components
        super().__init__(settings, llm_provider, file_storage, app_config)
        super().__init__(settings, llm_provider, file_storage, legacy_config)
        # Add your custom component
        self.my_component = MyComponent()
```
@@ -18,11 +18,7 @@ from forge.components.action_history import (
    ActionHistoryComponent,
    EpisodicActionHistory,
)
from forge.components.action_history.action_history import ActionHistoryConfiguration
from forge.components.code_executor.code_executor import (
    CodeExecutorComponent,
    CodeExecutorConfiguration,
)
from forge.components.code_executor.code_executor import CodeExecutorComponent
from forge.components.context.context import AgentContext, ContextComponent
from forge.components.file_manager import FileManagerComponent
from forge.components.git_operations import GitOperationsComponent
@@ -62,7 +58,7 @@ from .prompt_strategies.one_shot import (
)

if TYPE_CHECKING:
    from autogpt.app.config import AppConfig
    from forge.config.config import Config

logger = logging.getLogger(__name__)

@@ -95,14 +91,12 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: AppConfig,
        legacy_config: Config,
    ):
        super().__init__(settings)

        self.llm_provider = llm_provider
        prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(
            deep=True
        )
        prompt_config = OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
        prompt_config.use_functions_api = (
            settings.config.use_functions_api
            # Anthropic currently doesn't support tools + prefilling :(
@@ -113,41 +107,33 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):

        # Components
        self.system = SystemComponent()
        self.history = (
            ActionHistoryComponent(
                settings.history,
                lambda x: self.llm_provider.count_tokens(x, self.llm.name),
                llm_provider,
                ActionHistoryConfiguration(
                    llm_name=app_config.fast_llm, max_tokens=self.send_token_limit
                ),
            )
            .run_after(WatchdogComponent)
            .run_after(SystemComponent)
        )
        if not app_config.noninteractive_mode:
            self.user_interaction = UserInteractionComponent()
        self.file_manager = FileManagerComponent(file_storage, settings)
        self.history = ActionHistoryComponent(
            settings.history,
            self.send_token_limit,
            lambda x: self.llm_provider.count_tokens(x, self.llm.name),
            legacy_config,
            llm_provider,
        ).run_after(WatchdogComponent)
        self.user_interaction = UserInteractionComponent(legacy_config)
        self.file_manager = FileManagerComponent(settings, file_storage)
        self.code_executor = CodeExecutorComponent(
            self.file_manager.workspace,
            CodeExecutorConfiguration(
                docker_container_name=f"{settings.agent_id}_sandbox"
            ),
            settings,
            legacy_config,
        )
        self.git_ops = GitOperationsComponent()
        self.image_gen = ImageGeneratorComponent(self.file_manager.workspace)
        self.web_search = WebSearchComponent()
        self.web_selenium = WebSeleniumComponent(
            llm_provider,
            app_config.app_data_dir,
        self.git_ops = GitOperationsComponent(legacy_config)
        self.image_gen = ImageGeneratorComponent(
            self.file_manager.workspace, legacy_config
        )
        self.web_search = WebSearchComponent(legacy_config)
        self.web_selenium = WebSeleniumComponent(legacy_config, llm_provider, self.llm)
        self.context = ContextComponent(self.file_manager.workspace, settings.context)
        self.watchdog = WatchdogComponent(settings.config, settings.history).run_after(
            ContextComponent
        )

        self.event_history = settings.history
        self.app_config = app_config
        self.legacy_config = legacy_config

    async def propose_action(self) -> OneShotAgentActionProposal:
        """Proposes the next action to execute, based on the task and current state.
@@ -162,7 +148,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
        constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
        best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

        directives = self.state.directives.model_copy(deep=True)
        directives = self.state.directives.copy(deep=True)
        directives.resources += resources
        directives.constraints += constraints
        directives.best_practices += best_practices
@@ -174,19 +160,13 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
        # Get messages
        messages = await self.run_pipeline(MessageProvider.get_messages)

        include_os_info = (
            self.code_executor.config.execute_local_commands
            if hasattr(self, "code_executor")
            else False
        )

        prompt: ChatPrompt = self.prompt_strategy.build_prompt(
            messages=messages,
            task=self.state.task,
            ai_profile=self.state.ai_profile,
            ai_directives=directives,
            commands=function_specs_from_commands(self.commands),
            include_os_info=include_os_info,
            include_os_info=self.legacy_config.execute_local_commands,
        )

        logger.debug(f"Executing prompt:\n{dump_prompt(prompt)}")
@@ -297,7 +277,7 @@ class Agent(BaseAgent[OneShotAgentActionProposal], Configurable[AgentSettings]):
            command
            for command in self.commands
            if not any(
                name in self.app_config.disabled_commands for name in command.names
                name in self.legacy_config.disabled_commands for name in command.names
            )
        ]
@@ -28,13 +28,15 @@ _RESPONSE_INTERFACE_NAME = "AssistantResponse"

class AssistantThoughts(ModelWithSummary):
    observations: str = Field(
        description="Relevant observations from your last action (if any)"
        ..., description="Relevant observations from your last action (if any)"
    )
    text: str = Field(description="Thoughts")
    reasoning: str = Field(description="Reasoning behind the thoughts")
    self_criticism: str = Field(description="Constructive self-criticism")
    plan: list[str] = Field(description="Short list that conveys the long-term plan")
    speak: str = Field(description="Summary of thoughts, to say to user")
    text: str = Field(..., description="Thoughts")
    reasoning: str = Field(..., description="Reasoning behind the thoughts")
    self_criticism: str = Field(..., description="Constructive self-criticism")
    plan: list[str] = Field(
        ..., description="Short list that conveys the long-term plan"
    )
    speak: str = Field(..., description="Summary of thoughts, to say to user")

    def summary(self) -> str:
        return self.text
@@ -94,13 +96,11 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        logger: Logger,
    ):
        self.config = configuration
        self.response_schema = JSONSchema.from_dict(
            OneShotAgentActionProposal.model_json_schema()
        )
        self.response_schema = JSONSchema.from_dict(OneShotAgentActionProposal.schema())
        self.logger = logger

    @property
    def llm_classification(self) -> LanguageModelClassification:
    def model_classification(self) -> LanguageModelClassification:
        return LanguageModelClassification.FAST_MODEL  # FIXME: dynamic switching

    def build_prompt(
@@ -182,7 +182,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
        )

    def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
        response_schema = self.response_schema.model_copy(deep=True)
        response_schema = self.response_schema.copy(deep=True)
        assert response_schema.properties
        if use_functions_api and "use_tool" in response_schema.properties:
            del response_schema.properties["use_tool"]
@@ -274,8 +274,5 @@ class OneShotAgentPromptStrategy(PromptStrategy):
            raise InvalidAgentResponseError("Assistant did not use a tool")
        assistant_reply_dict["use_tool"] = response.tool_calls[0].function

        parsed_response = OneShotAgentActionProposal.model_validate(
            assistant_reply_dict
        )
        parsed_response.raw_message = response.copy()
        parsed_response = OneShotAgentActionProposal.parse_obj(assistant_reply_dict)
        return parsed_response
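For context on the AssistantThoughts change above: in pydantic, passing `...` (Ellipsis) as the first argument to `Field` marks a field as explicitly required, and a field with no default at all is required anyway, so both spellings behave the same for these fields. A minimal sketch:

```python
from pydantic import BaseModel, Field, ValidationError


class Thoughts(BaseModel):
    # Explicitly required: `...` means "no default".
    text: str = Field(..., description="Thoughts")
    # Also required, since no default is given.
    reasoning: str = Field(description="Reasoning behind the thoughts")


try:
    Thoughts()  # both fields missing -> validation error
except ValidationError as e:
    print(e)
```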
@@ -23,6 +23,7 @@ from forge.agent_protocol.models import (
    TaskRequestBody,
    TaskStepsListResponse,
)
from forge.config.config import Config
from forge.file_storage import FileStorage
from forge.llm.providers import ModelProviderBudget, MultiProvider
from forge.models.action import ActionErrorResult, ActionSuccessResult
@@ -34,7 +35,6 @@ from sentry_sdk import set_user

from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agents.agent_manager import AgentManager
from autogpt.app.config import AppConfig
from autogpt.app.utils import is_port_free

logger = logging.getLogger(__name__)
@@ -45,7 +45,7 @@ class AgentProtocolServer:

    def __init__(
        self,
        app_config: AppConfig,
        app_config: Config,
        database: AgentDB,
        file_storage: FileStorage,
        llm_provider: MultiProvider,
@@ -314,7 +314,7 @@ class AgentProtocolServer:
                ""
                if tool_result is None
                else (
                    orjson.loads(tool_result.model_dump_json())
                    orjson.loads(tool_result.json())
                    if not isinstance(tool_result, ActionErrorResult)
                    else {
                        "error": str(tool_result.error),
@@ -327,7 +327,7 @@ class AgentProtocolServer:
                if last_proposal and tool_result
                else {}
            ),
            **assistant_response.model_dump(),
            **assistant_response.dict(),
        }

        task_cumulative_cost = agent.llm_provider.get_incurred_cost()
@@ -451,9 +451,7 @@ class AgentProtocolServer:
        """
        task_llm_budget = self._task_budgets[task.task_id]

        task_llm_provider_config = self.llm_provider._configuration.model_copy(
            deep=True
        )
        task_llm_provider_config = self.llm_provider._configuration.copy(deep=True)
        _extra_request_headers = task_llm_provider_config.extra_request_headers
        _extra_request_headers["AP-TaskID"] = task.task_id
        if step_id:
@@ -461,7 +459,7 @@ class AgentProtocolServer:
        if task.additional_input and (user_id := task.additional_input.get("user_id")):
            _extra_request_headers["AutoGPT-UserID"] = user_id

        settings = self.llm_provider._settings.model_copy()
        settings = self.llm_provider._settings.copy()
        settings.budget = task_llm_budget
        settings.configuration = task_llm_provider_config
        task_llm_provider = self.llm_provider.__class__(
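The per-task provider setup above copies the provider's settings, attaches a task-scoped budget, and injects tracing headers before building a fresh provider. A simplified, self-contained sketch of that copy-then-override pattern; the class names here are illustrative stand-ins, not the forge API:

```python
from pydantic import BaseModel


class ProviderConfiguration(BaseModel):
    extra_request_headers: dict[str, str] = {}


class ProviderSettings(BaseModel):
    budget: float = 0.0
    configuration: ProviderConfiguration = ProviderConfiguration()


base = ProviderSettings()

# Deep-copy so per-task headers don't leak into the shared configuration.
task_config = base.configuration.copy(deep=True)  # model_copy(deep=True) on pydantic v2
task_config.extra_request_headers["AP-TaskID"] = "task-123"

task_settings = base.copy()  # shallow copy of the settings object itself
task_settings.budget = 10.0
task_settings.configuration = task_config
```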
@@ -28,6 +28,24 @@ def cli(ctx: click.Context):
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows AutoGPT to download files natively.",
)
@click.option(
    # TODO: this is a hidden option for now, necessary for integration testing.
    # We should make this public once we're ready to roll out agent specific workspaces.
    "--workspace-directory",
    "-w",
    type=click.Path(file_okay=False),
    hidden=True,
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
@@ -110,15 +128,13 @@ def cli(ctx: click.Context):
    ),
    type=click.Choice([i.value for i in LogFormatName]),
)
@click.option(
    "--component-config-file",
    help="Path to a json configuration file",
    type=click.Path(exists=True, dir_okay=False, resolve_path=True, path_type=Path),
)
def run(
    continuous: bool,
    continuous_limit: Optional[int],
    speak: bool,
    browser_name: Optional[str],
    allow_downloads: bool,
    workspace_directory: Optional[Path],
    install_plugin_deps: bool,
    skip_news: bool,
    skip_reprompt: bool,
@@ -132,7 +148,6 @@ def run(
    log_level: Optional[str],
    log_format: Optional[str],
    log_file_format: Optional[str],
    component_config_file: Optional[Path],
) -> None:
    """
    Sets up and runs an agent, based on the task specified by the user, or resumes an
@@ -150,7 +165,10 @@ def run(
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        skip_news=skip_news,
        workspace_directory=workspace_directory,
        install_plugin_deps=install_plugin_deps,
        override_ai_name=ai_name,
        override_ai_role=ai_role,
@@ -158,11 +176,20 @@ def run(
        constraints=list(constraint),
        best_practices=list(best_practice),
        override_directives=override_directives,
        component_config_file=component_config_file,
    )


@cli.command()
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows AutoGPT to download files natively.",
)
@click.option(
    "--install-plugin-deps",
    is_flag=True,
@@ -190,6 +217,8 @@ def run(
    type=click.Choice([i.value for i in LogFormatName]),
)
def serve(
    browser_name: Optional[str],
    allow_downloads: bool,
    install_plugin_deps: bool,
    debug: bool,
    log_level: Optional[str],
@@ -208,6 +237,8 @@ def serve(
        log_level=log_level,
        log_format=log_format,
        log_file_format=log_file_format,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        install_plugin_deps=install_plugin_deps,
    )
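The `run` command above accumulates several interdependent flags; `--continuous-limit`, for example, is only valid together with `--continuous` (the check lives in `apply_overrides_to_config`, shown further down). A minimal, self-contained click sketch of that dependency, not the real CLI:

```python
from typing import Optional

import click


@click.command()
@click.option("--continuous", is_flag=True, help="Enable continuous mode")
@click.option("--continuous-limit", type=int, help="Number of continuous-mode cycles")
def run(continuous: bool, continuous_limit: Optional[int]) -> None:
    # Reject a limit without the mode it limits, as the real CLI does.
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")
    click.echo(f"continuous={continuous}, limit={continuous_limit}")


if __name__ == "__main__":
    run()
```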
@@ -1,221 +0,0 @@
"""Configuration class to store the state of bools for different scripts access."""
from __future__ import annotations

import logging
import os
import re
from pathlib import Path
from typing import Optional, Union

import forge
from forge.config.base import BaseConfig
from forge.llm.providers import CHAT_MODELS, ModelName
from forge.llm.providers.openai import OpenAICredentials, OpenAIModelName
from forge.logging.config import LoggingConfig
from forge.models.config import Configurable, UserConfigurable
from pydantic import SecretStr, ValidationInfo, field_validator

logger = logging.getLogger(__name__)

PROJECT_ROOT = Path(forge.__file__).parent.parent
AZURE_CONFIG_FILE = Path("azure.yaml")

GPT_4_MODEL = OpenAIModelName.GPT4
GPT_3_MODEL = OpenAIModelName.GPT3


class AppConfig(BaseConfig):
    name: str = "Auto-GPT configuration"
    description: str = "Default configuration for the Auto-GPT application."

    ########################
    # Application Settings #
    ########################
    project_root: Path = PROJECT_ROOT
    app_data_dir: Path = project_root / "data"
    skip_news: bool = False
    skip_reprompt: bool = False
    authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY")
    exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY")
    noninteractive_mode: bool = False
    logging: LoggingConfig = LoggingConfig()
    component_config_file: Optional[Path] = UserConfigurable(
        default=None, from_env="COMPONENT_CONFIG_FILE"
    )

    ##########################
    # Agent Control Settings #
    ##########################
    # Model configuration
    fast_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT3,
        from_env="FAST_LLM",
    )
    smart_llm: ModelName = UserConfigurable(
        default=OpenAIModelName.GPT4_TURBO,
        from_env="SMART_LLM",
    )
    temperature: float = UserConfigurable(default=0, from_env="TEMPERATURE")
    openai_functions: bool = UserConfigurable(
        default=False, from_env=lambda: os.getenv("OPENAI_FUNCTIONS", "False") == "True"
    )
    embedding_model: str = UserConfigurable(
        default="text-embedding-3-small", from_env="EMBEDDING_MODEL"
    )

    # Run loop configuration
    continuous_mode: bool = False
    continuous_limit: int = 0

    ############
    # Commands #
    ############
    # General
    disabled_commands: list[str] = UserConfigurable(
        default_factory=list,
        from_env=lambda: _safe_split(os.getenv("DISABLED_COMMANDS")),
    )

    # File ops
    restrict_to_workspace: bool = UserConfigurable(
        default=True,
        from_env=lambda: os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True",
    )

    ###############
    # Credentials #
    ###############
    # OpenAI
    openai_credentials: Optional[OpenAICredentials] = None
    azure_config_file: Optional[Path] = UserConfigurable(
        default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
    )

    @field_validator("openai_functions")
    def validate_openai_functions(cls, value: bool, info: ValidationInfo):
        if value:
            smart_llm = info.data["smart_llm"]
            assert CHAT_MODELS[smart_llm].has_function_call_api, (
                f"Model {smart_llm} does not support tool calling. "
                "Please disable OPENAI_FUNCTIONS or choose a suitable model."
            )
        return value


class ConfigBuilder(Configurable[AppConfig]):
    default_settings = AppConfig()

    @classmethod
    def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> AppConfig:
        """Initialize the Config class"""

        config = cls.build_agent_configuration()
        config.project_root = project_root

        # Make relative paths absolute
        for k in {
            "azure_config_file",  # TODO: move from project root
        }:
            setattr(config, k, project_root / getattr(config, k))

        if (
            config.openai_credentials
            and config.openai_credentials.api_type == SecretStr("azure")
            and (config_file := config.azure_config_file)
        ):
            config.openai_credentials.load_azure_config(config_file)

        return config


async def assert_config_has_required_llm_api_keys(config: AppConfig) -> None:
    """
    Check if API keys (if required) are set for the configured SMART_LLM and FAST_LLM.
    """
    from forge.llm.providers.anthropic import AnthropicModelName
    from forge.llm.providers.groq import GroqModelName
    from pydantic import ValidationError

    if set((config.smart_llm, config.fast_llm)).intersection(AnthropicModelName):
        from forge.llm.providers.anthropic import AnthropicCredentials

        try:
            credentials = AnthropicCredentials.from_env()
        except ValidationError as e:
            if "api_key" in str(e):
                logger.error(
                    "Set your Anthropic API key in .env or as an environment variable"
                )
                logger.info(
                    "For further instructions: "
                    "https://docs.agpt.co/autogpt/setup/#anthropic"
                )

            raise ValueError("Anthropic is unavailable: can't load credentials") from e

        key_pattern = r"^sk-ant-api03-[\w\-]{95}"

        # If key is set, but it looks invalid
        if not re.search(key_pattern, credentials.api_key.get_secret_value()):
            logger.warning(
                "Possibly invalid Anthropic API key! "
                f"Configured Anthropic API key does not match pattern '{key_pattern}'. "
                "If this is a valid key, please report this warning to the maintainers."
            )

    if set((config.smart_llm, config.fast_llm)).intersection(GroqModelName):
        from forge.llm.providers.groq import GroqProvider
        from groq import AuthenticationError

        try:
            groq = GroqProvider()
            await groq.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error("Set your Groq API key in .env or as an environment variable")
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The Groq API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#groq"
            )
            raise ValueError("Groq is unavailable: invalid API key") from e

    if set((config.smart_llm, config.fast_llm)).intersection(OpenAIModelName):
        from forge.llm.providers.openai import OpenAIProvider
        from openai import AuthenticationError

        try:
            openai = OpenAIProvider()
            await openai.get_available_models()
        except ValidationError as e:
            if "api_key" not in str(e):
                raise

            logger.error(
                "Set your OpenAI API key in .env or as an environment variable"
            )
            logger.info(
                "For further instructions: https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: can't load credentials")
        except AuthenticationError as e:
            logger.error("The OpenAI API key is invalid!")
            logger.info(
                "For instructions to get and set a new API key: "
                "https://docs.agpt.co/autogpt/setup/#openai"
            )
            raise ValueError("OpenAI is unavailable: invalid API key") from e


def _safe_split(s: Union[str, None], sep: str = ",") -> list[str]:
    """Split a string by a separator. Return an empty list if the string is None."""
    if s is None:
        return []
    return s.split(sep)
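`_safe_split` exists so that an unset `DISABLED_COMMANDS` environment variable yields an empty list instead of a crash. A quick usage example, relying on the `_safe_split` defined above:

```python
import os

assert _safe_split("read_file,write_file") == ["read_file", "write_file"]
assert _safe_split(None) == []  # e.g. when DISABLED_COMMANDS is unset
assert _safe_split(os.getenv("THIS_VAR_IS_UNSET")) == []
```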
@@ -5,18 +5,20 @@ import logging
from typing import Literal, Optional

import click
from colorama import Back, Style
from forge.config.config import GPT_3_MODEL, Config
from forge.llm.providers import ModelName, MultiProvider

from autogpt.app.config import GPT_3_MODEL, AppConfig

logger = logging.getLogger(__name__)


async def apply_overrides_to_config(
    config: AppConfig,
    config: Config,
    continuous: bool = False,
    continuous_limit: Optional[int] = None,
    skip_reprompt: bool = False,
    browser_name: Optional[str] = None,
    allow_downloads: bool = False,
    skip_news: bool = False,
) -> None:
    """Updates the config object with the given arguments.
@@ -31,6 +33,8 @@ async def apply_overrides_to_config(
        log_level (int): The global log level for the application.
        log_format (str): The format for the log(s).
        log_file_format (str): Override the format for the log file.
        browser_name (str): The name of the browser to use for scraping the web.
        allow_downloads (bool): Whether to allow AutoGPT to download files natively.
        skip_news (bool): Whether to suppress the output of latest news on startup.
    """
    config.continuous_mode = False
@@ -51,33 +55,44 @@ async def apply_overrides_to_config(
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    # Check availability of configured LLMs; fallback to other LLM if unavailable
    config.fast_llm, config.smart_llm = await check_models(
        (config.fast_llm, "fast_llm"), (config.smart_llm, "smart_llm")
    )
    config.fast_llm = await check_model(config.fast_llm, "fast_llm")
    config.smart_llm = await check_model(config.smart_llm, "smart_llm")

    if skip_reprompt:
        config.skip_reprompt = True

    if browser_name:
        config.selenium_web_browser = browser_name

    if allow_downloads:
        logger.warning(
            msg=f"{Back.LIGHTYELLOW_EX}"
            "AutoGPT will now be able to download and save files to your machine."
            f"{Back.RESET}"
            " It is recommended that you monitor any files it downloads carefully.",
        )
        logger.warning(
            msg=f"{Back.RED + Style.BRIGHT}"
            "NEVER OPEN FILES YOU AREN'T SURE OF!"
            f"{Style.RESET_ALL}",
        )
        config.allow_downloads = True

    if skip_news:
        config.skip_news = True


async def check_models(
    *models: tuple[ModelName, Literal["smart_llm", "fast_llm"]]
) -> tuple[ModelName, ...]:
async def check_model(
    model_name: ModelName, model_type: Literal["smart_llm", "fast_llm"]
) -> ModelName:
    """Check if model is available for use. If not, return gpt-3.5-turbo."""
    multi_provider = MultiProvider()
    available_models = await multi_provider.get_available_chat_models()
    models = await multi_provider.get_available_chat_models()

    checked_models: list[ModelName] = []
    for model, model_type in models:
        if any(model == m.name for m in available_models):
            checked_models.append(model)
        else:
            logger.warning(
                f"You don't have access to {model}. "
                f"Setting {model_type} to {GPT_3_MODEL}."
            )
            checked_models.append(GPT_3_MODEL)
    if any(model_name == m.name for m in models):
        return model_name

    return tuple(checked_models)
    logger.warning(
        f"You don't have access to {model_name}. Setting {model_type} to {GPT_3_MODEL}."
    )
    return GPT_3_MODEL
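The refactor above replaces the batch `check_models` helper with a single-model `check_model` that is called once per setting. A simplified, provider-agnostic sketch of the single-model version; the `fetch_available_models` helper here is a hypothetical stand-in for `MultiProvider().get_available_chat_models()`:

```python
import asyncio


async def fetch_available_models() -> list[str]:
    # Hypothetical stand-in for querying the configured providers.
    return ["gpt-4-turbo", "gpt-3.5-turbo"]


async def check_model(
    model_name: str, model_type: str, fallback: str = "gpt-3.5-turbo"
) -> str:
    """Return model_name if it is available, otherwise fall back."""
    available = await fetch_available_models()
    if model_name in available:
        return model_name
    print(f"You don't have access to {model_name}. Setting {model_type} to {fallback}.")
    return fallback


print(asyncio.run(check_model("gpt-4-turbo", "smart_llm")))
```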
@@ -21,6 +21,7 @@ from forge.components.code_executor.code_executor import (
)
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config, ConfigBuilder, assert_config_has_openai_api_key
from forge.file_storage import FileStorageBackendName, get_storage
from forge.llm.providers import MultiProvider
from forge.logging.config import configure_logging
@@ -33,11 +34,6 @@ from forge.utils.exceptions import AgentTerminated, InvalidAgentResponseError
from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent
from autogpt.agents.agent_manager import AgentManager
from autogpt.agents.prompt_strategies.one_shot import AssistantThoughts
from autogpt.app.config import (
    AppConfig,
    ConfigBuilder,
    assert_config_has_required_llm_api_keys,
)

if TYPE_CHECKING:
    from autogpt.agents.agent import Agent
@@ -66,7 +62,10 @@ async def run_auto_gpt(
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    browser_name: Optional[str] = None,
    allow_downloads: bool = False,
    skip_news: bool = False,
    workspace_directory: Optional[Path] = None,
    install_plugin_deps: bool = False,
    override_ai_name: Optional[str] = None,
    override_ai_role: Optional[str] = None,
@@ -74,7 +73,6 @@
    constraints: Optional[list[str]] = None,
    best_practices: Optional[list[str]] = None,
    override_directives: bool = False,
    component_config_file: Optional[Path] = None,
):
    # Set up configuration
    config = ConfigBuilder.build_config_from_env()
@@ -100,13 +98,16 @@ async def run_auto_gpt(
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)
    # TODO: fill in llm values here
    assert_config_has_openai_api_key(config)

    await apply_overrides_to_config(
        config=config,
        continuous=continuous,
        continuous_limit=continuous_limit,
        skip_reprompt=skip_reprompt,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
        skip_news=skip_news,
    )

@@ -131,12 +132,15 @@ async def run_auto_gpt(
        print_python_version_info(logger)
    print_attribute("Smart LLM", config.smart_llm)
    print_attribute("Fast LLM", config.fast_llm)
    print_attribute("Browser", config.selenium_web_browser)
    if config.continuous_mode:
        print_attribute("Continuous Mode", "ENABLED", title_color=Fore.YELLOW)
        if continuous_limit:
            print_attribute("Continuous Limit", config.continuous_limit)
    if config.tts_config.speak_mode:
        print_attribute("Speak Mode", "ENABLED")
    if config.allow_downloads:
        print_attribute("Native Downloading", "ENABLED")
    if we_are_running_in_a_docker_container() or is_docker_available():
        print_attribute("Code Execution", "ENABLED")
    else:
@@ -323,14 +327,6 @@ async def run_auto_gpt(
    #     )
    # ).add_done_callback(update_agent_directives)

    # Load component configuration from file
    if _config_file := component_config_file or config.component_config_file:
        try:
            logger.info(f"Loading component configuration from {_config_file}")
            agent.load_component_configs(_config_file.read_text())
        except Exception as e:
            logger.error(f"Could not load component configuration: {e}")

    #################
    # Run the Agent #
    #################
@@ -357,6 +353,8 @@ async def run_auto_gpt_server(
    log_level: Optional[str] = None,
    log_format: Optional[str] = None,
    log_file_format: Optional[str] = None,
    browser_name: Optional[str] = None,
    allow_downloads: bool = False,
    install_plugin_deps: bool = False,
):
    from .agent_protocol_server import AgentProtocolServer
@@ -382,10 +380,13 @@ async def run_auto_gpt_server(
        tts_config=config.tts_config,
    )

    await assert_config_has_required_llm_api_keys(config)
    # TODO: fill in llm values here
    assert_config_has_openai_api_key(config)

    await apply_overrides_to_config(
        config=config,
        browser_name=browser_name,
        allow_downloads=allow_downloads,
    )

    llm_provider = _configure_llm_provider(config)
@@ -410,7 +411,7 @@ async def run_auto_gpt_server(
    )


def _configure_llm_provider(config: AppConfig) -> MultiProvider:
def _configure_llm_provider(config: Config) -> MultiProvider:
    multi_provider = MultiProvider()
    for model in [config.smart_llm, config.fast_llm]:
        # Ensure model providers for configured LLMs are available
@@ -450,15 +451,15 @@ async def run_interaction_loop(
        None
    """
    # These contain both application config and agent config, so grab them here.
    app_config = agent.app_config
    legacy_config = agent.legacy_config
    ai_profile = agent.state.ai_profile
    logger = logging.getLogger(__name__)

    cycle_budget = cycles_remaining = _get_cycle_budget(
        app_config.continuous_mode, app_config.continuous_limit
        legacy_config.continuous_mode, legacy_config.continuous_limit
    )
    spinner = Spinner(
        "Thinking...", plain_output=app_config.logging.plain_console_output
        "Thinking...", plain_output=legacy_config.logging.plain_console_output
    )
    stop_reason = None

@@ -507,25 +508,22 @@ async def run_interaction_loop(
        ########
        handle_stop_signal()
        # Have the agent determine the next action to take.
        if not (_ep := agent.event_history.current_episode) or _ep.result:
            with spinner:
                try:
                    action_proposal = await agent.propose_action()
                except InvalidAgentResponseError as e:
                    logger.warning(f"The agent's thoughts could not be parsed: {e}")
                    consecutive_failures += 1
                    if consecutive_failures >= 3:
                        logger.error(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row. Terminating..."
                        )
                        raise AgentTerminated(
                            "The agent failed to output valid thoughts"
                            f" {consecutive_failures} times in a row."
                        )
                    continue
        else:
            action_proposal = _ep.action
        with spinner:
            try:
                action_proposal = await agent.propose_action()
            except InvalidAgentResponseError as e:
                logger.warning(f"The agent's thoughts could not be parsed: {e}")
                consecutive_failures += 1
                if consecutive_failures >= 3:
                    logger.error(
                        "The agent failed to output valid thoughts"
                        f" {consecutive_failures} times in a row. Terminating..."
                    )
                    raise AgentTerminated(
                        "The agent failed to output valid thoughts"
                        f" {consecutive_failures} times in a row."
                    )
                continue

        consecutive_failures = 0

@@ -536,7 +534,7 @@ async def run_interaction_loop(
        update_user(
            ai_profile,
            action_proposal,
            speak_mode=app_config.tts_config.speak_mode,
            speak_mode=legacy_config.tts_config.speak_mode,
        )

        ##################
@@ -545,7 +543,7 @@ async def run_interaction_loop(
        handle_stop_signal()
        if cycles_remaining == 1:  # Last cycle
            feedback_type, feedback, new_cycles_remaining = await get_user_feedback(
                app_config,
                legacy_config,
                ai_profile,
            )

@@ -656,7 +654,7 @@ def update_user(


async def get_user_feedback(
    config: AppConfig,
    config: Config,
    ai_profile: AIProfile,
) -> tuple[UserFeedback, str, int | None]:
    """Gets the user's feedback on the assistant's reply.

@@ -4,10 +4,9 @@ from typing import Optional

from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.logging.utils import print_attribute

from autogpt.app.config import AppConfig

from .input import clean_input

logger = logging.getLogger(__name__)
@@ -47,7 +46,7 @@ def apply_overrides_to_ai_settings(
async def interactively_revise_ai_settings(
    ai_profile: AIProfile,
    directives: AIDirectives,
    app_config: AppConfig,
    app_config: Config,
):
    """Interactively revise the AI settings.
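The interaction-loop hunk above keeps the three-strikes handling for unparseable agent output while dropping the episode-resumption branch. The core retry pattern it preserves, as a self-contained sketch:

```python
class InvalidResponse(Exception):
    """Stand-in for InvalidAgentResponseError."""


def run_loop(propose_action, max_failures: int = 3) -> None:
    consecutive_failures = 0
    while True:
        try:
            action = propose_action()
        except InvalidResponse as e:
            print(f"The agent's thoughts could not be parsed: {e}")
            consecutive_failures += 1
            if consecutive_failures >= max_failures:
                raise RuntimeError(
                    "The agent failed to output valid thoughts "
                    f"{consecutive_failures} times in a row."
                )
            continue
        # Any successful proposal resets the failure counter.
        consecutive_failures = 0
        print(f"executing: {action}")
        break  # single-step demo; the real loop runs until a stop condition
```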
autogpt/poetry.lock (generated, 914 lines changed): file diff suppressed because it is too large.
@@ -20,24 +20,44 @@ serve = "autogpt.app.cli:serve"

[tool.poetry.dependencies]
python = "^3.10"
anthropic = "^0.25.1"
autogpt-forge = { path = "../forge", develop = true }
# autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "forge"}
beautifulsoup4 = "^4.12.2"
charset-normalizer = "^3.1.0"
click = "*"
colorama = "^0.4.6"
distro = "^1.8.0"
en-core-web-sm = { url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl" }
fastapi = "^0.109.1"
gitpython = "^3.1.32"
ftfy = "^6.1.1"
google-api-python-client = "*"
hypercorn = "^0.14.4"
inflection = "*"
jsonschema = "*"
numpy = "*"
openai = "^1.7.2"
orjson = "^3.8.10"
pydantic = "^2.7.2"
Pillow = "*"
pydantic = "*"
python-docx = "*"
python-dotenv = "^1.0.0"
pyyaml = "^6.0"
readability-lxml = "^0.8.1"
requests = "*"
sentry-sdk = "^1.40.4"
spacy = "^3.7.4"
tenacity = "^8.2.2"

# OpenAI and Generic plugins import
openapi-python-client = "^0.14.0"

# Benchmarking
agbenchmark = { path = "../benchmark", optional = true }
# agbenchmark = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "benchmark", optional = true}
psycopg2-binary = "^2.9.9"
multidict = "6.0.5"
cx-freeze = "7.0.0"

[tool.poetry.extras]
benchmark = ["agbenchmark"]
@@ -45,28 +65,27 @@ benchmark = ["agbenchmark"]
[tool.poetry.group.dev.dependencies]
black = "^23.12.1"
flake8 = "^7.0.0"
gitpython = "^3.1.32"
isort = "^5.13.1"
pre-commit = "*"
pyright = "^1.1.364"

# Type stubs
types-beautifulsoup4 = "*"
types-colorama = "*"
types-Markdown = "*"
types-Pillow = "*"

# Testing
asynctest = "*"
coverage = "*"
pytest = "*"
pytest-asyncio = "*"
pytest-benchmark = "*"
pytest-cov = "*"
pytest-integration = "*"
pytest-mock = "*"
pytest-recording = "*"
pytest-xdist = "*"

[tool.poetry.group.build]
optional = true

[tool.poetry.group.build.dependencies]
cx-freeze = { git = "https://github.com/ntindle/cx_Freeze.git", rev = "main" }
# HACK: switch to cx-freeze release package after #2442 and #2472 are merged: https://github.com/marcelotduarte/cx_Freeze/pulls?q=is:pr+%232442+OR+%232472+
# cx-freeze = { version = "^7.2.0", optional = true }
vcrpy = { git = "https://github.com/Significant-Gravitas/vcrpy.git", rev = "master" }


[build-system]
@@ -89,3 +108,7 @@ skip_glob = ["data"]
pythonVersion = "3.10"
exclude = ["data/**", "**/node_modules", "**/__pycache__", "**/.*"]
ignore = ["../forge/**"]


[tool.pytest.ini_options]
markers = ["slow", "requires_openai_api_key", "requires_huggingface_api_key"]
@@ -19,7 +19,7 @@ from autogpt.app.utils import coroutine
    help="Path to the git repository",
)
@coroutine
async def generate_release_notes(repo_path: Optional[str | Path] = None):
async def generate_release_notes(repo_path: Optional[Path] = None):
    logger = logging.getLogger(generate_release_notes.name)  # pyright: ignore

    repo = Repo(repo_path, search_parent_directories=True)
autogpt/scripts/llamafile/.gitignore (vendored, 3 lines changed):
@@ -1,3 +0,0 @@
*.llamafile
*.llamafile.exe
llamafile.exe
@@ -1,165 +0,0 @@
#!/usr/bin/env python3
"""
Use llamafile to serve a (quantized) mistral-7b-instruct-v0.2 model

Usage:
    cd <repo-root>/autogpt
    ./scripts/llamafile/serve.py
"""

import os
import platform
import subprocess
from pathlib import Path
from typing import Optional

import click

LLAMAFILE = Path("mistral-7b-instruct-v0.2.Q5_K_M.llamafile")
LLAMAFILE_URL = f"https://huggingface.co/jartine/Mistral-7B-Instruct-v0.2-llamafile/resolve/main/{LLAMAFILE.name}"  # noqa
LLAMAFILE_EXE = Path("llamafile.exe")
LLAMAFILE_EXE_URL = "https://github.com/Mozilla-Ocho/llamafile/releases/download/0.8.6/llamafile-0.8.6"  # noqa


@click.command()
@click.option(
    "--llamafile",
    type=click.Path(dir_okay=False, path_type=Path),
    help=f"Name of the llamafile to serve. Default: {LLAMAFILE.name}",
)
@click.option("--llamafile_url", help="Download URL for the llamafile you want to use")
@click.option(
    "--host", help="Specify the address for the llamafile server to listen on"
)
@click.option(
    "--port", type=int, help="Specify the port for the llamafile server to listen on"
)
@click.option(
    "--force-gpu",
    is_flag=True,
    hidden=platform.system() != "Darwin",
    help="Run the model using only the GPU (AMD or Nvidia). "
    "Otherwise, both CPU and GPU may be (partially) used.",
)
def main(
    llamafile: Optional[Path] = None,
    llamafile_url: Optional[str] = None,
    host: Optional[str] = None,
    port: Optional[int] = None,
    force_gpu: bool = False,
):
    print(f"type(llamafile) = {type(llamafile)}")
    if not llamafile:
        if not llamafile_url:
            llamafile = LLAMAFILE
        else:
            llamafile = Path(llamafile_url.rsplit("/", 1)[1])
            if llamafile.suffix != ".llamafile":
                click.echo(
                    click.style(
                        "The given URL does not end with '.llamafile' -> "
                        "can't get filename from URL. "
                        "Specify the filename using --llamafile.",
                        fg="red",
                    ),
                    err=True,
                )
                return

    if llamafile == LLAMAFILE and not llamafile_url:
        llamafile_url = LLAMAFILE_URL
    elif llamafile_url != LLAMAFILE_URL:
        if not click.prompt(
            click.style(
                "You seem to have specified a different URL for the default model "
                f"({llamafile.name}). Are you sure this is correct? "
                "If you want to use a different model, also specify --llamafile.",
                fg="yellow",
            ),
            type=bool,
        ):
            return

    # Go to autogpt/scripts/llamafile/
    os.chdir(Path(__file__).resolve().parent)

    on_windows = platform.system() == "Windows"

    if not llamafile.is_file():
        if not llamafile_url:
            click.echo(
                click.style(
                    "Please use --llamafile_url to specify a download URL for "
                    f"'{llamafile.name}'. "
                    "This will only be necessary once, so we can download the model.",
                    fg="red",
                ),
                err=True,
            )
            return

        download_file(llamafile_url, llamafile)

        if not on_windows:
            llamafile.chmod(0o755)
            subprocess.run([llamafile, "--version"], check=True)

    if not on_windows:
        base_command = [f"./{llamafile}"]
    else:
        # Windows does not allow executables over 4GB, so we have to download a
        # model-less llamafile.exe and run that instead.
        if not LLAMAFILE_EXE.is_file():
            download_file(LLAMAFILE_EXE_URL, LLAMAFILE_EXE)
            LLAMAFILE_EXE.chmod(0o755)
            subprocess.run([f".\\{LLAMAFILE_EXE}", "--version"], check=True)

        base_command = [f".\\{LLAMAFILE_EXE}", "-m", llamafile]

    if host:
        base_command.extend(["--host", host])
    if port:
        base_command.extend(["--port", str(port)])
    if force_gpu:
        base_command.extend(["-ngl", "9999"])

    subprocess.run(
        [
            *base_command,
            "--server",
            "--nobrowser",
            "--ctx-size",
            "0",
            "--n-predict",
            "1024",
        ],
        check=True,
    )

    # note: --ctx-size 0 means the prompt context size will be set directly from the
    # underlying model configuration. This may cause slow response times or consume
    # a lot of memory.


def download_file(url: str, to_file: Path) -> None:
    print(f"Downloading {to_file.name}...")
    import urllib.request

    urllib.request.urlretrieve(url, to_file, reporthook=report_download_progress)
    print()


def report_download_progress(chunk_number: int, chunk_size: int, total_size: int):
    if total_size != -1:
        downloaded_size = chunk_number * chunk_size
        percent = min(1, downloaded_size / total_size)
        bar = "#" * int(40 * percent)
        print(
            f"\rDownloading: [{bar:<40}] {percent:.0%}"
            f" - {downloaded_size/1e6:.1f}/{total_size/1e6:.1f} MB",
            end="",
        )


if __name__ == "__main__":
    main()
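The deleted script's `download_file` helper is plain stdlib: `urllib.request.urlretrieve` invokes its `reporthook` callback once per chunk, which drives the progress bar above. A standalone usage example of the same hook mechanism; the URL here is just a placeholder:

```python
import urllib.request
from pathlib import Path


def report_progress(chunk_number: int, chunk_size: int, total_size: int) -> None:
    # total_size is -1 when the server does not send a Content-Length header.
    if total_size > 0:
        percent = min(1, chunk_number * chunk_size / total_size)
        print(f"\rDownloading: {percent:.0%}", end="")


urllib.request.urlretrieve(
    "https://example.com/", Path("example.html"), reporthook=report_progress
)
print()
```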
@@ -1,9 +1,7 @@
import platform
from pathlib import Path
from pkgutil import iter_modules
from typing import Union
from shutil import which

from cx_Freeze import Executable, setup  # type: ignore
from cx_Freeze import Executable, setup

packages = [
    m.name
@@ -13,47 +11,11 @@ packages = [
    and ("poetry" in m.module_finder.path)  # type: ignore
]

# set the icon based on the platform
icon = "../../assets/gpt_dark_RGB.ico"
if platform.system() == "Darwin":
    icon = "../../assets/gpt_dark_RGB.icns"
elif platform.system() == "Linux":
    icon = "../../assets/gpt_dark_RGB.png"


def txt_to_rtf(input_file: Union[str, Path], output_file: Union[str, Path]) -> None:
    """
    Convert a text file to RTF format.

    Args:
        input_file (Union[str, Path]): Path to the input text file.
        output_file (Union[str, Path]): Path to the output RTF file.

    Returns:
        None
    """
    input_path = Path(input_file)
    output_path = Path(output_file)

    with input_path.open("r", encoding="utf-8") as txt_file:
        content = txt_file.read()

    # RTF header
    rtf = r"{\rtf1\ansi\deff0 {\fonttbl {\f0 Times New Roman;}}\f0\fs24 "

    # Replace newlines with RTF newline
    rtf += content.replace("\n", "\\par ")

    # Close RTF document
    rtf += "}"

    with output_path.open("w", encoding="utf-8") as rtf_file:
        rtf_file.write(rtf)


# Convert LICENSE to LICENSE.rtf
license_file = "LICENSE.rtf"
txt_to_rtf("../LICENSE", license_file)
icon = (
    "../../assets/gpt_dark_RGB.icns"
    if which("sips")
    else "../../assets/gpt_dark_RGB.ico"
)


setup(
@@ -93,7 +55,6 @@ setup(
            "target_name": "AutoGPT",
            "add_to_path": True,
            "install_icon": "../assets/gpt_dark_RGB.ico",
            "license_file": license_file,
        },
    },
)
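The removed `txt_to_rtf` helper above is self-contained, so a quick usage example is straightforward; the file names are illustrative:

```python
from pathlib import Path

Path("notes.txt").write_text("line one\nline two\n", encoding="utf-8")
txt_to_rtf("notes.txt", "notes.rtf")  # defined above
print(Path("notes.rtf").read_text(encoding="utf-8")[:60])
```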
@@ -6,6 +6,7 @@ from pathlib import Path

import pytest
from forge.config.ai_profile import AIProfile
from forge.config.config import Config, ConfigBuilder
from forge.file_storage.local import (
    FileStorage,
    FileStorageConfiguration,
@@ -15,11 +16,11 @@ from forge.llm.providers import MultiProvider
from forge.logging.config import configure_logging

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.config import AppConfig, ConfigBuilder
from autogpt.app.main import _configure_llm_provider

pytest_plugins = [
    "tests.integration.agent_factory",
    "tests.vcr",
]


@@ -61,7 +62,7 @@ def config(


@pytest.fixture(scope="session")
def setup_logger():
def setup_logger(config: Config):
    configure_logging(
        debug=True,
        log_dir=Path(__file__).parent / "logs",
@@ -70,14 +71,12 @@ def setup_logger():


@pytest.fixture
def llm_provider(config: AppConfig) -> MultiProvider:
def llm_provider(config: Config) -> MultiProvider:
    return _configure_llm_provider(config)


@pytest.fixture
def agent(
    config: AppConfig, llm_provider: MultiProvider, storage: FileStorage
) -> Agent:
def agent(config: Config, llm_provider: MultiProvider, storage: FileStorage) -> Agent:
    ai_profile = AIProfile(
        ai_name="Base",
        ai_role="A base AI",
@@ -95,13 +94,13 @@ def agent(
            allow_fs_access=not config.restrict_to_workspace,
            use_functions_api=config.openai_functions,
        ),
        history=Agent.default_settings.history.model_copy(deep=True),
        history=Agent.default_settings.history.copy(deep=True),
    )

    agent = Agent(
        settings=agent_settings,
        llm_provider=llm_provider,
        file_storage=storage,
        app_config=config,
        legacy_config=config,
    )
    return agent
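The conftest changes above thread a single `config` fixture through `llm_provider` and on into `agent`. A minimal sketch of this fixture-chaining pattern, with illustrative names rather than the real fixtures:

```python
import pytest


@pytest.fixture
def config() -> dict:
    return {"smart_llm": "gpt-4-turbo"}


@pytest.fixture
def llm_provider(config: dict) -> str:
    # Depends on config; pytest resolves the chain automatically.
    return f"provider for {config['smart_llm']}"


@pytest.fixture
def agent(config: dict, llm_provider: str) -> str:
    return f"agent using {llm_provider}"


def test_agent(agent: str):
    assert "gpt-4-turbo" in agent
```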
@@ -2,15 +2,15 @@ from pathlib import Path

import pytest
from forge.config.ai_profile import AIProfile
from forge.config.config import Config
from forge.file_storage import FileStorageBackendName, get_storage
from forge.llm.providers import MultiProvider

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.app.config import AppConfig


@pytest.fixture
def dummy_agent(config: AppConfig, llm_provider: MultiProvider):
def dummy_agent(config: Config, llm_provider: MultiProvider):
    ai_profile = AIProfile(
        ai_name="Dummy Agent",
        ai_role="Dummy Role",
@@ -28,7 +28,7 @@ def dummy_agent(config: AppConfig, llm_provider: MultiProvider):
            smart_llm=config.smart_llm,
            use_functions_api=config.openai_functions,
        ),
        history=Agent.default_settings.history.model_copy(deep=True),
        history=Agent.default_settings.history.copy(deep=True),
    )

    local = config.file_storage_backend == FileStorageBackendName.LOCAL
@@ -44,7 +44,7 @@ def dummy_agent(config: AppConfig, llm_provider: MultiProvider):
        settings=agent_settings,
        llm_provider=llm_provider,
        file_storage=file_storage,
        app_config=config,
        legacy_config=config,
    )

    return agent
@@ -4,20 +4,20 @@ import tempfile
from pathlib import Path

import pytest

from forge.file_storage.base import FileStorage
from forge.utils.exceptions import InvalidArgumentError, OperationNotAllowedError

from .code_executor import (
from forge.components.code_executor.code_executor import (
    ALLOWLIST_CONTROL,
    CodeExecutorComponent,
    is_docker_available,
    we_are_running_in_a_docker_container,
)
from forge.utils.exceptions import InvalidArgumentError, OperationNotAllowedError

from autogpt.agents.agent import Agent


@pytest.fixture
def code_executor_component(storage: FileStorage):
    return CodeExecutorComponent(storage)
def code_executor_component(agent: Agent):
    return agent.code_executor


@pytest.fixture
@@ -26,8 +26,10 @@ def random_code(random_string) -> str:


@pytest.fixture
def python_test_file(storage: FileStorage, random_code: str):
    temp_file = tempfile.NamedTemporaryFile(dir=storage.root, suffix=".py")
def python_test_file(agent: Agent, random_code: str):
    temp_file = tempfile.NamedTemporaryFile(
        dir=agent.file_manager.workspace.root, suffix=".py"
    )
    temp_file.write(str.encode(random_code))
    temp_file.flush()

@@ -36,8 +38,10 @@ def python_test_file(storage: FileStorage, random_code: str):


@pytest.fixture
def python_test_args_file(storage: FileStorage):
    temp_file = tempfile.NamedTemporaryFile(dir=storage.root, suffix=".py")
def python_test_args_file(agent: Agent):
    temp_file = tempfile.NamedTemporaryFile(
        dir=agent.file_manager.workspace.root, suffix=".py"
    )
    temp_file.write(str.encode("import sys\nprint(sys.argv[1], sys.argv[2])"))
    temp_file.flush()

@@ -54,6 +58,7 @@ def test_execute_python_file(
    code_executor_component: CodeExecutorComponent,
    python_test_file: Path,
    random_string: str,
    agent: Agent,
):
    if not (is_docker_available() or we_are_running_in_a_docker_container()):
        pytest.skip("Docker is not available")
@@ -66,6 +71,7 @@ def test_execute_python_file_args(
    code_executor_component: CodeExecutorComponent,
    python_test_args_file: Path,
    random_string: str,
    agent: Agent,
):
    if not (is_docker_available() or we_are_running_in_a_docker_container()):
        pytest.skip("Docker is not available")
@@ -83,6 +89,7 @@ async def test_execute_python_code(
    code_executor_component: CodeExecutorComponent,
    random_code: str,
    random_string: str,
    agent: Agent,
):
    if not (is_docker_available() or we_are_running_in_a_docker_container()):
        pytest.skip("Docker is not available")
@@ -91,12 +98,16 @@ async def test_execute_python_code(
    assert result.replace("\r", "") == f"Hello {random_string}!\n"


def test_execute_python_file_invalid(code_executor_component: CodeExecutorComponent):
def test_execute_python_file_invalid(
    code_executor_component: CodeExecutorComponent, agent: Agent
):
    with pytest.raises(InvalidArgumentError):
        code_executor_component.execute_python_file(Path("not_python.txt"))


def test_execute_python_file_not_found(code_executor_component: CodeExecutorComponent):
def test_execute_python_file_not_found(
    code_executor_component: CodeExecutorComponent, agent: Agent
):
    with pytest.raises(
        FileNotFoundError,
        match=r"python: can't open file '([a-zA-Z]:)?[/\\\-\w]*notexist.py': "
@@ -106,56 +117,52 @@ def test_execute_python_file_not_found(code_executor_component: CodeExecutorComp


def test_execute_shell(
    code_executor_component: CodeExecutorComponent, random_string: str
    code_executor_component: CodeExecutorComponent, random_string: str, agent: Agent
):
    code_executor_component.config.shell_command_control = "allowlist"
    code_executor_component.config.shell_allowlist = ["echo"]
    result = code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")
    assert f"Hello {random_string}!" in result


def test_execute_shell_local_commands_not_allowed(
    code_executor_component: CodeExecutorComponent, random_string: str
    code_executor_component: CodeExecutorComponent, random_string: str, agent: Agent
):
    with pytest.raises(OperationNotAllowedError, match="not allowed"):
        code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")
    result = code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")
    assert f"Hello {random_string}!" in result


def test_execute_shell_denylist_should_deny(
    code_executor_component: CodeExecutorComponent, random_string: str
    code_executor_component: CodeExecutorComponent, agent: Agent, random_string: str
):
    code_executor_component.config.shell_command_control = "denylist"
    code_executor_component.config.shell_denylist = ["echo"]
    agent.legacy_config.shell_denylist = ["echo"]

    with pytest.raises(OperationNotAllowedError, match="not allowed"):
        code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")


def test_execute_shell_denylist_should_allow(
    code_executor_component: CodeExecutorComponent, random_string: str
    code_executor_component: CodeExecutorComponent, agent: Agent, random_string: str
):
    code_executor_component.config.shell_command_control = "denylist"
    code_executor_component.config.shell_denylist = ["cat"]
    agent.legacy_config.shell_denylist = ["cat"]

    result = code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")
    assert "Hello" in result and random_string in result


def test_execute_shell_allowlist_should_deny(
    code_executor_component: CodeExecutorComponent, random_string: str
    code_executor_component: CodeExecutorComponent, agent: Agent, random_string: str
):
    code_executor_component.config.shell_command_control = "allowlist"
    code_executor_component.config.shell_allowlist = ["cat"]
    agent.legacy_config.shell_command_control = ALLOWLIST_CONTROL
    agent.legacy_config.shell_allowlist = ["cat"]

    with pytest.raises(OperationNotAllowedError, match="not allowed"):
        code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")


def test_execute_shell_allowlist_should_allow(
    code_executor_component: CodeExecutorComponent, random_string: str
    code_executor_component: CodeExecutorComponent, agent: Agent, random_string: str
):
    code_executor_component.config.shell_command_control = "allowlist"
    code_executor_component.config.shell_allowlist = ["echo"]
    agent.legacy_config.shell_command_control = ALLOWLIST_CONTROL
    agent.legacy_config.shell_allowlist = ["echo"]

    result = code_executor_component.execute_shell(f"echo 'Hello {random_string}!'")
    assert "Hello" in result and random_string in result
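The shell tests above exercise two mutually exclusive control modes: an allowlist (only listed commands may run) and a denylist (listed commands are refused). A self-contained sketch of the decision logic they verify; the function name is illustrative:

```python
def is_shell_command_allowed(
    command: str, control: str, allowlist: list[str], denylist: list[str]
) -> bool:
    """control is "allowlist" or "denylist"; only the executable name is checked."""
    executable = command.split()[0]
    if control == "allowlist":
        return executable in allowlist
    return executable not in denylist


assert is_shell_command_allowed("echo hi", "allowlist", ["echo"], [])
assert not is_shell_command_allowed("cat file", "allowlist", ["echo"], [])
assert not is_shell_command_allowed("echo hi", "denylist", [], ["echo"])
assert is_shell_command_allowed("echo hi", "denylist", [], ["cat"])
```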
@@ -4,33 +4,15 @@ from pathlib import Path
from unittest.mock import patch

import pytest
from PIL import Image
from pydantic import SecretStr, ValidationError

from forge.components.image_gen import ImageGeneratorComponent
from forge.components.image_gen.image_gen import ImageGeneratorConfiguration
from forge.file_storage.base import FileStorage
from forge.llm.providers.openai import OpenAICredentials
from PIL import Image

from autogpt.agents.agent import Agent


@pytest.fixture
def image_gen_component(storage: FileStorage):
    try:
        cred = OpenAICredentials.from_env()
    except ValidationError:
        cred = OpenAICredentials(api_key=SecretStr("test"))

    return ImageGeneratorComponent(storage, openai_credentials=cred)


@pytest.fixture
def huggingface_image_gen_component(storage: FileStorage):
    config = ImageGeneratorConfiguration(
        image_provider="huggingface",
        huggingface_api_token=SecretStr("1"),
        huggingface_image_model="CompVis/stable-diffusion-v1-4",
    )
    return ImageGeneratorComponent(storage, config=config)
def image_gen_component(agent: Agent):
    return agent.image_gen


@pytest.fixture(params=[256, 512, 1024])
@@ -39,14 +21,20 @@ def image_size(request):
    return request.param


@pytest.mark.requires_openai_api_key
@pytest.mark.vcr
def test_dalle(
    image_gen_component: ImageGeneratorComponent,
    agent: Agent,
    storage,
    image_size,
    cached_openai_client,
):
    """Test DALL-E image generation."""
    generate_and_validate(
        image_gen_component,
        agent,
        storage,
        image_provider="dalle",
        image_size=image_size,
    )
@@ -56,18 +44,23 @@ def test_dalle(
    reason="The image is too big to be put in a cassette for a CI pipeline. "
    "We're looking into a solution."
)
@pytest.mark.requires_huggingface_api_key
@pytest.mark.parametrize(
    "image_model",
    ["CompVis/stable-diffusion-v1-4", "stabilityai/stable-diffusion-2-1"],
)
def test_huggingface(
    image_gen_component: ImageGeneratorComponent,
    agent: Agent,
    storage,
    image_size,
    image_model,
):
    """Test HuggingFace image generation."""
    generate_and_validate(
        image_gen_component,
        agent,
        storage,
        image_provider="huggingface",
        image_size=image_size,
        hugging_face_image_model=image_model,
@@ -75,10 +68,14 @@ def test_huggingface(


@pytest.mark.xfail(reason="SD WebUI call does not work.")
def test_sd_webui(image_gen_component: ImageGeneratorComponent, image_size):
def test_sd_webui(
    image_gen_component: ImageGeneratorComponent, agent: Agent, storage, image_size
):
    """Test SD WebUI image generation."""
    generate_and_validate(
        image_gen_component,
        agent,
        storage,
        image_provider="sd_webui",
        image_size=image_size,
    )
@@ -86,7 +83,7 @@ def test_sd_webui(image_gen_component: ImageGeneratorComponent, image_size):

@pytest.mark.xfail(reason="SD WebUI call does not work.")
def test_sd_webui_negative_prompt(
    image_gen_component: ImageGeneratorComponent, image_size
    image_gen_component: ImageGeneratorComponent, storage, image_size
):
    gen_image = functools.partial(
        image_gen_component.generate_image_with_sd_webui,
@@ -117,15 +114,17 @@ def lst(txt):

def generate_and_validate(
    image_gen_component: ImageGeneratorComponent,
    agent: Agent,
    storage,
    image_size,
    image_provider,
    hugging_face_image_model=None,
    **kwargs,
):
    """Generate an image and validate the output."""
    image_gen_component.config.image_provider = image_provider
    agent.legacy_config.image_provider = image_provider
    if hugging_face_image_model:
        image_gen_component.config.huggingface_image_model = hugging_face_image_model
        agent.legacy_config.huggingface_image_model = hugging_face_image_model
    prompt = "astronaut riding a horse"

    image_path = lst(image_gen_component.generate_image(prompt, image_size, **kwargs))
@@ -150,7 +149,9 @@ def generate_and_validate(
)
@pytest.mark.parametrize("delay", [10, 0])
def test_huggingface_fail_request_with_delay(
    huggingface_image_gen_component: ImageGeneratorComponent,
    image_gen_component: ImageGeneratorComponent,
    agent: Agent,
    storage,
    image_size,
    image_model,
    return_text,
@@ -172,12 +173,14 @@ def test_huggingface_fail_request_with_delay(
    mock_post.return_value.ok = False
    mock_post.return_value.text = return_text

    huggingface_image_gen_component.config.huggingface_image_model = image_model
    agent.legacy_config.image_provider = "huggingface"
    agent.legacy_config.huggingface_api_token = "mock-api-key"
    agent.legacy_config.huggingface_image_model = image_model
    prompt = "astronaut riding a horse"

    with patch("time.sleep") as mock_sleep:
        # Verify request fails.
        result = huggingface_image_gen_component.generate_image(prompt, image_size)
        result = image_gen_component.generate_image(prompt, image_size)
        assert result == "Error creating image."

        # Verify retry was called with delay if delay is in return_text
@@ -188,8 +191,10 @@ def test_huggingface_fail_request_with_delay(


def test_huggingface_fail_request_no_delay(
    mocker, huggingface_image_gen_component: ImageGeneratorComponent
    mocker, image_gen_component: ImageGeneratorComponent, agent: Agent
):
    agent.legacy_config.huggingface_api_token = "1"

    # Mock requests.post
    mock_post = mocker.patch("requests.post")
    mock_post.return_value.status_code = 500
@@ -201,9 +206,10 @@ def test_huggingface_fail_request_no_delay(
    # Mock time.sleep
    mock_sleep = mocker.patch("time.sleep")

    result = huggingface_image_gen_component.generate_image(
        "astronaut riding a horse", 512
    )
    agent.legacy_config.image_provider = "huggingface"
    agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"

    result = image_gen_component.generate_image("astronaut riding a horse", 512)

    assert result == "Error creating image."

@@ -212,8 +218,10 @@ def test_huggingface_fail_request_no_delay(


def test_huggingface_fail_request_bad_json(
    mocker, huggingface_image_gen_component: ImageGeneratorComponent
    mocker, image_gen_component: ImageGeneratorComponent, agent: Agent
):
    agent.legacy_config.huggingface_api_token = "1"

    # Mock requests.post
    mock_post = mocker.patch("requests.post")
    mock_post.return_value.status_code = 500
@@ -223,9 +231,10 @@ def test_huggingface_fail_request_bad_json(
    # Mock time.sleep
    mock_sleep = mocker.patch("time.sleep")

    result = huggingface_image_gen_component.generate_image(
        "astronaut riding a horse", 512
    )
    agent.legacy_config.image_provider = "huggingface"
    agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"

    result = image_gen_component.generate_image("astronaut riding a horse", 512)

    assert result == "Error creating image."

@@ -234,14 +243,17 @@ def test_huggingface_fail_request_bad_json(


def test_huggingface_fail_request_bad_image(
    mocker, huggingface_image_gen_component: ImageGeneratorComponent
    mocker, image_gen_component: ImageGeneratorComponent, agent: Agent
):
    agent.legacy_config.huggingface_api_token = "1"

    # Mock requests.post
    mock_post = mocker.patch("requests.post")
    mock_post.return_value.status_code = 200

    result = huggingface_image_gen_component.generate_image(
        "astronaut riding a horse", 512
    )
    agent.legacy_config.image_provider = "huggingface"
    agent.legacy_config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"

    result = image_gen_component.generate_image("astronaut riding a horse", 512)

    assert result == "Error creating image."
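Aside: the failure-path tests above share one mocking pattern — requests.post is replaced so no real HuggingFace call happens, and time.sleep is patched so the retry back-off can be asserted without waiting. A condensed sketch of that pattern (the retry loop here is a stand-in, not the component's real implementation):

    import time
    from unittest.mock import MagicMock, patch

    import requests

    def generate_with_retries(prompt: str, retries: int = 2, delay: float = 10.0) -> str:
        for _ in range(retries):
            response = requests.post("https://example.invalid/generate", json={"inputs": prompt})
            if response.ok:
                return "image-bytes"
            time.sleep(delay)  # the real code parses the delay out of the error text
        return "Error creating image."

    def test_retry_sleeps_between_attempts():
        with patch("requests.post") as mock_post, patch("time.sleep") as mock_sleep:
            mock_post.return_value = MagicMock(ok=False, status_code=500, text="")
            assert generate_with_retries("astronaut riding a horse") == "Error creating image."
            mock_sleep.assert_called_with(10.0)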
@@ -3,8 +3,8 @@ from unittest.mock import patch
import pytest
from forge.config.ai_directives import AIDirectives
from forge.config.ai_profile import AIProfile
from forge.config.config import Config

from autogpt.app.config import AppConfig
from autogpt.app.setup import (
    apply_overrides_to_ai_settings,
    interactively_revise_ai_settings,
@@ -39,7 +39,7 @@ async def test_apply_overrides_to_ai_settings():


@pytest.mark.asyncio
async def test_interactively_revise_ai_settings(config: AppConfig):
async def test_interactively_revise_ai_settings(config: Config):
    ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role")
    directives = AIDirectives(
        resources=["Resource1"],
@@ -1,20 +1,19 @@
from pathlib import Path

import pytest
from forge.components.web.selenium import BrowsingError, WebSeleniumComponent

from forge.llm.providers.multi import MultiProvider

from . import BrowsingError, WebSeleniumComponent
from autogpt.agents.agent import Agent


@pytest.fixture
def web_selenium_component(app_data_dir: Path):
    return WebSeleniumComponent(MultiProvider(), app_data_dir)
def web_selenium_component(agent: Agent):
    return agent.web_selenium


@pytest.mark.vcr
@pytest.mark.requires_openai_api_key
@pytest.mark.asyncio
async def test_browse_website_nonexistent_url(
    web_selenium_component: WebSeleniumComponent,
    web_selenium_component: WebSeleniumComponent, cached_openai_client: None
):
    url = "https://auto-gpt-thinks-this-website-does-not-exist.com"
    question = "How to execute a barrel roll"
@@ -8,15 +8,15 @@ from typing import Any
from unittest import mock

import pytest
from forge.config.config import GPT_3_MODEL, GPT_4_MODEL, Config, ConfigBuilder
from openai.pagination import AsyncPage
from openai.types import Model
from pydantic import SecretStr

from autogpt.app.config import GPT_3_MODEL, GPT_4_MODEL, AppConfig, ConfigBuilder
from autogpt.app.configurator import apply_overrides_to_config


def test_initial_values(config: AppConfig) -> None:
def test_initial_values(config: Config) -> None:
    """
    Test if the initial values of the config class attributes are set correctly.
    """
@@ -29,7 +29,7 @@ def test_initial_values(config: AppConfig) -> None:
@pytest.mark.asyncio
@mock.patch("openai.resources.models.AsyncModels.list")
async def test_fallback_to_gpt3_if_gpt4_not_available(
    mock_list_models: Any, config: AppConfig
    mock_list_models: Any, config: Config
) -> None:
    """
    Test if models update to gpt-3.5-turbo if gpt-4 is not available.
@@ -51,7 +51,7 @@ async def test_fallback_to_gpt3_if_gpt4_not_available(
    assert config.smart_llm == GPT_3_MODEL


def test_missing_azure_config(config: AppConfig) -> None:
def test_missing_azure_config(config: Config) -> None:
    assert config.openai_credentials is not None

    config_file = config.app_data_dir / "azure_config.yaml"
@@ -68,7 +68,7 @@ def test_missing_azure_config(config: AppConfig) -> None:


@pytest.fixture
def config_with_azure(config: AppConfig):
def config_with_azure(config: Config):
    config_file = config.app_data_dir / "azure_config.yaml"
    config_file.write_text(
        f"""
@@ -91,7 +91,7 @@ azure_model_map:
    del os.environ["AZURE_CONFIG_FILE"]


def test_azure_config(config_with_azure: AppConfig) -> None:
def test_azure_config(config_with_azure: Config) -> None:
    assert (credentials := config_with_azure.openai_credentials) is not None
    assert credentials.api_type == SecretStr("azure")
    assert credentials.api_version == SecretStr("2023-06-01-preview")
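Aside: test_fallback_to_gpt3_if_gpt4_not_available boils down to a one-line policy. A sketch with a stand-in config object (FakeConfig and the hard-coded model names are illustrative; the real test goes through the mocked OpenAI models API and the GPT_4_MODEL/GPT_3_MODEL constants it imports):

    from dataclasses import dataclass

    @dataclass
    class FakeConfig:
        smart_llm: str = "gpt-4"

    def apply_model_fallback(config: FakeConfig, available_models: set[str]) -> None:
        # Downgrade to GPT-3.5 when the configured smart model is unavailable.
        if config.smart_llm not in available_models:
            config.smart_llm = "gpt-3.5-turbo"

    config = FakeConfig()
    apply_model_fallback(config, {"gpt-3.5-turbo"})
    assert config.smart_llm == "gpt-3.5-turbo"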
@@ -2,11 +2,9 @@ import os
from pathlib import Path

import pytest

from forge.agent.base import BaseAgentSettings
from forge.file_storage import FileStorage

from . import FileManagerComponent
from autogpt.agents.agent import Agent


@pytest.fixture()
@@ -15,13 +13,8 @@ def file_content():


@pytest.fixture
def file_manager_component(storage: FileStorage):
    return FileManagerComponent(
        storage,
        BaseAgentSettings(
            agent_id="TestAgent", name="TestAgent", description="Test Agent description"
        ),
    )
def file_manager_component(agent: Agent):
    return agent.file_manager


@pytest.fixture()
@@ -48,14 +41,15 @@ def test_nested_file(storage: FileStorage):
async def test_read_file(
    test_file_path: Path,
    file_content,
    file_manager_component: FileManagerComponent,
    file_manager_component,
    agent: Agent,
):
    await file_manager_component.workspace.write_file(test_file_path.name, file_content)
    await agent.file_manager.workspace.write_file(test_file_path.name, file_content)
    content = file_manager_component.read_file(test_file_path.name)
    assert content.replace("\r", "") == file_content


def test_read_file_not_found(file_manager_component: FileManagerComponent):
def test_read_file_not_found(file_manager_component):
    filename = "does_not_exist.txt"
    with pytest.raises(FileNotFoundError):
        file_manager_component.read_file(filename)
@@ -63,12 +57,12 @@ def test_read_file_not_found(file_manager_component: FileManagerComponent):

@pytest.mark.asyncio
async def test_write_to_file_relative_path(
    test_file_name: Path, file_manager_component: FileManagerComponent
    test_file_name: Path, file_manager_component, agent: Agent
):
    new_content = "This is new content.\n"
    await file_manager_component.write_to_file(test_file_name, new_content)
    with open(
        file_manager_component.workspace.get_path(test_file_name), "r", encoding="utf-8"
        agent.file_manager.workspace.get_path(test_file_name), "r", encoding="utf-8"
    ) as f:
        content = f.read()
    assert content == new_content
@@ -76,7 +70,7 @@ async def test_write_to_file_relative_path(

@pytest.mark.asyncio
async def test_write_to_file_absolute_path(
    test_file_path: Path, file_manager_component: FileManagerComponent
    test_file_path: Path, file_manager_component
):
    new_content = "This is new content.\n"
    await file_manager_component.write_to_file(test_file_path, new_content)
@@ -86,18 +80,18 @@ async def test_write_to_file_absolute_path(


@pytest.mark.asyncio
async def test_list_files(file_manager_component: FileManagerComponent):
async def test_list_files(file_manager_component, agent: Agent):
    # Create files A and B
    file_a_name = "file_a.txt"
    file_b_name = "file_b.txt"
    test_directory = Path("test_directory")

    await file_manager_component.workspace.write_file(file_a_name, "This is file A.")
    await file_manager_component.workspace.write_file(file_b_name, "This is file B.")
    await agent.file_manager.workspace.write_file(file_a_name, "This is file A.")
    await agent.file_manager.workspace.write_file(file_b_name, "This is file B.")

    # Create a subdirectory and place a copy of file_a in it
    file_manager_component.workspace.make_dir(test_directory)
    await file_manager_component.workspace.write_file(
    agent.file_manager.workspace.make_dir(test_directory)
    await agent.file_manager.workspace.write_file(
        test_directory / file_a_name, "This is file A in the subdirectory."
    )

@@ -107,10 +101,10 @@ async def test_list_files(file_manager_component: FileManagerComponent):
    assert os.path.join(test_directory, file_a_name) in files

    # Clean up
    file_manager_component.workspace.delete_file(file_a_name)
    file_manager_component.workspace.delete_file(file_b_name)
    file_manager_component.workspace.delete_file(test_directory / file_a_name)
    file_manager_component.workspace.delete_dir(test_directory)
    agent.file_manager.workspace.delete_file(file_a_name)
    agent.file_manager.workspace.delete_file(file_b_name)
    agent.file_manager.workspace.delete_file(test_directory / file_a_name)
    agent.file_manager.workspace.delete_dir(test_directory)

    # Case 2: Search for a file that does not exist and make sure we don't throw
    non_existent_file = "non_existent_file.txt"
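Aside: stripped of fixtures, the file manager tests are a write/read round trip through a workspace rooted in a scratch directory — roughly:

    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as root:
        workspace = Path(root)
        (workspace / "file_a.txt").write_text("This is file A.")
        assert (workspace / "file_a.txt").read_text() == "This is file A."
        assert "file_a.txt" in [p.name for p in workspace.iterdir()]

The real FileStorage adds path confinement on top of this, which is presumably why the tests exercise both relative and absolute paths.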
@@ -4,12 +4,11 @@ from pathlib import Path

import pytest
import pytest_asyncio
from forge.file_storage.gcs import GCSFileStorage, GCSFileStorageConfiguration
from google.auth.exceptions import GoogleAuthError
from google.cloud import storage
from google.cloud.exceptions import NotFound

from .gcs import GCSFileStorage, GCSFileStorageConfiguration

try:
    storage.Client()
except GoogleAuthError:
@@ -1,11 +1,11 @@
import pytest
from forge.components.git_operations import GitOperationsComponent
from forge.file_storage.base import FileStorage
from forge.utils.exceptions import CommandExecutionError
from git.exc import GitCommandError
from git.repo.base import Repo

from forge.file_storage.base import FileStorage
from forge.utils.exceptions import CommandExecutionError

from . import GitOperationsComponent
from autogpt.agents.agent import Agent


@pytest.fixture
@@ -14,14 +14,15 @@ def mock_clone_from(mocker):


@pytest.fixture
def git_ops_component():
    return GitOperationsComponent()
def git_ops_component(agent: Agent):
    return agent.git_ops


def test_clone_auto_gpt_repository(
    git_ops_component: GitOperationsComponent,
    storage: FileStorage,
    mock_clone_from,
    agent: Agent,
):
    mock_clone_from.return_value = None

@@ -36,7 +37,7 @@ def test_clone_auto_gpt_repository(

    assert clone_result == expected_output
    mock_clone_from.assert_called_once_with(
        url=f"{scheme}{git_ops_component.config.github_username}:{git_ops_component.config.github_api_key}@{repo}",  # noqa: E501
        url=f"{scheme}{agent.legacy_config.github_username}:{agent.legacy_config.github_api_key}@{repo}",  # noqa: E501
        to_path=clone_path,
    )

@@ -45,6 +46,7 @@ def test_clone_repository_error(
    git_ops_component: GitOperationsComponent,
    storage: FileStorage,
    mock_clone_from,
    agent: Agent,
):
    url = "https://github.com/this-repository/does-not-exist.git"
    clone_path = storage.get_path("does-not-exist")
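Aside: the mock_clone_from fixture referenced in these tests is presumably a one-liner built on pytest-mock, patching GitPython's Repo.clone_from so that no network access or on-disk clone happens:

    import pytest
    from git.repo.base import Repo

    @pytest.fixture
    def mock_clone_from(mocker):
        # Returns the MagicMock so tests can set return_value/side_effect
        # and assert on the call, as test_clone_auto_gpt_repository does.
        return mocker.patch.object(Repo, "clone_from")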
@@ -1,8 +1,7 @@
import json

import pytest

from .parsing import json_loads
from forge.json.parsing import json_loads

_JSON_FIXABLE: list[tuple[str, str]] = [
    # Missing comma
@@ -1,8 +1,7 @@
from pathlib import Path

import pytest

from .local import FileStorageConfiguration, LocalFileStorage
from forge.file_storage.local import FileStorageConfiguration, LocalFileStorage

_ACCESSIBLE_PATHS = [
    Path("."),
@@ -1,6 +1,5 @@
import pytest

from .utils import remove_color_codes
from forge.logging.utils import remove_color_codes


@pytest.mark.parametrize(
@@ -5,8 +5,7 @@ from pathlib import Path
import pytest
import pytest_asyncio
from botocore.exceptions import ClientError

from .s3 import S3FileStorage, S3FileStorageConfiguration
from forge.file_storage.s3 import S3FileStorage, S3FileStorageConfiguration

if not (os.getenv("S3_ENDPOINT_URL") and os.getenv("AWS_ACCESS_KEY_ID")):
    pytest.skip("S3 environment variables are not set", allow_module_level=True)
@@ -9,8 +9,7 @@ import docx
import pytest
import yaml
from bs4 import BeautifulSoup

from .file_operations import decode_textual_file, is_file_binary_fn
from forge.utils.file_operations import decode_textual_file, is_file_binary_fn

logger = logging.getLogger(__name__)
@@ -1,8 +1,7 @@
import pytest
from forge.utils.url_validator import validate_url
from pytest import raises

from .url_validator import validate_url


@validate_url
def dummy_method(url):
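Aside: the dummy_method pattern above tests validate_url as a decorator. A minimal sketch of such a decorator, assuming it only needs to reject clearly malformed URLs (the real forge implementation does more):

    from functools import wraps
    from urllib.parse import urlparse

    def validate_url(func):
        @wraps(func)
        def wrapper(url, *args, **kwargs):
            # Reject anything without an http(s) scheme and a host.
            parsed = urlparse(url)
            if parsed.scheme not in ("http", "https") or not parsed.netloc:
                raise ValueError(f"Invalid URL: {url}")
            return func(url, *args, **kwargs)
        return wrapper

    @validate_url
    def dummy_method(url):
        return url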
@@ -1,23 +1,16 @@
import json

import pytest
from googleapiclient.errors import HttpError
from httplib2 import Response
from pydantic import SecretStr

from forge.components.web.search import WebSearchComponent
from forge.utils.exceptions import ConfigurationError
from googleapiclient.errors import HttpError

from . import WebSearchComponent
from autogpt.agents.agent import Agent


@pytest.fixture
def web_search_component():
    component = WebSearchComponent()
    if component.config.google_api_key is None:
        component.config.google_api_key = SecretStr("test")
    if component.config.google_custom_search_engine_id is None:
        component.config.google_custom_search_engine_id = SecretStr("test")
    return component
def web_search_component(agent: Agent):
    return agent.web_search


@pytest.mark.parametrize(
@@ -138,11 +131,16 @@ def test_google_official_search_errors(
    error_msg,
    web_search_component: WebSearchComponent,
):
    class resp:
        def __init__(self, _status, _reason):
            self.status = _status
            self.reason = _reason

    response_content = {
        "error": {"code": http_code, "message": error_msg, "reason": "backendError"}
    }
    error = HttpError(
        resp=Response({"status": http_code, "reason": error_msg}),
        resp=resp(http_code, error_msg),
        content=str.encode(json.dumps(response_content)),
        uri="https://www.googleapis.com/customsearch/v1?q=invalid+query&cx",
    )
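Aside: swapping httplib2's Response for the local resp class works because, in this test, HttpError only needs to read the status and reason attributes off whatever response object it is handed — duck typing makes a two-field stub sufficient:

    class FakeResp:
        def __init__(self, status, reason):
            self.status = status
            self.reason = reason

    # e.g. HttpError(resp=FakeResp(500, "Internal Server Error"), content=b"{}", uri="...")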
2 benchmark/.vscode/settings.json vendored
@@ -2,5 +2,5 @@
  "[python]": {
    "editor.defaultFormatter": "ms-python.black-formatter"
  },
  "python.analysis.typeCheckingMode": "basic",
  "python.formatting.provider": "none"
}
@@ -97,9 +97,7 @@ def start():
    help="Write log output to a file instead of the terminal.",
)
# @click.argument(
#     "agent_path",
#     type=click.Path(exists=True, file_okay=False, path_type=Path),
#     required=False,
#     "agent_path", type=click.Path(exists=True, file_okay=False), required=False
# )
def run(
    maintain: bool,
@@ -278,9 +276,7 @@ def list_challenges(include_unavailable: bool, only_names: bool, output_json: bo
        return

    if output_json:
        click.echo(
            json.dumps([json.loads(c.info.model_dump_json()) for c in challenges])
        )
        click.echo(json.dumps([json.loads(c.info.json()) for c in challenges]))
        return

    headers = tuple(
@@ -328,7 +324,7 @@ def info(name: str, json: bool):
            continue

        if json:
            click.echo(challenge.info.model_dump_json())
            click.echo(challenge.info.json())
            break

        pretty_print_model(challenge.info)
@@ -16,7 +16,7 @@ from agent_protocol_client import AgentApi, ApiClient, ApiException, Configurati
from agent_protocol_client.models import Task, TaskRequestBody
from fastapi import APIRouter, FastAPI, HTTPException, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, ConfigDict, ValidationError
from pydantic import BaseModel, Extra, ValidationError

from agbenchmark.challenges import ChallengeInfo
from agbenchmark.config import AgentBenchmarkConfig
@@ -52,9 +52,7 @@ while challenge_spec_files:

    logger.debug(f"Loading {challenge_relpath}...")
    try:
        challenge_info = ChallengeInfo.model_validate_json(
            challenge_spec_file.read_text()
        )
        challenge_info = ChallengeInfo.parse_file(challenge_spec_file)
    except ValidationError as e:
        if logging.getLogger().level == logging.DEBUG:
            logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
@@ -66,7 +64,7 @@ while challenge_spec_files:
        challenge_info.eval_id = str(uuid.uuid4())
        # this will sort all the keys of the JSON systematically
        # so that the order is always the same
        write_pretty_json(challenge_info.model_dump(), challenge_spec_file)
        write_pretty_json(challenge_info.dict(), challenge_spec_file)

    CHALLENGES[challenge_info.eval_id] = challenge_info

@@ -113,7 +111,8 @@ class CreateReportRequest(BaseModel):
    # category: Optional[str] = []
    mock: Optional[bool] = False

    model_config = ConfigDict(extra="forbid")
    class Config:
        extra = Extra.forbid  # this will forbid any extra fields


updates_list = []
@@ -154,7 +153,7 @@ def setup_fastapi_app(agbenchmark_config: AgentBenchmarkConfig) -> FastAPI:
        pids = find_agbenchmark_without_uvicorn()
        logger.info(f"pids already running with agbenchmark: {pids}")

        logger.debug(f"Request to /reports: {body.model_dump()}")
        logger.debug(f"Request to /reports: {body.dict()}")

        # Start the benchmark in a separate thread
        benchmark_process = Process(
@@ -327,9 +326,7 @@ def setup_fastapi_app(agbenchmark_config: AgentBenchmarkConfig) -> FastAPI:
                config={},
            )

            logger.debug(
                f"Returning evaluation data:\n{eval_info.model_dump_json(indent=4)}"
            )
            logger.debug(f"Returning evaluation data:\n{eval_info.json(indent=4)}")
            return eval_info
        except ApiException as e:
            logger.error(f"Error {e} whilst trying to evaluate task: {task_id}")
@@ -15,13 +15,7 @@ from agent_protocol_client import Configuration as ClientConfig
from agent_protocol_client import Step
from colorama import Fore, Style
from openai import _load_client as get_openai_client
from pydantic import (
    BaseModel,
    Field,
    StringConstraints,
    ValidationInfo,
    field_validator,
)
from pydantic import BaseModel, Field, constr, validator

from agbenchmark.agent_api_interface import download_agent_artifacts_into_folder
from agbenchmark.agent_interface import copy_challenge_artifacts_into_workspace
@@ -52,9 +46,7 @@ class BuiltinChallengeSpec(BaseModel):

    class Info(BaseModel):
        difficulty: DifficultyLevel
        description: Annotated[
            str, StringConstraints(pattern=r"^Tests if the agent can.*")
        ]
        description: Annotated[str, constr(regex=r"^Tests if the agent can.*")]
        side_effects: list[str] = Field(default_factory=list)

    info: Info
@@ -68,26 +60,23 @@ class BuiltinChallengeSpec(BaseModel):

    class Eval(BaseModel):
        type: str
        scoring: Optional[Literal["percentage", "scale", "binary"]] = None
        template: Optional[
            Literal["rubric", "reference", "question", "custom"]
        ] = None
        examples: Optional[str] = None
        scoring: Optional[Literal["percentage", "scale", "binary"]]
        template: Optional[Literal["rubric", "reference", "question", "custom"]]
        examples: Optional[str]

        @field_validator("scoring", "template")
        def validate_eval_fields(cls, value, info: ValidationInfo):
            field_name = info.field_name
            if "type" in info.data and info.data["type"] == "llm":
                if value is None:
        @validator("scoring", "template", always=True)
        def validate_eval_fields(cls, v, values, field):
            if "type" in values and values["type"] == "llm":
                if v is None:
                    raise ValueError(
                        f"{field_name} must be provided when eval type is 'llm'"
                        f"{field.name} must be provided when eval type is 'llm'"
                    )
            else:
                if value is not None:
                if v is not None:
                    raise ValueError(
                        f"{field_name} should only exist when eval type is 'llm'"
                        f"{field.name} should only exist when eval type is 'llm'"
                    )
            return value
            return v

    eval: Eval

@@ -153,7 +142,7 @@ class BuiltinChallenge(BaseChallenge):

    @classmethod
    def from_challenge_spec_file(cls, spec_file: Path) -> type["BuiltinChallenge"]:
        challenge_spec = BuiltinChallengeSpec.model_validate_json(spec_file.read_text())
        challenge_spec = BuiltinChallengeSpec.parse_file(spec_file)
        challenge_spec.spec_file = spec_file
        return cls.from_challenge_spec(challenge_spec)

@@ -198,7 +187,7 @@ class BuiltinChallenge(BaseChallenge):
            task_id = step.task_id

            n_steps += 1
            steps.append(step.model_copy())
            steps.append(step.copy())
            if step.additional_output:
                agent_task_cost = step.additional_output.get(
                    "task_total_cost",
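Aside: the Eval hunk above is the clearest statement of the pydantic v2 → v1 validator translation that recurs through the rest of this diff. A self-contained v2 example of the same conditional-requirement rule; note validate_default=True, which is v2's counterpart to v1's always=True (without it the validator is skipped when the field takes its default):

    from pydantic import BaseModel, Field, ValidationInfo, field_validator

    class Eval(BaseModel):
        type: str
        scoring: str | None = Field(default=None, validate_default=True)

        @field_validator("scoring")
        def check_scoring(cls, value, info: ValidationInfo):
            # v1 spelling: @validator("scoring", always=True) with (cls, v, values, field)
            if info.data.get("type") == "llm" and value is None:
                raise ValueError("scoring must be provided when eval type is 'llm'")
            return value

    Eval(type="basic")  # passes; Eval(type="llm") would raise a ValidationError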
@@ -1,7 +1,7 @@
from abc import ABC, abstractmethod
from typing import Optional

from pydantic import BaseModel, field_validator
from pydantic import BaseModel, validator


# Models for the request and response payloads
@@ -10,7 +10,7 @@ class ShipPlacement(BaseModel):
    start: dict  # {"row": int, "column": str}
    direction: str

    @field_validator("start")
    @validator("start")
    def validate_start(cls, start):
        row, column = start.get("row"), start.get("column")


@@ -1,7 +1,7 @@
from abc import ABC, abstractmethod
from typing import Optional

from pydantic import BaseModel, field_validator
from pydantic import BaseModel, validator


# Models for the request and response payloads
@@ -10,7 +10,7 @@ class ShipPlacement(BaseModel):
    start: dict  # {"row": int, "column": str}
    direction: str

    @field_validator("start")
    @validator("start")
    def validate_start(cls, start):
        row, column = start.get("row"), start.get("column")
@@ -6,7 +6,7 @@ from typing import ClassVar, Iterator, Literal
import pytest
import requests
from agent_protocol_client import AgentApi, Step
from pydantic import BaseModel, ValidationError, ValidationInfo, field_validator
from pydantic import BaseModel, ValidationError, validator

from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.utils.data_types import Category, EvalResult
@@ -183,7 +183,7 @@ class WebArenaChallengeSpec(BaseModel):
    """The JungleGym site (base URL) at which to start"""
    require_login: bool
    require_reset: bool
    storage_state: str | None = None
    storage_state: str | None

    intent: str
    intent_template: str
@@ -195,36 +195,36 @@ class WebArenaChallengeSpec(BaseModel):

    class EvalSet(BaseModel):
        class StringMatchEvalSet(BaseModel):
            exact_match: str | None = None
            fuzzy_match: list[str] | None = None
            must_include: list[str] | None = None
            exact_match: str | None
            fuzzy_match: list[str] | None
            must_include: list[str] | None

        reference_answers: StringMatchEvalSet | None = None
        reference_answers: StringMatchEvalSet | None
        """For string_match eval, a set of criteria to judge the final answer"""
        reference_answer_raw_annotation: str | None = None
        string_note: str | None = None
        annotation_note: str | None = None
        reference_answer_raw_annotation: str | None
        string_note: str | None
        annotation_note: str | None

        reference_url: str | None = None
        reference_url: str | None
        """For url_match eval, the last URL that should be visited"""
        url_note: str | None = None
        url_note: str | None

        program_html: list[ProgramHtmlEval]
        """For program_html eval, a list of criteria to judge the site state by"""

        eval_types: list[EvalType]

        @field_validator("eval_types")
        def check_eval_parameters(cls, value: list[EvalType], info: ValidationInfo):
            if "string_match" in value and not info.data["reference_answers"]:
        @validator("eval_types")
        def check_eval_parameters(cls, v: list[EvalType], values):
            if "string_match" in v and not values.get("reference_answers"):
                raise ValueError("'string_match' eval_type requires reference_answers")
            if "url_match" in value and not info.data["reference_url"]:
            if "url_match" in v and not values.get("reference_url"):
                raise ValueError("'url_match' eval_type requires reference_url")
            if "program_html" in value and not info.data["program_html"]:
            if "program_html" in v and not values.get("program_html"):
                raise ValueError(
                    "'program_html' eval_type requires at least one program_html eval"
                )
            return value
            return v

    @property
    def evaluators(self) -> list[_Eval]:
@@ -292,7 +292,7 @@ class WebArenaChallenge(BaseChallenge):
        results = requests.get(source_url).json()["data"]
        if not results:
            raise ValueError(f"Could not fetch challenge {source_uri}")
        return cls.from_challenge_spec(WebArenaChallengeSpec.model_validate(results[0]))
        return cls.from_challenge_spec(WebArenaChallengeSpec.parse_obj(results[0]))

    @classmethod
    def from_challenge_spec(
@@ -500,7 +500,7 @@ def load_webarena_challenges(
    skipped = 0
    for entry in challenge_dicts:
        try:
            challenge_spec = WebArenaChallengeSpec.model_validate(entry)
            challenge_spec = WebArenaChallengeSpec.parse_obj(entry)
        except ValidationError as e:
            failed += 1
            logger.warning(f"Error validating WebArena challenge entry: {entry}")
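Aside: the `= None` defaults being dropped from WebArenaChallengeSpec matter because the two pydantic majors treat bare Optional annotations differently — v1 makes `x: str | None` implicitly optional with default None, while v2 treats it as required unless a default is given:

    from pydantic import BaseModel

    class SpecV2(BaseModel):
        storage_state: str | None = None  # in v2, omitting "= None" makes this required

    SpecV2()  # ok under v2 only because the default is explicit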
@@ -4,8 +4,7 @@ from datetime import datetime
from pathlib import Path
from typing import Optional

from pydantic import Field, ValidationInfo, field_validator
from pydantic_settings import BaseSettings
from pydantic import BaseSettings, Field, validator


def _calculate_info_test_path(base_path: Path, benchmark_start_time: datetime) -> Path:
@@ -58,7 +57,7 @@ class AgentBenchmarkConfig(BaseSettings, extra="allow"):
    subject application exposes an Agent Protocol compliant API.
    """

    agbenchmark_config_dir: Path = Field(exclude=True)
    agbenchmark_config_dir: Path = Field(..., exclude=True)
    """Path to the agbenchmark_config folder of the subject agent application."""

    categories: list[str] | None = None
@@ -102,11 +101,11 @@ class AgentBenchmarkConfig(BaseSettings, extra="allow"):
    def config_file(self) -> Path:
        return self.agbenchmark_config_dir / "config.json"

    @field_validator("reports_folder", mode="before")
    def set_reports_folder(cls, value: Path, info: ValidationInfo):
        if not value:
            return info.data["agbenchmark_config_dir"] / "reports"
        return value
    @validator("reports_folder", pre=True, always=True)
    def set_reports_folder(cls, v, values):
        if not v:
            return values["agbenchmark_config_dir"] / "reports"
        return v

    def get_report_dir(self, benchmark_start_time: datetime) -> Path:
        return _calculate_info_test_path(self.reports_folder, benchmark_start_time)
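Aside: the BaseSettings import swap in this hunk reflects that settings support left the core package in pydantic v2 (it now lives in the separate pydantic-settings distribution). A common compatibility shim:

    try:  # pydantic v2: pip install pydantic-settings
        from pydantic_settings import BaseSettings
    except ImportError:  # pydantic v1
        from pydantic import BaseSettings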
@@ -111,19 +111,17 @@ class SessionReportManager(BaseReportManager):
    def save(self) -> None:
        with self.report_file.open("w") as f:
            if self.report:
                f.write(self.report.model_dump_json(indent=4))
                f.write(self.report.json(indent=4))
            else:
                json.dump(
                    {k: v.model_dump() for k, v in self.tests.items()}, f, indent=4
                )
                json.dump({k: v.dict() for k, v in self.tests.items()}, f, indent=4)

    def load(self) -> None:
        super().load()

        if "tests" in self.tests:
            self.report = Report.model_validate(self.tests)
            self.report = Report.parse_obj(self.tests)
        else:
            self.tests = {n: Test.model_validate(d) for n, d in self.tests.items()}
            self.tests = {n: Test.parse_obj(d) for n, d in self.tests.items()}

    def add_test_report(self, test_name: str, test_report: Test) -> None:
        if self.report:
@@ -157,7 +155,7 @@ class SessionReportManager(BaseReportManager):
                total_cost=self.get_total_costs(),
            ),
            tests=copy.copy(self.tests),
            config=config.model_dump(exclude={"reports_folder"}, exclude_none=True),
            config=config.dict(exclude={"reports_folder"}, exclude_none=True),
        )

        agent_categories = get_highest_achieved_difficulty_per_category(self.report)
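Aside: the remaining hunks are dominated by pydantic's serialization renames — model_dump()/dict(), model_dump_json()/json(), model_validate()/parse_obj(), model_copy()/copy(), and model_validate_json(path.read_text()) in place of parse_file(path). A version-agnostic probe, for illustration only:

    from pydantic import BaseModel

    class Probe(BaseModel):
        success: bool = True

    p = Probe()
    data = p.model_dump() if hasattr(p, "model_dump") else p.dict()  # v2 / v1
    assert data == {"success": True}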
@@ -27,7 +27,7 @@ def get_reports_data(report_path: str) -> dict[str, Any]:
            with open(Path(subdir) / file, "r") as f:
                # Load the JSON data from the file
                json_data = json.load(f)
                converted_data = Report.model_validate(json_data)
                converted_data = Report.parse_obj(json_data)
                # get the last directory name in the path as key
                reports_data[subdir_name] = converted_data
@@ -6,13 +6,7 @@ import logging
from typing import Annotated, Any, Dict, List

from agent_protocol_client import Step
from pydantic import (
    BaseModel,
    Field,
    StringConstraints,
    ValidationInfo,
    field_validator,
)
from pydantic import BaseModel, Field, constr, validator

datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"
logger = logging.getLogger(__name__)
@@ -36,20 +30,20 @@ class TestResult(BaseModel):
    cost: float | None = None
    """The (known) cost incurred by the run, e.g. from using paid LLM APIs"""

    @field_validator("fail_reason")
    def success_xor_fail_reason(cls, value, info: ValidationInfo):
        if bool(value) == bool(info.data["success"]):
    @validator("fail_reason")
    def success_xor_fail_reason(cls, v: str | None, values: dict[str, Any]):
        if bool(v) == bool(values["success"]):
            logger.error(
                "Error validating `success ^ fail_reason` on TestResult: "
                f"success = {repr(info.data['success'])}; "
                f"fail_reason = {repr(value)}"
                f"success = {repr(values['success'])}; "
                f"fail_reason = {repr(v)}"
            )
        if value:
            success = info.data["success"]
        if v:
            success = values["success"]
            assert not success, "fail_reason must only be specified if success=False"
        else:
            assert info.data["success"], "fail_reason is required if success=False"
        return value
            assert values["success"], "fail_reason is required if success=False"
        return v


class TestMetrics(BaseModel):
@@ -94,7 +88,7 @@ class Test(BaseModel):
class ReportBase(BaseModel):
    command: str
    completion_time: str | None = None
    benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]
    benchmark_start_time: Annotated[str, constr(regex=datetime_format)]
    metrics: MetricsOverall
    config: Dict[str, str | dict[str, str]]
    agent_git_commit_sha: str | None = None
@@ -1,7 +1,7 @@
"""Model definitions for use in the API"""
from typing import Annotated

from pydantic import BaseModel, StringConstraints
from pydantic import BaseModel, constr

datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"

@@ -37,7 +37,7 @@ class RunDetails(BaseModel):
    run_id: str | None = None
    command: str
    completion_time: str | None = None
    benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]
    benchmark_start_time: Annotated[str, constr(regex=datetime_format)]


class BenchmarkRun(BaseModel):
@@ -45,7 +45,7 @@ def update_regression_tests(
        # if the last 3 tests were successful, add to the regression tests
        test_report.metrics.is_regression = True
        SingletonReportManager().REGRESSION_MANAGER.add_test(
            test_name, test_report.model_dump(include={"difficulty", "data_path"})
            test_name, test_report.dict(include={"difficulty", "data_path"})
        )
@@ -7,9 +7,10 @@ from pydantic import BaseModel, Field

class TaskRequestBody(BaseModel):
    input: str = Field(
        ...,
        min_length=1,
        description="Input prompt for the task.",
        examples=["Write the words you receive to the file 'output.txt'."],
        example="Write the words you receive to the file 'output.txt'.",
    )
    additional_input: Optional[dict[str, Any]] = Field(default_factory=dict)
@@ -220,7 +220,7 @@ class DependencyManager(object):
        labels = {}
        for item in self.items:
            assert item.cls and issubclass(item.cls, BaseChallenge)
            data = item.cls.info.model_dump()
            data = item.cls.info.dict()

            node_name = get_name(item)
            data["name"] = node_name
@@ -135,7 +135,7 @@ def pretty_print_model(model: BaseModel, include_header: bool = True) -> None:
    if include_header:
        # Try to find the ID and/or name attribute of the model
        id, name = None, None
        for attr, value in model.model_dump().items():
        for attr, value in model.dict().items():
            if attr == "id" or attr.endswith("_id"):
                id = value
            if attr.endswith("name"):
@@ -148,8 +148,8 @@ def pretty_print_model(model: BaseModel, include_header: bool = True) -> None:
    )
    indent = " " * 2

    k_col_width = max(len(k) for k in model.model_dump().keys())
    for k, v in model.model_dump().items():
    k_col_width = max(len(k) for k in model.dict().keys())
    for k, v in model.dict().items():
        v_fmt = repr(v)
        if v is None or v == "":
            v_fmt = click.style(v_fmt, fg="black")
400 benchmark/poetry.lock generated
@@ -5,22 +5,17 @@ name = "agent-protocol-client"
version = "1.1.0"
description = "Agent Communication Protocol Client"
optional = false
python-versions = "^3.7"
files = []
develop = false
python-versions = ">=3.7,<4.0"
files = [
    {file = "agent_protocol_client-1.1.0-py3-none-any.whl", hash = "sha256:0e8c6c97244189666ed18e320410abddce8c9dfb75437da1e590bbef3b6268be"},
    {file = "agent_protocol_client-1.1.0.tar.gz", hash = "sha256:aa7e1042de1249477fdc29c2df08a44f2233dade9c02c1279e37c98e9d3a0d72"},
]

[package.dependencies]
aiohttp = "^3.8.4"
pydantic = ">=1.10.5, <3.0.0"
python-dateutil = "^2.8.2"
urllib3 = "^1.25.3"

[package.source]
type = "git"
url = "https://github.com/Significant-Gravitas/agent-protocol.git"
reference = "HEAD"
resolved_reference = "beb098517b0b9e255024d1b57df236f0329f4b1c"
subdirectory = "packages/client/python"
aiohttp = ">=3.8.4,<4.0.0"
pydantic = ">=1.10.5,<2.0.0"
python-dateutil = ">=2.8.2,<3.0.0"
urllib3 = ">=1.25.3,<2.0.0"

[[package]]
name = "aiohttp"
@@ -132,17 +127,6 @@ files = [
[package.dependencies]
frozenlist = ">=1.1.0"

[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
files = [
    {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
    {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]

[[package]]
name = "anyio"
version = "4.2.0"
@@ -1456,101 +1440,85 @@ files = [

[[package]]
name = "multidict"
version = "6.0.5"
version = "6.0.4"
description = "multidict implementation"
optional = false
python-versions = ">=3.7"
files = [
    {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"},
    {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"},
    {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"},
    {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"},
    {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"},
    {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"},
    {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"},
    {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"},
    {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"},
    {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"},
    {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"},
    {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"},
    {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"},
    {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"},
    {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"},
    {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"},
    {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"},
    {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"},
    {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"},
    {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"},
    {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"},
    {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"},
    {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"},
    {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"},
    {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"},
    {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"},
    {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"},
    {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"},
    {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"},
    {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"},
    {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"},
    {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"},
    {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"},
    {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"},
    {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"},
    {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"},
    {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"},
    {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"},
    {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"},
    {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"},
    {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"},
    {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"},
    {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"},
    {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"},
    {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"},
    {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"},
    {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"},
    {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"},
    {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"},
    {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"},
    {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"},
    {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"},
    {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"},
    {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"},
    {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"},
    {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"},
    {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"},
    {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"},
    {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"},
    {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"},
    {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"},
    {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"},
    {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"},
    {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"},
    {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"},
    {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"},
    {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"},
    {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"},
    {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"},
    {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"},
    {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"},
    {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"},
    {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"},
    {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"},
    {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"},
    {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"},
    {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"},
    {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"},
    {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"},
    {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"},
    {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"},
    {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"},
    {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"},
    {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"},
    {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"},
    {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"},
    {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"},
    {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"},
    {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"},
|
||||
{file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
|
||||
{file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
|
||||
{file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"},
|
||||
{file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
|
||||
{file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
|
||||
{file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
|
||||
{file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2080,132 +2048,55 @@ files = [

[[package]]
name = "pydantic"
version = "2.7.4"
description = "Data validation using Python type hints"
version = "1.10.13"
description = "Data validation and settings management using python type hints"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.7"
files = [
    {file = "pydantic-2.7.4-py3-none-any.whl", hash = "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"},
    {file = "pydantic-2.7.4.tar.gz", hash = "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52"},
    {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"},
    {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"},
    {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"},
    {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"},
    {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"},
    {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"},
    {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"},
    {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"},
    {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"},
    {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"},
    {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"},
    {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"},
    {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"},
    {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"},
    {file = "pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"},
    {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"},
    {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"},
    {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"},
    {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"},
    {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"},
    {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"},
    {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"},
    {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"},
    {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"},
    {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"},
    {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"},
    {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"},
    {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"},
    {file = "pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"},
    {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"},
    {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"},
    {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"},
    {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"},
    {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"},
    {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"},
    {file = "pydantic-1.10.13.tar.gz", hash = "sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"},
]

[package.dependencies]
annotated-types = ">=0.4.0"
pydantic-core = "2.18.4"
typing-extensions = ">=4.6.1"
typing-extensions = ">=4.2.0"

[package.extras]
email = ["email-validator (>=2.0.0)"]

[[package]]
name = "pydantic-core"
version = "2.18.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:f76d0ad001edd426b92233d45c746fd08f467d56100fd8f30e9ace4b005266e4"},
    {file = "pydantic_core-2.18.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:59ff3e89f4eaf14050c8022011862df275b552caef8082e37b542b066ce1ff26"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a55b5b16c839df1070bc113c1f7f94a0af4433fcfa1b41799ce7606e5c79ce0a"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4d0dcc59664fcb8974b356fe0a18a672d6d7cf9f54746c05f43275fc48636851"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8951eee36c57cd128f779e641e21eb40bc5073eb28b2d23f33eb0ef14ffb3f5d"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4701b19f7e3a06ea655513f7938de6f108123bf7c86bbebb1196eb9bd35cf724"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00a3f196329e08e43d99b79b286d60ce46bed10f2280d25a1718399457e06be"},
    {file = "pydantic_core-2.18.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97736815b9cc893b2b7f663628e63f436018b75f44854c8027040e05230eeddb"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6891a2ae0e8692679c07728819b6e2b822fb30ca7445f67bbf6509b25a96332c"},
    {file = "pydantic_core-2.18.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bc4ff9805858bd54d1a20efff925ccd89c9d2e7cf4986144b30802bf78091c3e"},
    {file = "pydantic_core-2.18.4-cp310-none-win32.whl", hash = "sha256:1b4de2e51bbcb61fdebd0ab86ef28062704f62c82bbf4addc4e37fa4b00b7cbc"},
    {file = "pydantic_core-2.18.4-cp310-none-win_amd64.whl", hash = "sha256:6a750aec7bf431517a9fd78cb93c97b9b0c496090fee84a47a0d23668976b4b0"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:942ba11e7dfb66dc70f9ae66b33452f51ac7bb90676da39a7345e99ffb55402d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b2ebef0e0b4454320274f5e83a41844c63438fdc874ea40a8b5b4ecb7693f1c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a642295cd0c8df1b86fc3dced1d067874c353a188dc8e0f744626d49e9aa51c4"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f09baa656c904807e832cf9cce799c6460c450c4ad80803517032da0cd062e2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98906207f29bc2c459ff64fa007afd10a8c8ac080f7e4d5beff4c97086a3dabd"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19894b95aacfa98e7cb093cd7881a0c76f55731efad31073db4521e2b6ff5b7d"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fbbdc827fe5e42e4d196c746b890b3d72876bdbf160b0eafe9f0334525119c8"},
    {file = "pydantic_core-2.18.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f85d05aa0918283cf29a30b547b4df2fbb56b45b135f9e35b6807cb28bc47951"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e85637bc8fe81ddb73fda9e56bab24560bdddfa98aa64f87aaa4e4b6730c23d2"},
    {file = "pydantic_core-2.18.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2f5966897e5461f818e136b8451d0551a2e77259eb0f73a837027b47dc95dab9"},
    {file = "pydantic_core-2.18.4-cp311-none-win32.whl", hash = "sha256:44c7486a4228413c317952e9d89598bcdfb06399735e49e0f8df643e1ccd0558"},
    {file = "pydantic_core-2.18.4-cp311-none-win_amd64.whl", hash = "sha256:8a7164fe2005d03c64fd3b85649891cd4953a8de53107940bf272500ba8a788b"},
    {file = "pydantic_core-2.18.4-cp311-none-win_arm64.whl", hash = "sha256:4e99bc050fe65c450344421017f98298a97cefc18c53bb2f7b3531eb39bc7805"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6f5c4d41b2771c730ea1c34e458e781b18cc668d194958e0112455fff4e402b2"},
    {file = "pydantic_core-2.18.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2fdf2156aa3d017fddf8aea5adfba9f777db1d6022d392b682d2a8329e087cef"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4748321b5078216070b151d5271ef3e7cc905ab170bbfd27d5c83ee3ec436695"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:847a35c4d58721c5dc3dba599878ebbdfd96784f3fb8bb2c356e123bdcd73f34"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c40d4eaad41f78e3bbda31b89edc46a3f3dc6e171bf0ecf097ff7a0ffff7cb1"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:21a5e440dbe315ab9825fcd459b8814bb92b27c974cbc23c3e8baa2b76890077"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01dd777215e2aa86dfd664daed5957704b769e726626393438f9c87690ce78c3"},
    {file = "pydantic_core-2.18.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4b06beb3b3f1479d32befd1f3079cc47b34fa2da62457cdf6c963393340b56e9"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:564d7922e4b13a16b98772441879fcdcbe82ff50daa622d681dd682175ea918c"},
    {file = "pydantic_core-2.18.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0eb2a4f660fcd8e2b1c90ad566db2b98d7f3f4717c64fe0a83e0adb39766d5b8"},
    {file = "pydantic_core-2.18.4-cp312-none-win32.whl", hash = "sha256:8b8bab4c97248095ae0c4455b5a1cd1cdd96e4e4769306ab19dda135ea4cdb07"},
    {file = "pydantic_core-2.18.4-cp312-none-win_amd64.whl", hash = "sha256:14601cdb733d741b8958224030e2bfe21a4a881fb3dd6fbb21f071cabd48fa0a"},
    {file = "pydantic_core-2.18.4-cp312-none-win_arm64.whl", hash = "sha256:c1322d7dd74713dcc157a2b7898a564ab091ca6c58302d5c7b4c07296e3fd00f"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:823be1deb01793da05ecb0484d6c9e20baebb39bd42b5d72636ae9cf8350dbd2"},
    {file = "pydantic_core-2.18.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebef0dd9bf9b812bf75bda96743f2a6c5734a02092ae7f721c048d156d5fabae"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1d6df168efb88d7d522664693607b80b4080be6750c913eefb77e34c12c71a"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f9899c94762343f2cc2fc64c13e7cae4c3cc65cdfc87dd810a31654c9b7358cc"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99457f184ad90235cfe8461c4d70ab7dd2680e28821c29eca00252ba90308c78"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18f469a3d2a2fdafe99296a87e8a4c37748b5080a26b806a707f25a902c040a8"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cdf28938ac6b8b49ae5e92f2735056a7ba99c9b110a474473fd71185c1af5d"},
    {file = "pydantic_core-2.18.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:938cb21650855054dc54dfd9120a851c974f95450f00683399006aa6e8abb057"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:44cd83ab6a51da80fb5adbd9560e26018e2ac7826f9626bc06ca3dc074cd198b"},
    {file = "pydantic_core-2.18.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:972658f4a72d02b8abfa2581d92d59f59897d2e9f7e708fdabe922f9087773af"},
    {file = "pydantic_core-2.18.4-cp38-none-win32.whl", hash = "sha256:1d886dc848e60cb7666f771e406acae54ab279b9f1e4143babc9c2258213daa2"},
    {file = "pydantic_core-2.18.4-cp38-none-win_amd64.whl", hash = "sha256:bb4462bd43c2460774914b8525f79b00f8f407c945d50881568f294c1d9b4443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:44a688331d4a4e2129140a8118479443bd6f1905231138971372fcde37e43528"},
    {file = "pydantic_core-2.18.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a2fdd81edd64342c85ac7cf2753ccae0b79bf2dfa063785503cb85a7d3593223"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86110d7e1907ab36691f80b33eb2da87d780f4739ae773e5fc83fb272f88825f"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:46387e38bd641b3ee5ce247563b60c5ca098da9c56c75c157a05eaa0933ed154"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:123c3cec203e3f5ac7b000bd82235f1a3eced8665b63d18be751f115588fea30"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1803ac5c32ec324c5261c7209e8f8ce88e83254c4e1aebdc8b0a39f9ddb443"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53db086f9f6ab2b4061958d9c276d1dbe3690e8dd727d6abf2321d6cce37fa94"},
    {file = "pydantic_core-2.18.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:abc267fa9837245cc28ea6929f19fa335f3dc330a35d2e45509b6566dc18be23"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a0d829524aaefdebccb869eed855e2d04c21d2d7479b6cada7ace5448416597b"},
    {file = "pydantic_core-2.18.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:509daade3b8649f80d4e5ff21aa5673e4ebe58590b25fe42fac5f0f52c6f034a"},
    {file = "pydantic_core-2.18.4-cp39-none-win32.whl", hash = "sha256:ca26a1e73c48cfc54c4a76ff78df3727b9d9f4ccc8dbee4ae3f73306a591676d"},
    {file = "pydantic_core-2.18.4-cp39-none-win_amd64.whl", hash = "sha256:c67598100338d5d985db1b3d21f3619ef392e185e71b8d52bceacc4a7771ea7e"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:574d92eac874f7f4db0ca653514d823a0d22e2354359d0759e3f6a406db5d55d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1f4d26ceb5eb9eed4af91bebeae4b06c3fb28966ca3a8fb765208cf6b51102ab"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77450e6d20016ec41f43ca4a6c63e9fdde03f0ae3fe90e7c27bdbeaece8b1ed4"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d323a01da91851a4f17bf592faf46149c9169d68430b3146dcba2bb5e5719abc"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43d447dd2ae072a0065389092a231283f62d960030ecd27565672bd40746c507"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:578e24f761f3b425834f297b9935e1ce2e30f51400964ce4801002435a1b41ef"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:81b5efb2f126454586d0f40c4d834010979cb80785173d1586df845a632e4e6d"},
    {file = "pydantic_core-2.18.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ab86ce7c8f9bea87b9d12c7f0af71102acbf5ecbc66c17796cff45dae54ef9a5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:90afc12421df2b1b4dcc975f814e21bc1754640d502a2fbcc6d41e77af5ec312"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:51991a89639a912c17bef4b45c87bd83593aee0437d8102556af4885811d59f5"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:293afe532740370aba8c060882f7d26cfd00c94cae32fd2e212a3a6e3b7bc15e"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48ece5bde2e768197a2d0f6e925f9d7e3e826f0ad2271120f8144a9db18d5c8"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eae237477a873ab46e8dd748e515c72c0c804fb380fbe6c85533c7de51f23a8f"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:834b5230b5dfc0c1ec37b2fda433b271cbbc0e507560b5d1588e2cc1148cf1ce"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e858ac0a25074ba4bce653f9b5d0a85b7456eaddadc0ce82d3878c22489fa4ee"},
    {file = "pydantic_core-2.18.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2fd41f6eff4c20778d717af1cc50eca52f5afe7805ee530a4fbd0bae284f16e9"},
    {file = "pydantic_core-2.18.4.tar.gz", hash = "sha256:ec3beeada09ff865c344ff3bc2f427f5e6c26401cc6113d77e372c3fdac73864"},
]

[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-settings"
version = "2.3.4"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_settings-2.3.4-py3-none-any.whl", hash = "sha256:11ad8bacb68a045f00e4f862c7a718c8a9ec766aa8fd4c32e39a0594b207b53a"},
    {file = "pydantic_settings-2.3.4.tar.gz", hash = "sha256:c5802e3d62b78e82522319bbc9b8f8ffb28ad1c988a99311d04f2a6051fca0a7"},
]

[package.dependencies]
pydantic = ">=2.7.0"
python-dotenv = ">=0.21.0"

[package.extras]
toml = ["tomli (>=2.0.1)"]
yaml = ["pyyaml (>=6.0.1)"]
dotenv = ["python-dotenv (>=0.10.4)"]
email = ["email-validator (>=1.0.3)"]

[[package]]
name = "pyflakes"
@@ -2301,21 +2192,21 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no

[[package]]
name = "pytest-asyncio"
version = "0.23.7"
version = "0.21.1"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.7"
files = [
    {file = "pytest_asyncio-0.23.7-py3-none-any.whl", hash = "sha256:009b48127fbe44518a547bddd25611551b0e43ccdbf1e67d12479f569832c20b"},
    {file = "pytest_asyncio-0.23.7.tar.gz", hash = "sha256:5f5c72948f4c49e7db4f29f2521d4031f1c27f86e57b046126654083d4770268"},
    {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"},
    {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"},
]

[package.dependencies]
pytest = ">=7.0.0,<9"
pytest = ">=7.0.0"

[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]

[[package]]
name = "pytest-cov"
@@ -2737,6 +2628,31 @@ exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
trio = ">=0.11"
wsproto = ">=0.14"

[[package]]
name = "types-requests"
version = "2.31.0.6"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.7"
files = [
    {file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"},
    {file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"},
]

[package.dependencies]
types-urllib3 = "*"

[[package]]
name = "types-urllib3"
version = "1.26.25.14"
description = "Typing stubs for urllib3"
optional = false
python-versions = "*"
files = [
    {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"},
    {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"},
]

[[package]]
name = "typing-extensions"
version = "4.9.0"
@@ -2948,4 +2864,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "26bd75befe5223095b65be293086edf52f34f9043e49107c80a105dc0387dd6a"
content-hash = "4a980e6d8f54a2f7f6a3c55d4f40ac3a4b27b5ac6573dd2a39e11213a4b126dd"

@@ -9,47 +9,43 @@ packages = [{ include = "agbenchmark" }]

[tool.poetry.dependencies]
python = "^3.10"
agent-protocol-client = {git = "https://github.com/Significant-Gravitas/agent-protocol.git", subdirectory = "packages/client/python"}
click = "^8.1.3"
click-default-group = "^1.2.4"
colorama = "^0.4.6"
fastapi = "^0.109.1"
gitpython = "^3.1.32"
httpx = "^0.24.0"
matplotlib = "^3.7.2"
# Multidict 6.0.4 fails to install and is a dependency of aiohttp, which is a dependency of agent-protocol-client
multidict = "^6.0.5"
networkx = "^3.1"
pytest = "^7.3.2"
requests = "^2.31.0"
openai = "^1.7.2"
pandas = "^2.0.3"
pydantic = "^1.10.9"
python-dotenv = "^1.0.0"
click = "^8.1.3"
types-requests = "^2.31.0.1"
pexpect = "^4.8.0"
psutil = "^5.9.5"
pydantic = "^2.7.2"
pydantic-settings = "^2.3.4"
pytest = "^7.3.2"
pytest-asyncio = "^0.23.3"
python-dotenv = "^1.0.0"
python-multipart = "^0.0.7"
matplotlib = "^3.7.2"
pandas = "^2.0.3"
gitpython = "^3.1.32"
networkx = "^3.1"
colorama = "^0.4.6"
pyvis = "^0.3.2"
requests = "^2.31.0"
selenium = "^4.11.2"
tabulate = "^0.9.0"
pytest-asyncio = "^0.21.1"
uvicorn = "^0.23.2"
fastapi = "^0.109.1"
python-multipart = "^0.0.7"
toml = "^0.10.2"
uvicorn = ">=0.23.2,<1"
# helicone = "^1.0.9" # incompatible with openai@^1.0.0
httpx = "^0.24.0"
agent-protocol-client = "^1.1.0"
click-default-group = "^1.2.4"
tabulate = "^0.9.0"

[tool.poetry.group.dev.dependencies]
black = "^23.12.1"
flake8 = "^7.0.0"
isort = "^5.13.1"
pyright = "^1.1.364"
pre-commit = "^3.3.3"

# Testing
pytest-cov = "^5.0.0"

# Dependencies for stuff in reports/
pandas = "^2.0.3"
gspread = "^5.10.0"
oauth2client = "^4.1.3"
pre-commit = "^3.3.3"
pytest-cov = "^5.0.0"

[tool.poetry.scripts]
agbenchmark = "agbenchmark.__main__:cli"

@@ -1,24 +1,20 @@
#!/usr/bin/env python3

from pathlib import Path

import click

from agbenchmark.reports.processing.report_types import Report


@click.command()
@click.argument(
    "report_json_file", type=click.Path(exists=True, dir_okay=False, path_type=Path)
)
def print_markdown_report(report_json_file: Path):
@click.argument("report_json_file", type=click.Path(exists=True, dir_okay=False))
def print_markdown_report(report_json_file: str):
    """
    Generates a Markdown report from a given report.json file.

    :param report_json_file: Path to the report.json file.
    :return: A string containing the Markdown formatted report.
    """
    report = Report.model_validate_json(report_json_file.read_text())
    report = Report.parse_file(report_json_file)

    # Header and metadata
    click.echo("# Benchmark Report")

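The hunk above is part of a pydantic v1 → v2 migration: `Report.parse_file(path)` is replaced by an explicit file read plus `Report.model_validate_json(...)`, and the click argument now yields a `Path`. A minimal sketch of the API change, using a stand-in model rather than the real `Report`:

```python
from pydantic import BaseModel


class Report(BaseModel):  # stand-in; the real model lives in report_types.py
    benchmark_start_time: str


raw = '{"benchmark_start_time": "2024-01-01T00:00:00"}'

# pydantic v1 (old code): Report.parse_file(path) read and parsed the file itself.
# pydantic v2 (new code): file I/O is explicit; only JSON parsing is built in.
report = Report.model_validate_json(raw)
print(report.benchmark_start_time)
```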
@@ -14,7 +14,7 @@ from pydantic import BaseModel, Field
class Metrics(BaseModel):
    difficulty: str
    success: bool
    success_percent: float = Field(alias="success_%")
    success_percent: float = Field(..., alias="success_%")
    run_time: Optional[str] = None
    fail_reason: Optional[str] = None
    attempted: Optional[bool] = None
@@ -100,7 +100,7 @@ def get_reports():
            # Load the JSON data from the file
            json_data = json.load(f)
            print(f"Processing {report_file}")
            report = Report.model_validate(json_data)
            report = Report.parse_obj(json_data)

            for test_name, test_data in report.tests.items():
                test_json = {

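Both hunks above belong to the same migration: `Field(..., alias=...)` becomes `Field(alias=...)` (an annotated field without a default is required either way), and `parse_obj` becomes `model_validate`. A small sketch of how the aliased field behaves under pydantic v2, with made-up values:

```python
from pydantic import BaseModel, Field


class Metrics(BaseModel):
    difficulty: str
    success: bool
    # populated from the "success_%" key of report.json
    success_percent: float = Field(alias="success_%")


m = Metrics.model_validate({"difficulty": "basic", "success": True, "success_%": 50.0})
print(m.success_percent)  # 50.0
```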
@@ -1,109 +0,0 @@
import json
import os
import requests
import sys
import time
from typing import Dict, List, Tuple


def get_environment_variables() -> Tuple[str, str, str, str, str]:
    """Retrieve and return necessary environment variables."""
    try:
        with open(os.environ["GITHUB_EVENT_PATH"]) as f:
            event = json.load(f)

        sha = event["pull_request"]["head"]["sha"]

        return (
            os.environ["GITHUB_API_URL"],
            os.environ["GITHUB_REPOSITORY"],
            sha,
            os.environ["GITHUB_TOKEN"],
            os.environ["GITHUB_RUN_ID"],
        )
    except KeyError as e:
        print(f"Error: Missing required environment variable or event data: {e}")
        sys.exit(1)


def make_api_request(url: str, headers: Dict[str, str]) -> Dict:
    """Make an API request and return the JSON response."""
    try:
        print("Making API request to:", url)
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        print(f"Error: API request failed. {e}")
        sys.exit(1)


def process_check_runs(check_runs: List[Dict]) -> Tuple[bool, bool]:
    """Process check runs and return their status."""
    runs_in_progress = False
    all_others_passed = True

    for run in check_runs:
        if str(run["name"]) != "Check PR Status":
            status = run["status"]
            conclusion = run["conclusion"]

            if status == "completed":
                if conclusion not in ["success", "skipped", "neutral"]:
                    all_others_passed = False
                    print(
                        f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}"
                    )
            else:
                runs_in_progress = True
                print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
                all_others_passed = False
        else:
            print(
                f"Skipping check run {run['name']} (ID: {run['id']}) as it is the current run."
            )

    return runs_in_progress, all_others_passed


def main():
    api_url, repo, sha, github_token, current_run_id = get_environment_variables()

    endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
    headers = {
        "Accept": "application/vnd.github.v3+json",
    }
    if github_token:
        headers["Authorization"] = f"token {github_token}"

    print(f"Current run ID: {current_run_id}")

    while True:
        data = make_api_request(endpoint, headers)

        check_runs = data["check_runs"]

        print("Processing check runs...")

        print(check_runs)

        runs_in_progress, all_others_passed = process_check_runs(check_runs)

        if not runs_in_progress:
            break

        print(
            "Some check runs are still in progress. Waiting 3 minutes before checking again..."
        )
        time.sleep(180)

    if all_others_passed:
        print("All other completed check runs have passed. This check passes.")
        sys.exit(0)
    else:
        print("Some check runs have failed or have not completed. This check fails.")
        sys.exit(1)


if __name__ == "__main__":
    main()

cli.py (11 changes)
@@ -69,8 +69,6 @@ d88P 888 "Y88888 "Y888 "Y88P" "Y8888P88 888 888
            bold=True,
        )
    )
else:
    click.echo(click.style("🎉 Setup completed!\n", fg="green"))


@cli.group()
@@ -151,11 +149,10 @@ def start(agent_name: str, no_setup: bool):
        setup_process.wait()
        click.echo()

    # FIXME: Doesn't work: Command not found: agbenchmark
    # subprocess.Popen(["./run_benchmark", "serve"], cwd=agent_dir)
    # click.echo("⌛ (Re)starting benchmark server...")
    # wait_until_conn_ready(8080)
    # click.echo()
    subprocess.Popen(["./run_benchmark", "serve"], cwd=agent_dir)
    click.echo("⌛ (Re)starting benchmark server...")
    wait_until_conn_ready(8080)
    click.echo()

    subprocess.Popen(["./run"], cwd=agent_dir)
    click.echo(f"⌛ (Re)starting agent '{agent_name}'...")

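For context, `wait_until_conn_ready(8080)` blocks until the freshly started benchmark server accepts connections on the given port. The helper itself is not shown in this hunk; the sketch below is a plausible reconstruction, not the actual `cli.py` implementation (the timeout and polling interval are assumptions):

```python
import socket
import time


def wait_until_conn_ready(port: int, timeout: float = 30.0) -> None:
    """Poll localhost:port until a TCP connection succeeds or the timeout expires."""
    deadline = time.monotonic() + timeout
    while True:
        try:
            with socket.create_connection(("localhost", port), timeout=1):
                return  # something is listening
        except OSError:
            if time.monotonic() > deadline:
                raise TimeoutError(f"nothing listening on port {port}")
            time.sleep(0.5)
```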
docs/content/AutoGPT/configuration/imagegen.md (new file, 63 additions)
@@ -0,0 +1,63 @@
# 🖼 Image Generation configuration

| Config variable | Values | |
| ---------------- | ------------------------------- | -------------------- |
| `IMAGE_PROVIDER` | `dalle` `huggingface` `sdwebui` | **default: `dalle`** |

## DALL-e

In `.env`, make sure `IMAGE_PROVIDER` is commented (or set to `dalle`):

```ini
# IMAGE_PROVIDER=dalle # this is the default
```

Further optional configuration:

| Config variable | Values | |
| ---------------- | ------------------ | -------------- |
| `IMAGE_SIZE` | `256` `512` `1024` | default: `256` |

## Hugging Face

To use text-to-image models from Hugging Face, you need a Hugging Face API token.
Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens)

Once you have an API token, uncomment and adjust these variables in your `.env`:

```ini
IMAGE_PROVIDER=huggingface
HUGGINGFACE_API_TOKEN=your-huggingface-api-token
```

Further optional configuration:

| Config variable | Values | |
| ------------------------- | ---------------------- | ---------------------------------------- |
| `HUGGINGFACE_IMAGE_MODEL` | see [available models] | default: `CompVis/stable-diffusion-v1-4` |

[available models]: https://huggingface.co/models?pipeline_tag=text-to-image

## Stable Diffusion WebUI

It is possible to use your own self-hosted Stable Diffusion WebUI with AutoGPT:

```ini
IMAGE_PROVIDER=sdwebui
```

!!! note
    Make sure you are running WebUI with `--api` enabled.

Further optional configuration:

| Config variable | Values | |
| --------------- | ----------------------- | -------------------------------- |
| `SD_WEBUI_URL` | URL to your WebUI | default: `http://127.0.0.1:7860` |
| `SD_WEBUI_AUTH` | `{username}:{password}` | *Note: do not copy the braces!* |

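With `--api` enabled, the WebUI exposes the JSON endpoint that the `sdwebui` provider talks to. A minimal direct call looks roughly like this (the `/sdapi/v1/txt2img` route and base64-encoded response come from the A1111 WebUI API; treat the payload as illustrative):

```python
import base64

import requests

# assumes a local WebUI started with --api; see SD_WEBUI_URL above
resp = requests.post(
    "http://127.0.0.1:7860/sdapi/v1/txt2img",
    json={"prompt": "a lighthouse at dusk", "steps": 20},
    timeout=120,
)
resp.raise_for_status()

# the API returns a list of base64-encoded PNGs
with open("out.png", "wb") as f:
    f.write(base64.b64decode(resp.json()["images"][0]))
```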
## Selenium

```shell
sudo Xvfb :10 -ac -screen 0 1024x768x24 & DISPLAY=:10 <YOUR_CLIENT>
```
@@ -1,18 +1,21 @@
# Configuration

Configuration of sensitive settings such as API credentials is done through environment variables.
You can set configuration variables via the `.env` file. If you don't have a `.env` file, create a copy of `.env.template` in your `AutoGPT` folder and name it `.env`.
Configuration is controlled through the `Config` object. You can set configuration variables via the `.env` file. If you don't have a `.env` file, create a copy of `.env.template` in your `AutoGPT` folder and name it `.env`.

## Environment Variables

- `AUDIO_TO_TEXT_PROVIDER`: Audio-to-text provider. The only current option is `huggingface`. Default: huggingface
- `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y
- `ANTHROPIC_API_KEY`: Set this if you want to use Anthropic models with AutoGPT
- `AZURE_CONFIG_FILE`: Location of the Azure Config file relative to the AutoGPT root directory. Default: azure.yaml
- `COMPONENT_CONFIG_FILE`: Path to the component configuration file (json) for an agent. Optional
- `DISABLED_COMMANDS`: Commands to disable. Use comma-separated names of commands. See the list of commands from built-in components [here](../../forge/components/components.md). Default: None
- `BROWSE_CHUNK_MAX_LENGTH`: When browsing a website, the length of the chunks to summarize. Default: 3000
- `BROWSE_SPACY_LANGUAGE_MODEL`: [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. Default: en_core_web_sm
- `CHAT_MESSAGES_ENABLED`: Enable chat messages. Optional
- `DISABLED_COMMANDS`: Commands to disable. Use comma-separated names of commands. See the list of commands from built-in components [here](../../forge/components/built-in-components.md). Default: None
- `ELEVENLABS_API_KEY`: ElevenLabs API Key. Optional.
- `ELEVENLABS_VOICE_ID`: ElevenLabs Voice ID. Optional.
- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: `text-embedding-3-small`
- `EXECUTE_LOCAL_COMMANDS`: Whether shell commands should be executed locally. Default: False
- `EXIT_KEY`: Exit key accepted to exit. Default: n
- `FAST_LLM`: LLM Model to use for most tasks. Default: `gpt-3.5-turbo-0125`
- `GITHUB_API_KEY`: [Github API Key](https://github.com/settings/tokens). Optional.
@@ -20,16 +23,26 @@ You can set configuration variables via the `.env` file. If you don't have a `.e
- `GOOGLE_API_KEY`: Google API key. Optional.
- `GOOGLE_CUSTOM_SEARCH_ENGINE_ID`: [Google custom search engine ID](https://programmablesearchengine.google.com/controlpanel/all). Optional.
- `GROQ_API_KEY`: Set this if you want to use Groq models with AutoGPT
- `HEADLESS_BROWSER`: Use a headless browser while AutoGPT uses a web browser. Setting to `False` will allow you to see AutoGPT operate the browser. Default: True
- `HUGGINGFACE_API_TOKEN`: HuggingFace API token, to be used for both image generation and audio to text. Optional.
- `HUGGINGFACE_AUDIO_TO_TEXT_MODEL`: HuggingFace audio-to-text model. Default: CompVis/stable-diffusion-v1-4
- `HUGGINGFACE_IMAGE_MODEL`: HuggingFace model to use for image generation. Default: CompVis/stable-diffusion-v1-4
- `LLAMAFILE_API_BASE`: Llamafile API base URL. Default: `http://localhost:8080/v1`
- `OPENAI_API_KEY`: Set this if you want to use OpenAI models; [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `IMAGE_PROVIDER`: Image provider. Options are `dalle`, `huggingface`, and `sdwebui`. Default: dalle
- `IMAGE_SIZE`: Default size of image to generate. Default: 256
- `OPENAI_API_KEY`: *REQUIRED* - Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
- `RESTRICT_TO_WORKSPACE`: Restrict file reading and writing to the workspace directory. Default: True
- `SD_WEBUI_AUTH`: Stable Diffusion Web UI username:password pair. Optional.
- `SD_WEBUI_URL`: Stable Diffusion Web UI URL. Default: http://localhost:7860
- `SHELL_ALLOWLIST`: List of shell commands that ARE allowed to be executed by AutoGPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `allowlist`. Default: None
- `SHELL_COMMAND_CONTROL`: Whether to use `allowlist` or `denylist` to determine what shell commands can be executed (Default: denylist)
- `SHELL_DENYLIST`: List of shell commands that ARE NOT allowed to be executed by AutoGPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `denylist`. Default: sudo,su
- `SMART_LLM`: LLM Model to use for "smart" tasks. Default: `gpt-4-turbo-preview`
- `STREAMELEMENTS_VOICE`: StreamElements voice to use. Default: Brian
- `TEMPERATURE`: Value of temperature given to OpenAI. Value from 0 to 2. Lower is more deterministic, higher is more random. See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
- `TEXT_TO_SPEECH_PROVIDER`: Text-to-Speech Provider. Options are `gtts`, `macos`, `elevenlabs`, and `streamelements`. Default: gtts
- `USER_AGENT`: User-Agent given when browsing websites. Default: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
- `USE_AZURE`: Use Azure's LLM. Default: False
- `USE_WEB_BROWSER`: Which web browser to use. Options are `chrome`, `firefox`, `safari` or `edge`. Default: chrome
- `WIPE_REDIS_ON_START`: Wipes data / index on start. Default: True

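Since everything above is a plain environment variable, a `.env` can be sanity-checked from Python the same way AutoGPT reads it. A small illustrative sketch using `python-dotenv`; the fallback values here are just the documented defaults, not pulled from the code:

```python
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

smart_llm = os.getenv("SMART_LLM", "gpt-4-turbo-preview")
fast_llm = os.getenv("FAST_LLM", "gpt-3.5-turbo-0125")
restricted = os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"

print(f"SMART_LLM={smart_llm} FAST_LLM={fast_llm} restricted={restricted}")
```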
@@ -2,36 +2,36 @@

!!! note
    This section is optional. Use the official Google API if search attempts return
    error 429. To use the `google` command, you need to set up your
    Google API key in your environment variables or pass it with configuration to the [`WebSearchComponent`](../../forge/components/built-in-components.md).
    error 429. To use the `google_official_search` command, you need to set up your
    Google API key in your environment variables.

Create your project:

1. Go to the [Google Cloud Console](https://console.cloud.google.com/).
1. If you don't already have an account, create one and log in
1. Create a new project by clicking on the *Select a Project* dropdown at the top of the
2. If you don't already have an account, create one and log in
3. Create a new project by clicking on the *Select a Project* dropdown at the top of the
   page and clicking *New Project*
1. Give it a name and click *Create*
1. Set up a custom search API and add to your .env file:
    1. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard)
    1. Click *Enable APIs and Services*
    1. Search for *Custom Search API* and click on it
    1. Click *Enable*
    1. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page
    1. Click *Create Credentials*
    1. Choose *API Key*
    1. Copy the API key
    1. Set it as the `GOOGLE_API_KEY` in your `.env` file
    1. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com)
4. Give it a name and click *Create*
5. Set up a custom search API and add to your .env file:
5. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard)
6. Click *Enable APIs and Services*
7. Search for *Custom Search API* and click on it
8. Click *Enable*
9. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page
10. Click *Create Credentials*
11. Choose *API Key*
12. Copy the API key
13. Set it as the `GOOGLE_API_KEY` in your `.env` file
14. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com)
    the Custom Search API on your project. (Might need to wait a few minutes to propagate.)

Set up a custom search engine and add to your .env file:
    1. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page
    1. Click *Add*
    1. Set up your search engine by following the prompts.
15. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page
16. Click *Add*
17. Set up your search engine by following the prompts.
    You can choose to search the entire web or specific sites
    1. Once you've created your search engine, click on *Control Panel*
    1. Click *Basics*
    1. Copy the *Search engine ID*
    1. Set it as the `CUSTOM_SEARCH_ENGINE_ID` in your `.env` file
18. Once you've created your search engine, click on *Control Panel*
19. Click *Basics*
20. Copy the *Search engine ID*
21. Set it as the `CUSTOM_SEARCH_ENGINE_ID` in your `.env` file

_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to profit from up to 10K daily searches._
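Once both values are set, you can sanity-check them with a short script (a sketch using the public Custom Search JSON API endpoint directly, not AutoGPT's own code):

```py
# Sketch: verify GOOGLE_API_KEY and CUSTOM_SEARCH_ENGINE_ID work.
# Requires the `requests` package and both variables in your environment.
import os
import requests

params = {
    "key": os.environ["GOOGLE_API_KEY"],
    "cx": os.environ["CUSTOM_SEARCH_ENGINE_ID"],
    "q": "AutoGPT",
}
resp = requests.get("https://www.googleapis.com/customsearch/v1", params=params)
resp.raise_for_status()  # raises on 4xx/5xx, e.g. if the API isn't enabled yet
for item in resp.json().get("items", [])[:3]:
    print(item["title"], "-", item["link"])
```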
@@ -71,25 +71,18 @@
    - ./logs:/app/logs
    ## uncomment following lines if you want to make use of these files
    ## you must have them existing in the same folder as this docker-compose.yml
    ## component configuration file
    #- type: bind
    #  source: ./config.json
    #  target: /app/config.json
```
</details>

1. Download [`.env.template`][.env.template] and save it as `.env` in the AutoGPT folder.
2. Follow the standard [configuration instructions](../index.md#completing-the-setup),
4. Download [`.env.template`][.env.template] and save it as `.env` in the AutoGPT folder.
5. Follow the standard [configuration instructions](../index.md#completing-the-setup),
   from step 3 onwards and excluding `poetry install` steps.
3. Pull the latest image from [Docker Hub]
6. Pull the latest image from [Docker Hub]

    ```shell
    docker pull significantgravitas/auto-gpt
    ```

4. _Optional: mount configuration file._
    If you have a component configuration file, for example `config.json`, place it in the `autogpt/data/` directory. Or place it in `autogpt/` and uncomment the lines in `docker-compose.yml` that mount it.
    To learn more about configuration, see [Component configuration](../../forge/components/components.md#json-configuration)

!!! note "Docker only supports headless browsing"
    AutoGPT uses a browser in headless mode by default: `HEADLESS_BROWSER=True`.
@@ -50,13 +50,7 @@ Since we don't ship AutoGPT as a desktop application, you'll need to download th

### Completing the Setup

Once you have cloned or downloaded the project, you can find the AutoGPT Agent in the
`autogpt/` folder.
Inside this folder you can configure the AutoGPT application with an `.env` file and (optionally) a JSON configuration file:

- `.env` for environment variables, which are mostly used for sensitive data like API keys
- a JSON configuration file to customize certain features of AutoGPT's [Components](../../forge/components/introduction.md)

See the [Configuration](../configuration/options.md) reference for a list of available environment variables.
`autogpt/` folder. In this folder:

1. Find the file named `.env.template`. This file may
   be hidden by default in some operating systems due to the dot prefix. To reveal
@@ -77,9 +71,6 @@ See the [Configuration](../configuration/options.md) reference for a list of ava
6. Save and close the `.env` file.
7. _Optional: run `poetry install` to install all required dependencies._ The
   application also checks for and installs any required dependencies when it starts.
8. _Optional: configure the JSON file (e.g. `config.json`) with your desired settings._
   The application will use default settings if you don't provide a JSON configuration file.
   Learn how to [set up the JSON configuration file](../../forge/components/components.md#json-configuration)

You should now be able to explore the CLI (`./autogpt.sh --help`) and run the application.

@@ -88,6 +79,7 @@ See the [user guide](../usage.md) for further instructions.
[show hidden files/Windows]: https://support.microsoft.com/en-us/windows/view-hidden-files-and-folders-in-windows-97fbc472-c603-9d90-91d0-1166d1d9f4b5
[show hidden files/macOS]: https://www.pcmag.com/how-to/how-to-access-your-macs-hidden-files

## Setting up LLM providers

You can use AutoGPT with any of the following LLM providers.
@@ -198,66 +190,3 @@ If you don't know which to choose, you can safely go with OpenAI*.

[groq/api-keys]: https://console.groq.com/keys
[groq/models]: https://console.groq.com/docs/models


### Llamafile

With llamafile you can run models locally, which means no need to set up billing,
and guaranteed data privacy.

For more information and in-depth documentation, check out the [llamafile documentation].

!!! warning
    At the moment, llamafile only serves one model at a time. This means you cannot
    set `SMART_LLM` and `FAST_LLM` to two different llamafile models.

!!! warning
    Due to the issues linked below, llamafiles don't work on WSL. To use a llamafile
    with AutoGPT in WSL, you will have to run the llamafile in Windows (outside WSL).

    <details>
    <summary>Instructions</summary>

    1. Get the `llamafile/serve.py` script through one of these two ways:
        1. Clone the AutoGPT repo somewhere in your Windows environment,
           with the script located at `autogpt/scripts/llamafile/serve.py`
        2. Download just the [serve.py] script somewhere in your Windows environment
    2. Make sure you have `click` installed: `pip install click`
    3. Run `ip route | grep default | awk '{print $3}'` *inside WSL* to get the address
       of the WSL host machine
    4. Run `python3 serve.py --host {WSL_HOST_ADDR}`, where `{WSL_HOST_ADDR}`
       is the address you found at step 3.
       If port 8080 is taken, also specify a different port using `--port {PORT}`.
    5. In WSL, set `LLAMAFILE_API_BASE=http://{WSL_HOST_ADDR}:8080/v1` in your `.env`.
    6. Follow the rest of the regular instructions below.

    [serve.py]: https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt/scripts/llamafile/serve.py
    </details>

    * [Mozilla-Ocho/llamafile#356](https://github.com/Mozilla-Ocho/llamafile/issues/356)
    * [Mozilla-Ocho/llamafile#100](https://github.com/Mozilla-Ocho/llamafile/issues/100)

!!! note
    These instructions will download and use `mistral-7b-instruct-v0.2.Q5_K_M.llamafile`.
    `mistral-7b-instruct-v0.2` is currently the only tested and supported model.
    If you want to try other models, you'll have to add them to `LlamafileModelName` in
    [`llamafile.py`][forge/llamafile.py].
    For optimal results, you may also have to add some logic to adapt the message format,
    like `LlamafileProvider._adapt_chat_messages_for_mistral_instruct(..)` does.

1. Run the llamafile serve script:
   ```shell
   python3 ./scripts/llamafile/serve.py
   ```
   The first time this is run, it will download a file containing the model + runtime,
   which may take a while and use a few gigabytes of disk space.

   To force GPU acceleration, add `--use-gpu` to the command.

3. In `.env`, set `SMART_LLM`/`FAST_LLM` or both to `mistral-7b-instruct-v0.2`

4. If the server is running on a different address than `http://localhost:8080/v1`,
   set `LLAMAFILE_API_BASE` in `.env` to the right base URL

[llamafile documentation]: https://github.com/Mozilla-Ocho/llamafile#readme
[forge/llamafile.py]: https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/forge/llm/providers/llamafile/llamafile.py
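Once the server is up, you can check the endpoint with a quick sketch using the `openai` Python client (v1+), since llamafile exposes an OpenAI-compatible API; adjust the base URL if you changed `LLAMAFILE_API_BASE`:

```py
# Sketch: send one chat request to the local llamafile server.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8080/v1",
    api_key="sk-no-key-required",  # llamafile doesn't check the key
)
resp = client.chat.completions.create(
    model="mistral-7b-instruct-v0.2",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(resp.choices[0].message.content)
```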
@@ -60,6 +60,10 @@ Options:
  --debug                      Enable Debug Mode
  --gpt3only                   Enable GPT3.5 Only Mode
  --gpt4only                   Enable GPT4 Only Mode
  -b, --browser-name TEXT      Specifies which web-browser to use when
                               using selenium to scrape the web.
  --allow-downloads            Dangerous: Allows AutoGPT to download files
                               natively.
  --skip-news                  Specifies whether to suppress the output of
                               latest news on startup.
  --install-plugin-deps        Installs external dependencies for 3rd party
@@ -78,7 +82,6 @@ Options:
  --override-directives        If specified, --constraint, --resource and
                               --best-practice will override the AI's
                               directives instead of being appended to them
  --component-config-file TEXT Path to the json configuration file.
  --help                       Show this message and exit.
```
</details>
@@ -125,6 +128,10 @@ Options:
  --debug                      Enable Debug Mode
  --gpt3only                   Enable GPT3.5 Only Mode
  --gpt4only                   Enable GPT4 Only Mode
  -b, --browser-name TEXT      Specifies which web-browser to use when using
                               selenium to scrape the web.
  --allow-downloads            Dangerous: Allows AutoGPT to download files
                               natively.
  --install-plugin-deps        Installs external dependencies for 3rd party
                               plugins.
  --help                       Show this message and exit.
@@ -213,5 +220,5 @@ For example, to disable python coding features, set it to the value below:
DISABLED_COMMANDS=execute_python_code,execute_python_file
```

[components]: ../forge/components/components.md
[commands]: ../forge/components/built-in-components.md
[components]: ./components/components.md
[commands]: ./components/built-in-components.md
@@ -1,34 +1,26 @@
# Built-in Components

This page lists all [🧩 Components](./components.md) and [⚙️ Protocols](./protocols.md) they implement that are natively provided. They are used by the AutoGPT agent.
Some components have additional configuration options listed in the table; see [Component configuration](./components.md#component-configuration) to learn more.

!!! note
    If a configuration field uses an environment variable, it can still be passed using the configuration model. **Value from the configuration takes precedence over the env var!** The env var will only be applied if the value in the configuration is not set.

## `SystemComponent`

Essential component to allow an agent to finish.

### DirectiveProvider

**DirectiveProvider**
- Constraints about API budget

### MessageProvider

**MessageProvider**
- Current time and date
- Remaining API budget and warnings if budget is low

### CommandProvider

**CommandProvider**
- `finish` used when task is completed

## `UserInteractionComponent`

Adds ability to interact with user in CLI.

### CommandProvider

**CommandProvider**
- `ask_user` used to ask user for input

## `FileManagerComponent`
@@ -36,21 +28,10 @@ Adds ability to interact with user in CLI.
Adds ability to read and write persistent files to local storage, Google Cloud Storage or Amazon's S3.
Necessary for saving and loading agent's state (preserving session).

### `FileManagerConfiguration`

| Config variable  | Details                                | Type  | Default                            |
| ---------------- | -------------------------------------- | ----- | ---------------------------------- |
| `storage_path`   | Path to agent files, e.g. state        | `str` | `agents/{agent_id}/`[^1]           |
| `workspace_path` | Path to files that agent has access to | `str` | `agents/{agent_id}/workspace/`[^1] |

[^1]: This option is set dynamically during component construction, as opposed to by default inside the configuration model; `{agent_id}` is replaced with the agent's unique identifier.

### DirectiveProvider

**DirectiveProvider**
- Resource information that it's possible to read and write files

### CommandProvider

**CommandProvider**
- `read_file` used to read file
- `write_file` used to write file
- `list_folder` lists all files in a folder
@@ -59,20 +40,7 @@ Necessary for saving and loading agent's state (preserving session).

Lets the agent execute non-interactive Shell commands and Python code. Python execution works only if Docker is available.

### `CodeExecutorConfiguration`

| Config variable          | Details                                              | Type                        | Default           |
| ------------------------ | ---------------------------------------------------- | --------------------------- | ----------------- |
| `execute_local_commands` | Enable shell command execution                       | `bool`                      | `False`           |
| `shell_command_control`  | Controls which list is used                          | `"allowlist" \| "denylist"` | `"allowlist"`     |
| `shell_allowlist`        | List of allowed shell commands                       | `List[str]`                 | `[]`              |
| `shell_denylist`         | List of prohibited shell commands                    | `List[str]`                 | `[]`              |
| `docker_container_name`  | Name of the Docker container used for code execution | `str`                       | `"agent_sandbox"` |

All shell command configurations are for convenience only. This component is not secure and should not be used in production environments. It is recommended to use more appropriate sandboxing.
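As an illustration, enabling shell execution with a tight allowlist might look like this (a sketch; field names are from the table above, the import path is omitted):

```py
# Sketch: allow only a handful of read-only shell commands.
config = CodeExecutorConfiguration(
    execute_local_commands=True,
    shell_command_control="allowlist",
    shell_allowlist=["ls", "cat", "grep"],
)
```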
### CommandProvider

**CommandProvider**
- `execute_shell` execute shell command
- `execute_shell_popen` execute shell command with popen
- `execute_python_code` execute Python code
@@ -82,94 +50,38 @@ All shell command configurations are expected to be for convience only. This com

Keeps track of agent's actions and their outcomes. Provides their summary to the prompt.

### `ActionHistoryConfiguration`

| Config variable        | Details                                                 | Type        | Default            |
| ---------------------- | ------------------------------------------------------- | ----------- | ------------------ |
| `llm_name`             | Name of the llm model used to compress the history      | `ModelName` | `"gpt-3.5-turbo"`  |
| `max_tokens`           | Maximum number of tokens to use for the history summary | `int`       | `1024`             |
| `spacy_language_model` | Language model used for summary chunking using spacy    | `str`       | `"en_core_web_sm"` |
| `full_message_count`   | Number of cycles to include unsummarized in the prompt  | `int`       | `4`                |

### MessageProvider

**MessageProvider**
- Agent's progress summary

### AfterParse

**AfterParse**
- Register agent's action

### ExecutionFailure

**ExecutionFailure**
- Rewinds the agent's action, so it isn't saved

### AfterExecute

**AfterExecute**
- Saves the agent's action result in the history

## `GitOperationsComponent`

Adds ability to interact with git repositories and GitHub.

### `GitOperationsConfiguration`

| Config variable   | Details                                   | Type  | Default |
| ----------------- | ----------------------------------------- | ----- | ------- |
| `github_username` | GitHub username, *ENV:* `GITHUB_USERNAME` | `str` | `None`  |
| `github_api_key`  | GitHub API key, *ENV:* `GITHUB_API_KEY`   | `str` | `None`  |

### CommandProvider

**CommandProvider**
- `clone_repository` used to clone a git repository

## `ImageGeneratorComponent`

Adds ability to generate images using various providers.

### Hugging Face

To use text-to-image models from Hugging Face, you need a Hugging Face API token.
Link to the appropriate settings page: [Hugging Face > Settings > Tokens](https://huggingface.co/settings/tokens)

### Stable Diffusion WebUI

It is possible to use your own self-hosted Stable Diffusion WebUI with AutoGPT. **Make sure you are running WebUI with `--api` enabled.**

### `ImageGeneratorConfiguration`

| Config variable           | Details                                                       | Type                                     | Default                           |
| ------------------------- | ------------------------------------------------------------- | ---------------------------------------- | --------------------------------- |
| `image_provider`          | Image generation provider                                     | `"dalle" \| "huggingface" \| "sdwebui"`  | `"dalle"`                         |
| `huggingface_image_model` | Hugging Face image model, see [available models]              | `str`                                    | `"CompVis/stable-diffusion-v1-4"` |
| `huggingface_api_token`   | Hugging Face API token, *ENV:* `HUGGINGFACE_API_TOKEN`        | `str`                                    | `None`                            |
| `sd_webui_url`            | URL to self-hosted Stable Diffusion WebUI                     | `str`                                    | `"http://localhost:7860"`         |
| `sd_webui_auth`           | Basic auth for Stable Diffusion WebUI, *ENV:* `SD_WEBUI_AUTH` | `str` of format `{username}:{password}`  | `None`                            |

[available models]: https://huggingface.co/models?pipeline_tag=text-to-image

### CommandProvider
Adds ability to generate images using various providers, see [Image Generation configuration](./../configuration/imagegen.md) to learn more.

**CommandProvider**
- `generate_image` used to generate an image given a prompt

## `WebSearchComponent`

Allows agent to search the web. Google credentials aren't required for DuckDuckGo. [Instructions on how to set up the Google API key](../../AutoGPT/configuration/search.md)

### `WebSearchConfiguration`

| Config variable                  | Details                                                                  | Type                        | Default |
| -------------------------------- | ------------------------------------------------------------------------ | --------------------------- | ------- |
| `google_api_key`                 | Google API key, *ENV:* `GOOGLE_API_KEY`                                  | `str`                       | `None`  |
| `google_custom_search_engine_id` | Google Custom Search Engine ID, *ENV:* `GOOGLE_CUSTOM_SEARCH_ENGINE_ID`  | `str`                       | `None`  |
| `duckduckgo_max_attempts`        | Maximum number of attempts to search using DuckDuckGo                    | `int`                       | `3`     |
| `duckduckgo_backend`             | Backend to be used for DDG sdk                                           | `"api" \| "html" \| "lite"` | `"api"` |

### DirectiveProvider
Allows agent to search the web.

**DirectiveProvider**
- Resource information that it's possible to search the web

### CommandProvider

**CommandProvider**
- `search_web` used to search the web using DuckDuckGo
- `google` used to search the web using Google, requires API key
@@ -177,35 +89,20 @@ Allows agent to search the web. Google credentials aren't required for DuckDuckG

Allows agent to read websites using Selenium.

### `WebSeleniumConfiguration`

| Config variable               | Details                                     | Type                                          | Default                                                                                                                      |
| ----------------------------- | ------------------------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
| `llm_name`                    | Name of the llm model used to read websites | `ModelName`                                   | `"gpt-3.5-turbo"`                                                                                                            |
| `web_browser`                 | Web browser used by Selenium                | `"chrome" \| "firefox" \| "safari" \| "edge"` | `"chrome"`                                                                                                                   |
| `headless`                    | Run browser in headless mode                | `bool`                                        | `True`                                                                                                                       |
| `user_agent`                  | User agent used by the browser              | `str`                                         | `"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"` |
| `browse_spacy_language_model` | Spacy language model used for chunking text | `str`                                         | `"en_core_web_sm"`                                                                                                           |
| `selenium_proxy`              | Http proxy to use with Selenium             | `str`                                         | `None`                                                                                                                       |

### DirectiveProvider

**DirectiveProvider**
- Resource information that it's possible to read websites

### CommandProvider

**CommandProvider**
- `read_website` used to read a specific url and look for specific topics or answer a question

## `ContextComponent`

Adds ability to keep up-to-date file and folder content in the prompt.

### MessageProvider

**MessageProvider**
- Content of elements in the context

### CommandProvider

**CommandProvider**
- `open_file` used to open a file into context
- `open_folder` used to open a folder into context
- `close_context_item` remove an item from the context
@@ -214,6 +111,5 @@ Adds ability to keep up-to-date file and folder content in the prompt.

Watches if agent is looping and switches to smart mode if necessary.

### AfterParse

**AfterParse**
- Investigates what happened and switches to smart mode if necessary
@@ -37,7 +37,7 @@ Since components are regular classes you can pass data (including other componen
For example, we can pass a config object and then retrieve an API key from it when needed:

```py
class DataComponent(MessageProvider):
class ConfigurableComponent(MessageProvider):
    def __init__(self, config: Config):
        self.config = config
@@ -51,35 +51,6 @@ class DataComponent(MessageProvider):
!!! note
    Component-specific configuration handling isn't implemented yet.

## Configuring components

Components can be configured using a pydantic model.
To make a component configurable, it must inherit from `ConfigurableComponent[BM]`, where `BM` is the configuration class inheriting from pydantic's `BaseModel`.
You should pass the configuration instance to the `ConfigurableComponent`'s `__init__` or set its `config` property directly.
Using configuration allows you to load configuration from a file, and also serialize and deserialize it easily for any agent.
To learn more about configuration, including storing sensitive information and serialization, see [Component Configuration](./components.md#component-configuration).

```py
# Example component configuration
class UserGreeterConfiguration(BaseModel):
    user_name: str


class UserGreeterComponent(MessageProvider, ConfigurableComponent[UserGreeterConfiguration]):
    def __init__(self):
        # Creating configuration instance
        # You could also pass it to the component constructor
        # e.g. `def __init__(self, config: UserGreeterConfiguration):`
        config = UserGreeterConfiguration(user_name="World")
        # Passing the configuration instance to the parent class
        ConfigurableComponent.__init__(self, config)
        # This has the same effect as the line above:
        # self.config = UserGreeterConfiguration(user_name="World")

    def get_messages(self) -> Iterator[ChatMessage]:
        # You can use the configuration like a regular model
        yield ChatMessage.system(f"Hello, {self.config.user_name}!")
```
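Because the configuration is a plain pydantic model, it can be serialized and restored without extra machinery. A small sketch (assuming pydantic v2; on v1 use `.json()` and `.parse_raw()` instead):

```py
# Sketch: round-trip a component configuration through JSON,
# e.g. to store it in a file or in the agent's state.
raw = UserGreeterConfiguration(user_name="World").model_dump_json()
restored = UserGreeterConfiguration.model_validate_json(raw)
assert restored.user_name == "World"
```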
## Providing commands

To extend what an agent can do, you need to provide commands using the `CommandProvider` protocol. For example, to allow the agent to multiply two numbers, you can create a component like this:
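A sketch of such a component, assuming forge's `command` decorator and `JSONSchema` parameter spec as used by the built-in components (exact import paths may differ):

```py
# Sketch: a component that exposes a single `multiply` command.
from typing import Iterator

from forge.agent.protocols import CommandProvider
from forge.command import Command, command
from forge.models.json_schema import JSONSchema


class MultiplicatorComponent(CommandProvider):
    def get_commands(self) -> Iterator[Command]:
        yield self.multiply

    @command(
        parameters={
            "a": JSONSchema(type=JSONSchema.Type.INTEGER, description="The first number", required=True),
            "b": JSONSchema(type=JSONSchema.Type.INTEGER, description="The second number", required=True),
        }
    )
    def multiply(self, a: int, b: int) -> str:
        """Multiplies two numbers."""
        return str(a * b)
```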
@@ -177,12 +148,12 @@ It gives an ability for the agent to ask user for input in the terminal.
        yield self.ask_user
    ```

5. Since the agent isn't always running in the terminal or interactive mode, we need to disable this component by setting `self._enabled=False` when it's not possible to ask for user input.
5. Since the agent isn't always running in the terminal or interactive mode, we need to disable this component by setting `self._enabled` when it's not possible to ask for user input.

    ```py
    def __init__(self, interactive_mode: bool):
    def __init__(self, config: Config):
        self.config = config
        self._enabled = interactive_mode
        self._enabled = not config.noninteractive_mode
    ```

The final component should look like this:
@@ -193,10 +164,10 @@ class MyUserInteractionComponent(CommandProvider):
    """Provides commands to interact with the user."""

    # We pass config to check if we're in noninteractive mode
    def __init__(self, interactive_mode: bool):
    def __init__(self, config: Config):
        self.config = config
        # 5.
        self._enabled = interactive_mode
        self._enabled = not config.noninteractive_mode

    # 4.
    def get_commands(self) -> Iterator[Command]:
@@ -234,10 +205,10 @@ class MyAgent(Agent):
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: Config,
        legacy_config: Config,
    ):
        # Call the parent constructor to bring in the default components
        super().__init__(settings, llm_provider, file_storage, app_config)
        super().__init__(settings, llm_provider, file_storage, legacy_config)
        # Disable the default user interaction component by overriding it
        self.user_interaction = MyUserInteractionComponent()
```
@@ -251,14 +222,14 @@ class MyAgent(Agent):
        settings: AgentSettings,
        llm_provider: MultiProvider,
        file_storage: FileStorage,
        app_config: Config,
        legacy_config: Config,
    ):
        # Call the parent constructor to bring in the default components
        super().__init__(settings, llm_provider, file_storage, app_config)
        super().__init__(settings, llm_provider, file_storage, legacy_config)
        # Disable the default user interaction component
        self.user_interaction = None
        # Add our own component
        self.my_user_interaction = MyUserInteractionComponent(app_config)
        self.my_user_interaction = MyUserInteractionComponent(legacy_config)
```

## Learn more
@@ -1,16 +1,10 @@
# Component Agents

!!! important
    [Legacy plugins] no longer work with AutoGPT. They have been replaced by components,
    although we're still working on a new system to load plug-in components.

    [Legacy plugins]: https://github.com/Significant-Gravitas/Auto-GPT-Plugins

This guide explains the component-based architecture of AutoGPT agents. It's a new way of building agents that is more flexible and easier to extend. Components replace some of the agent's logic and plugins with a more modular and composable system.

An agent is composed of *components*, and each *component* implements a range of *protocols* (interfaces), each one providing a specific functionality, e.g. additional commands or messages. Each *protocol* is handled in a specific order, defined by the agent. This allows for a clear separation of concerns and a more modular design.

This system is simple, flexible, and doesn't hide any data - anything can still be passed or accessed directly from or between components.
This system is simple, flexible, requires basically no configuration, and doesn't hide any data - anything can still be passed or accessed directly from or between components.

### Definitions & Guides
@@ -19,16 +19,16 @@ Forge is a ready-to-go template for *your* agent application. All the boilerplat

### 🚀 **Get Started!**

The best way to get started is to fork or download the AutoGPT repository and look at the example agent in `forge/forge/agent/forge_agent.py`.
This can work as a starting point for your own agent.
Agents are built using *components*, which provide different functionality; see the [Component Introduction](./components/introduction.md). You can find built-in components in `forge/forge/components/`.

!!! warning
    The tutorial series below is out of date.

The getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) will guide you through the process of setting up your project all the way through to building a generalist agent.

1. [AutoGPT Forge: A Comprehensive Guide to Your First Steps](https://aiedge.medium.com/autogpt-forge-a-comprehensive-guide-to-your-first-steps-a1dfdf46e3b4)
2. [AutoGPT Forge: The Blueprint of an AI Agent](https://aiedge.medium.com/autogpt-forge-the-blueprint-of-an-ai-agent-75cd72ffde6)
3. [AutoGPT Forge: Interacting with your Agent](https://aiedge.medium.com/autogpt-forge-interacting-with-your-agent-1214561b06b)
4. [AutoGPT Forge: Crafting Intelligent Agent Logic](https://medium.com/@aiedge/autogpt-forge-crafting-intelligent-agent-logic-bc5197b14cb4)

Coming soon:

5. Interacting with and Benchmarking your Agent
6. Abilities
7. The Planning Loop
8. Memories
@@ -4,26 +4,13 @@ Welcome to the AutoGPT Documentation.

The AutoGPT project consists of four main components:

- The [Server](#server) – known as the "AutoGPT Platform"
- The [Agent](#agent) – also known as just "AutoGPT"
- The [Benchmark](#benchmark) – AKA `agbenchmark`
- The [Forge](#forge)
- The [Frontend](#frontend)
* The [Agent](#agent) – also known as just "AutoGPT"
* The [Benchmark](#benchmark) – AKA `agbenchmark`
* The [Forge](#forge)
* The [Frontend](#frontend)

To tie these together, we also have a [CLI] at the root of the project.

## 🌐 Server

<!-- Setup, then Advanced, then New Blocks -->

**[📖 Setup](server/setup.md)** | **[📖 Advanced Setup](server/advanced_setup.md)** | **[📖 Making New Blocks](server/new_blocks.md)**

The server is the backbone of the New AutoGPT project. It provides the infrastructure for the agents to run, and the UI for you to interact with them. It integrates with the Forge, Agent, and a bespoke UI to provide a seamless experience.

---

## 🤖 Agent
@@ -1,69 +0,0 @@
# Advanced Setup

The advanced steps below are intended for people with sysadmin experience. If you are not comfortable with these steps, please refer to the [basic setup guide](setup.md).

## Introduction

For the advanced setup, first follow the [basic setup guide](setup.md) to get the server up and running. Once you have the server running, you can follow the steps below to configure the server for your specific needs.

## Configuration

### Setting config via environment variables

The server uses environment variables to store configs. You can set these environment variables in a `.env` file in the root of the project. The `.env` file should look like this:

```bash
# .env
KEY1=value1
KEY2=value2
```

The server will automatically load the `.env` file when it starts. You can also set the environment variables directly in your shell. Refer to your operating system's documentation on how to set environment variables in the current session.

The valid options are listed in `.env.example` in the root of the builder and server directories. You can copy the `.env.example` file to `.env` and modify the values as needed.

```bash
# Copy the .env.example file to .env
cp .env.example .env
```

### Secrets directory

The secrets directory is located at `./secrets`. You can store any secrets you need in this directory. The server will automatically load the secrets when it starts.

An example for a secret called `my_secret` would look like this:

```bash
# ./secrets/my_secret
my_secret_value
```

This is useful when running in Docker, so you can copy the secrets into the container without exposing them in the Dockerfile.
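For example, application code can read such a secret at startup with a few lines (a sketch; the path layout follows the example above):

```python
# Sketch: read a file-based secret, e.g. ./secrets/my_secret.
from pathlib import Path

SECRETS_DIR = Path("./secrets")

def read_secret(name: str) -> str:
    # .strip() drops the trailing newline most editors add
    return (SECRETS_DIR / name).read_text().strip()

my_secret = read_secret("my_secret")
```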
## Database selection

### SQLite

By default, the server uses SQLite as the database. SQLite is a file-based database that is easy to set up and use. However, it is not recommended for production use cases where auth is required, because that subsystem requires Postgres.

### PostgreSQL

For production use, it is recommended to use PostgreSQL as the database. You will swap the commands you use to generate and run Prisma to the following:

```bash
poetry run prisma generate --schema postgres/schema.prisma
```

This will generate the Prisma client for PostgreSQL. You will also need to run the PostgreSQL database in a separate container. You can use the `docker-compose.yml` file in the `rnd` directory to run the PostgreSQL database.

```bash
cd rnd/
docker compose up -d
```

You can then run the migrations from the `autogpt_server` directory.

```bash
cd ../autogpt_server
prisma migrate dev --schema postgres/schema.prisma
```
@@ -1,17 +0,0 @@
# Find available voices for D-ID

1. **ElevenLabs**
    - Select any voice from the voice list: https://api.elevenlabs.io/v1/voices
    - Copy the voice_id
    - Use it as a string in the `voice_id` field in the CreateTalkingAvatarClip Block

2. **Microsoft Azure Voices**
    - Select any voice from the voice gallery: https://speech.microsoft.com/portal/voicegallery
    - Click on the "Sample code" tab on the right
    - Copy the voice name, for example: `config.SpeechSynthesisVoiceName = "en-GB-AbbiNeural"`
    - Use the string `en-GB-AbbiNeural` in the `voice_id` field in the CreateTalkingAvatarClip Block

3. **Amazon Polly Voices**
    - Select any voice from the voice list: https://docs.aws.amazon.com/polly/latest/dg/available-voices.html
    - Copy the voice name / ID
    - Use it as a string in the `voice_id` field in the CreateTalkingAvatarClip Block
@@ -1,224 +0,0 @@
# Contributing to AutoGPT Agent Server: Creating and Testing Blocks

This guide will walk you through the process of creating and testing a new block for the AutoGPT Agent Server, using the WikipediaSummaryBlock as an example.

## Understanding Blocks and Testing

Blocks are reusable components that can be connected to form a graph representing an agent's behavior. Each block has inputs, outputs, and a specific function. Proper testing is crucial to ensure blocks work correctly and consistently.

## Creating and Testing a New Block

Follow these steps to create and test a new block:

1. **Create a new Python file** in the `autogpt_server/blocks` directory. Name it descriptively and use snake_case. For example: `get_wikipedia_summary.py`.

2. **Import necessary modules and create a class that inherits from `Block`**. Make sure to include all necessary imports for your block.

    Every block should contain the following:

    ```python
    from autogpt_server.data.block import Block, BlockSchema, BlockOutput
    ```

    Example for the Wikipedia summary block:

    ```python
    from autogpt_server.data.block import Block, BlockSchema, BlockOutput
    from autogpt_server.utils.get_request import GetRequest
    import requests

    class WikipediaSummaryBlock(Block, GetRequest):
        # Block implementation will go here
    ```
3. **Define the input and output schemas** using `BlockSchema`. These schemas specify the data structure that the block expects to receive (input) and produce (output).

    - The input schema defines the structure of the data the block will process. Each field in the schema represents a required piece of input data.
    - The output schema defines the structure of the data the block will return after processing. Each field in the schema represents a piece of output data.

    Example:

    ```python
    class Input(BlockSchema):
        topic: str  # The topic to get the Wikipedia summary for

    class Output(BlockSchema):
        summary: str  # The summary of the topic from Wikipedia
        error: str  # Any error message if the request fails
    ```
4. **Implement the `__init__` method, including test data and mocks:**

    ```python
    def __init__(self):
        super().__init__(
            # Unique ID for the block, used across users for templates
            # you can generate this with this python one liner
            # print(__import__('uuid').uuid4())
            id="h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m",
            input_schema=WikipediaSummaryBlock.Input,  # Assign input schema
            output_schema=WikipediaSummaryBlock.Output,  # Assign output schema

            # Provide sample input, output and test mock for testing the block

            test_input={"topic": "Artificial Intelligence"},
            test_output=("summary", "summary content"),
            test_mock={"get_request": lambda url, json: {"extract": "summary content"}},
        )
    ```

    - `id`: A unique identifier for the block.
    - `input_schema` and `output_schema`: Define the structure of the input and output data.

    Let's break down the testing components:

    - `test_input`: This is a sample input that will be used to test the block. It should be a valid input according to your Input schema.
    - `test_output`: This is the expected output when running the block with the `test_input`. It should match your Output schema. For non-deterministic outputs or when you only want to assert the type, you can use Python types instead of specific values. In this example, `("summary", str)` asserts that the output key is "summary" and its value is a string.
    - `test_mock`: This is crucial for blocks that make network calls. It provides a mock function that replaces the actual network call during testing.

    In this case, we're mocking the `get_request` method to always return a dictionary with an 'extract' key, simulating a successful API response. This allows us to test the block's logic without making actual network requests, which could be slow, unreliable, or rate-limited.
5. **Implement the `run` method with error handling.** This should contain the main logic of the block:

    ```python
    def run(self, input_data: Input) -> BlockOutput:
        try:
            topic = input_data.topic
            url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"

            response = self.get_request(url, json=True)
            yield "summary", response['extract']

        except requests.exceptions.HTTPError as http_err:
            yield "error", f"HTTP error occurred: {http_err}"
        except requests.RequestException as e:
            yield "error", f"Request to Wikipedia failed: {e}"
        except KeyError as e:
            yield "error", f"Error parsing Wikipedia response: {e}"
    ```

    - **Try block**: Contains the main logic to fetch and process the Wikipedia summary.
    - **API request**: Send a GET request to the Wikipedia API.
    - **Error handling**: Handle various exceptions that might occur during the API request and data processing.
    - **Yield**: Use `yield` to output the results.
## Key Points to Remember

- **Unique ID**: Give your block a unique ID in the `__init__` method.
- **Input and Output Schemas**: Define clear input and output schemas.
- **Error Handling**: Implement error handling in the `run` method.
- **Output Results**: Use `yield` to output results in the `run` method.
- **Testing**: Provide test input and output in the `__init__` method for automatic testing.

## Understanding the Testing Process

The testing of blocks is handled by `test_block.py`, which does the following:

1. It calls the block with the provided `test_input`.
2. If a `test_mock` is provided, it temporarily replaces the specified methods with the mock functions.
3. It then asserts that the output matches the `test_output`.

For the WikipediaSummaryBlock:

- The test will call the block with the topic "Artificial Intelligence".
- Instead of making a real API call, it will use the mock function, which returns `{"extract": "summary content"}`.
- It will then check if the output key is "summary" and its value is a string.

This approach allows us to test the block's logic comprehensively without relying on external services, while also accommodating non-deterministic outputs.
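To see the same flow outside the harness, you can exercise the block by hand (a sketch that mirrors what `test_block.py` does; class and field names follow the example above):

```python
# Sketch: run WikipediaSummaryBlock with a mocked network call.
block = WikipediaSummaryBlock()
# Shadow the real get_request with the mock, as the harness would
block.get_request = lambda url, json: {"extract": "summary content"}

for name, value in block.run(WikipediaSummaryBlock.Input(topic="Artificial Intelligence")):
    assert name == "summary"
    assert isinstance(value, str)
```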
## Tips for Effective Block Testing

1. **Provide realistic test_input**: Ensure your test input covers typical use cases.

2. **Define appropriate test_output**:
    - For deterministic outputs, use specific expected values.
    - For non-deterministic outputs or when only the type matters, use Python types (e.g., `str`, `int`, `dict`).
    - You can mix specific values and types, e.g., `("key1", str), ("key2", 42)`.

3. **Use test_mock for network calls**: This prevents tests from failing due to network issues or API changes.

4. **Consider omitting test_mock for blocks without external dependencies**: If your block doesn't make network calls or use external resources, you might not need a mock.

5. **Consider edge cases**: Include tests for potential error conditions in your `run` method.

6. **Update tests when changing block behavior**: If you modify your block, ensure the tests are updated accordingly.

By following these steps, you can create new blocks that extend the functionality of the AutoGPT Agent Server.
## Blocks we want to see

Below is a list of blocks that we would like to see implemented in the AutoGPT Agent Server. If you're interested in contributing, feel free to pick one of these blocks or suggest your own by editing [docs/content/server/new_blocks.md](https://github.com/Significant-Gravitas/AutoGPT/edit/master/docs/content/server/new_blocks.md) and opening a pull request.

If you would like to implement one of these blocks, open a pull request and we will start the review process.

### Consumer Services/Platforms

- Google sheets - Read/Append [Read in Progress](https://github.com/Significant-Gravitas/AutoGPT/pull/7521)
- Email - Read/Send with Gmail, Outlook, Yahoo, Proton, etc
- Calendar - Read/Write with Google Calendar, Outlook Calendar, etc
- Home Assistant - Call Service, Get Status
- Dominos - Order Pizza, Track Order
- Uber - Book Ride, Track Ride
- Notion - Create/Read Page, Create/Append/Read DB
- Google drive - read/write/overwrite file/folder

### Social Media

- Twitter - Post, Reply, Get Replies, Get Comments, Get Followers, Get Following, Get Tweets, Get Mentions
- Instagram - Post, Reply, Get Comments, Get Followers, Get Following, Get Posts, Get Mentions, Get Trending Posts
- TikTok - Post, Reply, Get Comments, Get Followers, Get Following, Get Videos, Get Mentions, Get Trending Videos
- LinkedIn - Post, Reply, Get Comments, Get Followers, Get Following, Get Posts, Get Mentions, Get Trending Posts
- YouTube - Transcribe Videos/Shorts, Post Videos/Shorts, Read/Reply/React to Comments, Update Thumbnails, Update Description, Update Tags, Update Titles, Get Views, Get Likes, Get Dislikes, Get Subscribers, Get Comments, Get Shares, Get Watch Time, Get Revenue, Get Trending Videos, Get Top Videos, Get Top Channels
- Reddit - Post, Reply, Get Comments, Get Followers, Get Following, Get Posts, Get Mentions, Get Trending Posts
- Treatwell (and related Platforms) - Book, Cancel, Review, Get Recommendations
- Substack - Read/Subscribe/Unsubscribe, Post/Reply, Get Recommendations
- Discord - Read/Post/Reply, Moderation actions
- GoodReads - Read/Post/Reply, Get Recommendations

### E-commerce

- Airbnb - Book, Cancel, Review, Get Recommendations
- Amazon - Order, Track Order, Return, Review, Get Recommendations
- eBay - Order, Track Order, Return, Review, Get Recommendations
- Upwork - Post Jobs, Hire Freelancer, Review Freelancer, Fire Freelancer

### Business Tools

- External Agents - Call other agents similar to AutoGPT
- Trello - Create/Read/Update/Delete Cards, Lists, Boards
- Jira - Create/Read/Update/Delete Issues, Projects, Boards
- Linear - Create/Read/Update/Delete Issues, Projects, Boards
- Excel - Read/Write/Update/Delete Rows, Columns, Sheets
- Slack - Read/Post/Reply to Messages, Create Channels, Invite Users
- ERPNext - Create/Read/Update/Delete Invoices, Orders, Customers, Products
- Salesforce - Create/Read/Update/Delete Leads, Opportunities, Accounts
- HubSpot - Create/Read/Update/Delete Contacts, Deals, Companies
- Zendesk - Create/Read/Update/Delete Tickets, Users, Organizations
- Odoo - Create/Read/Update/Delete Sales Orders, Invoices, Customers
- Shopify - Create/Read/Update/Delete Products, Orders, Customers
- WooCommerce - Create/Read/Update/Delete Products, Orders, Customers
- Squarespace - Create/Read/Update/Delete Pages, Products, Orders

## Agent Templates we want to see

### Data/Information

- Summarize top news of today, of this week, this month via Apple News or other large media outlets BBC, TechCrunch, hackernews, etc
- Create, read, and summarize substack newsletters or any newsletters (blog writer vs blog reader)
- Get/read/summarize the most viral Twitter, Instagram, TikTok (general social media accounts) of the day, week, month
- Get/Read any LinkedIn posts or profile that mention AI Agents
- Read/Summarize discord (might not be able to do this because you need access)
- Read / Get most read books in a given month, year, etc from GoodReads or Amazon Books, etc
- Get dates for specific shows across all streaming services
- Suggest/Recommend/Get most watched shows in a given month, year, etc across all streaming platforms
- Data analysis from xlsx data set
- Gather via Excel or Google Sheets data > Sample the data randomly (sample block takes top X, bottom X, randomly, etc) > pass that to LLM Block to generate a script for analysis of the full data > Python block to run the script > making a loop back through LLM Fix Block on error > create chart/visualization (potentially in the code block?) > show the image as output (this may require frontend changes to show)
- TikTok video search and download

### Marketing

- Portfolio site design and enhancements