Mirror of https://github.com/Significant-Gravitas/AutoGPT.git, synced 2026-01-12 00:28:31 -05:00

Compare commits: docker-upd...update-doc (51 commits)
| SHA1 |
|---|
| 3860a9b6e4 |
| 1414b83cf8 |
| 612e7cfed5 |
| 52ee846744 |
| 62a3e1c127 |
| ef7cfbb860 |
| 2dfc927f03 |
| e3f35d79c7 |
| 0040495143 |
| d3eac86f9a |
| c3cb90ac20 |
| 9b5bf81d7c |
| 86db4deef9 |
| d8f989daf8 |
| 00f2b134cb |
| a3959712dc |
| 8477b25c5a |
| f133c9c1ef |
| dc72ec97bc |
| 0c915cb558 |
| f6ab15db47 |
| 80161decb9 |
| 0bf8edcd96 |
| b1347a92de |
| 22ce8e0047 |
| 5a7193cfb7 |
| c1f301ab8b |
| f32244a112 |
| 9395706841 |
| a98677b79d |
| 056eb46c0f |
| 6fde030c37 |
| bf1e01d423 |
| 52c731abd6 |
| c8fbce643e |
| 6c001bd595 |
| f5b89672f8 |
| 76480ffa03 |
| ab60a57379 |
| 1d9b01fc77 |
| e81d9f9f0b |
| 0d5d0270ea |
| bd25f9223c |
| 07305b55ff |
| cdfe3e5fbc |
| e992cdf8c2 |
| ebd2ecd84c |
| 0b919522ae |
| ef691359b7 |
| f8815c3053 |
| a60ed21404 |
.gitattributes (vendored; 4 lines changed)
@@ -1,10 +1,10 @@
-frontend/build/** linguist-generated
+classic/frontend/build/** linguist-generated
 **/poetry.lock linguist-generated
 docs/_javascript/** linguist-vendored
 # Exclude VCR cassettes from stats
-forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
+classic/forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
 * text=auto

.github/CODEOWNERS (vendored; 8 lines changed)
@@ -1,7 +1,7 @@
 * @Significant-Gravitas/maintainers
 .github/workflows/ @Significant-Gravitas/devops
-forge/ @Significant-Gravitas/forge-maintainers
-benchmark/ @Significant-Gravitas/benchmark-maintainers
-frontend/ @Significant-Gravitas/frontend-maintainers
-rnd/infra @Significant-Gravitas/devops
+classic/forge/ @Significant-Gravitas/forge-maintainers
+classic/benchmark/ @Significant-Gravitas/benchmark-maintainers
+classic/frontend/ @Significant-Gravitas/frontend-maintainers
+autogpt_platform/infra @Significant-Gravitas/devops
 .github/CODEOWNERS @Significant-Gravitas/admins

.github/PULL_REQUEST_TEMPLATE.md (vendored; 30 lines changed)
@@ -6,26 +6,18 @@
 <!-- Concisely describe all of the changes made in this pull request: -->

-### PR Quality Scorecard ✨
+### Testing 🔍
+> [!NOTE]
+> Only for the new autogpt platform, currently in autogpt_platform/

 <!--
-  Check out our contribution guide:
-  https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
-
-  1. Avoid duplicate work, issues, PRs etc.
-  2. Also consider contributing something other than code; see the [contribution guide]
-     for options.
-  3. Clearly explain your changes.
-  4. Avoid making unnecessary changes, especially if they're purely based on personal
-     preferences. Doing so is the maintainers' job. ;-)
+  Please make sure your changes have been tested and are in good working condition.
+  Here is a list of our critical paths, if you need some inspiration on what and how to test:
 -->

-- [x] Have you used the PR description template? `+2 pts`
-- [ ] Is your pull request atomic, focusing on a single change? `+5 pts`
-- [ ] Have you linked the GitHub issue(s) that this PR addresses? `+5 pts`
-- [ ] Have you documented your changes clearly and comprehensively? `+5 pts`
-- [ ] Have you changed or added a feature? `-4 pts`
-- [ ] Have you added/updated corresponding documentation? `+4 pts`
-- [ ] Have you added/updated corresponding integration tests? `+5 pts`
-- [ ] Have you changed the behavior of AutoGPT? `-5 pts`
-- [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? `+10 pts`
+- Create from scratch and execute an agent with at least 3 blocks
+- Import an agent from file upload, and confirm it executes correctly
+- Upload agent to marketplace
+- Import an agent from marketplace and confirm it executes correctly
+- Edit an agent from monitor, and confirm it executes correctly

.github/labeler.yml (vendored; 30 lines changed)
@@ -1,27 +1,27 @@
-AutoGPT Agent:
+Classic AutoGPT Agent:
 - changed-files:
-  - any-glob-to-any-file: autogpt/**
+  - any-glob-to-any-file: classic/original_autogpt/**
+
+Classic Benchmark:
+- changed-files:
+  - any-glob-to-any-file: classic/benchmark/**
+
+Classic Frontend:
+- changed-files:
+  - any-glob-to-any-file: classic/frontend/**

 Forge:
 - changed-files:
-  - any-glob-to-any-file: forge/**
-
-Benchmark:
-- changed-files:
-  - any-glob-to-any-file: benchmark/**
-
-Frontend:
-- changed-files:
-  - any-glob-to-any-file: frontend/**
+  - any-glob-to-any-file: classic/forge/**

 documentation:
 - changed-files:
   - any-glob-to-any-file: docs/**

-Builder:
+platform/frontend:
 - changed-files:
-  - any-glob-to-any-file: rnd/autogpt_builder/**
+  - any-glob-to-any-file: autogpt_platform/frontend/**

-Server:
+platform/backend:
 - changed-files:
-  - any-glob-to-any-file: rnd/autogpt_server/**
+  - any-glob-to-any-file: autogpt_platform/backend/**

.github/workflows/autogpts-benchmark.yml (vendored; deleted file, -97 lines)
@@ -1,97 +0,0 @@
name: AutoGPTs Nightly Benchmark

on:
  workflow_dispatch:
  schedule:
    - cron: '0 2 * * *'

jobs:
  benchmark:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    strategy:
      matrix:
        agent-name: [ autogpt ]
      fail-fast: false
    timeout-minutes: 120
    env:
      min-python-version: '3.10'
      REPORTS_BRANCH: data/benchmark-reports
      REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - name: Install Poetry
        run: curl -sSL https://install.python-poetry.org | python -

      - name: Prepare reports folder
        run: mkdir -p ${{ env.REPORTS_FOLDER }}

      - run: poetry -C benchmark install

      - name: Benchmark ${{ matrix.agent-name }}
        run: |
          ./run agent start ${{ matrix.agent-name }}
          cd ${{ matrix.agent-name }}

          set +e  # Do not quit on non-zero exit codes
          poetry run agbenchmark run -N 3 \
            --test=ReadFile \
            --test=BasicRetrieval --test=RevenueRetrieval2 \
            --test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
            --test=UrlShortener --test=TicTacToe --test=Battleship \
            --test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
            --test=WebArenaTask_134 --test=WebArenaTask_163

          # Convert exit code 1 (some challenges failed) to exit code 0
          if [ $? -eq 0 ] || [ $? -eq 1 ]; then
            exit 0
          else
            exit $?
          fi
        env:
          AGENT_NAME: ${{ matrix.agent-name }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
          REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }}  # account for changed workdir

          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
          TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}

      - name: Push reports to data branch
        run: |
          # BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
          rm ${{ env.REPORTS_FOLDER }}/*.json

          # Find folder with newest (untracked) report in it
          report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
            | xargs -I {} dirname {} \
            | xargs -I {} git ls-files --others --exclude-standard {} \
            | xargs -I {} dirname {} \
            | sort -u)
          json_report_file="$report_subfolder/report.json"

          # Convert JSON report to Markdown
          markdown_report_file="$report_subfolder/report.md"
          poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
          cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY

          git config --global user.name 'GitHub Actions'
          git config --global user.email 'github-actions@agpt.co'
          git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
            && git checkout ${{ env.REPORTS_BRANCH }} \
            || git checkout --orphan ${{ env.REPORTS_BRANCH }}
          git reset --hard
          git add ${{ env.REPORTS_FOLDER }}
          git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
            && git push origin ${{ env.REPORTS_BRANCH }}

@@ -1,25 +1,25 @@
-name: AutoGPT CI
+name: Classic - AutoGPT CI

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/autogpt-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-ci.yml'
+      - 'classic/original_autogpt/**'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/autogpt-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-ci.yml'
+      - 'classic/original_autogpt/**'

 concurrency:
-  group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('classic-autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

 defaults:
   run:
     shell: bash
-    working-directory: autogpt
+    working-directory: classic/original_autogpt

 jobs:
   test:
@@ -86,7 +86,7 @@ jobs:
       uses: actions/cache@v4
       with:
         path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-        key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
+        key: poetry-${{ runner.os }}-${{ hashFiles('classic/original_autogpt/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'
@@ -135,4 +135,4 @@ jobs:
       uses: actions/upload-artifact@v4
       with:
         name: test-logs
-        path: autogpt/logs/
+        path: classic/original_autogpt/logs/

@@ -1,4 +1,4 @@
-name: Purge Auto-GPT Docker CI cache
+name: Classic - Purge Auto-GPT Docker CI cache

 on:
   schedule:
@@ -25,7 +25,8 @@ jobs:
     name: Build image
     uses: docker/build-push-action@v5
     with:
-      file: Dockerfile.autogpt
+      context: classic/
+      file: classic/Dockerfile.autogpt
       build-args: BUILD_TYPE=${{ matrix.build-type }}
       load: true # save to docker images
       # use GHA cache as read-only

@@ -1,24 +1,26 @@
-name: AutoGPT Docker CI
+name: Classic - AutoGPT Docker CI

 on:
   push:
     branches: [ master, development ]
     paths:
-      - '.github/workflows/autogpt-docker-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-docker-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/autogpt-docker-ci.yml'
-      - 'autogpt/**'
+      - '.github/workflows/classic-autogpt-docker-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'

 concurrency:
-  group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('classic-autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ github.event_name == 'pull_request' }}

 defaults:
   run:
-    working-directory: autogpt
+    working-directory: classic/original_autogpt

 env:
   IMAGE_NAME: auto-gpt
@@ -47,7 +49,8 @@ jobs:
     name: Build image
     uses: docker/build-push-action@v5
     with:
-      file: Dockerfile.autogpt
+      context: classic/
+      file: classic/Dockerfile.autogpt
       build-args: BUILD_TYPE=${{ matrix.build-type }}
       tags: ${{ env.IMAGE_NAME }}
       labels: GIT_REVISION=${{ github.sha }}
@@ -116,7 +119,8 @@ jobs:
     name: Build image
     uses: docker/build-push-action@v5
     with:
-      file: Dockerfile.autogpt
+      context: classic/
+      file: classic/Dockerfile.autogpt
       build-args: BUILD_TYPE=dev # include pytest
       tags: >
         ${{ env.IMAGE_NAME }},

@@ -1,4 +1,4 @@
-name: AutoGPT Docker Release
+name: Classic - AutoGPT Docker Release

 on:
   release:
@@ -44,6 +44,7 @@ jobs:
     name: Build image
     uses: docker/build-push-action@v5
     with:
+      context: classic/
       file: Dockerfile.autogpt
       build-args: BUILD_TYPE=release
       load: true # save to docker images

@@ -1,4 +1,4 @@
-name: Agent smoke tests
+name: Classic - Agent smoke tests

 on:
   workflow_dispatch:
@@ -7,32 +7,37 @@ on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/autogpts-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
-      - 'run'
-      - 'cli.py'
-      - 'setup.py'
+      - '.github/workflows/classic-autogpts-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - 'classic/run'
+      - 'classic/cli.py'
+      - 'classic/setup.py'
       - '!**/*.md'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/autogpts-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
-      - 'run'
-      - 'cli.py'
-      - 'setup.py'
+      - '.github/workflows/classic-autogpts-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
+      - 'classic/run'
+      - 'classic/cli.py'
+      - 'classic/setup.py'
       - '!**/*.md'

 defaults:
   run:
     shell: bash
+    working-directory: classic

 jobs:
   serve-agent-protocol:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        agent-name: [ autogpt ]
+        agent-name: [ original_autogpt ]
       fail-fast: false
     timeout-minutes: 20
     env:
@@ -50,7 +55,7 @@ jobs:
         python-version: ${{ env.min-python-version }}

     - name: Install Poetry
-      working-directory: ./${{ matrix.agent-name }}/
+      working-directory: ./classic/${{ matrix.agent-name }}/
       run: |
         curl -sSL https://install.python-poetry.org | python -

@@ -1,18 +1,18 @@
-name: AGBenchmark CI
+name: Classic - AGBenchmark CI

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - 'benchmark/**'
-      - .github/workflows/benchmark-ci.yml
-      - '!benchmark/reports/**'
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
+      - .github/workflows/classic-benchmark-ci.yml
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - 'benchmark/**'
-      - '!benchmark/reports/**'
-      - .github/workflows/benchmark-ci.yml
+      - 'classic/benchmark/**'
+      - '!classic/benchmark/reports/**'
+      - .github/workflows/classic-benchmark-ci.yml

 concurrency:
   group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -39,7 +39,7 @@ jobs:
   defaults:
     run:
       shell: bash
-      working-directory: benchmark
+      working-directory: classic/benchmark
   steps:
     - name: Checkout repository
       uses: actions/checkout@v4
@@ -58,7 +58,7 @@ jobs:
     uses: actions/cache@v4
     with:
       path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-      key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
+      key: poetry-${{ runner.os }}-${{ hashFiles('classic/benchmark/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'
@@ -122,7 +122,7 @@ jobs:
       curl -sSL https://install.python-poetry.org | python -

     - name: Run regression tests
-      working-directory: .
+      working-directory: classic
       run: |
         ./run agent start ${{ matrix.agent-name }}
         cd ${{ matrix.agent-name }}
@@ -155,7 +155,7 @@ jobs:

         poetry run agbenchmark --mock

-        CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
+        CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
         if [ ! -z "$CHANGED" ]; then
           echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
           echo "$CHANGED"

@@ -1,4 +1,4 @@
-name: Publish to PyPI
+name: Classic - Publish to PyPI

 on:
   workflow_dispatch:
@@ -21,21 +21,21 @@ jobs:
       python-version: 3.8

   - name: Install Poetry
-    working-directory: ./benchmark/
+    working-directory: ./classic/benchmark/
     run: |
       curl -sSL https://install.python-poetry.org | python3 -
       echo "$HOME/.poetry/bin" >> $GITHUB_PATH

   - name: Build project for distribution
-    working-directory: ./benchmark/
+    working-directory: ./classic/benchmark/
     run: poetry build

   - name: Install dependencies
-    working-directory: ./benchmark/
+    working-directory: ./classic/benchmark/
     run: poetry install

   - name: Check Version
-    working-directory: ./benchmark/
+    working-directory: ./classic/benchmark/
     id: check-version
     run: |
       echo version=$(poetry version --short) >> $GITHUB_OUTPUT
@@ -43,7 +43,7 @@ jobs:
   - name: Create Release
     uses: ncipollo/release-action@v1
     with:
-      artifacts: "benchmark/dist/*"
+      artifacts: "classic/benchmark/dist/*"
       token: ${{ secrets.GITHUB_TOKEN }}
       draft: false
       generateReleaseNotes: false
@@ -51,5 +51,5 @@ jobs:
     commit: master

   - name: Build and publish
-    working-directory: ./benchmark/
+    working-directory: ./classic/benchmark/
     run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}

@@ -1,18 +1,18 @@
-name: Forge CI
+name: Classic - Forge CI

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/forge-ci.yml'
-      - 'forge/**'
-      - '!forge/tests/vcr_cassettes'
+      - '.github/workflows/classic-forge-ci.yml'
+      - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/forge-ci.yml'
-      - 'forge/**'
-      - '!forge/tests/vcr_cassettes'
+      - '.github/workflows/classic-forge-ci.yml'
+      - 'classic/forge/**'
+      - '!classic/forge/tests/vcr_cassettes'

 concurrency:
   group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -21,7 +21,7 @@ concurrency:
 defaults:
   run:
     shell: bash
-    working-directory: forge
+    working-directory: classic/forge

 jobs:
   test:
@@ -110,7 +110,7 @@ jobs:
     uses: actions/cache@v4
     with:
       path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-      key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
+      key: poetry-${{ runner.os }}-${{ hashFiles('classic/forge/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'
@@ -233,4 +233,4 @@ jobs:
     uses: actions/upload-artifact@v4
     with:
       name: test-logs
-      path: forge/logs/
+      path: classic/forge/logs/

@@ -1,4 +1,4 @@
-name: Frontend CI/CD
+name: Classic - Frontend CI/CD

 on:
   push:
@@ -7,12 +7,12 @@ on:
     - development
     - 'ci-test*' # This will match any branch that starts with "ci-test"
     paths:
-      - 'frontend/**'
-      - '.github/workflows/frontend-ci.yml'
+      - 'classic/frontend/**'
+      - '.github/workflows/classic-frontend-ci.yml'
   pull_request:
     paths:
-      - 'frontend/**'
-      - '.github/workflows/frontend-ci.yml'
+      - 'classic/frontend/**'
+      - '.github/workflows/classic-frontend-ci.yml'

 jobs:
   build:
@@ -21,7 +21,7 @@ jobs:
     pull-requests: write
     runs-on: ubuntu-latest
     env:
-      BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
+      BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}

     steps:
       - name: Checkout Repo
@@ -34,7 +34,7 @@ jobs:

     - name: Build Flutter to Web
       run: |
-        cd frontend
+        cd classic/frontend
         flutter build web --base-href /app/

     # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
@@ -42,7 +42,7 @@ jobs:
     #   run: |
     #     git config --local user.email "action@github.com"
     #     git config --local user.name "GitHub Action"
-    #     git add frontend/build/web
+    #     git add classic/frontend/build/web
     #     git checkout -B ${{ env.BUILD_BRANCH }}
     #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
     #     git push -f origin ${{ env.BUILD_BRANCH }}
@@ -51,7 +51,7 @@ jobs:
       if: github.event_name == 'push'
       uses: peter-evans/create-pull-request@v6
       with:
-        add-paths: frontend/build/web
+        add-paths: classic/frontend/build/web
         base: ${{ github.ref_name }}
         branch: ${{ env.BUILD_BRANCH }}
         delete-branch: true

@@ -1,27 +1,27 @@
-name: Python checks
+name: Classic - Python checks

 on:
   push:
     branches: [ master, development, ci-test* ]
     paths:
-      - '.github/workflows/lint-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
+      - '.github/workflows/classic-python-checks-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
       - '**.py'
-      - '!forge/tests/vcr_cassettes'
+      - '!classic/forge/tests/vcr_cassettes'
   pull_request:
     branches: [ master, development, release-* ]
     paths:
-      - '.github/workflows/lint-ci.yml'
-      - 'autogpt/**'
-      - 'forge/**'
-      - 'benchmark/**'
+      - '.github/workflows/classic-python-checks-ci.yml'
+      - 'classic/original_autogpt/**'
+      - 'classic/forge/**'
+      - 'classic/benchmark/**'
       - '**.py'
-      - '!forge/tests/vcr_cassettes'
+      - '!classic/forge/tests/vcr_cassettes'

 concurrency:
-  group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('classic-python-checks-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
 cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

 defaults:
@@ -40,18 +40,18 @@ jobs:
   uses: dorny/paths-filter@v3
   with:
     filters: |
-      autogpt:
-        - autogpt/autogpt/**
-        - autogpt/tests/**
-        - autogpt/poetry.lock
+      original_autogpt:
+        - classic/original_autogpt/autogpt/**
+        - classic/original_autogpt/tests/**
+        - classic/original_autogpt/poetry.lock
       forge:
-        - forge/forge/**
-        - forge/tests/**
-        - forge/poetry.lock
+        - classic/forge/forge/**
+        - classic/forge/tests/**
+        - classic/forge/poetry.lock
       benchmark:
-        - benchmark/agbenchmark/**
-        - benchmark/tests/**
-        - benchmark/poetry.lock
+        - classic/benchmark/agbenchmark/**
+        - classic/benchmark/tests/**
+        - classic/benchmark/poetry.lock
 outputs:
   changed-parts: ${{ steps.changes-in.outputs.changes }}

@@ -89,23 +89,23 @@ jobs:
   # Install dependencies

   - name: Install Python dependencies
-    run: poetry -C ${{ matrix.sub-package }} install
+    run: poetry -C classic/${{ matrix.sub-package }} install

   # Lint

   - name: Lint (isort)
     run: poetry run isort --check .
-    working-directory: ${{ matrix.sub-package }}
+    working-directory: classic/${{ matrix.sub-package }}

   - name: Lint (Black)
     if: success() || failure()
     run: poetry run black --check .
-    working-directory: ${{ matrix.sub-package }}
+    working-directory: classic/${{ matrix.sub-package }}

   - name: Lint (Flake8)
     if: success() || failure()
     run: poetry run flake8 .
-    working-directory: ${{ matrix.sub-package }}
+    working-directory: classic/${{ matrix.sub-package }}

 types:
   needs: get-changed-parts
@@ -141,11 +141,11 @@ jobs:
   # Install dependencies

   - name: Install Python dependencies
-    run: poetry -C ${{ matrix.sub-package }} install
+    run: poetry -C classic/${{ matrix.sub-package }} install

   # Typecheck

   - name: Typecheck
     if: success() || failure()
     run: poetry run pyright
-    working-directory: ${{ matrix.sub-package }}
+    working-directory: classic/${{ matrix.sub-package }}

.github/workflows/hackathon.yml (vendored; deleted file, -133 lines)
@@ -1,133 +0,0 @@
name: Hackathon

on:
  workflow_dispatch:
    inputs:
      agents:
        description: "Agents to run (comma-separated)"
        required: false
        default: "autogpt" # Default agents if none are specified

jobs:
  matrix-setup:
    runs-on: ubuntu-latest
    # Service containers to run with `matrix-setup`
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      env-name: ${{ steps.set-matrix.outputs.env-name }}
    steps:
      - id: set-matrix
        run: |
          if [ "${{ github.event_name }}" == "schedule" ]; then
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::[ 'irrelevant']"
          elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
            IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
            matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
            echo "::set-output name=env-name::production"
            echo "::set-output name=matrix::$matrix_string"
          else
            echo "::set-output name=env-name::testing"
            echo "::set-output name=matrix::[ 'irrelevant' ]"
          fi

  tests:
    environment:
      name: "${{ needs.matrix-setup.outputs.env-name }}"
    needs: matrix-setup
    env:
      min-python-version: "3.10"
    name: "${{ matrix.agent-name }}"
    runs-on: ubuntu-latest
    services:
      # Label used to access the service container
      postgres:
        # Docker Hub image
        image: postgres
        # Provide the password for postgres
        env:
          POSTGRES_PASSWORD: postgres
        # Set health checks to wait until postgres has started
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          # Maps tcp port 5432 on service container to the host
          - 5432:5432
    timeout-minutes: 50
    strategy:
      fail-fast: false
      matrix:
        agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
    steps:
      - name: Print Environment Name
        run: |
          echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"

      - name: Check Docker Container
        id: check
        run: docker ps

      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ env.min-python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.min-python-version }}

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python -

      - name: Install Node.js
        uses: actions/setup-node@v4
        with:
          node-version: v18.15

      - name: Run benchmark
        run: |
          link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
          branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
          git clone "$link" -b "$branch" "$AGENT_NAME"
          cd $AGENT_NAME
          cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
          ./run agent start $AGENT_NAME
          cd ../benchmark
          poetry install
          poetry run agbenchmark --no-dep
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
          SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
          WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
          WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
          GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
          AGENT_NAME: ${{ matrix.agent-name }}

.github/workflows/platform-autogpt-docker-ci.yml (vendored; new file, +40 lines)
@@ -0,0 +1,40 @@
name: AutoGPT Server Docker Build & Push

on:
  push:
    branches: [ update-docker-ci ]
    paths:
      - '**'

defaults:
  run:
    shell: bash

env:
  PROJECT_ID: agpt-dev
  IMAGE_NAME: agpt-server-dev
  REGION: us-central1

jobs:
  build-and-push:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Cloud SDK
        uses: google-github-actions/setup-gcloud@v0.2.1
        with:
          project_id: ${{ env.PROJECT_ID }}
          service_account_key: ${{ secrets.GCP_SA_KEY }}
          export_default_credentials: true

      - name: Configure Docker
        run: gcloud auth configure-docker ${{ env.REGION }}-docker.pkg.dev

      - name: Build Docker image
        run: docker build -t ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.IMAGE_NAME }}:${{ github.sha }} -f autogpt_platform/backend/Dockerfile .

      - name: Push Docker image
        run: docker push ${{ env.REGION }}-docker.pkg.dev/${{ env.PROJECT_ID }}/${{ env.IMAGE_NAME }}:${{ github.sha }}

@@ -1,20 +1,20 @@
-name: AutoGPT Builder Infra
+name: AutoGPT Platform - Infra

 on:
   push:
     branches: [ master ]
     paths:
-      - '.github/workflows/autogpt-infra-ci.yml'
-      - 'rnd/infra/**'
+      - '.github/workflows/platform-autogpt-infra-ci.yml'
+      - 'autogpt_platform/infra/**'
   pull_request:
     paths:
-      - '.github/workflows/autogpt-infra-ci.yml'
-      - 'rnd/infra/**'
+      - '.github/workflows/platform-autogpt-infra-ci.yml'
+      - 'autogpt_platform/infra/**'

 defaults:
   run:
     shell: bash
-    working-directory: rnd/infra
+    working-directory: autogpt_platform/infra

 jobs:
   lint:
@@ -53,4 +53,4 @@ jobs:

   - name: Run chart-testing (lint)
     if: steps.list-changed.outputs.changed == 'true'
-    run: ct lint --target-branch ${{ github.event.repository.default_branch }}
+    run: ct lint --target-branch ${{ github.event.repository.default_branch }}

@@ -1,25 +1,25 @@
-name: AutoGPT Server CI
+name: AutoGPT Platform - Backend CI

 on:
   push:
     branches: [master, development, ci-test*]
     paths:
-      - ".github/workflows/autogpt-server-ci.yml"
-      - "rnd/autogpt_server/**"
+      - ".github/workflows/platform-backend-ci.yml"
+      - "autogpt_platform/backend/**"
   pull_request:
     branches: [master, development, release-*]
     paths:
-      - ".github/workflows/autogpt-server-ci.yml"
-      - "rnd/autogpt_server/**"
+      - ".github/workflows/platform-backend-ci.yml"
+      - "autogpt_platform/backend/**"

 concurrency:
-  group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+  group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
   cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

 defaults:
   run:
     shell: bash
-    working-directory: rnd/autogpt_server
+    working-directory: autogpt_platform/backend

 jobs:
   test:
@@ -90,7 +90,7 @@ jobs:
     uses: actions/cache@v4
     with:
       path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
-      key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
+      key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/backend/poetry.lock') }}

     - name: Install Poetry (Unix)
       if: runner.os != 'Windows'
@@ -152,4 +152,4 @@ jobs:
     # uses: codecov/codecov-action@v4
     # with:
     #   token: ${{ secrets.CODECOV_TOKEN }}
-    #   flags: autogpt-server,${{ runner.os }}
+    #   flags: backend,${{ runner.os }}

@@ -1,20 +1,20 @@
-name: AutoGPT Builder CI
+name: AutoGPT Platform - Frontend CI

 on:
   push:
     branches: [ master ]
     paths:
-      - '.github/workflows/autogpt-builder-ci.yml'
-      - 'rnd/autogpt_builder/**'
+      - '.github/workflows/platform-frontend-ci.yml'
+      - 'autogpt_platform/frontend/**'
   pull_request:
     paths:
-      - '.github/workflows/autogpt-builder-ci.yml'
-      - 'rnd/autogpt_builder/**'
+      - '.github/workflows/platform-frontend-ci.yml'
+      - 'autogpt_platform/frontend/**'

 defaults:
   run:
     shell: bash
-    working-directory: rnd/autogpt_builder
+    working-directory: autogpt_platform/frontend

 jobs:

@@ -1,4 +1,4 @@
-name: 'Close stale issues'
+name: Repo - Close stale issues
 on:
   schedule:
     - cron: '30 1 * * *'

@@ -1,12 +1,12 @@
-name: "Pull Request auto-label"
+name: Repo - Pull Request auto-label

 on:
   # So that PRs touching the same files as the push are updated
   push:
     branches: [ master, development, release-* ]
     paths-ignore:
-      - 'forge/tests/vcr_cassettes'
-      - 'benchmark/reports/**'
+      - 'classic/forge/tests/vcr_cassettes'
+      - 'classic/benchmark/reports/**'
   # So that the `dirtyLabel` is removed if conflicts are resolve
   # We recommend `pull_request_target` so that github secrets are available.
   # In `pull_request` we wouldn't be able to change labels of fork PRs

.github/workflows/repo-stats.yml (vendored; 2 lines changed)
@@ -1,4 +1,4 @@
-name: github-repo-stats
+name: Repo - Github Stats

 on:
   schedule:

@@ -1,4 +1,4 @@
-name: PR Status Checker
+name: Repo - PR Status Checker
 on:
   pull_request:
     types: [opened, synchronize, reopened]
@@ -26,6 +26,6 @@ jobs:
       echo "Current directory before running Python script:"
      pwd
       echo "Attempting to run Python script:"
-      python check_actions_status.py
+      python .github/workflows/scripts/check_actions_status.py
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore (vendored; 8 lines changed)
@@ -1,7 +1,7 @@
 ## Original ignores
 .github_access_token
-autogpt/keys.py
-autogpt/*.json
+classic/original_autogpt/keys.py
+classic/original_autogpt/*.json
 auto_gpt_workspace/*
 *.mpeg
 .env
@@ -157,7 +157,7 @@ openai/
 CURRENT_BULLETIN.md

 # AgBenchmark
-agbenchmark/reports/
+classic/benchmark/agbenchmark/reports/

 # Nodejs
 package-lock.json
@@ -170,4 +170,4 @@ pri*
 ig*
 .github_access_token
 LICENSE.rtf
-rnd/autogpt_server/settings.py
+autogpt_platform/backend/settings.py

.gitmodules (vendored; 7 lines changed)
@@ -1,3 +1,6 @@
-[submodule "forge/tests/vcr_cassettes"]
-	path = forge/tests/vcr_cassettes
+[submodule "classic/forge/tests/vcr_cassettes"]
+	path = classic/forge/tests/vcr_cassettes
 	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
+[submodule "autogpt_platform/supabase"]
+	path = autogpt_platform/supabase
+	url = https://github.com/supabase/supabase.git

@@ -16,22 +16,22 @@ repos:
   hooks:
     - id: isort-autogpt
       name: Lint (isort) - AutoGPT
-      entry: poetry -C autogpt run isort
-      files: ^autogpt/
+      entry: poetry -C classic/original_autogpt run isort
+      files: ^classic/original_autogpt/
       types: [file, python]
       language: system

     - id: isort-forge
       name: Lint (isort) - Forge
-      entry: poetry -C forge run isort
-      files: ^forge/
+      entry: poetry -C classic/forge run isort
+      files: ^classic/forge/
       types: [file, python]
       language: system

     - id: isort-benchmark
       name: Lint (isort) - Benchmark
-      entry: poetry -C benchmark run isort
-      files: ^benchmark/
+      entry: poetry -C classic/benchmark run isort
+      files: ^classic/benchmark/
       types: [file, python]
       language: system

@@ -52,20 +52,20 @@ repos:
     - id: flake8
       name: Lint (Flake8) - AutoGPT
       alias: flake8-autogpt
-      files: ^autogpt/(autogpt|scripts|tests)/
-      args: [--config=autogpt/.flake8]
+      files: ^classic/original_autogpt/(autogpt|scripts|tests)/
+      args: [--config=classic/original_autogpt/.flake8]

     - id: flake8
       name: Lint (Flake8) - Forge
       alias: flake8-forge
-      files: ^forge/(forge|tests)/
-      args: [--config=forge/.flake8]
+      files: ^classic/forge/(forge|tests)/
+      args: [--config=classic/forge/.flake8]

     - id: flake8
       name: Lint (Flake8) - Benchmark
       alias: flake8-benchmark
-      files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
-      args: [--config=benchmark/.flake8]
+      files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
+      args: [--config=classic/benchmark/.flake8]

 - repo: local
   # To have watertight type checking, we check *all* the files in an affected
@@ -74,10 +74,10 @@ repos:
     - id: pyright
       name: Typecheck - AutoGPT
       alias: pyright-autogpt
-      entry: poetry -C autogpt run pyright
+      entry: poetry -C classic/original_autogpt run pyright
       args: [-p, autogpt, autogpt]
       # include forge source (since it's a path dependency) but exclude *_test.py files:
-      files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+      files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
       types: [file]
       language: system
       pass_filenames: false
@@ -85,9 +85,9 @@ repos:
     - id: pyright
       name: Typecheck - Forge
       alias: pyright-forge
-      entry: poetry -C forge run pyright
+      entry: poetry -C classic/forge run pyright
       args: [-p, forge, forge]
-      files: ^forge/(forge/|poetry\.lock$)
+      files: ^classic/forge/(classic/forge/|poetry\.lock$)
       types: [file]
       language: system
       pass_filenames: false
@@ -95,9 +95,9 @@ repos:
     - id: pyright
       name: Typecheck - Benchmark
       alias: pyright-benchmark
-      entry: poetry -C benchmark run pyright
+      entry: poetry -C classic/benchmark run pyright
       args: [-p, benchmark, benchmark]
-      files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
+      files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
       types: [file]
       language: system
       pass_filenames: false
@@ -106,22 +106,22 @@ repos:
   hooks:
     - id: pytest-autogpt
       name: Run tests - AutoGPT (excl. slow tests)
-      entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
+      entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
       # include forge source (since it's a path dependency) but exclude *_test.py files:
-      files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+      files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
       language: system
       pass_filenames: false

     - id: pytest-forge
       name: Run tests - Forge (excl. slow tests)
-      entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
-      files: ^forge/(forge/|tests/|poetry\.lock$)
+      entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
+      files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
       language: system
       pass_filenames: false

     - id: pytest-benchmark
       name: Run tests - Benchmark
-      entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
-      files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
+      entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
+      files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
       language: system
       pass_filenames: false

.vscode/all-projects.code-workspace (vendored; 66 lines changed)
@@ -1,49 +1,49 @@
 {
   "folders": [
     {
-      "name": "autogpt",
-      "path": "../autogpt"
+      "name": "autogpt_server",
+      "path": "../autogpt_platform/autogpt_server"
     },
     {
-      "name": "benchmark",
-      "path": "../benchmark"
+      "name": "autogpt_builder",
+      "path": "../autogpt_platform/autogpt_builder"
     },
+    {
+      "name": "market",
+      "path": "../autogpt_platform/market"
+    },
+    {
+      "name": "lib",
+      "path": "../autogpt_platform/autogpt_libs"
+    },
+    {
+      "name": "infra",
+      "path": "../autogpt_platform/infra"
+    },
     {
       "name": "docs",
       "path": "../docs"
     },
     {
-      "name": "forge",
-      "path": "../forge"
-    },
-    {
-      "name": "frontend",
-      "path": "../frontend"
-    },
-    {
-      "name": "autogpt_server",
-      "path": "../rnd/autogpt_server"
-    },
-    {
-      "name": "autogpt_builder",
-      "path": "../rnd/autogpt_builder"
-    },
-    {
-      "name": "market",
-      "path": "../rnd/market"
-    },
-    {
-      "name": "lib",
-      "path": "../rnd/autogpt_libs"
-    },
-    {
-      "name": "infra",
-      "path": "../rnd/infra"
-    },
     {
       "name": "[root]",
       "path": ".."
-    }
+    },
+    {
+      "name": "classic - autogpt",
+      "path": "../classic/original_autogpt"
+    },
+    {
+      "name": "classic - benchmark",
+      "path": "../classic/benchmark"
+    },
+    {
+      "name": "classic - forge",
+      "path": "../classic/forge"
+    },
+    {
+      "name": "classic - frontend",
+      "path": "../classic/frontend"
+    },
   ],
   "settings": {
     "python.analysis.typeCheckingMode": "basic"

README.md (11 lines changed)
@@ -55,15 +55,16 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
 ## 🤖 AutoGPT Classic
 > Below is information about the classic version of AutoGPT.

-**🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
+**🛠️ [Build your own Agent - Quickstart](classic/FORGE-QUICKSTART.md)**

 ### 🏗️ Forge

-**Forge your own agent!** – Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
+**Forge your own agent!** – Forge is a ready-to-go toolkit to build your own agent application. It handles most of the boilerplate code, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from [`forge`](/classic/forge/) can also be used individually to speed up development and reduce boilerplate in your agent project.

-🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) –
+🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/classic/forge/tutorials/001_getting_started.md) –
 This guide will walk you through the process of creating your own agent and using the benchmark and user interface.

-📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/forge) about Forge

 ### 🎯 Benchmark

@@ -83,7 +84,7 @@ This guide will walk you through the process of creating your own agent and usin

 The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!

-📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/frontend) about the Frontend

 ### ⌨️ CLI

autogpt/.vscode/settings.json (vendored; deleted file, -3 lines)
@@ -1,3 +0,0 @@
{
  "python.analysis.typeCheckingMode": "basic",
}

autogpt_platform/README.md (new file, +133 lines)
@@ -0,0 +1,133 @@
# AutoGPT Platform

Welcome to the AutoGPT Platform - a powerful system for creating and running AI agents to solve business problems. This platform enables you to harness the power of artificial intelligence to automate tasks, analyze data, and generate insights for your organization.

## Getting Started

### Prerequisites

- Docker
- Docker Compose V2 (comes with Docker Desktop, or can be installed separately)

### Running the System

To run the AutoGPT Platform, follow these steps:

1. Clone this repository to your local machine.
2. Navigate to autogpt_platform/supabase
3. Run the following command:
   ```
   git submodule update --init --recursive
   ```
4. Navigate back to autogpt_platform (cd ..)
5. Run the following command:
   ```
   cp supabase/docker/.env.example .env
   ```
6. Run the following command:
   ```
   docker compose -f docker-compose.combined.yml up -d
   ```
   This command will start all the necessary backend services defined in the `docker-compose.combined.yml` file in detached mode.
7. Navigate to autogpt_platform/frontend.
8. Run the following command:
   ```
   cp .env.example .env.local
   ```
9. Run the following command:
   ```
   yarn dev
   ```
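
Taken together, steps 1-9 amount to the following shell session (a condensed sketch of the same commands from the steps above, assuming you start from the repository root):

```bash
# Condensed sketch of steps 1-9, starting from the repository root.
cd autogpt_platform/supabase
git submodule update --init --recursive
cd ..                                                  # back to autogpt_platform
cp supabase/docker/.env.example .env
docker compose -f docker-compose.combined.yml up -d    # start backend services
cd frontend
cp .env.example .env.local
yarn dev                                               # start the frontend dev server
```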

### Docker Compose Commands

Here are some useful Docker Compose commands for managing your AutoGPT Platform:

- `docker compose -f docker-compose.combined.yml up -d`: Start the services in detached mode.
- `docker compose -f docker-compose.combined.yml stop`: Stop the running services without removing them.
- `docker compose rm`: Remove stopped service containers.
- `docker compose build`: Build or rebuild services.
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.

### Sample Scenarios

Here are some common scenarios where you might use multiple Docker Compose commands:

1. Updating and restarting a specific service:
   ```
   docker compose build api_srv
   docker compose up -d --no-deps api_srv
   ```
   This rebuilds the `api_srv` service and restarts it without affecting other services.

2. Viewing logs for troubleshooting:
   ```
   docker compose logs -f api_srv ws_srv
   ```
   This shows and follows the logs for both `api_srv` and `ws_srv` services.

3. Scaling a service for increased load:
   ```
   docker compose up -d --scale executor=3
   ```
   This scales the `executor` service to 3 instances to handle increased load.

4. Stopping the entire system for maintenance:
   ```
   docker compose stop
   docker compose rm -f
   docker compose pull
   docker compose up -d
   ```
   This stops all services, removes containers, pulls the latest images, and restarts the system.

5. Developing with live updates:
   ```
   docker compose watch
   ```
   This watches for changes in your code and automatically updates the relevant services.

6. Checking the status of services:
   ```
   docker compose ps
   ```
   This shows the current status of all services defined in your docker-compose.yml file.

These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.

### Persisting Data

To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:

1. Open the `docker-compose.yml` file in a text editor.
2. Add volume configurations for PostgreSQL and Redis services:

   ```yaml
   services:
     postgres:
       # ... other configurations ...
       volumes:
         - postgres_data:/var/lib/postgresql/data

     redis:
       # ... other configurations ...
       volumes:
         - redis_data:/data

   volumes:
     postgres_data:
     redis_data:
   ```

3. Save the file and run `docker compose up -d` to apply the changes.

This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.

@@ -1,6 +1,6 @@
 import pytest

-from .depends import verify_user, requires_admin_user, requires_user
+from .depends import requires_admin_user, requires_user, verify_user


 def test_verify_user_no_payload():

autogpt_platform/autogpt_libs/autogpt_libs/logging/config.py (new file, +166 lines)
@@ -0,0 +1,166 @@
"""Logging module for Auto-GPT."""

import logging
import sys
from pathlib import Path

from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

from .filters import BelowLevelFilter
from .formatters import AGPTFormatter, StructuredLoggingFormatter

LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
DEBUG_LOG_FILE = "debug.log"
ERROR_LOG_FILE = "error.log"

SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"

DEBUG_LOG_FORMAT = (
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s"
)


class LoggingConfig(BaseSettings):

    level: str = Field(
        default="INFO",
        description="Logging level",
        validation_alias="LOG_LEVEL",
    )

    enable_cloud_logging: bool = Field(
        default=False,
        description="Enable logging to Google Cloud Logging",
    )

    enable_file_logging: bool = Field(
        default=False,
        description="Enable logging to file",
    )
    # File output
    log_dir: Path = Field(
        default=LOG_DIR,
        description="Log directory",
    )

    model_config = SettingsConfigDict(
        env_prefix="",
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    @field_validator("level", mode="before")
    @classmethod
    def parse_log_level(cls, v):
        if isinstance(v, str):
            v = v.upper()
            if v not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
                raise ValueError(f"Invalid log level: {v}")
            return v
        return v


def configure_logging(force_cloud_logging: bool = False) -> None:
    """Configure the native logging module based on the LoggingConfig settings.

    This function sets up logging handlers and formatters according to the
    configuration specified in the LoggingConfig object. It supports various
    logging outputs including console, file, cloud, and JSON logging.

    The function uses the LoggingConfig object to determine which logging
    features to enable and how to configure them. This includes setting
    log levels, log formats, and output destinations.

    No arguments are required as the function creates its own LoggingConfig
    instance internally.

    Note: This function is typically called at the start of the application
    to set up the logging infrastructure.
    """

    config = LoggingConfig()

    log_handlers: list[logging.Handler] = []

    # Cloud logging setup
    if config.enable_cloud_logging or force_cloud_logging:
        import google.cloud.logging
        from google.cloud.logging.handlers import CloudLoggingHandler
        from google.cloud.logging_v2.handlers.transports.sync import SyncTransport

        client = google.cloud.logging.Client()
        cloud_handler = CloudLoggingHandler(
            client,
            name="autogpt_logs",
            transport=SyncTransport,
        )
        cloud_handler.setLevel(config.level)
        cloud_handler.setFormatter(StructuredLoggingFormatter())
        log_handlers.append(cloud_handler)
        print("Cloud logging enabled")
    else:
        # Console output handlers
        stdout = logging.StreamHandler(stream=sys.stdout)
        stdout.setLevel(config.level)
        stdout.addFilter(BelowLevelFilter(logging.WARNING))
        if config.level == logging.DEBUG:
            stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
        else:
            stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

        stderr = logging.StreamHandler()
        stderr.setLevel(logging.WARNING)
        if config.level == logging.DEBUG:
            stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
        else:
            stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))

        log_handlers += [stdout, stderr]
        print("Console logging enabled")

    # File logging setup
    if config.enable_file_logging:
        # create log directory if it doesn't exist
        if not config.log_dir.exists():
            config.log_dir.mkdir(parents=True, exist_ok=True)

        print(f"Log directory: {config.log_dir}")

        # Activity log handler (INFO and above)
        activity_log_handler = logging.FileHandler(
            config.log_dir / LOG_FILE, "a", "utf-8"
        )
        activity_log_handler.setLevel(config.level)
        activity_log_handler.setFormatter(
            AGPTFormatter(SIMPLE_LOG_FORMAT, no_color=True)
        )
        log_handlers.append(activity_log_handler)

        if config.level == logging.DEBUG:
            # Debug log handler (all levels)
            debug_log_handler = logging.FileHandler(
                config.log_dir / DEBUG_LOG_FILE, "a", "utf-8"
            )
            debug_log_handler.setLevel(logging.DEBUG)
            debug_log_handler.setFormatter(
                AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True)
            )
            log_handlers.append(debug_log_handler)

        # Error log handler (ERROR and above)
        error_log_handler = logging.FileHandler(
            config.log_dir / ERROR_LOG_FILE, "a", "utf-8"
        )
        error_log_handler.setLevel(logging.ERROR)
        error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True))
        log_handlers.append(error_log_handler)
        print("File logging enabled")

    # Configure the root logger
    logging.basicConfig(
        format=DEBUG_LOG_FORMAT if config.level == logging.DEBUG else SIMPLE_LOG_FORMAT,
        level=config.level,
        handlers=log_handlers,
    )
||||
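A minimal usage sketch of this module (assuming the package is importable as `autogpt_libs`; the env var names follow the `LoggingConfig` fields above):

```python
import logging
import os

from autogpt_libs.logging.config import configure_logging

os.environ["LOG_LEVEL"] = "DEBUG"        # read via validation_alias="LOG_LEVEL"
os.environ["ENABLE_FILE_LOGGING"] = "false"

configure_logging()
logging.getLogger(__name__).info("logging configured")
```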
autogpt_platform/autogpt_libs/autogpt_libs/logging/formatters.py (new file, 95 lines; the path is inferred from the `from .formatters import ...` statement above)
@@ -0,0 +1,95 @@
import logging

from colorama import Fore, Style
from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler

from .utils import remove_color_codes


class FancyConsoleFormatter(logging.Formatter):
    """
    A custom logging formatter designed for console output.

    This formatter enhances the standard logging output with color coding. The color
    coding is based on the level of the log message, making it easier to distinguish
    between different types of messages in the console output.

    The color for each level is defined in the LEVEL_COLOR_MAP class attribute.
    """

    # level -> (level & text color, title color)
    LEVEL_COLOR_MAP = {
        logging.DEBUG: Fore.LIGHTBLACK_EX,
        logging.INFO: Fore.BLUE,
        logging.WARNING: Fore.YELLOW,
        logging.ERROR: Fore.RED,
        logging.CRITICAL: Fore.RED + Style.BRIGHT,
    }

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Determine default color based on error level
        level_color = ""
        if record.levelno in self.LEVEL_COLOR_MAP:
            level_color = self.LEVEL_COLOR_MAP[record.levelno]
            record.levelname = f"{level_color}{record.levelname}{Style.RESET_ALL}"

        # Determine color for message
        color = getattr(record, "color", level_color)
        color_is_specified = hasattr(record, "color")

        # Don't color INFO messages unless the color is explicitly specified.
        if color and (record.levelno != logging.INFO or color_is_specified):
            record.msg = f"{color}{record.msg}{Style.RESET_ALL}"

        return super().format(record)


class AGPTFormatter(FancyConsoleFormatter):
    def __init__(self, *args, no_color: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        self.no_color = no_color

    def format(self, record: logging.LogRecord) -> str:
        # Make sure `msg` is a string
        if not hasattr(record, "msg"):
            record.msg = ""
        elif type(record.msg) is not str:
            record.msg = str(record.msg)

        # Strip color from the message to prevent color spoofing
        if record.msg and not getattr(record, "preserve_color", False):
            record.msg = remove_color_codes(record.msg)

        # Determine color for title
        title = getattr(record, "title", "")
        title_color = getattr(record, "title_color", "") or self.LEVEL_COLOR_MAP.get(
            record.levelno, ""
        )
        if title and title_color:
            title = f"{title_color + Style.BRIGHT}{title}{Style.RESET_ALL}"
        # Make sure record.title is set, and padded with a space if not empty
        record.title = f"{title} " if title else ""

        if self.no_color:
            return remove_color_codes(super().format(record))
        else:
            return super().format(record)


class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter):
    def __init__(self):
        # Set up CloudLoggingFilter to add diagnostic info to the log records
        self.cloud_logging_filter = CloudLoggingFilter()

        # Init StructuredLogHandler
        super().__init__()

    def format(self, record: logging.LogRecord) -> str:
        self.cloud_logging_filter.filter(record)
        return super().format(record)
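For illustration, a sketch of wiring `AGPTFormatter` into a plain handler (the format string is arbitrary; `%(title)s` is populated by the formatter itself):

```python
import logging

handler = logging.StreamHandler()
handler.setFormatter(AGPTFormatter("%(levelname)s %(title)s%(message)s"))

logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("watch out")  # level name is colored per LEVEL_COLOR_MAP
```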
@@ -0,0 +1,14 @@
from __future__ import annotations

import json
import logging


class JsonFileHandler(logging.FileHandler):
    def format(self, record: logging.LogRecord) -> str:
        record.json_data = json.loads(record.getMessage())
        return json.dumps(getattr(record, "json_data"), ensure_ascii=False, indent=4)

    def emit(self, record: logging.LogRecord) -> None:
        with open(self.baseFilename, "w", encoding="utf-8") as f:
            f.write(self.format(record))
autogpt_platform/autogpt_libs/autogpt_libs/logging/utils.py (new file, 27 lines)
@@ -0,0 +1,27 @@
import logging
import re
from typing import Any

from colorama import Fore


def remove_color_codes(s: str) -> str:
    return re.sub(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])", "", s)


def fmt_kwargs(kwargs: dict) -> str:
    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())


def print_attribute(
    title: str, value: Any, title_color: str = Fore.GREEN, value_color: str = ""
) -> None:
    logger = logging.getLogger()
    logger.info(
        str(value),
        extra={
            "title": f"{title.rstrip(':')}:",
            "title_color": title_color,
            "color": value_color,
        },
    )
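Assuming the root logger uses `AGPTFormatter` with a `%(title)s` placeholder, a call like this (sketch; the attribute name and value are made up) logs a colored title followed by the value:

```python
print_attribute("Smart LLM", "gpt-4o")  # logged as "Smart LLM: gpt-4o"
```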
@@ -1,13 +1,21 @@
 import secrets
 from datetime import datetime, timedelta, timezone
 from typing import cast

-from supabase import Client, create_client
+from supabase import Client

-from .types import Credentials, OAuth2Credentials, UserMetadata, UserMetadataRaw
+from .types import (
+    Credentials,
+    OAuth2Credentials,
+    OAuthState,
+    UserMetadata,
+    UserMetadataRaw,
+)


 class SupabaseIntegrationCredentialsStore:
-    def __init__(self, url: str, key: str):
-        self.supabase: Client = create_client(url, key)
+    def __init__(self, supabase: Client):
+        self.supabase = supabase

     def add_creds(self, user_id: str, credentials: Credentials) -> None:
         if self.get_creds_by_id(user_id, credentials.id):
@@ -73,6 +81,52 @@ class SupabaseIntegrationCredentialsStore:
         ]
         self._set_user_integration_creds(user_id, filtered_credentials)

+    async def store_state_token(self, user_id: str, provider: str) -> str:
+        token = secrets.token_urlsafe(32)
+        expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)
+
+        state = OAuthState(
+            token=token, provider=provider, expires_at=int(expires_at.timestamp())
+        )
+
+        user_metadata = self._get_user_metadata(user_id)
+        oauth_states = user_metadata.get("integration_oauth_states", [])
+        oauth_states.append(state.model_dump())
+        user_metadata["integration_oauth_states"] = oauth_states
+
+        self.supabase.auth.admin.update_user_by_id(
+            user_id, {"user_metadata": user_metadata}
+        )
+
+        return token
+
+    async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
+        user_metadata = self._get_user_metadata(user_id)
+        oauth_states = user_metadata.get("integration_oauth_states", [])
+
+        now = datetime.now(timezone.utc)
+        valid_state = next(
+            (
+                state
+                for state in oauth_states
+                if state["token"] == token
+                and state["provider"] == provider
+                and state["expires_at"] > now.timestamp()
+            ),
+            None,
+        )
+
+        if valid_state:
+            # Remove the used state
+            oauth_states.remove(valid_state)
+            user_metadata["integration_oauth_states"] = oauth_states
+            self.supabase.auth.admin.update_user_by_id(
+                user_id, {"user_metadata": user_metadata}
+            )
+            return True
+
+        return False
+
     def _set_user_integration_creds(
         self, user_id: str, credentials: list[Credentials]
     ) -> None:
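These two methods implement a one-time CSRF `state` token for the OAuth flow. A hedged usage sketch (assuming `store` is a configured instance and `user_id` is known):

```python
token = await store.store_state_token(user_id, "github")
# ...redirect the user to the provider's auth URL with `state=token`...
assert await store.verify_state_token(user_id, token, "github") is True
# the token is consumed on first successful verification:
assert await store.verify_state_token(user_id, token, "github") is False
```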
@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field, SecretStr, field_serializer
 class _BaseCredentials(BaseModel):
     id: str = Field(default_factory=lambda: str(uuid4()))
     provider: str
-    title: str
+    title: Optional[str]

     @field_serializer("*")
     def dump_secret_strings(value: Any, _info):
@@ -18,10 +18,14 @@ class _BaseCredentials(BaseModel):
 class OAuth2Credentials(_BaseCredentials):
     type: Literal["oauth2"] = "oauth2"
+    username: Optional[str]
+    """Username of the third-party service user that these credentials belong to"""
     access_token: SecretStr
-    access_token_expires_at: Optional[int]  # seconds
+    access_token_expires_at: Optional[int]
+    """Unix timestamp (seconds) indicating when the access token expires (if at all)"""
     refresh_token: Optional[SecretStr]
-    refresh_token_expires_at: Optional[int]  # seconds
+    refresh_token_expires_at: Optional[int]
+    """Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
     scopes: list[str]
     metadata: dict[str, Any] = Field(default_factory=dict)
@@ -29,7 +33,8 @@ class OAuth2Credentials(_BaseCredentials):
 class APIKeyCredentials(_BaseCredentials):
     type: Literal["api_key"] = "api_key"
     api_key: SecretStr
-    expires_at: Optional[int]  # seconds
+    expires_at: Optional[int]
+    """Unix timestamp (seconds) indicating when the API key expires (if at all)"""


 Credentials = Annotated[
@@ -38,9 +43,18 @@ Credentials = Annotated[
 ]


+class OAuthState(BaseModel):
+    token: str
+    provider: str
+    expires_at: int
+    """Unix timestamp (seconds) indicating when this OAuth state expires"""


 class UserMetadata(BaseModel):
     integration_credentials: list[Credentials] = Field(default_factory=list)
+    integration_oauth_states: list[OAuthState] = Field(default_factory=list)


 class UserMetadataRaw(TypedDict, total=False):
     integration_credentials: list[dict]
+    integration_oauth_states: list[dict]
autogpt_platform/autogpt_libs/poetry.lock (generated, new file, 1693 lines) — diff suppressed because it is too large
@@ -1,19 +1,21 @@
 [tool.poetry]
 name = "autogpt-libs"
-version = "0.1.0"
+version = "0.2.0"
 description = "Shared libraries across NextGen AutoGPT"
 authors = ["Aarushi <aarushik93@gmail.com>"]
 readme = "README.md"
 packages = [{ include = "autogpt_libs" }]

 [tool.poetry.dependencies]
-python = ">=3.10,<4.0"
 colorama = "^0.4.6"
+google-cloud-logging = "^3.8.0"
 pydantic = "^2.8.2"
+pydantic-settings = "^2.5.2"
 pyjwt = "^2.8.0"
+python = ">=3.10,<4.0"
 python-dotenv = "^1.0.1"
 supabase = "^2.7.2"


 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
@@ -1,7 +1,7 @@
 DB_USER=agpt_user
 DB_PASS=pass123
 DB_NAME=agpt_local
-DB_PORT=5432
+DB_PORT=5433
 DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}"
 PRISMA_SCHEMA="postgres/schema.prisma"

@@ -9,9 +9,13 @@ REDIS_HOST=localhost
 REDIS_PORT=6379
 REDIS_PASSWORD=password

-AUTH_ENABLED=false
+ENABLE_AUTH=false
+ENABLE_CREDIT=false
 APP_ENV="local"
 PYRO_HOST=localhost
 SENTRY_DSN=
+# This is needed when ENABLE_AUTH is true
+SUPABASE_JWT_SECRET=

 ## ===== OPTIONAL API KEYS ===== ##

@@ -50,3 +54,11 @@ SMTP_PASSWORD=
 # Medium
 MEDIUM_API_KEY=
 MEDIUM_AUTHOR_ID=
+
+
+# Logging Configuration
+LOG_LEVEL=INFO
+ENABLE_CLOUD_LOGGING=false
+ENABLE_FILE_LOGGING=false
+# Use to manually set the log directory
+# LOG_DIR=./logs
autogpt_platform/backend/Dockerfile (new file, 78 lines)
@@ -0,0 +1,78 @@
FROM python:3.11-slim-buster AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

WORKDIR /app

# Install build dependencies
RUN apt-get update \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev postgresql-client git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools

RUN pip3 install poetry

# Copy and install dependencies
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
WORKDIR /app/autogpt_platform/backend
RUN poetry config virtualenvs.create false \
    && poetry install --no-interaction --no-ansi

# Generate Prisma client
COPY autogpt_platform/backend/schema.prisma ./
RUN poetry config virtualenvs.create false \
    && poetry run prisma generate

FROM python:3.11-slim-buster AS server_dependencies

WORKDIR /app

ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools

# Copy only necessary files from builder
COPY --from=builder /app /app
COPY --from=builder /usr/local/lib/python3.11 /usr/local/lib/python3.11
COPY --from=builder /usr/local/bin /usr/local/bin
# Copy Prisma binaries
COPY --from=builder /root/.cache/prisma-python/binaries /root/.cache/prisma-python/binaries

ENV PATH="/app/.venv/bin:$PATH"

RUN mkdir -p /app/autogpt_platform/autogpt_libs
RUN mkdir -p /app/autogpt_platform/backend

COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs

COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/

WORKDIR /app/autogpt_platform/backend

FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend

ENV DATABASE_URL=""
ENV PORT=8000

CMD ["poetry", "run", "rest"]
@@ -48,19 +48,19 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
 > ```
 >
 > Then run the generation again. The path *should* look something like this:
-> `<some path>/pypoetry/virtualenvs/autogpt-server-TQIRSwR6-py3.12/bin/prisma`
+> `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`

 6. Run the postgres database from the /rnd folder

 ```sh
-cd rnd/
+cd autogpt_platform/
 docker compose up -d
 ```

-7. Run the migrations (from the autogpt_server folder)
+7. Run the migrations (from the backend folder)

 ```sh
-cd ../autogpt_server
+cd ../backend
 prisma migrate dev --schema postgres/schema.prisma
 ```
@@ -53,7 +53,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
 > ```
 >
 > Then run the generation again. The path *should* look something like this:
-> `<some path>/pypoetry/virtualenvs/autogpt-server-TQIRSwR6-py3.12/bin/prisma`
+> `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`

 6. Migrate the database. Be careful because this deletes current data in the database.
@@ -101,7 +101,7 @@ docker compose down
 If you run into issues with dangling orphans, try:

 ```sh
-docker-compose down --volumes --remove-orphans && docker-compose up --force-recreate --renew-anon-volumes --remove-orphans
+docker compose down --volumes --remove-orphans && docker-compose up --force-recreate --renew-anon-volumes --remove-orphans
 ```

 ## Testing
@@ -183,10 +183,17 @@ A communication layer (`service.py`) is created to decouple the communication li
 Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process. A minimal sketch of this pattern is shown below.

+By default the daemons run on the following ports:
+
+Execution Manager Daemon: 8002
+Execution Scheduler Daemon: 8003
+Rest Server Daemon: 8004
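A minimal illustrative sketch of the Pyro5 pattern described above (the service name, method, and object ID are made up for the example, not taken from the codebase):

```python
import Pyro5.api


@Pyro5.api.expose
class ExampleService:
    def add(self, a: int, b: int) -> int:
        return a + b


# Daemon process: register the service and serve requests (port 8002 mirrors
# the Execution Manager default above, purely as an example).
daemon = Pyro5.api.Daemon(port=8002)
uri = daemon.register(ExampleService, objectId="example")
# daemon.requestLoop()

# Client process: call the exposed method through a proxy.
# proxy = Pyro5.api.Proxy(uri)
# proxy.add(1, 2)  # -> 3
```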
 ## Adding a New Agent Block

 To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information (a minimal example block follows this list):
-* All the block code should live in the `blocks` (`autogpt_server.blocks`) module.
+* All the block code should live in the `blocks` (`backend.blocks`) module.
 * `input_schema`: the schema of the input data, represented by a Pydantic object.
 * `output_schema`: the schema of the output data, represented by a Pydantic object.
 * `run` method: the main logic of the block.
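A minimal sketch of such a block, modeled on the blocks in this diff (the UUID is a placeholder):

```python
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class EchoBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="Text to echo back.")

    class Output(BlockSchema):
        output: str = SchemaField(description="The echoed text.")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Echoes its input back as output.",
            categories={BlockCategory.BASIC},
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
            test_input={"text": "hi"},
            test_output=[("output", "hi")],
        )

    def run(self, input_data: Input) -> BlockOutput:
        yield "output", input_data.text
```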
@@ -1,7 +1,7 @@
 from typing import TYPE_CHECKING

 if TYPE_CHECKING:
-    from autogpt_server.util.process import AppProcess
+    from backend.util.process import AppProcess


 def run_processes(*processes: "AppProcess", **kwargs):
@@ -24,12 +24,10 @@ def main(**kwargs):
     Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
     """

-    from autogpt_server.executor import ExecutionManager, ExecutionScheduler
-    from autogpt_server.server import AgentServer, WebsocketServer
-    from autogpt_server.util.service import PyroNameServer
+    from backend.executor import ExecutionManager, ExecutionScheduler
+    from backend.server import AgentServer, WebsocketServer

     run_processes(
         PyroNameServer(),
         ExecutionManager(),
         ExecutionScheduler(),
         WebsocketServer(),
@@ -4,9 +4,9 @@ import os
 import re
 from pathlib import Path

-from autogpt_server.data.block import Block
+from backend.data.block import Block

-# Dynamically load all modules under autogpt_server.blocks
+# Dynamically load all modules under backend.blocks
 AVAILABLE_MODULES = []
 current_dir = os.path.dirname(__file__)
 modules = glob.glob(os.path.join(current_dir, "*.py"))
@@ -55,15 +55,15 @@ for cls in all_subclasses(Block):
         raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")

     # Prevent duplicate field name in input_schema and output_schema
-    duplicate_field_names = set(block.input_schema.__fields__.keys()) & set(
-        block.output_schema.__fields__.keys()
+    duplicate_field_names = set(block.input_schema.model_fields.keys()) & set(
+        block.output_schema.model_fields.keys()
     )
     if duplicate_field_names:
         raise ValueError(
             f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
         )

-    for field in block.input_schema.__fields__.values():
+    for field in block.input_schema.model_fields.values():
         if field.annotation is bool and field.default not in (True, False):
             raise ValueError(f"{block.name} has a boolean field with no default value")
@@ -1,16 +1,20 @@
 import re
 from typing import Any, List

 from jinja2 import BaseLoader, Environment
 from pydantic import Field

-from autogpt_server.data.block import (
+from backend.data.block import (
     Block,
     BlockCategory,
     BlockOutput,
     BlockSchema,
     BlockUIType,
 )
-from autogpt_server.data.model import SchemaField
-from autogpt_server.util.mock import MockObject
+from backend.data.model import SchemaField
+from backend.util.mock import MockObject

 jinja = Environment(loader=BaseLoader())


 class StoreValueBlock(Block):
@@ -81,7 +85,6 @@ class PrintToConsoleBlock(Block):
 class FindInDictionaryBlock(Block):
-
     class Input(BlockSchema):
         input: Any = Field(description="Dictionary to lookup from")
         key: str | int = Field(description="Key to lookup in the dictionary")
@@ -136,7 +139,7 @@ class FindInDictionaryBlock(Block):
             yield "missing", input_data.input


-class InputBlock(Block):
+class AgentInputBlock(Block):
     """
     This block is used to provide input to the graph.
@@ -148,13 +151,20 @@ class InputBlock(Block):
     class Input(BlockSchema):
         value: Any = SchemaField(description="The value to be passed as input.")
         name: str = SchemaField(description="The name of the input.")
-        description: str = SchemaField(description="The description of the input.")
+        description: str = SchemaField(
+            description="The description of the input.",
+            default="",
+            advanced=True,
+        )
         placeholder_values: List[Any] = SchemaField(
-            description="The placeholder values to be passed as input."
+            description="The placeholder values to be passed as input.",
+            default=[],
+            advanced=True,
         )
         limit_to_placeholder_values: bool = SchemaField(
             description="Whether to limit the selection to placeholder values.",
             default=False,
             advanced=True,
         )

     class Output(BlockSchema):
@@ -164,8 +174,8 @@ class InputBlock(Block):
         super().__init__(
             id="c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
             description="This block is used to provide input to the graph.",
-            input_schema=InputBlock.Input,
-            output_schema=InputBlock.Output,
+            input_schema=AgentInputBlock.Input,
+            output_schema=AgentInputBlock.Output,
             test_input=[
                 {
                     "value": "Hello, World!",
@@ -194,7 +204,7 @@ class InputBlock(Block):
         yield "result", input_data.value


-class OutputBlock(Block):
+class AgentOutputBlock(Block):
     """
     Records the output of the graph for users to see.
@@ -215,13 +225,17 @@ class OutputBlock(Block):
     """

     class Input(BlockSchema):
-        recorded_value: Any = SchemaField(
-            description="The value to be recorded as output."
-        )
+        value: Any = SchemaField(description="The value to be recorded as output.")
         name: str = SchemaField(description="The name of the output.")
-        description: str = SchemaField(description="The description of the output.")
-        fmt_string: str = SchemaField(
-            description="The format string to be used to format the recorded_value."
+        description: str = SchemaField(
+            description="The description of the output.",
+            default="",
+            advanced=True,
+        )
+        format: str = SchemaField(
+            description="The format string to be used to format the recorded_value.",
+            default="",
+            advanced=True,
         )

     class Output(BlockSchema):
@@ -238,31 +252,31 @@ class OutputBlock(Block):
                 "This block is key for capturing and presenting final results or "
                 "important intermediate outputs of the graph execution."
             ),
-            input_schema=OutputBlock.Input,
-            output_schema=OutputBlock.Output,
+            input_schema=AgentOutputBlock.Input,
+            output_schema=AgentOutputBlock.Output,
             test_input=[
                 {
-                    "recorded_value": "Hello, World!",
+                    "value": "Hello, World!",
                     "name": "output_1",
                     "description": "This is a test output.",
-                    "fmt_string": "{value}",
+                    "format": "{{ output_1 }}!!",
                 },
                 {
-                    "recorded_value": 42,
+                    "value": "42",
                     "name": "output_2",
                     "description": "This is another test output.",
-                    "fmt_string": "{value}",
+                    "format": "{{ output_2 }}",
                 },
                 {
-                    "recorded_value": MockObject(value="!!", key="key"),
+                    "value": MockObject(value="!!", key="key"),
                     "name": "output_3",
                     "description": "This is a test output with a mock object.",
-                    "fmt_string": "{value}",
+                    "format": "{{ output_3 }}",
                 },
             ],
             test_output=[
-                ("output", "Hello, World!"),
-                ("output", 42),
+                ("output", "Hello, World!!!"),
+                ("output", "42"),
                 ("output", MockObject(value="!!", key="key")),
             ],
             categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
@@ -274,13 +288,15 @@ class OutputBlock(Block):
         Attempts to format the recorded_value using the fmt_string if provided.
         If formatting fails or no fmt_string is given, returns the original recorded_value.
         """
-        if input_data.fmt_string:
+        if input_data.format:
             try:
-                yield "output", input_data.fmt_string.format(input_data.recorded_value)
-            except Exception:
-                yield "output", input_data.recorded_value
+                fmt = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", input_data.format)
+                template = jinja.from_string(fmt)
+                yield "output", template.render({input_data.name: input_data.value})
+            except Exception as e:
+                yield "output", f"Error: {e}, {input_data.value}"
         else:
-            yield "output", input_data.recorded_value
+            yield "output", input_data.value


 class AddToDictionaryBlock(Block):
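The `re.sub` call in the new code makes legacy single-brace placeholders Jinja-compatible by wrapping each match in a second pair of braces; for example (sketch):

```python
import re

fmt = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", "{output_1}!!")
print(fmt)  # {{output_1}}!!  -- now a valid Jinja template
```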
@@ -422,7 +438,8 @@ class NoteBlock(Block):
     class Input(BlockSchema):
         text: str = SchemaField(description="The text to display in the sticky note.")

-    class Output(BlockSchema): ...
+    class Output(BlockSchema):
+        output: str = SchemaField(description="The text to display in the sticky note.")

     def __init__(self):
         super().__init__(
@@ -432,8 +449,11 @@ class NoteBlock(Block):
             input_schema=NoteBlock.Input,
             output_schema=NoteBlock.Output,
             test_input={"text": "Hello, World!"},
-            test_output=None,
+            test_output=[
+                ("output", "Hello, World!"),
+            ],
             ui_type=BlockUIType.NOTE,
         )

-    def run(self, input_data: Input) -> BlockOutput: ...
+    def run(self, input_data: Input) -> BlockOutput:
+        yield "output", input_data.text
@@ -2,7 +2,7 @@ import os
 import re
 from typing import Type

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema


 class BlockInstallationBlock(Block):
@@ -48,7 +48,7 @@ class BlockInstallationBlock(Block):
         block_dir = os.path.dirname(__file__)
         file_path = f"{block_dir}/{file_name}.py"
-        module_name = f"autogpt_server.blocks.{file_name}"
+        module_name = f"backend.blocks.{file_name}"
         with open(file_path, "w") as f:
             f.write(code)
@@ -57,7 +57,7 @@ class BlockInstallationBlock(Block):
         block_class: Type[Block] = getattr(module, class_name)
         block = block_class()

-        from autogpt_server.util.test import execute_block_test
+        from backend.util.test import execute_block_test

         execute_block_test(block)
         yield "success", "Block installed successfully."
@@ -1,8 +1,8 @@
 from enum import Enum
 from typing import Any

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import SchemaField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class ComparisonOperator(Enum):
@@ -1,5 +1,5 @@
-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import ContributorDetails
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import ContributorDetails


 class ReadCsvBlock(Block):
@@ -14,7 +14,8 @@ class ReadCsvBlock(Block):
         skip_columns: list[str] = []

     class Output(BlockSchema):
-        data: dict[str, str]
+        row: dict[str, str]
+        all_data: list[dict[str, str]]

     def __init__(self):
         super().__init__(
@@ -27,8 +28,15 @@ class ReadCsvBlock(Block):
                 "contents": "a, b, c\n1,2,3\n4,5,6",
             },
             test_output=[
-                ("data", {"a": "1", "b": "2", "c": "3"}),
-                ("data", {"a": "4", "b": "5", "c": "6"}),
+                ("row", {"a": "1", "b": "2", "c": "3"}),
+                ("row", {"a": "4", "b": "5", "c": "6"}),
+                (
+                    "all_data",
+                    [
+                        {"a": "1", "b": "2", "c": "3"},
+                        {"a": "4", "b": "5", "c": "6"},
+                    ],
+                ),
             ],
         )
@@ -53,8 +61,7 @@ class ReadCsvBlock(Block):
         for _ in range(input_data.skip_rows):
             next(reader)

-        # join the data with the header
-        for row in reader:
+        def process_row(row):
             data = {}
             for i, value in enumerate(row):
                 if i not in input_data.skip_columns:
@@ -62,4 +69,12 @@ class ReadCsvBlock(Block):
                     data[header[i]] = value.strip() if input_data.strip else value
                 else:
                     data[str(i)] = value.strip() if input_data.strip else value
-            yield "data", data
+            return data
+
+        all_data = []
+        for row in reader:
+            processed_row = process_row(row)
+            all_data.append(processed_row)
+            yield "row", processed_row
+
+        yield "all_data", all_data
@@ -4,8 +4,8 @@ import aiohttp
 import discord
 from pydantic import Field

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SecretField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SecretField


 class ReadDiscordMessagesBlock(Block):
@@ -4,8 +4,8 @@ from email.mime.text import MIMEText
 from pydantic import BaseModel, ConfigDict, Field

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField


 class EmailCredentials(BaseModel):
@@ -3,7 +3,7 @@ from enum import Enum
 import requests

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema


 class HttpMethod(Enum):
@@ -1,7 +1,7 @@
 from typing import Any, List, Tuple

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import SchemaField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class ListIteratorBlock(Block):
@@ -1,15 +1,16 @@
 import logging
 from enum import Enum
-from typing import List, NamedTuple
+from json import JSONDecodeError
+from typing import Any, List, NamedTuple

 import anthropic
 import ollama
 import openai
 from groq import Groq

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
-from autogpt_server.util import json
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField
+from backend.util import json

 logger = logging.getLogger(__name__)
@@ -24,6 +25,7 @@ LlmApiKeys = {
 class ModelMetadata(NamedTuple):
     provider: str
     context_window: int
+    cost_factor: int


 class LlmModel(str, Enum):
@@ -55,26 +57,29 @@ class LlmModel(str, Enum):
 MODEL_METADATA = {
-    LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000),
-    LlmModel.GPT4O: ModelMetadata("openai", 128000),
-    LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000),
-    LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385),
-    LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000),
-    LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000),
-    LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192),
-    LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192),
-    LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768),
-    LlmModel.GEMMA_7B: ModelMetadata("groq", 8192),
-    LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192),
-    LlmModel.LLAMA3_1_405B: ModelMetadata(
-        "groq", 8192
-    ),  # Limited to 16k during preview
-    LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072),
-    LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072),
-    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192),
-    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192),
+    LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10),
+    LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12),
+    LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11),
+    LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8),
+    LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14),
+    LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13),
+    LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6),
+    LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9),
+    LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7),
+    LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6),
+    LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7),
+    LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10),
+    # Limited to 16k during preview
+    LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15),
+    LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13),
+    LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7),
+    LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11),
 }

 for model in LlmModel:
     if model not in MODEL_METADATA:
         raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")


 class AIStructuredResponseGeneratorBlock(Block):
     class Input(BlockSchema):
@@ -89,7 +94,7 @@ class AIStructuredResponseGeneratorBlock(Block):
         )

     class Output(BlockSchema):
-        response: dict[str, str]
+        response: dict[str, Any]
         error: str

     def __init__(self):
@@ -135,16 +140,33 @@ class AIStructuredResponseGeneratorBlock(Block):
             )
             return response.choices[0].message.content or ""
         elif provider == "anthropic":
-            sysprompt = "".join([p["content"] for p in prompt if p["role"] == "system"])
-            usrprompt = [p for p in prompt if p["role"] == "user"]
+            system_messages = [p["content"] for p in prompt if p["role"] == "system"]
+            sysprompt = " ".join(system_messages)
+
+            messages = []
+            last_role = None
+            for p in prompt:
+                if p["role"] in ["user", "assistant"]:
+                    if p["role"] != last_role:
+                        messages.append({"role": p["role"], "content": p["content"]})
+                        last_role = p["role"]
+                    else:
+                        # If the role is the same as the last one, combine the content
+                        messages[-1]["content"] += "\n" + p["content"]
+
             client = anthropic.Anthropic(api_key=api_key)
-            response = client.messages.create(
-                model=model.value,
-                max_tokens=4096,
-                system=sysprompt,
-                messages=usrprompt,  # type: ignore
-            )
-            return response.content[0].text if response.content else ""
+            try:
+                response = client.messages.create(
+                    model=model.value,
+                    max_tokens=4096,
+                    system=sysprompt,
+                    messages=messages,
+                )
+                return response.content[0].text if response.content else ""
+            except anthropic.APIError as e:
+                error_message = f"Anthropic API error: {str(e)}"
+                logger.error(error_message)
+                raise ValueError(error_message)
         elif provider == "groq":
             client = Groq(api_key=api_key)
             response_format = {"type": "json_object"} if json_format else None
@@ -195,14 +217,16 @@ class AIStructuredResponseGeneratorBlock(Block):
         prompt.append({"role": "user", "content": input_data.prompt})

-        def parse_response(resp: str) -> tuple[dict[str, str], str | None]:
+        def parse_response(resp: str) -> tuple[dict[str, Any], str | None]:
             try:
                 parsed = json.loads(resp)
+                if not isinstance(parsed, dict):
+                    return {}, f"Expected a dictionary, but got {type(parsed)}"
                 miss_keys = set(input_data.expected_format.keys()) - set(parsed.keys())
                 if miss_keys:
                     return parsed, f"Missing keys: {miss_keys}"
                 return parsed, None
-            except Exception as e:
+            except JSONDecodeError as e:
                 return {}, f"JSON decode error: {e}"

         logger.info(f"LLM request: {prompt}")
@@ -226,7 +250,16 @@ class AIStructuredResponseGeneratorBlock(Block):
             if input_data.expected_format:
                 parsed_dict, parsed_error = parse_response(response_text)
                 if not parsed_error:
-                    yield "response", {k: str(v) for k, v in parsed_dict.items()}
+                    yield "response", {
+                        k: (
+                            json.loads(v)
+                            if isinstance(v, str)
+                            and v.startswith("[")
+                            and v.endswith("]")
+                            else (", ".join(v) if isinstance(v, list) else v)
+                        )
+                        for k, v in parsed_dict.items()
+                    }
                     return
             else:
                 yield "response", {"response": response_text}
@@ -287,7 +320,7 @@ class AITextGeneratorBlock(Block):
             if output_name == "response":
                 return output_data["response"]
             else:
-                raise output_data
+                raise RuntimeError(output_data)
         raise ValueError("Failed to get a response from the LLM.")

     def run(self, input_data: Input) -> BlockOutput:
@@ -301,7 +334,7 @@ class AITextGeneratorBlock(Block):
             yield "error", str(e)


-class TextSummarizerBlock(Block):
+class AITextSummarizerBlock(Block):
     class Input(BlockSchema):
         text: str
         model: LlmModel = LlmModel.GPT4_TURBO
@@ -319,8 +352,8 @@ class TextSummarizerBlock(Block):
             id="c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8",
             description="Utilize a Large Language Model (LLM) to summarize a long text.",
             categories={BlockCategory.AI, BlockCategory.TEXT},
-            input_schema=TextSummarizerBlock.Input,
-            output_schema=TextSummarizerBlock.Output,
+            input_schema=AITextSummarizerBlock.Input,
+            output_schema=AITextSummarizerBlock.Output,
             test_input={"text": "Lorem ipsum..." * 100},
             test_output=("summary", "Final summary of a long text"),
             test_mock={
@@ -412,7 +445,7 @@ class TextSummarizerBlock(Block):
         else:
             # If combined summaries are still too long, recursively summarize
             return self._run(
-                TextSummarizerBlock.Input(
+                AITextSummarizerBlock.Input(
                     text=combined_text,
                     api_key=input_data.api_key,
                     model=input_data.model,
@@ -438,7 +471,7 @@ class Message(BlockSchema):
 class AIConversationBlock(Block):
     class Input(BlockSchema):
         messages: List[Message] = SchemaField(
-            description="List of messages in the conversation.", min_items=1
+            description="List of messages in the conversation.", min_length=1
         )
         model: LlmModel = SchemaField(
             default=LlmModel.GPT4_TURBO,
@@ -2,8 +2,8 @@ import operator
 from enum import Enum
 from typing import Any

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import SchemaField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class Operation(Enum):
@@ -2,8 +2,8 @@ from typing import List
 import requests

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField


 class PublishToMediumBlock(Block):
@@ -4,9 +4,9 @@ from typing import Iterator
 import praw
 from pydantic import BaseModel, ConfigDict, Field

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SecretField
-from autogpt_server.util.mock import MockObject
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SecretField
+from backend.util.mock import MockObject


 class RedditCredentials(BaseModel):
@@ -5,8 +5,8 @@ from typing import Any
 import feedparser
 import pydantic

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import SchemaField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class RSSEntry(pydantic.BaseModel):
autogpt_platform/backend/backend/blocks/sampling.py (new file, 264 lines)
@@ -0,0 +1,264 @@
import random
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, List, Optional, Union

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class SamplingMethod(str, Enum):
    RANDOM = "random"
    SYSTEMATIC = "systematic"
    TOP = "top"
    BOTTOM = "bottom"
    STRATIFIED = "stratified"
    WEIGHTED = "weighted"
    RESERVOIR = "reservoir"
    CLUSTER = "cluster"


class DataSamplingBlock(Block):
    class Input(BlockSchema):
        data: Union[Dict[str, Any], List[Union[dict, List[Any]]]] = SchemaField(
            description="The dataset to sample from. Can be a single dictionary, a list of dictionaries, or a list of lists.",
            placeholder="{'id': 1, 'value': 'a'} or [{'id': 1, 'value': 'a'}, {'id': 2, 'value': 'b'}, ...]",
        )
        sample_size: int = SchemaField(
            description="The number of samples to take from the dataset.",
            placeholder="10",
            default=10,
        )
        sampling_method: SamplingMethod = SchemaField(
            description="The method to use for sampling.",
            default=SamplingMethod.RANDOM,
        )
        accumulate: bool = SchemaField(
            description="Whether to accumulate data before sampling.",
            default=False,
        )
        random_seed: Optional[int] = SchemaField(
            description="Seed for random number generator (optional).",
            default=None,
        )
        stratify_key: Optional[str] = SchemaField(
            description="Key to use for stratified sampling (required for stratified sampling).",
            default=None,
        )
        weight_key: Optional[str] = SchemaField(
            description="Key to use for weighted sampling (required for weighted sampling).",
            default=None,
        )
        cluster_key: Optional[str] = SchemaField(
            description="Key to use for cluster sampling (required for cluster sampling).",
            default=None,
        )

    class Output(BlockSchema):
        sampled_data: List[Union[dict, List[Any]]] = SchemaField(
            description="The sampled subset of the input data."
        )
        sample_indices: List[int] = SchemaField(
            description="The indices of the sampled data in the original dataset."
        )

    def __init__(self):
        super().__init__(
            id="4a448883-71fa-49cf-91cf-70d793bd7d87",
            description="This block samples data from a given dataset using various sampling methods.",
            categories={BlockCategory.LOGIC},
            input_schema=DataSamplingBlock.Input,
            output_schema=DataSamplingBlock.Output,
            test_input={
                "data": [
                    {"id": i, "value": chr(97 + i), "group": i % 3} for i in range(10)
                ],
                "sample_size": 3,
                "sampling_method": SamplingMethod.STRATIFIED,
                "accumulate": False,
                "random_seed": 42,
                "stratify_key": "group",
            },
            test_output=[
                (
                    "sampled_data",
                    [
                        {"id": 0, "value": "a", "group": 0},
                        {"id": 1, "value": "b", "group": 1},
                        {"id": 8, "value": "i", "group": 2},
                    ],
                ),
                ("sample_indices", [0, 1, 8]),
            ],
        )
        self.accumulated_data = []

    def run(self, input_data: Input) -> BlockOutput:
        if input_data.accumulate:
            if isinstance(input_data.data, dict):
                self.accumulated_data.append(input_data.data)
            elif isinstance(input_data.data, list):
                self.accumulated_data.extend(input_data.data)
            else:
                raise ValueError(f"Unsupported data type: {type(input_data.data)}")

            # If we don't have enough data yet, return without sampling
            if len(self.accumulated_data) < input_data.sample_size:
                return

            data_to_sample = self.accumulated_data
        else:
            # If not accumulating, use the input data directly
            data_to_sample = (
                input_data.data
                if isinstance(input_data.data, list)
                else [input_data.data]
            )

        if input_data.random_seed is not None:
            random.seed(input_data.random_seed)

        data_size = len(data_to_sample)

        if input_data.sample_size > data_size:
            raise ValueError(
                f"Sample size ({input_data.sample_size}) cannot be larger than the dataset size ({data_size})."
            )

        indices = []

        if input_data.sampling_method == SamplingMethod.RANDOM:
            indices = random.sample(range(data_size), input_data.sample_size)
        elif input_data.sampling_method == SamplingMethod.SYSTEMATIC:
            step = data_size // input_data.sample_size
            start = random.randint(0, step - 1)
            indices = list(range(start, data_size, step))[: input_data.sample_size]
        elif input_data.sampling_method == SamplingMethod.TOP:
            indices = list(range(input_data.sample_size))
        elif input_data.sampling_method == SamplingMethod.BOTTOM:
            indices = list(range(data_size - input_data.sample_size, data_size))
        elif input_data.sampling_method == SamplingMethod.STRATIFIED:
            if not input_data.stratify_key:
                raise ValueError(
                    "Stratify key must be provided for stratified sampling."
                )
            strata = defaultdict(list)
            for i, item in enumerate(data_to_sample):
                if isinstance(item, dict):
                    strata_value = item.get(input_data.stratify_key)
                elif hasattr(item, input_data.stratify_key):
                    strata_value = getattr(item, input_data.stratify_key)
                else:
                    raise ValueError(
                        f"Stratify key '{input_data.stratify_key}' not found in item {item}"
                    )

                if strata_value is None:
                    raise ValueError(
                        f"Stratify value for key '{input_data.stratify_key}' is None"
                    )

                strata[str(strata_value)].append(i)

            # Calculate the number of samples to take from each stratum
            stratum_sizes = {
                k: max(1, int(len(v) / data_size * input_data.sample_size))
                for k, v in strata.items()
            }

            # Adjust sizes to ensure we get exactly sample_size samples
            while sum(stratum_sizes.values()) != input_data.sample_size:
                if sum(stratum_sizes.values()) < input_data.sample_size:
                    stratum_sizes[
                        max(stratum_sizes, key=lambda k: stratum_sizes[k])
                    ] += 1
                else:
                    stratum_sizes[
                        max(stratum_sizes, key=lambda k: stratum_sizes[k])
                    ] -= 1

            for stratum, size in stratum_sizes.items():
                indices.extend(random.sample(strata[stratum], size))
        elif input_data.sampling_method == SamplingMethod.WEIGHTED:
            if not input_data.weight_key:
                raise ValueError("Weight key must be provided for weighted sampling.")
            weights = []
            for item in data_to_sample:
                if isinstance(item, dict):
                    weight = item.get(input_data.weight_key)
                elif hasattr(item, input_data.weight_key):
                    weight = getattr(item, input_data.weight_key)
                else:
                    raise ValueError(
                        f"Weight key '{input_data.weight_key}' not found in item {item}"
                    )

                if weight is None:
                    raise ValueError(
                        f"Weight value for key '{input_data.weight_key}' is None"
                    )
                try:
                    weights.append(float(weight))
                except ValueError:
                    raise ValueError(
                        f"Weight value '{weight}' cannot be converted to a number"
                    )

            if not weights:
                raise ValueError(
                    f"No valid weights found using key '{input_data.weight_key}'"
                )

            indices = random.choices(
                range(data_size), weights=weights, k=input_data.sample_size
            )
        elif input_data.sampling_method == SamplingMethod.RESERVOIR:
            indices = list(range(input_data.sample_size))
            for i in range(input_data.sample_size, data_size):
                j = random.randint(0, i)
                if j < input_data.sample_size:
                    indices[j] = i
        elif input_data.sampling_method == SamplingMethod.CLUSTER:
            if not input_data.cluster_key:
                raise ValueError("Cluster key must be provided for cluster sampling.")
            clusters = defaultdict(list)
            for i, item in enumerate(data_to_sample):
                if isinstance(item, dict):
                    cluster_value = item.get(input_data.cluster_key)
                elif hasattr(item, input_data.cluster_key):
                    cluster_value = getattr(item, input_data.cluster_key)
                else:
                    raise TypeError(
                        f"Item {item} does not have the cluster key '{input_data.cluster_key}'"
                    )

                clusters[str(cluster_value)].append(i)

            # Randomly select clusters until we have enough samples
            selected_clusters = []
            while (
                sum(len(clusters[c]) for c in selected_clusters)
                < input_data.sample_size
            ):
                available_clusters = [c for c in clusters if c not in selected_clusters]
                if not available_clusters:
                    break
                selected_clusters.append(random.choice(available_clusters))

            for cluster in selected_clusters:
                indices.extend(clusters[cluster])

            # If we have more samples than needed, randomly remove some
            if len(indices) > input_data.sample_size:
                indices = random.sample(indices, input_data.sample_size)
        else:
            raise ValueError(f"Unknown sampling method: {input_data.sampling_method}")

        sampled_data = [data_to_sample[i] for i in indices]

        # Clear accumulated data after sampling if accumulation is enabled
        if input_data.accumulate:
            self.accumulated_data = []

        yield "sampled_data", sampled_data
        yield "sample_indices", indices
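A quick usage sketch of this block outside the graph runtime (hedged; `run` is a generator yielding `(output_name, value)` pairs, so collecting it into a dict works here):

```python
block = DataSamplingBlock()
outputs = dict(
    block.run(
        DataSamplingBlock.Input(
            data=[{"id": i, "group": i % 2} for i in range(10)],
            sample_size=4,
            sampling_method=SamplingMethod.RANDOM,
            random_seed=1,
        )
    )
)
print(outputs["sample_indices"], outputs["sampled_data"])
```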
@@ -3,8 +3,8 @@ from urllib.parse import quote
 import requests

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SecretField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SecretField


 class GetRequest:
@@ -3,8 +3,8 @@ from typing import Literal
 import requests

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField


 class CreateTalkingAvatarVideoBlock(Block):
@@ -4,8 +4,8 @@ from typing import Any
 from jinja2 import BaseLoader, Environment
 from pydantic import Field

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.util import json
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.util import json

 jinja = Environment(loader=BaseLoader())
@@ -2,7 +2,7 @@ import time
 from datetime import datetime, timedelta
 from typing import Any, Union

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema


 class GetCurrentTimeBlock(Block):
@@ -23,7 +23,7 @@ class GetCurrentTimeBlock(Block):
             {"trigger": "Hello", "format": "{time}"},
         ],
         test_output=[
-            ("time", time.strftime("%H:%M:%S")),
+            ("time", lambda _: time.strftime("%H:%M:%S")),
         ],
     )

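The hunk above swaps a precomputed expected value for a callable. Presumably the block test harness treats a callable expectation as a late-bound check, so the expected time string is produced at comparison time instead of at test-definition time, which removes the race where the wall clock ticks between building test_output and running the block. A toy restatement of the idea (hypothetical, not harness code):

import time

# Eager: frozen when the test data is built; can mismatch if a second passes.
expected_eager = time.strftime("%H:%M:%S")

# Late-bound: evaluated right next to the actual output, shrinking the race window.
expected_lazy = lambda _: time.strftime("%H:%M:%S")

actual = time.strftime("%H:%M:%S")
assert expected_lazy(actual) == actual  # can only flake on an exact second boundary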
@@ -130,7 +130,6 @@ class CountdownTimerBlock(Block):
     )

     def run(self, input_data: Input) -> BlockOutput:
-
         seconds = int(input_data.seconds)
         minutes = int(input_data.minutes)
         hours = int(input_data.hours)
@@ -3,8 +3,8 @@ from urllib.parse import parse_qs, urlparse
 from youtube_transcript_api import YouTubeTranscriptApi
 from youtube_transcript_api.formatters import TextFormatter

-from autogpt_server.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from autogpt_server.data.model import SchemaField
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class TranscribeYouTubeVideoBlock(Block):
@@ -8,8 +8,8 @@ import pathlib
 import click
 import psutil

-from autogpt_server import app
-from autogpt_server.util.process import AppProcess
+from backend import app
+from backend.util.process import AppProcess


 def get_pid_path() -> pathlib.Path:
@@ -109,7 +109,7 @@ def reddit(server_address: str):
     """
     import requests

-    from autogpt_server.usecases.reddit_marketing import create_test_graph
+    from backend.usecases.reddit_marketing import create_test_graph

     test_graph = create_test_graph()
     url = f"{server_address}/graphs"
@@ -130,7 +130,7 @@ def populate_db(server_address: str):
     """
     import requests

-    from autogpt_server.usecases.sample import create_test_graph
+    from backend.usecases.sample import create_test_graph

     test_graph = create_test_graph()
     url = f"{server_address}/graphs"
@@ -166,7 +166,7 @@ def graph(server_address: str):
     """
     import requests

-    from autogpt_server.usecases.sample import create_test_graph
+    from backend.usecases.sample import create_test_graph

     url = f"{server_address}/graphs"
     headers = {"Content-Type": "application/json"}
@@ -219,7 +219,7 @@ def websocket(server_address: str, graph_id: str):

     import websockets

-    from autogpt_server.server.ws_api import ExecutionSubscription, Methods, WsMessage
+    from backend.server.ws_api import ExecutionSubscription, Methods, WsMessage

     async def send_message(server_address: str):
         uri = f"ws://{server_address}"
autogpt_platform/backend/backend/data/analytics.py (new file, 43 lines)
@@ -0,0 +1,43 @@
import logging

import prisma.types

logger = logging.getLogger(__name__)


async def log_raw_analytics(
    user_id: str,
    type: str,
    data: dict,
    data_index: str,
):
    details = await prisma.models.AnalyticsDetails.prisma().create(
        data={
            "userId": user_id,
            "type": type,
            "data": prisma.Json(data),
            "dataIndex": data_index,
        }
    )
    return details


async def log_raw_metric(
    user_id: str,
    metric_name: str,
    metric_value: float,
    data_string: str,
):
    if metric_value < 0:
        raise ValueError("metric_value must be non-negative")

    result = await prisma.models.AnalyticsMetrics.prisma().create(
        data={
            "value": metric_value,
            "analyticMetric": metric_name,
            "userId": user_id,
            "dataString": data_string,
        },
    )

    return result
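A rough usage sketch for the helpers above, assuming the Prisma client has already been connected elsewhere (the user ID and values are placeholders):

import asyncio

from backend.data.analytics import log_raw_metric


async def record_page_load() -> None:
    await log_raw_metric(
        user_id="user-123",           # placeholder ID
        metric_name="page_load_time",
        metric_value=0.42,            # must be non-negative, or ValueError is raised
        data_string="landing",        # free-form string for later filtering
    )


asyncio.run(record_page_load())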
@@ -7,8 +7,8 @@ import jsonschema
 from prisma.models import AgentBlock
 from pydantic import BaseModel

-from autogpt_server.data.model import ContributorDetails
-from autogpt_server.util import json
+from backend.data.model import ContributorDetails
+from backend.util import json

 BlockData = tuple[str, Any] # Input & Output data should be a tuple of (name, data).
 BlockInput = dict[str, Any] # Input: 1 input pin consumes 1 data.
@@ -225,7 +225,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):


 def get_blocks() -> dict[str, Block]:
-    from autogpt_server.blocks import AVAILABLE_BLOCKS # noqa: E402
+    from backend.blocks import AVAILABLE_BLOCKS # noqa: E402

     return AVAILABLE_BLOCKS

autogpt_platform/backend/backend/data/credit.py (new file, 274 lines)
@@ -0,0 +1,274 @@
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Optional, Type

import prisma.errors
from prisma import Json
from prisma.enums import UserBlockCreditType
from prisma.models import UserBlockCredit
from pydantic import BaseModel

from backend.blocks.llm import (
    MODEL_METADATA,
    AIConversationBlock,
    AIStructuredResponseGeneratorBlock,
    AITextGeneratorBlock,
    AITextSummarizerBlock,
    LlmModel,
)
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.data.block import Block, BlockInput
from backend.util.settings import Config


class BlockCostType(str, Enum):
    RUN = "run" # cost X credits per run
    BYTE = "byte" # cost X credits per byte
    SECOND = "second" # cost X credits per second


class BlockCost(BaseModel):
    cost_amount: int
    cost_filter: BlockInput
    cost_type: BlockCostType

    def __init__(
        self,
        cost_amount: int,
        cost_type: BlockCostType = BlockCostType.RUN,
        cost_filter: Optional[BlockInput] = None,
        **data: Any,
    ) -> None:
        super().__init__(
            cost_amount=cost_amount,
            cost_filter=cost_filter or {},
            cost_type=cost_type,
            **data,
        )


llm_cost = [
    BlockCost(
        cost_type=BlockCostType.RUN,
        cost_filter={
            "model": model,
            "api_key": None, # Running LLM with user own API key is free.
        },
        cost_amount=metadata.cost_factor,
    )
    for model, metadata in MODEL_METADATA.items()
] + [
    BlockCost(
        # Default cost is running LlmModel.GPT4O.
        cost_amount=MODEL_METADATA[LlmModel.GPT4O].cost_factor,
        cost_filter={"api_key": None},
    ),
]

BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
    AIConversationBlock: llm_cost,
    AITextGeneratorBlock: llm_cost,
    AIStructuredResponseGeneratorBlock: llm_cost,
    AITextSummarizerBlock: llm_cost,
    CreateTalkingAvatarVideoBlock: [
        BlockCost(cost_amount=15, cost_filter={"api_key": None})
    ],
}


class UserCreditBase(ABC):
    def __init__(self, num_user_credits_refill: int):
        self.num_user_credits_refill = num_user_credits_refill

    @abstractmethod
    async def get_or_refill_credit(self, user_id: str) -> int:
        """
        Get the current credit for the user and refill if no transaction has been made in the current cycle.

        Returns:
            int: The current credit for the user.
        """
        pass

    @abstractmethod
    async def spend_credits(
        self,
        user_id: str,
        user_credit: int,
        block: Block,
        input_data: BlockInput,
        data_size: float,
        run_time: float,
    ) -> int:
        """
        Spend the credits for the user based on the block usage.

        Args:
            user_id (str): The user ID.
            user_credit (int): The current credit for the user.
            block (Block): The block that is being used.
            input_data (BlockInput): The input data for the block.
            data_size (float): The size of the data being processed.
            run_time (float): The time taken to run the block.

        Returns:
            int: amount of credit spent
        """
        pass

    @abstractmethod
    async def top_up_credits(self, user_id: str, amount: int):
        """
        Top up the credits for the user.

        Args:
            user_id (str): The user ID.
            amount (int): The amount to top up.
        """
        pass


class UserCredit(UserCreditBase):
    async def get_or_refill_credit(self, user_id: str) -> int:
        cur_time = self.time_now()
        cur_month = cur_time.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
        nxt_month = cur_month.replace(month=cur_month.month + 1)

        user_credit = await UserBlockCredit.prisma().group_by(
            by=["userId"],
            sum={"amount": True},
            where={
                "userId": user_id,
                "createdAt": {"gte": cur_month, "lt": nxt_month},
                "isActive": True,
            },
        )

        if user_credit:
            credit_sum = user_credit[0].get("_sum") or {}
            return credit_sum.get("amount", 0)

        key = f"MONTHLY-CREDIT-TOP-UP-{cur_month}"

        try:
            await UserBlockCredit.prisma().create(
                data={
                    "amount": self.num_user_credits_refill,
                    "type": UserBlockCreditType.TOP_UP,
                    "userId": user_id,
                    "transactionKey": key,
                    "createdAt": self.time_now(),
                }
            )
        except prisma.errors.UniqueViolationError:
            pass # Already refilled this month

        return self.num_user_credits_refill

    @staticmethod
    def time_now():
        return datetime.now(timezone.utc)

    @staticmethod
    def _block_usage_cost(
        block: Block,
        input_data: BlockInput,
        data_size: float,
        run_time: float,
    ) -> tuple[int, BlockInput]:
        block_costs = BLOCK_COSTS.get(type(block))
        if not block_costs:
            return 0, {}

        for block_cost in block_costs:
            if all(
                # None, [], {}, "", are considered the same value.
                input_data.get(k) == b or (not input_data.get(k) and not b)
                for k, b in block_cost.cost_filter.items()
            ):
                if block_cost.cost_type == BlockCostType.RUN:
                    return block_cost.cost_amount, block_cost.cost_filter

                if block_cost.cost_type == BlockCostType.SECOND:
                    return (
                        int(run_time * block_cost.cost_amount),
                        block_cost.cost_filter,
                    )

                if block_cost.cost_type == BlockCostType.BYTE:
                    return (
                        int(data_size * block_cost.cost_amount),
                        block_cost.cost_filter,
                    )

        return 0, {}

    async def spend_credits(
        self,
        user_id: str,
        user_credit: int,
        block: Block,
        input_data: BlockInput,
        data_size: float,
        run_time: float,
        validate_balance: bool = True,
    ) -> int:
        cost, matching_filter = self._block_usage_cost(
            block=block, input_data=input_data, data_size=data_size, run_time=run_time
        )
        if cost <= 0:
            return 0

        if validate_balance and user_credit < cost:
            raise ValueError(f"Insufficient credit: {user_credit} < {cost}")

        await UserBlockCredit.prisma().create(
            data={
                "userId": user_id,
                "amount": -cost,
                "type": UserBlockCreditType.USAGE,
                "blockId": block.id,
                "metadata": Json(
                    {
                        "block": block.name,
                        "input": matching_filter,
                    }
                ),
                "createdAt": self.time_now(),
            }
        )
        return cost

    async def top_up_credits(self, user_id: str, amount: int):
        await UserBlockCredit.prisma().create(
            data={
                "userId": user_id,
                "amount": amount,
                "type": UserBlockCreditType.TOP_UP,
                "createdAt": self.time_now(),
            }
        )


class DisabledUserCredit(UserCreditBase):
    async def get_or_refill_credit(self, *args, **kwargs) -> int:
        return 0

    async def spend_credits(self, *args, **kwargs) -> int:
        return 0

    async def top_up_credits(self, *args, **kwargs):
        pass


def get_user_credit_model() -> UserCreditBase:
    config = Config()
    if config.enable_credit.lower() == "true":
        return UserCredit(config.num_user_credits_refill)
    else:
        return DisabledUserCredit(0)


def get_block_costs() -> dict[str, list[BlockCost]]:
    return {block().id: costs for block, costs in BLOCK_COSTS.items()}
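The lookup in _block_usage_cost above charges the first BlockCost whose cost_filter matches the run's input, treating None, "", [], {} and missing keys as the same empty value. That is how `"api_key": None` makes runs free once the user supplies their own key: the filter no longer matches, so no platform cost applies. A standalone restatement of just the matching rule:

from typing import Any


def filter_matches(input_data: dict[str, Any], cost_filter: dict[str, Any]) -> bool:
    # Same rule as _block_usage_cost: a key matches on equality, and
    # None, "", [], {} (or a missing key) all count as the same empty value.
    return all(
        input_data.get(k) == v or (not input_data.get(k) and not v)
        for k, v in cost_filter.items()
    )


assert filter_matches({"model": "gpt-4o", "api_key": None}, {"api_key": None})  # charged
assert filter_matches({"model": "gpt-4o"}, {"api_key": None})  # missing key == empty, charged
assert not filter_matches({"api_key": "sk-own-key"}, {"api_key": None})  # own key, free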
@@ -31,7 +31,7 @@ async def connect(call_count=0):
     except Exception as e:
         if call_count <= 5:
            logger.info(f"[Prisma-{conn_id}] Connection failed: {e}. Retrying now..")
-            await asyncio.sleep(call_count)
+            await asyncio.sleep(2**call_count)
             await connect(call_count + 1)
         else:
             raise e
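This hunk switches the Prisma reconnect delay from linear (sleeping call_count seconds: 0, 1, 2, ...) to exponential (2**call_count seconds: 1, 2, 4, ...), so the six attempts back off for about 63 seconds in total instead of 15. A generic sketch of the same pattern with jitter added, which is a common refinement (illustrative only, not what the diff ships):

import asyncio
import random


async def retry_with_backoff(op, max_attempts: int = 6, base: float = 1.0):
    """Retry an async callable, doubling the delay each attempt, plus jitter."""
    for attempt in range(max_attempts):
        try:
            return await op()
        except Exception:
            if attempt == max_attempts - 1:
                raise  # out of attempts; propagate the last error
            # 1, 2, 4, 8, ... seconds, plus up to 1s of jitter so many
            # workers do not hammer the database in lockstep.
            await asyncio.sleep(base * 2**attempt + random.uniform(0, 1))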
@@ -1,9 +1,9 @@
 from collections import defaultdict
 from datetime import datetime, timezone
-from enum import Enum
 from multiprocessing import Manager
 from typing import Any, Generic, TypeVar

+from prisma.enums import AgentExecutionStatus
 from prisma.models import (
     AgentGraphExecution,
     AgentNodeExecution,
@@ -16,17 +16,19 @@ from prisma.types import (
 )
 from pydantic import BaseModel

-from autogpt_server.data.block import BlockData, BlockInput, CompletedBlockOutput
-from autogpt_server.util import json, mock
+from backend.data.block import BlockData, BlockInput, CompletedBlockOutput
+from backend.util import json, mock


 class GraphExecution(BaseModel):
     user_id: str
     graph_exec_id: str
+    graph_id: str
     start_node_execs: list["NodeExecution"]


 class NodeExecution(BaseModel):
     user_id: str
     graph_exec_id: str
+    graph_id: str
     node_exec_id: str
@@ -34,13 +36,7 @@ class NodeExecution(BaseModel):
     data: BlockInput


-class ExecutionStatus(str, Enum):
-    INCOMPLETE = "INCOMPLETE"
-    QUEUED = "QUEUED"
-    RUNNING = "RUNNING"
-    COMPLETED = "COMPLETED"
-    FAILED = "FAILED"
+ExecutionStatus = AgentExecutionStatus

 T = TypeVar("T")

@@ -148,6 +144,7 @@ async def create_graph_execution(
         data={
             "agentGraphId": graph_id,
             "agentGraphVersion": graph_version,
+            "executionStatus": ExecutionStatus.QUEUED,
             "AgentNodeExecutions": {
                 "create": [ # type: ignore
                     {
@@ -259,10 +256,20 @@ async def upsert_execution_output(
     )


+async def update_graph_execution_start_time(graph_exec_id: str):
+    await AgentGraphExecution.prisma().update(
+        where={"id": graph_exec_id},
+        data={
+            "executionStatus": ExecutionStatus.RUNNING,
+            "startedAt": datetime.now(tz=timezone.utc),
+        },
+    )
+
+
 async def update_graph_execution_stats(graph_exec_id: str, stats: dict[str, Any]):
     await AgentGraphExecution.prisma().update(
         where={"id": graph_exec_id},
-        data={"stats": json.dumps(stats)},
+        data={"executionStatus": ExecutionStatus.COMPLETED, "stats": json.dumps(stats)},
     )


@@ -389,19 +396,19 @@ def merge_execution_input(data: BlockInput) -> BlockInput:

     # Merge all input with <input_name>_$_<index> into a single list.
     items = list(data.items())
-    list_input: list[Any] = []

     for key, value in items:
         if LIST_SPLIT not in key:
             continue
         name, index = key.split(LIST_SPLIT)
         if not index.isdigit():
-            list_input.append((name, value, 0))
-        else:
-            list_input.append((name, value, int(index)))
+            raise ValueError(f"Invalid key: {key}, #{index} index must be an integer.")

-    for name, value, _ in sorted(list_input, key=lambda x: x[2]):
         data[name] = data.get(name, [])
-        data[name].append(value)
+        if int(index) >= len(data[name]):
+            # Pad list with empty string on missing indices.
+            data[name].extend([""] * (int(index) - len(data[name]) + 1))
+        data[name][int(index)] = value

     # Merge all input with <input_name>_#_<index> into a single dict.
     for key, value in items:
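With the new code above, indexed inputs are written positionally and gaps are padded with empty strings, where the old code appended values in sorted order and silently coerced non-numeric indices to 0. A toy run of the new merge logic, assuming LIST_SPLIT is the `_$_` delimiter the comment describes:

LIST_SPLIT = "_$_"  # assumed value of the delimiter used by merge_execution_input

data = {"items_$_0": "a", "items_$_2": "c"}
for key, value in list(data.items()):
    if LIST_SPLIT not in key:
        continue
    name, index = key.split(LIST_SPLIT)
    if not index.isdigit():
        raise ValueError(f"Invalid key: {key}, #{index} index must be an integer.")
    data[name] = data.get(name, [])
    if int(index) >= len(data[name]):
        data[name].extend([""] * (int(index) - len(data[name]) + 1))
    data[name][int(index)] = value

assert data["items"] == ["a", "", "c"]  # the gap at index 1 is padded with ""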
@@ -9,11 +9,11 @@ from prisma.models import AgentGraph, AgentNode, AgentNodeLink
 from pydantic import BaseModel, PrivateAttr
 from pydantic_core import PydanticUndefinedType

-from autogpt_server.blocks.basic import InputBlock, OutputBlock
-from autogpt_server.data.block import BlockInput, get_block, get_blocks
-from autogpt_server.data.db import BaseDbModel, transaction
-from autogpt_server.data.user import DEFAULT_USER_ID
-from autogpt_server.util import json
+from backend.blocks.basic import AgentInputBlock, AgentOutputBlock
+from backend.data.block import BlockInput, get_block, get_blocks
+from backend.data.db import BaseDbModel, transaction
+from backend.data.user import DEFAULT_USER_ID
+from backend.util import json

 logger = logging.getLogger(__name__)

@@ -106,7 +106,9 @@ class Graph(GraphMeta):
     def starting_nodes(self) -> list[Node]:
         outbound_nodes = {link.sink_id for link in self.links}
         input_nodes = {
-            v.id for v in self.nodes if isinstance(get_block(v.block_id), InputBlock)
+            v.id
+            for v in self.nodes
+            if isinstance(get_block(v.block_id), AgentInputBlock)
         }
         return [
             node
@@ -116,7 +118,9 @@ class Graph(GraphMeta):

     @property
     def ending_nodes(self) -> list[Node]:
-        return [v for v in self.nodes if isinstance(get_block(v.block_id), OutputBlock)]
+        return [
+            v for v in self.nodes if isinstance(get_block(v.block_id), AgentOutputBlock)
+        ]

     @property
     def subgraph_map(self) -> dict[str, str]:
@@ -179,7 +183,9 @@ class Graph(GraphMeta):
                 + [sanitize(link.sink_name) for link in node.input_links]
             )
             for name in block.input_schema.get_required_fields():
-                if name not in provided_inputs and not isinstance(block, InputBlock):
+                if name not in provided_inputs and not isinstance(
+                    block, AgentInputBlock
+                ):
                     raise ValueError(
                         f"Node {block.name} #{node.id} required input missing: `{name}`"
                     )
@@ -193,7 +199,7 @@ class Graph(GraphMeta):
         def is_input_output_block(nid: str) -> bool:
             bid = node_map[nid].block_id
             b = get_block(bid)
-            return isinstance(b, InputBlock) or isinstance(b, OutputBlock)
+            return isinstance(b, AgentInputBlock) or isinstance(b, AgentOutputBlock)

         # subgraphs: all nodes in subgraph must be present in the graph.
         for subgraph_id, node_ids in self.subgraphs.items():
@@ -268,7 +274,6 @@ class Graph(GraphMeta):
                     PydanticUndefinedType,
                 )
             ):
-
                 input_schema.append(
                     InputSchemaItem(
                         node_id=node.id,
@@ -11,7 +11,7 @@ from pydantic_core import (
     core_schema,
 )

-from autogpt_server.util.settings import Secrets
+from backend.util.settings import Secrets

 T = TypeVar("T")
 logger = logging.getLogger(__name__)
@@ -6,7 +6,7 @@ from datetime import datetime

 from redis.asyncio import Redis

-from autogpt_server.data.execution import ExecutionResult
+from backend.data.execution import ExecutionResult

 logger = logging.getLogger(__name__)

@@ -37,7 +37,6 @@ class AsyncEventQueue(ABC):


 class AsyncRedisEventQueue(AsyncEventQueue):
-
     def __init__(self):
         self.host = os.getenv("REDIS_HOST", "localhost")
         self.port = int(os.getenv("REDIS_PORT", "6379"))
@@ -3,9 +3,9 @@ from typing import Optional

 from prisma.models import AgentGraphExecutionSchedule

-from autogpt_server.data.block import BlockInput
-from autogpt_server.data.db import BaseDbModel
-from autogpt_server.util import json
+from backend.data.block import BlockInput
+from backend.data.db import BaseDbModel
+from backend.util import json


 class ExecutionSchedule(BaseDbModel):
@@ -3,14 +3,13 @@ from typing import Optional
 from fastapi import HTTPException
 from prisma.models import User

-from autogpt_server.data.db import prisma
+from backend.data.db import prisma

 DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
 DEFAULT_EMAIL = "default@example.com"


 async def get_or_create_user(user_data: dict) -> User:
-
     user_id = user_data.get("sub")
     if not user_id:
         raise HTTPException(status_code=401, detail="User ID not found in token")
autogpt_platform/backend/backend/exec.py (new file, 15 lines)
@@ -0,0 +1,15 @@
from backend.app import run_processes
from backend.executor import ExecutionManager


def main():
    """
    Run all the processes required for the AutoGPT-server REST API.
    """
    run_processes(
        ExecutionManager(),
    )


if __name__ == "__main__":
    main()
@@ -12,13 +12,15 @@ from multiprocessing.pool import AsyncResult, Pool
 from typing import TYPE_CHECKING, Any, Coroutine, Generator, TypeVar

 if TYPE_CHECKING:
-    from autogpt_server.server.rest_api import AgentServer
+    from backend.server.rest_api import AgentServer

-from autogpt_server.blocks.basic import InputBlock
-from autogpt_server.data import db
-from autogpt_server.data.block import Block, BlockData, BlockInput, get_block
-from autogpt_server.data.execution import (
+from backend.blocks.basic import AgentInputBlock
+from backend.data import db
+from backend.data.block import Block, BlockData, BlockInput, get_block
+from backend.data.credit import get_user_credit_model
+from backend.data.execution import (
     ExecutionQueue,
     ExecutionResult,
     ExecutionStatus,
     GraphExecution,
     NodeExecution,
@@ -34,32 +36,60 @@ from autogpt_server.data.execution import (
     upsert_execution_input,
     upsert_execution_output,
 )
-from autogpt_server.data.graph import Graph, Link, Node, get_graph, get_node
-from autogpt_server.util import json
-from autogpt_server.util.decorator import error_logged, time_measured
-from autogpt_server.util.logging import configure_logging
-from autogpt_server.util.service import AppService, expose, get_service_client
-from autogpt_server.util.settings import Config
-from autogpt_server.util.type import convert
+from backend.data.graph import Graph, Link, Node, get_graph, get_node
+from backend.util import json
+from backend.util.decorator import error_logged, time_measured
+from backend.util.logging import configure_logging
+from backend.util.service import AppService, expose, get_service_client
+from backend.util.settings import Config
+from backend.util.type import convert

 logger = logging.getLogger(__name__)


-def get_log_metadata(
-    graph_eid: str,
-    graph_id: str,
-    node_eid: str,
-    node_id: str,
-    block_name: str,
-) -> dict:
-    return {
-        "component": "ExecutionManager",
-        "graph_eid": graph_eid,
-        "graph_id": graph_id,
-        "node_eid": node_eid,
-        "node_id": node_id,
-        "block_name": block_name,
-    }
+class LogMetadata:
+    def __init__(
+        self,
+        user_id: str,
+        graph_eid: str,
+        graph_id: str,
+        node_eid: str,
+        node_id: str,
+        block_name: str,
+    ):
+        self.metadata = {
+            "component": "ExecutionManager",
+            "user_id": user_id,
+            "graph_eid": graph_eid,
+            "graph_id": graph_id,
+            "node_eid": node_eid,
+            "node_id": node_id,
+            "block_name": block_name,
+        }
+        self.prefix = f"[ExecutionManager|uid:{user_id}|gid:{graph_id}|nid:{node_id}]|geid:{graph_eid}|nid:{node_eid}|{block_name}]"
+
+    def info(self, msg: str, **extra):
+        msg = self._wrap(msg, **extra)
+        logger.info(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def warning(self, msg: str, **extra):
+        msg = self._wrap(msg, **extra)
+        logger.warning(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def error(self, msg: str, **extra):
+        msg = self._wrap(msg, **extra)
+        logger.error(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def debug(self, msg: str, **extra):
+        msg = self._wrap(msg, **extra)
+        logger.debug(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def exception(self, msg: str, **extra):
+        msg = self._wrap(msg, **extra)
+        logger.exception(msg, extra={"json_fields": {**self.metadata, **extra}})
+
+    def _wrap(self, msg: str, **extra):
+        return f"{self.prefix} {msg} {extra}"


 T = TypeVar("T")
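LogMetadata above carries the structured json_fields and a human-readable prefix together, so call sites shrink from four-line logger calls to one-liners while still emitting the same structured data. A sketch of how the executor code uses it (the values here are illustrative, not from the diff):

# Illustrative values; in the executor these come from the NodeExecution object.
log = LogMetadata(
    user_id="user-123",
    graph_eid="graph-exec-1",
    graph_id="graph-9",
    node_eid="node-exec-4",
    node_id="node-7",
    block_name="GetRequest",
)
log.info("Executed node with input", input='{"url": "https://example.com"}')
# -> logger.info("[ExecutionManager|uid:user-123|...] Executed node with input {...}",
#                extra={"json_fields": {..., "input": '{"url": ...}'}})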
@@ -85,6 +115,7 @@ def execute_node(
     Returns:
         The subsequent node to be enqueued, or None if there is no subsequent node.
     """
+    user_id = data.user_id
     graph_exec_id = data.graph_exec_id
     graph_id = data.graph_id
     node_exec_id = data.node_exec_id
@@ -95,9 +126,10 @@ def execute_node(
     def wait(f: Coroutine[Any, Any, T]) -> T:
         return loop.run_until_complete(f)

-    def update_execution(status: ExecutionStatus):
+    def update_execution(status: ExecutionStatus) -> ExecutionResult:
         exec_update = wait(update_execution_status(node_exec_id, status))
         api_client.send_execution_update(exec_update.model_dump())
+        return exec_update

     node = wait(get_node(node_id))

@@ -107,7 +139,8 @@ def execute_node(
         return

     # Sanity check: validate the execution input.
-    log_metadata = get_log_metadata(
+    log_metadata = LogMetadata(
+        user_id=user_id,
         graph_eid=graph_exec_id,
         graph_id=graph_id,
         node_eid=node_exec_id,
@@ -116,29 +149,25 @@ def execute_node(
     )
     input_data, error = validate_exec(node, data.data, resolve_input=False)
     if input_data is None:
-        logger.error(
-            "Skip execution, input validation error",
-            extra={"json_fields": {**log_metadata, "error": error}},
-        )
+        log_metadata.error(f"Skip execution, input validation error: {error}")
         return

     # Execute the node
     input_data_str = json.dumps(input_data)
     input_size = len(input_data_str)
-    logger.info(
-        "Executed node with input",
-        extra={"json_fields": {**log_metadata, "input": input_data_str}},
-    )
+    log_metadata.info("Executed node with input", input=input_data_str)
     update_execution(ExecutionStatus.RUNNING)
+    user_credit = get_user_credit_model()

+    output_size = 0
     try:
+        credit = wait(user_credit.get_or_refill_credit(user_id))
+        if credit < 0:
+            raise ValueError(f"Insufficient credit: {credit}")
+
         for output_name, output_data in node_block.execute(input_data):
-            logger.info(
-                "Node produced output",
-                extra={"json_fields": {**log_metadata, output_name: output_data}},
-            )
+            output_size += len(json.dumps(output_data))
+            log_metadata.info("Node produced output", output_name=output_data)
             wait(upsert_execution_output(node_exec_id, output_name, output_data))

             for execution in _enqueue_next_nodes(
@@ -146,20 +175,25 @@ def execute_node(
                 loop=loop,
                 node=node,
                 output=(output_name, output_data),
+                user_id=user_id,
                 graph_exec_id=graph_exec_id,
                 graph_id=graph_id,
                 log_metadata=log_metadata,
             ):
                 yield execution

-        update_execution(ExecutionStatus.COMPLETED)
+        r = update_execution(ExecutionStatus.COMPLETED)
+        s = input_size + output_size
+        t = (
+            (r.end_time - r.start_time).total_seconds()
+            if r.end_time and r.start_time
+            else 0
+        )
+        wait(user_credit.spend_credits(user_id, credit, node_block, input_data, s, t))

     except Exception as e:
-        error_msg = f"{e.__class__.__name__}: {e}"
-        logger.exception(
-            "Node execution failed with error",
-            extra={"json_fields": {**log_metadata, error: error_msg}},
-        )
+        error_msg = str(e)
+        log_metadata.exception(f"Node execution failed with error {error_msg}")
         wait(upsert_execution_output(node_exec_id, "error", error_msg))
         update_execution(ExecutionStatus.FAILED)
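After this change a completed node reports two usage measures to spend_credits: s, the JSON-encoded size of inputs plus outputs, and t, the wall-clock seconds between the stored start and end timestamps. Which measure is billed depends on the matching BlockCost.cost_type. A worked example with assumed per-unit prices (not values from the diff):

# Assumed per-unit prices; the real ones live in BLOCK_COSTS.
run_price = 5       # BlockCostType.RUN: flat credits per run
byte_price = 1      # BlockCostType.BYTE: credits per byte of s
second_price = 2    # BlockCostType.SECOND: credits per second of t

s = 1_200           # input_size + output_size, in bytes
t = 3.4             # (r.end_time - r.start_time).total_seconds()

assert run_price == 5                  # RUN bills a flat amount
assert int(s * byte_price) == 1200     # BYTE scales with data size
assert int(t * second_price) == 6      # SECOND scales with runtime; int() truncates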
@@ -185,9 +219,10 @@ def _enqueue_next_nodes(
     loop: asyncio.AbstractEventLoop,
     node: Node,
     output: BlockData,
+    user_id: str,
     graph_exec_id: str,
     graph_id: str,
-    log_metadata: dict,
+    log_metadata: LogMetadata,
 ) -> list[NodeExecution]:
     def wait(f: Coroutine[Any, Any, T]) -> T:
         return loop.run_until_complete(f)
@@ -200,6 +235,7 @@ def _enqueue_next_nodes(
         )
         api_client.send_execution_update(exec_update.model_dump())
         return NodeExecution(
+            user_id=user_id,
            graph_exec_id=graph_exec_id,
             graph_id=graph_id,
             node_exec_id=node_exec_id,
@@ -253,17 +289,11 @@ def _enqueue_next_nodes(

         # Incomplete input data, skip queueing the execution.
         if not next_node_input:
-            logger.warning(
-                f"Skipped queueing {suffix}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.warning(f"Skipped queueing {suffix}")
             return enqueued_executions

         # Input is complete, enqueue the execution.
-        logger.info(
-            f"Enqueued {suffix}",
-            extra={"json_fields": {**log_metadata}},
-        )
+        log_metadata.info(f"Enqueued {suffix}")
         enqueued_executions.append(
             add_enqueued_execution(next_node_exec_id, next_node_id, next_node_input)
         )
@@ -289,11 +319,9 @@ def _enqueue_next_nodes(
                 idata, msg = validate_exec(next_node, idata)
                 suffix = f"{next_output_name}>{next_input_name}~{ineid}:{msg}"
                 if not idata:
-                    logger.info(
-                        f"{log_metadata} Enqueueing static-link skipped: {suffix}"
-                    )
+                    log_metadata.info(f"Enqueueing static-link skipped: {suffix}")
                     continue
-                logger.info(f"{log_metadata} Enqueueing static-link execution {suffix}")
+                log_metadata.info(f"Enqueueing static-link execution {suffix}")
                 enqueued_executions.append(
                     add_enqueued_execution(iexec.node_exec_id, next_node_id, idata)
                 )
@@ -362,9 +390,9 @@ def validate_exec(


 def get_agent_server_client() -> "AgentServer":
-    from autogpt_server.server.rest_api import AgentServer
+    from backend.server.rest_api import AgentServer

-    return get_service_client(AgentServer)
+    return get_service_client(AgentServer, Config().agent_server_port)


 class Executor:
@@ -434,7 +462,8 @@ class Executor:
     def on_node_execution(
         cls, q: ExecutionQueue[NodeExecution], node_exec: NodeExecution
     ):
-        log_metadata = get_log_metadata(
+        log_metadata = LogMetadata(
+            user_id=node_exec.user_id,
            graph_eid=node_exec.graph_exec_id,
             graph_id=node_exec.graph_id,
             node_eid=node_exec.node_exec_id,
@@ -459,28 +488,19 @@ class Executor:
         cls,
         q: ExecutionQueue[NodeExecution],
         node_exec: NodeExecution,
-        log_metadata: dict,
+        log_metadata: LogMetadata,
         stats: dict[str, Any] | None = None,
     ):
         try:
-            logger.info(
-                f"Start node execution {node_exec.node_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Start node execution {node_exec.node_exec_id}")
             for execution in execute_node(
                 cls.loop, cls.agent_server_client, node_exec, stats
             ):
                 q.add(execution)
-            logger.info(
-                f"Finished node execution {node_exec.node_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Finished node execution {node_exec.node_exec_id}")
         except Exception as e:
-            logger.exception(
-                f"Failed node execution {node_exec.node_exec_id}: {e}",
-                extra={
-                    **log_metadata,
-                },
+            log_metadata.exception(
+                f"Failed node execution {node_exec.node_exec_id}: {e}"
             )

     @classmethod
@@ -502,10 +522,12 @@ class Executor:

     @classmethod
     def on_graph_executor_stop(cls):
-        logger.info(
-            f"[on_graph_executor_stop {cls.pid}] ⏳ Terminating node executor pool..."
-        )
+        prefix = f"[on_graph_executor_stop {cls.pid}]"
+        logger.info(f"{prefix} ⏳ Disconnecting DB...")
+        cls.loop.run_until_complete(db.disconnect())
+        logger.info(f"{prefix} ⏳ Terminating node executor pool...")
         cls.executor.terminate()
+        logger.info(f"{prefix} ✅ Finished cleanup")

     @classmethod
     def _init_node_executor_pool(cls):
@@ -517,7 +539,8 @@ class Executor:
     @classmethod
     @error_logged
     def on_graph_execution(cls, graph_exec: GraphExecution, cancel: threading.Event):
-        log_metadata = get_log_metadata(
+        log_metadata = LogMetadata(
+            user_id=graph_exec.user_id,
            graph_eid=graph_exec.graph_exec_id,
            graph_id=graph_exec.graph_id,
            node_id="*",
@@ -542,12 +565,12 @@ class Executor:
     @classmethod
     @time_measured
     def _on_graph_execution(
-        cls, graph_exec: GraphExecution, cancel: threading.Event, log_metadata: dict
+        cls,
+        graph_exec: GraphExecution,
+        cancel: threading.Event,
+        log_metadata: LogMetadata,
     ) -> int:
-        logger.info(
-            f"Start graph execution {graph_exec.graph_exec_id}",
-            extra={"json_fields": {**log_metadata}},
-        )
+        log_metadata.info(f"Start graph execution {graph_exec.graph_exec_id}")
         n_node_executions = 0
         finished = False

@@ -557,10 +580,7 @@ class Executor:
             if finished:
                 return
             cls.executor.terminate()
-            logger.info(
-                f"Terminated graph execution {graph_exec.graph_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Terminated graph execution {graph_exec.graph_exec_id}")
            cls._init_node_executor_pool()

         cancel_thread = threading.Thread(target=cancel_handler)
@@ -598,10 +618,9 @@ class Executor:
                 # Re-enqueueing the data back to the queue will disrupt the order.
                 execution.wait()

-            logger.debug(
+            log_metadata.debug(
                 f"Dispatching node execution {exec_data.node_exec_id} "
                 f"for node {exec_data.node_id}",
-                extra={**log_metadata},
             )
             running_executions[exec_data.node_id] = cls.executor.apply_async(
                 cls.on_node_execution,
@@ -611,10 +630,8 @@ class Executor:

             # Avoid terminating graph execution when some nodes are still running.
             while queue.empty() and running_executions:
-                logger.debug(
-                    "Queue empty; running nodes: "
-                    f"{list(running_executions.keys())}",
-                    extra={"json_fields": {**log_metadata}},
+                log_metadata.debug(
+                    f"Queue empty; running nodes: {list(running_executions.keys())}"
                 )
                 for node_id, execution in list(running_executions.items()):
                     if cancel.is_set():
@@ -623,20 +640,13 @@ class Executor:
                     if not queue.empty():
                         break # yield to parent loop to execute new queue items

-                    logger.debug(
-                        f"Waiting on execution of node {node_id}",
-                        extra={"json_fields": {**log_metadata}},
-                    )
+                    log_metadata.debug(f"Waiting on execution of node {node_id}")
                     execution.wait(3)

-            logger.info(
-                f"Finished graph execution {graph_exec.graph_exec_id}",
-                extra={"json_fields": {**log_metadata}},
-            )
+            log_metadata.info(f"Finished graph execution {graph_exec.graph_exec_id}")
         except Exception as e:
-            logger.exception(
-                f"Failed graph execution {graph_exec.graph_exec_id}: {e}",
-                extra={"json_fields": {**log_metadata}},
+            log_metadata.exception(
+                f"Failed graph execution {graph_exec.graph_exec_id}: {e}"
             )
         finally:
             if not cancel.is_set():
@@ -648,6 +658,7 @@ class Executor:

 class ExecutionManager(AppService):
     def __init__(self):
+        super().__init__(port=Config().execution_manager_port)
         self.use_db = True
         self.pool_size = Config().num_graph_workers
         self.queue = ExecutionQueue[GraphExecution]()
@@ -698,7 +709,7 @@ class ExecutionManager(AppService):
         nodes_input = []
         for node in graph.starting_nodes:
             input_data = {}
-            if isinstance(get_block(node.block_id), InputBlock):
+            if isinstance(get_block(node.block_id), AgentInputBlock):
                 name = node.input_default.get("name")
                 if name and name in data:
                     input_data = {"value": data[name]}
@@ -722,6 +733,7 @@ class ExecutionManager(AppService):
         for node_exec in node_execs:
             starting_node_execs.append(
                 NodeExecution(
+                    user_id=user_id,
                    graph_exec_id=node_exec.graph_exec_id,
                    graph_id=node_exec.graph_id,
                    node_exec_id=node_exec.node_exec_id,
@@ -737,6 +749,7 @@ class ExecutionManager(AppService):
         self.agent_server_client.send_execution_update(exec_update.model_dump())

         graph_exec = GraphExecution(
+            user_id=user_id,
            graph_id=graph_id,
            graph_exec_id=graph_exec_id,
            start_node_execs=starting_node_execs,
@@ -5,10 +5,11 @@ from datetime import datetime
 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.triggers.cron import CronTrigger

-from autogpt_server.data import schedule as model
-from autogpt_server.data.block import BlockInput
-from autogpt_server.executor.manager import ExecutionManager
-from autogpt_server.util.service import AppService, expose, get_service_client
+from backend.data import schedule as model
+from backend.data.block import BlockInput
+from backend.executor.manager import ExecutionManager
+from backend.util.service import AppService, expose, get_service_client
+from backend.util.settings import Config

 logger = logging.getLogger(__name__)

@@ -19,13 +20,15 @@ def log(msg, **kwargs):

 class ExecutionScheduler(AppService):
     def __init__(self, refresh_interval=10):
+        super().__init__(port=Config().execution_scheduler_port)
         self.use_db = True
         self.last_check = datetime.min
         self.refresh_interval = refresh_interval
+        self.use_redis = False

     @property
     def execution_manager_client(self) -> ExecutionManager:
-        return get_service_client(ExecutionManager)
+        return get_service_client(ExecutionManager, Config().execution_manager_port)

     def run_service(self):
         scheduler = BackgroundScheduler()
@@ -0,0 +1,15 @@
from .base import BaseOAuthHandler
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler

HANDLERS_BY_NAME: dict[str, type[BaseOAuthHandler]] = {
    handler.PROVIDER_NAME: handler
    for handler in [
        GitHubOAuthHandler,
        GoogleOAuthHandler,
        NotionOAuthHandler,
    ]
}

__all__ = ["HANDLERS_BY_NAME"]
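HANDLERS_BY_NAME keys each handler class by its PROVIDER_NAME, so a provider string taken from a request can be resolved to a concrete handler without an if/elif chain. A rough usage sketch (the credential values are placeholders, not part of the diff):

from backend.integrations.oauth import HANDLERS_BY_NAME

handler_cls = HANDLERS_BY_NAME["github"]  # -> GitHubOAuthHandler
handler = handler_cls(
    client_id="...",       # placeholder
    client_secret="...",   # placeholder
    redirect_uri="https://example.com/integrations/github/callback",  # hypothetical URL
)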
@@ -5,7 +5,7 @@ from urllib.parse import urlencode
 import requests
 from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials

-from autogpt_server.integrations.oauth import BaseOAuthHandler
+from .base import BaseOAuthHandler


 class GitHubOAuthHandler(BaseOAuthHandler):
@@ -23,6 +23,7 @@ class GitHubOAuthHandler(BaseOAuthHandler):
     """ # noqa

     PROVIDER_NAME = "github"
+    EMAIL_ENDPOINT = "https://api.github.com/user/emails"

     def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
         self.client_id = client_id
@@ -69,10 +70,13 @@ class GitHubOAuthHandler(BaseOAuthHandler):
         response.raise_for_status()
         token_data: dict = response.json()

+        username = self._request_username(token_data["access_token"])
+
         now = int(time.time())
         new_credentials = OAuth2Credentials(
             provider=self.PROVIDER_NAME,
-            title=current_credentials.title if current_credentials else "GitHub",
+            title=current_credentials.title if current_credentials else None,
+            username=username,
             access_token=token_data["access_token"],
             # Token refresh responses have an empty `scope` property (see docs),
             # so we have to get the scope from the existing credentials object.
@@ -97,3 +101,19 @@ class GitHubOAuthHandler(BaseOAuthHandler):
         if current_credentials:
             new_credentials.id = current_credentials.id
         return new_credentials
+
+    def _request_username(self, access_token: str) -> str | None:
+        url = "https://api.github.com/user"
+        headers = {
+            "Accept": "application/vnd.github+json",
+            "Authorization": f"Bearer {access_token}",
+            "X-GitHub-Api-Version": "2022-11-28",
+        }
+
+        response = requests.get(url, headers=headers)
+
+        if not response.ok:
+            return None
+
+        # Get the login (username)
+        return response.json().get("login")
Some files were not shown because too many files have changed in this diff.