mirror of https://github.com/Significant-Gravitas/AutoGPT.git
synced 2026-04-08 03:00:28 -04:00

Compare commits — 4 commits (autogpt-pl... → swiftyos/s...)

| Author | SHA1 | Date |
|---|---|---|
| | da8184e48e | |
| | 3eddcfc254 | |
| | 063aabe431 | |
| | 8c3e635610 | |
.github/PULL_REQUEST_TEMPLATE.md (vendored) — 52 changes
@@ -1,38 +1,36 @@

### Background

<!-- Clearly explain the need for these changes: -->

### Changes 🏗️

<!-- Concisely describe all of the changes made in this pull request: -->

### Checklist 📋

#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
  <!-- Put your test plan here: -->
  - [ ] ...

### Testing 🔍
> [!NOTE]
> Only for the new autogpt platform, currently in autogpt_platform/

<details>
<summary>Example test plan</summary>

- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>

<!--
Please make sure your changes have been tested and are in good working condition.
Here is a list of our critical paths, if you need some inspiration on what and how to test:
-->

#### For configuration changes:
- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)

- Create from scratch and execute an agent with at least 3 blocks
- Import an agent from file upload, and confirm it executes correctly
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly

<details>
<summary>Examples of configuration changes</summary>

### Configuration Changes 📝
> [!NOTE]
> Only for the new autogpt platform, currently in autogpt_platform/

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>

If you're making configuration or infrastructure changes, please remember to check you've updated the related infrastructure code in the autogpt_platform/infra folder.

Examples of such changes might include:

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
(unnamed workflow file — header not captured in this view)

@@ -5,7 +5,7 @@ on:
    - cron: 20 4 * * 1,4

env:
-  BASE_BRANCH: dev
+  BASE_BRANCH: development
  IMAGE_NAME: auto-gpt

jobs:
@@ -15,46 +15,46 @@ jobs:
    matrix:
      build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          load: true # save to docker images
          # use GHA cache as read-only
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.schedule }}

          build_type: ${{ matrix.build-type }}

-         prod_branch: master
-         dev_branch: dev
-         repository: ${{ github.repository }}
-         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
+         prod_branch: master
+         dev_branch: development
+         repository: ${{ github.repository }}
+         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
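The `base_branch` expression in the build-report env above relies on GitHub Actions' `&&`/`||` short-circuit idiom to emulate a ternary (the expression language has no `? :` operator). A minimal Python sketch of the same selection logic, using the branch names from the new side of this diff:

```python
def base_branch(ref_name: str) -> str:
    # Mirrors: ref_name != 'master' && ref_name != 'development'
    #          && 'development' || 'master'
    # `and`/`or` short-circuiting makes this behave like a ternary.
    return (
        ref_name != "master" and ref_name != "development" and "development"
    ) or "master"

assert base_branch("feature/foo") == "development"  # other refs diff against development
assert base_branch("development") == "master"       # development itself is compared to master
assert base_branch("master") == "master"
```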
          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.sha }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
          push_forced_label:

          new_commits_json: ${{ null }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
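In the `compare_url_template` value above, the doubled braces `{{` and `}}` are the `format()` escape for literal braces, so the rendered string keeps `{base}` and `{head}` as placeholders, presumably for the summary script to substitute later. Python's `str.format` follows the same escaping rule:

```python
# Same brace-escaping behaviour as the GitHub Actions format() call above.
template = "/{0}/compare/{{base}}...{{head}}".format("Significant-Gravitas/AutoGPT")
assert template == "/Significant-Gravitas/AutoGPT/compare/{base}...{head}"
```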
          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true
.github/workflows/classic-autogpt-docker-ci.yml (vendored) — 90 changes
@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker CI

on:
  push:
-    branches: [master, dev]
+    branches: [ master, development ]
    paths:
      - '.github/workflows/classic-autogpt-docker-ci.yml'
      - 'classic/original_autogpt/**'

@@ -34,58 +34,58 @@ jobs:
    matrix:
      build-type: [release, dev]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - if: runner.debug
        run: |
          ls -al
          du -hs *

      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          tags: ${{ env.IMAGE_NAME }}
          labels: GIT_REVISION=${{ github.sha }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref }}

          build_type: ${{ matrix.build-type }}

-         prod_branch: master
-         dev_branch: dev
-         repository: ${{ github.repository }}
-         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
+         prod_branch: master
+         dev_branch: development
+         repository: ${{ github.repository }}
+         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.event.after }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
          push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

          new_commits_json: ${{ toJSON(github.event.commits) }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true

  test:
    runs-on: ubuntu-latest

@@ -121,12 +121,12 @@ jobs:
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=dev # include pytest
          tags: >
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
          labels: GIT_REVISION=${{ github.sha }}
          load: true # save to docker images
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: type=gha,scope=autogpt-docker-dev
          cache-to: type=gha,scope=autogpt-docker-dev,mode=max
.github/workflows/classic-autogpt-docker-release.yml (vendored) — 112 changes
@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker Release

on:
  release:
-    types: [published, edited]
+    types: [ published, edited ]

  workflow_dispatch:
    inputs:

@@ -19,69 +19,69 @@ jobs:
    if: startsWith(github.ref, 'refs/tags/autogpt-')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Docker hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # slashes are not allowed in image tags, but can appear in git branch or tag names
      - id: sanitize_tag
        name: Sanitize image tag
        run: |
          tag=${raw_tag//\//-}
          echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
        env:
          raw_tag: ${{ github.ref_name }}
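The sanitize step is plain bash parameter expansion: `${raw_tag//\//-}` replaces every `/` with `-` (slashes are not valid in Docker image tags), and `${tag#autogpt-}` strips a leading `autogpt-` release prefix if present. A quick Python sketch of the same transformation:

```python
def sanitize_tag(raw_tag: str) -> str:
    """Mirror of the bash step: '/' -> '-', then drop a leading 'autogpt-'."""
    tag = raw_tag.replace("/", "-")        # ${raw_tag//\//-}
    return tag.removeprefix("autogpt-")    # ${tag#autogpt-} (Python 3.9+)

assert sanitize_tag("autogpt-v0.5.0") == "v0.5.0"
# Slashes are replaced first, so the prefix no longer matches afterwards:
assert sanitize_tag("release/autogpt-v0.5.0") == "release-autogpt-v0.5.0"
```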
      - id: build
        name: Build image
        uses: docker/build-push-action@v5
        with:
          context: classic/
          file: Dockerfile.autogpt
          build-args: BUILD_TYPE=release
          load: true # save to docker images
          # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
          tags: >
            ${{ env.IMAGE_NAME }},
            ${{ env.DEPLOY_IMAGE_NAME }}:latest,
            ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
          labels: GIT_REVISION=${{ github.sha }}
          # cache layers in GitHub Actions cache to speed up builds
          cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
          cache-to: type=gha,scope=autogpt-docker-release,mode=max

      - name: Push image to Docker Hub
        run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.ref }}
          event_ref_type: ${{ github.event.ref }}
          inputs_no_cache: ${{ inputs.no_cache }}

-         prod_branch: master
-         dev_branch: dev
-         repository: ${{ github.repository }}
-         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
+         prod_branch: master
+         dev_branch: development
+         repository: ${{ github.repository }}
+         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

          ref_type: ${{ github.ref_type }}
          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.sha }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true
.github/workflows/classic-benchmark-ci.yml (vendored) — 24 changes
@@ -102,7 +102,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-       agent-name: [forge]
+       agent-name: [ forge ]
      fail-fast: false
    timeout-minutes: 20
    steps:

@@ -146,23 +146,23 @@ jobs:
          echo "Running the following command: poetry run agbenchmark --mock --category=coding"
          poetry run agbenchmark --mock --category=coding

-         # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
-         # poetry run agbenchmark --test=WriteFile
+         echo "Running the following command: poetry run agbenchmark --test=WriteFile"
+         poetry run agbenchmark --test=WriteFile
          cd ../benchmark
          poetry install
          echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
          export BUILD_SKILL_TREE=true

-         # poetry run agbenchmark --mock
+         poetry run agbenchmark --mock

-         # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
-         # if [ ! -z "$CHANGED" ]; then
-         #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
-         #   echo "$CHANGED"
-         #   exit 1
-         # else
-         #   echo "No unstaged changes."
-         # fi
+         CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
+         if [ ! -z "$CHANGED" ]; then
+           echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
+           echo "$CHANGED"
+           exit 1
+         else
+           echo "No unstaged changes."
+         fi
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
.github/workflows/classic-frontend-ci.yml (vendored) — 62 changes
@@ -4,7 +4,7 @@ on:
  push:
    branches:
      - master
-      - dev
+      - development
      - 'ci-test*' # This will match any branch that starts with "ci-test"
    paths:
      - 'classic/frontend/**'

@@ -24,37 +24,37 @@ jobs:
      BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}

    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Setup Flutter
        uses: subosito/flutter-action@v2
        with:
          flutter-version: '3.13.2'

      - name: Build Flutter to Web
        run: |
          cd classic/frontend
          flutter build web --base-href /app/

      # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
      #   if: github.event_name == 'push'
      #   run: |
      #     git config --local user.email "action@github.com"
      #     git config --local user.name "GitHub Action"
      #     git add classic/frontend/build/web
      #     git checkout -B ${{ env.BUILD_BRANCH }}
      #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
      #     git push -f origin ${{ env.BUILD_BRANCH }}

      - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
        if: github.event_name == 'push'
        uses: peter-evans/create-pull-request@v7
        with:
          add-paths: classic/frontend/build/web
          base: ${{ github.ref_name }}
          branch: ${{ env.BUILD_BRANCH }}
          delete-branch: true
          title: "Update frontend build in `${{ github.ref_name }}`"
          body: "This PR updates the frontend build based on commit ${{ github.sha }}."
          commit-message: "Update frontend build based on commit ${{ github.sha }}"
.github/workflows/platform-autgpt-deploy-prod.yml (vendored) — 147 changes
@@ -1,4 +1,4 @@
-name: AutoGPT Platform - Deploy Prod Environment
+name: AutoGPT Platform - Build, Push, and Deploy Prod Environment

on:
  release:

@@ -8,6 +8,12 @@ permissions:
  contents: 'read'
  id-token: 'write'

+env:
+  PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
+  GKE_CLUSTER: prod-gke-cluster
+  GKE_ZONE: us-central1-a
+  NAMESPACE: prod-agpt

jobs:
  migrate:
    environment: production

@@ -42,14 +48,135 @@ jobs:
    env:
      DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

-  trigger:
-    needs: migrate
+  build-push-deploy:
+    environment: production
+    name: Build, Push, and Deploy
    runs-on: ubuntu-latest

    steps:
-      - name: Trigger deploy workflow
-        uses: peter-evans/repository-dispatch@v3
-        with:
-          token: ${{ secrets.DEPLOY_TOKEN }}
-          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
-          event-type: build_deploy_prod
-          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v2'
+        with:
+          workload_identity_provider: 'projects/1021527134101/locations/global/workloadIdentityPools/prod-pool/providers/github'
+          service_account: 'prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com'
+          token_format: 'access_token'
+          create_credentials_file: true
+
+      - name: 'Set up Cloud SDK'
+        uses: 'google-github-actions/setup-gcloud@v2'
+
+      - name: 'Configure Docker'
+        run: |
+          gcloud auth configure-docker us-east1-docker.pkg.dev
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Cache Docker layers
+        uses: actions/cache@v4
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+
+      - name: Check for changes
+        id: check_changes
+        run: |
+          git fetch origin master
+          BACKEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false")
+          FRONTEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false")
+          MARKET_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false")
+          echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT
+          echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT
+          echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT
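One caveat with the `check_changes` step above: inside `$(...)`, `grep` prints the matching paths before `echo "true"` runs, so the captured variable is not a clean `true`/`false` when files did change; `grep -q` would avoid that. A Python sketch of the intended per-component change detection (component prefixes taken from the step above):

```python
import subprocess

def changed_components(base: str = "origin/master") -> dict[str, bool]:
    """Classify changed paths by component prefix, as the workflow step intends."""
    diff = subprocess.run(
        ["git", "diff", "--name-only", base, "HEAD"],
        capture_output=True, text=True, check=True,
    ).stdout.splitlines()
    prefixes = {
        "backend": "autogpt_platform/backend/",
        "frontend": "autogpt_platform/frontend/",
        "market": "autogpt_platform/market/",
    }
    return {
        name: any(path.startswith(prefix) for path in diff)
        for name, prefix in prefixes.items()
    }
```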
+      - name: Get GKE credentials
+        uses: 'google-github-actions/get-gke-credentials@v2'
+        with:
+          cluster_name: ${{ env.GKE_CLUSTER }}
+          location: ${{ env.GKE_ZONE }}
+
+      - name: Build and Push Backend
+        if: steps.check_changes.outputs.backend_changed == 'true'
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./autogpt_platform/backend/Dockerfile
+          push: true
+          tags: us-east1-docker.pkg.dev/agpt-prod/agpt-backend-prod/agpt-backend-prod:${{ github.sha }}
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+      - name: Build and Push Frontend
+        if: steps.check_changes.outputs.frontend_changed == 'true'
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./autogpt_platform/frontend/Dockerfile
+          push: true
+          tags: us-east1-docker.pkg.dev/agpt-prod/agpt-frontend-prod/agpt-frontend-prod:${{ github.sha }}
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+      - name: Build and Push Market
+        if: steps.check_changes.outputs.market_changed == 'true'
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./autogpt_platform/market/Dockerfile
+          push: true
+          tags: us-east1-docker.pkg.dev/agpt-prod/agpt-market-prod/agpt-market-prod:${{ github.sha }}
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+      - name: Move cache
+        run: |
+          rm -rf /tmp/.buildx-cache
+          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v4
+        with:
+          version: v3.4.0
+
+      - name: Deploy Backend
+        if: steps.check_changes.outputs.backend_changed == 'true'
+        run: |
+          helm upgrade autogpt-server ./autogpt-server \
+            --namespace ${{ env.NAMESPACE }} \
+            -f autogpt-server/values.yaml \
+            -f autogpt-server/values.prod.yaml \
+            --set image.tag=${{ github.sha }}
+
+      - name: Deploy Websocket
+        if: steps.check_changes.outputs.backend_changed == 'true'
+        run: |
+          helm upgrade autogpt-websocket-server ./autogpt-websocket-server \
+            --namespace ${{ env.NAMESPACE }} \
+            -f autogpt-websocket-server/values.yaml \
+            -f autogpt-websocket-server/values.prod.yaml \
+            --set image.tag=${{ github.sha }}
+
+      - name: Deploy Market
+        if: steps.check_changes.outputs.market_changed == 'true'
+        run: |
+          helm upgrade autogpt-market ./autogpt-market \
+            --namespace ${{ env.NAMESPACE }} \
+            -f autogpt-market/values.yaml \
+            -f autogpt-market/values.prod.yaml \
+            --set image.tag=${{ github.sha }}
+
+      - name: Deploy Frontend
+        if: steps.check_changes.outputs.frontend_changed == 'true'
+        run: |
+          helm upgrade autogpt-builder ./autogpt-builder \
+            --namespace ${{ env.NAMESPACE }} \
+            -f autogpt-builder/values.yaml \
+            -f autogpt-builder/values.prod.yaml \
+            --set image.tag=${{ github.sha }}
(deleted workflow file — header not captured in this view)

@@ -1,57 +0,0 @@
name: AutoGPT Platform - Deploy Dev Environment

on:
  push:
    branches: [ dev ]
    paths:
      - 'autogpt_platform/**'

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: develop
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

      - name: Run Market Migrations
        working-directory: ./autogpt_platform/market
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_dev
          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
.github/workflows/platform-autogpt-deploy.yaml (vendored, new file) — 186 changes
@@ -0,0 +1,186 @@
name: AutoGPT Platform - Build, Push, and Deploy Dev Environment

on:
  push:
    branches: [ dev ]
    paths:
      - 'autogpt_platform/backend/**'
      - 'autogpt_platform/frontend/**'
      - 'autogpt_platform/market/**'

permissions:
  contents: 'read'
  id-token: 'write'

env:
  PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
  GKE_CLUSTER: dev-gke-cluster
  GKE_ZONE: us-central1-a
  NAMESPACE: dev-agpt

jobs:
  migrate:
    environment: develop
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

      - name: Run Market Migrations
        working-directory: ./autogpt_platform/market
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

  build-push-deploy:
    name: Build, Push, and Deploy
    needs: migrate
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - id: 'auth'
        uses: 'google-github-actions/auth@v2'
        with:
          workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/dev-pool/providers/github'
          service_account: 'dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com'
          token_format: 'access_token'
          create_credentials_file: true

      - name: 'Set up Cloud SDK'
        uses: 'google-github-actions/setup-gcloud@v2'

      - name: 'Configure Docker'
        run: |
          gcloud auth configure-docker us-east1-docker.pkg.dev

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      - name: Check for changes
        id: check_changes
        run: |
          git fetch origin dev
          BACKEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false")
          FRONTEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false")
          MARKET_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false")
          echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT
          echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT
          echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT

      - name: Get GKE credentials
        uses: 'google-github-actions/get-gke-credentials@v2'
        with:
          cluster_name: ${{ env.GKE_CLUSTER }}
          location: ${{ env.GKE_ZONE }}

      - name: Build and Push Backend
        if: steps.check_changes.outputs.backend_changed == 'true'
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./autogpt_platform/backend/Dockerfile
          push: true
          tags: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev:${{ github.sha }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

      - name: Build and Push Frontend
        if: steps.check_changes.outputs.frontend_changed == 'true'
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./autogpt_platform/frontend/Dockerfile
          push: true
          tags: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev:${{ github.sha }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

      - name: Build and Push Market
        if: steps.check_changes.outputs.market_changed == 'true'
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./autogpt_platform/market/Dockerfile
          push: true
          tags: us-east1-docker.pkg.dev/agpt-dev/agpt-market-dev/agpt-market-dev:${{ github.sha }}
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

      - name: Move cache
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.4.0

      - name: Deploy Backend
        if: steps.check_changes.outputs.backend_changed == 'true'
        run: |
          helm upgrade autogpt-server ./autogpt-server \
            --namespace ${{ env.NAMESPACE }} \
            -f autogpt-server/values.yaml \
            -f autogpt-server/values.dev.yaml \
            --set image.tag=${{ github.sha }}

      - name: Deploy Websocket
        if: steps.check_changes.outputs.backend_changed == 'true'
        run: |
          helm upgrade autogpt-websocket-server ./autogpt-websocket-server \
            --namespace ${{ env.NAMESPACE }} \
            -f autogpt-websocket-server/values.yaml \
            -f autogpt-websocket-server/values.dev.yaml \
            --set image.tag=${{ github.sha }}

      - name: Deploy Market
        if: steps.check_changes.outputs.market_changed == 'true'
        run: |
          helm upgrade autogpt-market ./autogpt-market \
            --namespace ${{ env.NAMESPACE }} \
            -f autogpt-market/values.yaml \
            -f autogpt-market/values.dev.yaml \
            --set image.tag=${{ github.sha }}

      - name: Deploy Frontend
        if: steps.check_changes.outputs.frontend_changed == 'true'
        run: |
          helm upgrade autogpt-builder ./autogpt-builder \
            --namespace ${{ env.NAMESPACE }} \
            -f autogpt-builder/values.yaml \
            -f autogpt-builder/values.dev.yaml \
            --set image.tag=${{ github.sha }}
.github/workflows/platform-autogpt-infra-ci.yml (vendored, new file) — 56 changes
@@ -0,0 +1,56 @@
name: AutoGPT Platform - Infra

on:
  push:
    branches: [ master, dev ]
    paths:
      - '.github/workflows/platform-autogpt-infra-ci.yml'
      - 'autogpt_platform/infra/**'
  pull_request:
    paths:
      - '.github/workflows/platform-autogpt-infra-ci.yml'
      - 'autogpt_platform/infra/**'

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/infra

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: TFLint
        uses: pauloconnor/tflint-action@v0.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tflint_path: terraform/
          tflint_recurse: true
          tflint_changed_only: false

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.14.4

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.1

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Run chart-testing (lint)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct lint --target-branch ${{ github.event.repository.default_branch }}
.github/workflows/platform-frontend-ci.yml (vendored) — 4 changes
@@ -69,10 +69,6 @@ jobs:
        run: |
          cp ../supabase/docker/.env.example ../.env

-      - name: Copy backend .env
-        run: |
-          cp ../backend/.env.example ../backend/.env

      - name: Run docker compose
        run: |
          docker compose -f ../docker-compose.yml up -d
.pre-commit-config.yaml:

@@ -9,7 +9,7 @@ repos:
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:

@@ -19,117 +19,27 @@ repos:
        files: ^autogpt_platform/
        stages: [push]

-  - repo: local
-    # For proper type checking, all dependencies need to be up-to-date.
-    # It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
-    hooks:
-      - id: poetry-install
-        name: Check & Install dependencies - AutoGPT Platform - Backend
-        alias: poetry-install-platform-backend
-        entry: poetry -C autogpt_platform/backend install
-        # include autogpt_libs source (since it's a path dependency)
-        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
-        types: [file]
-        language: system
-        pass_filenames: false
-
-      - id: poetry-install
-        name: Check & Install dependencies - AutoGPT Platform - Libs
-        alias: poetry-install-platform-libs
-        entry: poetry -C autogpt_platform/autogpt_libs install
-        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
-        types: [file]
-        language: system
-        pass_filenames: false
-
-      - id: poetry-install
-        name: Check & Install dependencies - Classic - AutoGPT
-        alias: poetry-install-classic-autogpt
-        entry: poetry -C classic/original_autogpt install
-        # include forge source (since it's a path dependency)
-        files: ^classic/(original_autogpt|forge)/poetry\.lock$
-        types: [file]
-        language: system
-        pass_filenames: false
-
-      - id: poetry-install
-        name: Check & Install dependencies - Classic - Forge
-        alias: poetry-install-classic-forge
-        entry: poetry -C classic/forge install
-        files: ^classic/forge/poetry\.lock$
-        types: [file]
-        language: system
-        pass_filenames: false
-
-      - id: poetry-install
-        name: Check & Install dependencies - Classic - Benchmark
-        alias: poetry-install-classic-benchmark
-        entry: poetry -C classic/benchmark install
-        files: ^classic/benchmark/poetry\.lock$
-        types: [file]
-        language: system
-        pass_filenames: false
-
-  - repo: local
-    # For proper type checking, Prisma client must be up-to-date.
-    hooks:
-      - id: prisma-generate
-        name: Prisma Generate - AutoGPT Platform - Backend
-        alias: prisma-generate-platform-backend
-        entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
-        # include everything that triggers poetry install + the prisma schema
-        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
-        types: [file]
-        language: system
-        pass_filenames: false

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Backend
        alias: ruff-lint-platform-backend
        files: ^autogpt_platform/backend/
        args: [--fix]

      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/
        args: [--fix]

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
    hooks:
      - id: isort
        name: Lint (isort) - AutoGPT Platform - Backend
        alias: isort-platform-backend
        entry: poetry -C autogpt_platform/backend run isort -p backend
        files: ^autogpt_platform/backend/
        types: [file, python]
        language: system

-     - id: isort
-       name: Lint (isort) - Classic - AutoGPT
-       alias: isort-classic-autogpt
-       entry: poetry -C classic/original_autogpt run isort -p autogpt
+     - id: isort-autogpt
+       name: Lint (isort) - AutoGPT
+       entry: poetry -C classic/original_autogpt run isort
        files: ^classic/original_autogpt/
        types: [file, python]
        language: system

-     - id: isort
-       name: Lint (isort) - Classic - Forge
-       alias: isort-classic-forge
-       entry: poetry -C classic/forge run isort -p forge
+     - id: isort-forge
+       name: Lint (isort) - Forge
+       entry: poetry -C classic/forge run isort
        files: ^classic/forge/
        types: [file, python]
        language: system

-     - id: isort
-       name: Lint (isort) - Classic - Benchmark
-       alias: isort-classic-benchmark
-       entry: poetry -C classic/benchmark run isort -p agbenchmark
+     - id: isort-benchmark
+       name: Lint (isort) - Benchmark
+       entry: poetry -C classic/benchmark run isort
        files: ^classic/benchmark/
        types: [file, python]
        language: system

@@ -141,6 +51,7 @@ repos:
    hooks:
      - id: black
        name: Lint (Black)
        language_version: python3.12

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0

@@ -148,20 +59,20 @@ repos:
    # them separately.
    hooks:
      - id: flake8
-       name: Lint (Flake8) - Classic - AutoGPT
-       alias: flake8-classic-autogpt
+       name: Lint (Flake8) - AutoGPT
+       alias: flake8-autogpt
        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
        args: [--config=classic/original_autogpt/.flake8]

      - id: flake8
-       name: Lint (Flake8) - Classic - Forge
-       alias: flake8-classic-forge
+       name: Lint (Flake8) - Forge
+       alias: flake8-forge
        files: ^classic/forge/(forge|tests)/
        args: [--config=classic/forge/.flake8]

      - id: flake8
-       name: Lint (Flake8) - Classic - Benchmark
-       alias: flake8-classic-benchmark
+       name: Lint (Flake8) - Benchmark
+       alias: flake8-benchmark
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]

@@ -170,52 +81,31 @@ repos:
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
-     - id: pyright
-       name: Typecheck - AutoGPT Platform - Backend
-       alias: pyright-platform-backend
-       entry: poetry -C autogpt_platform/backend run pyright
-       args: [-p, autogpt_platform/backend, autogpt_platform/backend]
-       # include forge source (since it's a path dependency) but exclude *_test.py files:
-       files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
-       types: [file]
-       language: system
-       pass_filenames: false
-
-     - id: pyright
-       name: Typecheck - AutoGPT Platform - Libs
-       alias: pyright-platform-libs
-       entry: poetry -C autogpt_platform/autogpt_libs run pyright
-       args: [-p, autogpt_platform/autogpt_libs, autogpt_platform/autogpt_libs]
-       files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
-       types: [file]
-       language: system
-       pass_filenames: false
-
      - id: pyright
-       name: Typecheck - Classic - AutoGPT
-       alias: pyright-classic-autogpt
+       name: Typecheck - AutoGPT
+       alias: pyright-autogpt
        entry: poetry -C classic/original_autogpt run pyright
-       args: [-p, classic/original_autogpt, classic/original_autogpt]
+       args: [-p, autogpt, autogpt]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
-       files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+       files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
-       name: Typecheck - Classic - Forge
-       alias: pyright-classic-forge
+       name: Typecheck - Forge
+       alias: pyright-forge
        entry: poetry -C classic/forge run pyright
-       args: [-p, classic/forge, classic/forge]
-       files: ^classic/forge/(forge/|poetry\.lock$)
+       args: [-p, forge, forge]
+       files: ^classic/forge/(classic/forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
-       name: Typecheck - Classic - Benchmark
-       alias: pyright-classic-benchmark
+       name: Typecheck - Benchmark
+       alias: pyright-benchmark
        entry: poetry -C classic/benchmark run pyright
-       args: [-p, classic/benchmark, classic/benchmark]
+       args: [-p, benchmark, benchmark]
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system

@@ -223,35 +113,23 @@ repos:

  - repo: local
    hooks:
-     - id: pytest
-       name: Run tests - AutoGPT Platform - Backend
-       alias: pytest-platform-backend
-       entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
-       # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
-       files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
-       language: system
-       pass_filenames: false
-
-     - id: pytest
-       name: Run tests - Classic - AutoGPT (excl. slow tests)
-       alias: pytest-classic-autogpt
+     - id: pytest-autogpt
+       name: Run tests - AutoGPT (excl. slow tests)
        entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
        # include forge source (since it's a path dependency) but exclude *_test.py files:
-       files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+       files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

-     - id: pytest
-       name: Run tests - Classic - Forge (excl. slow tests)
-       alias: pytest-classic-forge
+     - id: pytest-forge
+       name: Run tests - Forge (excl. slow tests)
        entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
-       files: ^classic/forge/(forge/|tests/|poetry\.lock$)
+       files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false

-     - id: pytest
-       name: Run tests - Classic - Benchmark
-       alias: pytest-classic-benchmark
+     - id: pytest-benchmark
+       name: Run tests - Benchmark
        entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        language: system
.vscode/launch.json (vendored) — 67 changes
@@ -1,67 +0,0 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Frontend: Server Side",
      "type": "node-terminal",
      "request": "launch",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "command": "yarn dev"
    },
    {
      "name": "Frontend: Client Side",
      "type": "msedge",
      "request": "launch",
      "url": "http://localhost:3000"
    },
    {
      "name": "Frontend: Full Stack",
      "type": "node-terminal",
      "request": "launch",
      "command": "yarn dev",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "serverReadyAction": {
        "pattern": "- Local:.+(https?://.+)",
        "uriFormat": "%s",
        "action": "debugWithEdge"
      }
    },
    {
      "name": "Backend",
      "type": "debugpy",
      "request": "launch",
      "module": "backend.app",
      // "env": {
      //   "ENV": "dev"
      // },
      "envFile": "${workspaceFolder}/backend/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/autogpt_platform/backend"
    },
    {
      "name": "Marketplace",
      "type": "debugpy",
      "request": "launch",
      "module": "autogpt_platform.market.main",
      "env": {
        "ENV": "dev"
      },
      "envFile": "${workspaceFolder}/market/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/market"
    }
  ],
  "compounds": [
    {
      "name": "Everything",
      "configurations": ["Backend", "Frontend: Full Stack"],
      // "preLaunchTask": "${defaultBuildTask}",
      "stopAll": true,
      "presentation": {
        "hidden": false,
        "order": 0
      }
    }
  ]
}
SECURITY.md — 47 changes
@@ -1,47 +0,0 @@
# Security Policy

## Reporting Security Issues

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process

### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- Share any potential mitigations or workarounds if known

## Supported Versions
Only the following versions are eligible for security updates:

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).

---
Last updated: November 2024
(deleted Python module — file name not captured in this view)

@@ -1,31 +0,0 @@
from typing import NamedTuple
import secrets
import hashlib


class APIKeyContainer(NamedTuple):
    """Container for API key parts."""
    raw: str
    prefix: str
    postfix: str
    hash: str


class APIKeyManager:
    PREFIX: str = "agpt_"
    PREFIX_LENGTH: int = 8
    POSTFIX_LENGTH: int = 8

    def generate_api_key(self) -> APIKeyContainer:
        """Generate a new API key with all its parts."""
        raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
        return APIKeyContainer(
            raw=raw_key,
            prefix=raw_key[:self.PREFIX_LENGTH],
            postfix=raw_key[-self.POSTFIX_LENGTH:],
            hash=hashlib.sha256(raw_key.encode()).hexdigest()
        )

    def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
        """Verify if a provided API key matches the stored hash."""
        if not provided_key.startswith(self.PREFIX):
            return False
        return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
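A short, hypothetical usage sketch of the deleted module above (driver code that was not part of the file). One hardening note: `verify_api_key` compares hex digests with `==`; `hmac.compare_digest` is the usual constant-time alternative:

```python
import hashlib
import hmac

manager = APIKeyManager()
key = manager.generate_api_key()

# Persist only key.hash (plus prefix/postfix for display); show key.raw to the user once.
assert manager.verify_api_key(key.raw, key.hash)
assert not manager.verify_api_key("agpt_not-the-real-key", key.hash)

def verify_api_key_constant_time(provided_key: str, stored_hash: str) -> bool:
    """Variant using a timing-safe comparison of the SHA-256 digests."""
    if not provided_key.startswith(APIKeyManager.PREFIX):
        return False
    digest = hashlib.sha256(provided_key.encode()).hexdigest()
    return hmac.compare_digest(digest, stored_hash)
```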
@@ -1,8 +1,7 @@
import fastapi

from .middleware import auth_middleware
-from .models import User, DEFAULT_USER_ID, DEFAULT_EMAIL
-from .config import Settings
+from .models import User


def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:

@@ -17,12 +16,8 @@ def requires_admin_user(

def verify_user(payload: dict | None, admin_only: bool) -> User:
    if not payload:
-        if Settings.ENABLE_AUTH:
            raise fastapi.HTTPException(
                status_code=401, detail="Authorization header is missing"
            )
-        # This handles the case when authentication is disabled
-        payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
+        payload = {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "admin"}

    user_id = payload.get("sub")
@@ -1,8 +1,5 @@
from dataclasses import dataclass

-DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
-DEFAULT_EMAIL = "default@example.com"


# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
@@ -46,21 +46,21 @@ replicate_credentials = APIKeyCredentials(
)
openai_credentials = APIKeyCredentials(
    id="53c25cb8-e3ee-465c-a4d1-e75a4c899c2a",
-    provider="openai",
+    provider="llm",
    api_key=SecretStr(settings.secrets.openai_api_key),
    title="Use Credits for OpenAI",
    expires_at=None,
)
anthropic_credentials = APIKeyCredentials(
    id="24e5d942-d9e3-4798-8151-90143ee55629",
-    provider="anthropic",
+    provider="llm",
    api_key=SecretStr(settings.secrets.anthropic_api_key),
    title="Use Credits for Anthropic",
    expires_at=None,
)
groq_credentials = APIKeyCredentials(
    id="4ec22295-8f97-4dd1-b42b-2c6957a02545",
-    provider="groq",
+    provider="llm",
    api_key=SecretStr(settings.secrets.groq_api_key),
    title="Use Credits for Groq",
    expires_at=None,

@@ -72,28 +72,6 @@ did_credentials = APIKeyCredentials(
    title="Use Credits for D-ID",
    expires_at=None,
)
-jina_credentials = APIKeyCredentials(
-    id="7f26de70-ba0d-494e-ba76-238e65e7b45f",
-    provider="jina",
-    api_key=SecretStr(settings.secrets.jina_api_key),
-    title="Use Credits for Jina",
-    expires_at=None,
-)
-unreal_credentials = APIKeyCredentials(
-    id="66f20754-1b81-48e4-91d0-f4f0dd82145f",
-    provider="unreal",
-    api_key=SecretStr(settings.secrets.unreal_speech_api_key),
-    title="Use Credits for Unreal",
-    expires_at=None,
-)
-open_router_credentials = APIKeyCredentials(
-    id="b5a0e27d-0c98-4df3-a4b9-10193e1f3c40",
-    provider="open_router",
-    api_key=SecretStr(settings.secrets.open_router_api_key),
-    title="Use Credits for Open Router",
-    expires_at=None,
-)


DEFAULT_CREDENTIALS = [
    revid_credentials,

@@ -103,9 +81,6 @@ DEFAULT_CREDENTIALS = [
    anthropic_credentials,
    groq_credentials,
    did_credentials,
-    jina_credentials,
-    unreal_credentials,
-    open_router_credentials,
]


@@ -149,12 +124,6 @@ class SupabaseIntegrationCredentialsStore:
        all_credentials.append(anthropic_credentials)
        if settings.secrets.did_api_key:
            all_credentials.append(did_credentials)
-        if settings.secrets.jina_api_key:
-            all_credentials.append(jina_credentials)
-        if settings.secrets.unreal_speech_api_key:
-            all_credentials.append(unreal_credentials)
-        if settings.secrets.open_router_api_key:
-            all_credentials.append(open_router_credentials)
        return all_credentials

    def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
76
autogpt_platform/autogpt_libs/poetry.lock
generated
@@ -626,13 +626,13 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]

[[package]]
name = "gotrue"
version = "2.10.0"
version = "2.9.3"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = "<4.0,>=3.9"
files = [
    {file = "gotrue-2.10.0-py3-none-any.whl", hash = "sha256:768e58207488e5184ffbdc4351b7280d913daf97962f4e9f2cca05c80004b042"},
    {file = "gotrue-2.10.0.tar.gz", hash = "sha256:4edf4c251da3535f2b044e23deba221e848ca1210c17d0c7a9b19f79a1e3f3c0"},
    {file = "gotrue-2.9.3-py3-none-any.whl", hash = "sha256:9d2e9c74405d879f4828e0a7b94daf167a6e109c10ae6e5c59a0e21446f6e423"},
    {file = "gotrue-2.9.3.tar.gz", hash = "sha256:051551d80e642bdd2ab42cac78207745d89a2a08f429a1512d82624e675d8255"},
]

[package.dependencies]
@@ -986,13 +986,13 @@ files = [

[[package]]
name = "postgrest"
version = "0.18.0"
version = "0.17.2"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = "<4.0,>=3.9"
files = [
    {file = "postgrest-0.18.0-py3-none-any.whl", hash = "sha256:200baad0d23fee986b3a0ffd3e07bfe0cdd40e09760f11e8e13a6c0c2376d5fa"},
    {file = "postgrest-0.18.0.tar.gz", hash = "sha256:29c1a94801a17eb9ad590189993fe5a7a6d8c1bfc11a3c9d0ce7ba146454ebb3"},
    {file = "postgrest-0.17.2-py3-none-any.whl", hash = "sha256:f7c4f448e5a5e2d4c1dcf192edae9d1007c4261e9a6fb5116783a0046846ece2"},
    {file = "postgrest-0.17.2.tar.gz", hash = "sha256:445cd4e4a191e279492549df0c4e827d32f9d01d0852599bb8a6efb0f07fcf78"},
]

[package.dependencies]
@@ -1209,13 +1209,13 @@ yaml = ["pyyaml (>=6.0.1)"]

[[package]]
name = "pyjwt"
version = "2.10.0"
version = "2.9.0"
description = "JSON Web Token implementation in Python"
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
files = [
    {file = "PyJWT-2.10.0-py3-none-any.whl", hash = "sha256:543b77207db656de204372350926bed5a86201c4cbff159f623f79c7bb487a15"},
    {file = "pyjwt-2.10.0.tar.gz", hash = "sha256:7628a7eb7938959ac1b26e819a1df0fd3259505627b575e4bad6d08f76db695c"},
    {file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"},
    {file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"},
]

[package.extras]
@@ -1322,33 +1322,6 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.7.4"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
    {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"},
    {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"},
    {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"},
    {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"},
    {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"},
    {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"},
    {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"},
    {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"},
    {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"},
    {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"},
    {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"},
    {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"},
]

[[package]]
name = "six"
version = "1.16.0"
@@ -1373,18 +1346,19 @@ files = [

[[package]]
name = "storage3"
version = "0.9.0"
version = "0.8.2"
description = "Supabase Storage client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
    {file = "storage3-0.9.0-py3-none-any.whl", hash = "sha256:8b2fb91f0c61583a2f4eac74a8bae67e00d41ff38095c8a6cd3f2ce5e0ab76e7"},
    {file = "storage3-0.9.0.tar.gz", hash = "sha256:e16697f60894c94e1d9df0d2e4af783c1b3f7dd08c9013d61978825c624188c4"},
    {file = "storage3-0.8.2-py3-none-any.whl", hash = "sha256:f2e995b18c77a2a9265d1a33047d43e4d6abb11eb3ca5067959f68281c305de3"},
    {file = "storage3-0.8.2.tar.gz", hash = "sha256:db05d3fe8fb73bd30c814c4c4749664f37a5dfc78b629e8c058ef558c2b89f5a"},
]

[package.dependencies]
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
python-dateutil = ">=2.8.2,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"

[[package]]
name = "strenum"
@@ -1404,32 +1378,32 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]

[[package]]
name = "supabase"
version = "2.10.0"
version = "2.9.1"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
    {file = "supabase-2.10.0-py3-none-any.whl", hash = "sha256:183fb23c04528593f8f81c24ceb8178f3a56bff40fec7ed873b6c55ebc2e420a"},
    {file = "supabase-2.10.0.tar.gz", hash = "sha256:9ac095f8947bf60780e67c0edcbab53e2db3f6f3f022329397b093500bf2607c"},
    {file = "supabase-2.9.1-py3-none-any.whl", hash = "sha256:a96f857a465712cb551679c1df66ba772c834f861756ce4aa2aa4cb703f6aeb7"},
    {file = "supabase-2.9.1.tar.gz", hash = "sha256:51fce39c9eb50573126dabb342541ec5e1f13e7476938768f4b0ccfdb8c522cd"},
]

[package.dependencies]
gotrue = ">=2.10.0,<3.0.0"
gotrue = ">=2.9.0,<3.0.0"
httpx = ">=0.26,<0.28"
postgrest = ">=0.18,<0.19"
postgrest = ">=0.17.0,<0.18.0"
realtime = ">=2.0.0,<3.0.0"
storage3 = ">=0.9.0,<0.10.0"
supafunc = ">=0.7.0,<0.8.0"
storage3 = ">=0.8.0,<0.9.0"
supafunc = ">=0.6.0,<0.7.0"

[[package]]
name = "supafunc"
version = "0.7.0"
version = "0.6.2"
description = "Library for Supabase Functions"
optional = false
python-versions = "<4.0,>=3.9"
files = [
    {file = "supafunc-0.7.0-py3-none-any.whl", hash = "sha256:4160260dc02bdd906be1e2ffd7cb3ae8b74ae437c892bb475352b6a99d9ff8eb"},
    {file = "supafunc-0.7.0.tar.gz", hash = "sha256:5b1c415fba1395740b2b4eedd1d786384bd58b98f6333a11ba7889820a48b6a7"},
    {file = "supafunc-0.6.2-py3-none-any.whl", hash = "sha256:101b30616b0a1ce8cf938eca1df362fa4cf1deacb0271f53ebbd674190fb0da5"},
    {file = "supafunc-0.6.2.tar.gz", hash = "sha256:c7dfa20db7182f7fe4ae436e94e05c06cd7ed98d697fed75d68c7b9792822adc"},
]

[package.dependencies]
@@ -1750,4 +1724,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
content-hash = "48184ad1281689c7743b8ca23135a647dc52257d54702d88b043fe31fe27ff27"
content-hash = "f80654aae542b1f2f3a44a01f197f87ffbaea52f474dd2cc2b72b8d56b155563"
@@ -12,21 +12,14 @@ expiringdict = "^1.2.2"
google-cloud-logging = "^3.11.3"
pydantic = "^2.9.2"
pydantic-settings = "^2.6.1"
pyjwt = "^2.10.0"
pyjwt = "^2.8.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.10.0"
supabase = "^2.9.1"

[tool.poetry.group.dev.dependencies]
redis = "^5.2.0"
ruff = "^0.7.4"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
line-length = 88

[tool.ruff.lint]
extend-select = ["I"] # sort dependencies
@@ -57,7 +57,6 @@ GOOGLE_CLIENT_SECRET=
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=

# Reddit
REDDIT_CLIENT_ID=
@@ -1,4 +1,4 @@
FROM python:3.11.10-slim-bookworm AS builder
FROM python:3.11-slim-buster AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
@@ -35,7 +35,7 @@ COPY autogpt_platform/backend/schema.prisma ./
RUN poetry config virtualenvs.create false \
    && poetry run prisma generate

FROM python:3.11.10-slim-bookworm AS server_dependencies
FROM python:3.11-slim-buster AS server_dependencies

WORKDIR /app
@@ -1,100 +0,0 @@
import logging

from autogpt_libs.utils.cache import thread_cached

from backend.data.block import (
    Block,
    BlockCategory,
    BlockInput,
    BlockOutput,
    BlockSchema,
    BlockType,
    get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField

logger = logging.getLogger(__name__)


@thread_cached
def get_executor_manager_client():
    from backend.executor import ExecutionManager
    from backend.util.service import get_service_client

    return get_service_client(ExecutionManager)


@thread_cached
def get_event_bus():
    from backend.data.queue import RedisExecutionEventBus

    return RedisExecutionEventBus()


class AgentExecutorBlock(Block):
    class Input(BlockSchema):
        user_id: str = SchemaField(description="User ID")
        graph_id: str = SchemaField(description="Graph ID")
        graph_version: int = SchemaField(description="Graph Version")

        data: BlockInput = SchemaField(description="Input data for the graph")
        input_schema: dict = SchemaField(description="Input schema for the graph")
        output_schema: dict = SchemaField(description="Output schema for the graph")

    class Output(BlockSchema):
        pass

    def __init__(self):
        super().__init__(
            id="e189baac-8c20-45a1-94a7-55177ea42565",
            description="Executes an existing agent inside your agent",
            input_schema=AgentExecutorBlock.Input,
            output_schema=AgentExecutorBlock.Output,
            block_type=BlockType.AGENT,
            categories={BlockCategory.AGENT},
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        executor_manager = get_executor_manager_client()
        event_bus = get_event_bus()

        graph_exec = executor_manager.add_execution(
            graph_id=input_data.graph_id,
            graph_version=input_data.graph_version,
            user_id=input_data.user_id,
            data=input_data.data,
        )
        log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
        logger.info(f"Starting execution of {log_id}")

        for event in event_bus.listen(
            graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
        ):
            logger.info(
                f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
            )

            if not event.node_id:
                if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
                    logger.info(f"Execution {log_id} ended with status {event.status}")
                    break
                else:
                    continue

            if not event.block_id:
                logger.warning(f"{log_id} received event without block_id {event}")
                continue

            block = get_block(event.block_id)
            if not block or block.block_type != BlockType.OUTPUT:
                continue

            output_name = event.input_data.get("name")
            if not output_name:
                logger.warning(f"{log_id} produced an output with no name {event}")
                continue

            for output_data in event.output_data.get("output", []):
                logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
                yield output_name, output_data
@@ -1,322 +0,0 @@
from enum import Enum
from typing import Literal

import replicate
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from replicate.helpers import FileOutput

from backend.data.block import Block, BlockCategory, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField


class ImageSize(str, Enum):
    """
    Semantic sizes that map reliably across all models
    """

    SQUARE = "square"  # For profile pictures, icons, etc.
    LANDSCAPE = "landscape"  # For traditional photos, scenes
    PORTRAIT = "portrait"  # For vertical photos, portraits
    WIDE = "wide"  # For cinematic, desktop wallpapers
    TALL = "tall"  # For mobile wallpapers, stories


# Mapping semantic sizes to model-specific formats
SIZE_TO_SD_RATIO = {
    ImageSize.SQUARE: "1:1",
    ImageSize.LANDSCAPE: "4:3",
    ImageSize.PORTRAIT: "3:4",
    ImageSize.WIDE: "16:9",
    ImageSize.TALL: "9:16",
}

SIZE_TO_FLUX_RATIO = {
    ImageSize.SQUARE: "1:1",
    ImageSize.LANDSCAPE: "4:3",
    ImageSize.PORTRAIT: "3:4",
    ImageSize.WIDE: "16:9",
    ImageSize.TALL: "9:16",
}

SIZE_TO_FLUX_DIMENSIONS = {
    ImageSize.SQUARE: (1024, 1024),
    ImageSize.LANDSCAPE: (1365, 1024),
    ImageSize.PORTRAIT: (1024, 1365),
    ImageSize.WIDE: (1440, 810),  # Adjusted to maintain 16:9 within 1440 limit
    ImageSize.TALL: (810, 1440),  # Adjusted to maintain 9:16 within 1440 limit
}

SIZE_TO_RECRAFT_DIMENSIONS = {
    ImageSize.SQUARE: "1024x1024",
    ImageSize.LANDSCAPE: "1365x1024",
    ImageSize.PORTRAIT: "1024x1365",
    ImageSize.WIDE: "1536x1024",
    ImageSize.TALL: "1024x1536",
}


class ImageStyle(str, Enum):
    """
    Complete set of supported styles
    """

    ANY = "any"
    # Realistic image styles
    REALISTIC = "realistic_image"
    REALISTIC_BW = "realistic_image/b_and_w"
    REALISTIC_HDR = "realistic_image/hdr"
    REALISTIC_NATURAL = "realistic_image/natural_light"
    REALISTIC_STUDIO = "realistic_image/studio_portrait"
    REALISTIC_ENTERPRISE = "realistic_image/enterprise"
    REALISTIC_HARD_FLASH = "realistic_image/hard_flash"
    REALISTIC_MOTION_BLUR = "realistic_image/motion_blur"
    # Digital illustration styles
    DIGITAL_ART = "digital_illustration"
    PIXEL_ART = "digital_illustration/pixel_art"
    HAND_DRAWN = "digital_illustration/hand_drawn"
    GRAIN = "digital_illustration/grain"
    SKETCH = "digital_illustration/infantile_sketch"
    POSTER = "digital_illustration/2d_art_poster"
    POSTER_2 = "digital_illustration/2d_art_poster_2"
    HANDMADE_3D = "digital_illustration/handmade_3d"
    HAND_DRAWN_OUTLINE = "digital_illustration/hand_drawn_outline"
    ENGRAVING_COLOR = "digital_illustration/engraving_color"


class ImageGenModel(str, Enum):
    """
    Available model providers
    """

    FLUX = "Flux 1.1 Pro"
    FLUX_ULTRA = "Flux 1.1 Pro Ultra"
    RECRAFT = "Recraft v3"
    SD3_5 = "Stable Diffusion 3.5 Medium"


class AIImageGeneratorBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[Literal["replicate"], Literal["api_key"]] = (
            CredentialsField(
                provider="replicate",
                supported_credential_types={"api_key"},
                description="Enter your Replicate API key to access the image generation API. You can obtain an API key from https://replicate.com/account/api-tokens.",
            )
        )
        prompt: str = SchemaField(
            description="Text prompt for image generation",
            placeholder="e.g., 'A red panda using a laptop in a snowy forest'",
            title="Prompt",
        )
        model: ImageGenModel = SchemaField(
            description="The AI model to use for image generation",
            default=ImageGenModel.SD3_5,
            title="Model",
        )
        size: ImageSize = SchemaField(
            description=(
                "Format of the generated image:\n"
                "- Square: Perfect for profile pictures, icons\n"
                "- Landscape: Traditional photo format\n"
                "- Portrait: Vertical photos, portraits\n"
                "- Wide: Cinematic format, desktop wallpapers\n"
                "- Tall: Mobile wallpapers, social media stories"
            ),
            default=ImageSize.SQUARE,
            title="Image Format",
        )
        style: ImageStyle = SchemaField(
            description="Visual style for the generated image",
            default=ImageStyle.ANY,
            title="Image Style",
        )

    class Output(BlockSchema):
        image_url: str = SchemaField(description="URL of the generated image")
        error: str = SchemaField(description="Error message if generation failed")

    def __init__(self):
        super().__init__(
            id="ed1ae7a0-b770-4089-b520-1f0005fad19a",
            description="Generate images using various AI models through a unified interface",
            categories={BlockCategory.AI},
            input_schema=AIImageGeneratorBlock.Input,
            output_schema=AIImageGeneratorBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "prompt": "An octopus using a laptop in a snowy forest with 'AutoGPT' clearly visible on the screen",
                "model": ImageGenModel.RECRAFT,
                "size": ImageSize.SQUARE,
                "style": ImageStyle.REALISTIC,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "image_url",
                    "https://replicate.delivery/generated-image.webp",
                ),
            ],
            test_mock={
                "_run_client": lambda *args, **kwargs: "https://replicate.delivery/generated-image.webp"
            },
        )

    def _run_client(
        self, credentials: APIKeyCredentials, model_name: str, input_params: dict
    ):
        try:
            # Initialize Replicate client
            client = replicate.Client(api_token=credentials.api_key.get_secret_value())

            # Run the model with input parameters
            output = client.run(model_name, input=input_params, wait=False)

            # Process output
            if isinstance(output, list) and len(output) > 0:
                if isinstance(output[0], FileOutput):
                    result_url = output[0].url
                else:
                    result_url = output[0]
            elif isinstance(output, FileOutput):
                result_url = output.url
            elif isinstance(output, str):
                result_url = output
            else:
                result_url = None

            return result_url

        except TypeError as e:
            raise TypeError(f"Error during model execution: {e}")
        except Exception as e:
            raise RuntimeError(f"Unexpected error during model execution: {e}")

    def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
        try:
            # Handle style-based prompt modification for models without native style support
            modified_prompt = input_data.prompt
            if input_data.model not in [ImageGenModel.RECRAFT]:
                style_prefix = self._style_to_prompt_prefix(input_data.style)
                modified_prompt = f"{style_prefix} {modified_prompt}".strip()

            if input_data.model == ImageGenModel.SD3_5:
                # Use Stable Diffusion 3.5 with aspect ratio
                input_params = {
                    "prompt": modified_prompt,
                    "aspect_ratio": SIZE_TO_SD_RATIO[input_data.size],
                    "output_format": "webp",
                    "output_quality": 90,
                    "steps": 40,
                    "cfg_scale": 7.0,
                }
                output = self._run_client(
                    credentials,
                    "stability-ai/stable-diffusion-3.5-medium",
                    input_params,
                )
                return output

            elif input_data.model == ImageGenModel.FLUX:
                # Use Flux-specific dimensions with 'jpg' format to avoid ReplicateError
                width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size]
                input_params = {
                    "prompt": modified_prompt,
                    "width": width,
                    "height": height,
                    "aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size],
                    "output_format": "jpg",  # Set to jpg for Flux models
                    "output_quality": 90,
                }
                output = self._run_client(
                    credentials, "black-forest-labs/flux-1.1-pro", input_params
                )
                return output

            elif input_data.model == ImageGenModel.FLUX_ULTRA:
                width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size]
                input_params = {
                    "prompt": modified_prompt,
                    "width": width,
                    "height": height,
                    "aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size],
                    "output_format": "jpg",
                    "output_quality": 90,
                }
                output = self._run_client(
                    credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
                )
                return output

            elif input_data.model == ImageGenModel.RECRAFT:
                input_params = {
                    "prompt": input_data.prompt,
                    "size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
                    "style": input_data.style.value,
                }
                output = self._run_client(
                    credentials, "recraft-ai/recraft-v3", input_params
                )
                return output

        except Exception as e:
            raise RuntimeError(f"Failed to generate image: {str(e)}")

    def _style_to_prompt_prefix(self, style: ImageStyle) -> str:
        """
        Convert a style enum to a prompt prefix for models without native style support.
        """
        if style == ImageStyle.ANY:
            return ""

        style_map = {
            ImageStyle.REALISTIC: "photorealistic",
            ImageStyle.REALISTIC_BW: "black and white photograph",
            ImageStyle.REALISTIC_HDR: "HDR photograph",
            ImageStyle.REALISTIC_NATURAL: "natural light photograph",
            ImageStyle.REALISTIC_STUDIO: "studio portrait photograph",
            ImageStyle.REALISTIC_ENTERPRISE: "enterprise photograph",
            ImageStyle.REALISTIC_HARD_FLASH: "hard flash photograph",
            ImageStyle.REALISTIC_MOTION_BLUR: "motion blur photograph",
            ImageStyle.DIGITAL_ART: "digital art",
            ImageStyle.PIXEL_ART: "pixel art",
            ImageStyle.HAND_DRAWN: "hand drawn illustration",
            ImageStyle.GRAIN: "grainy digital illustration",
            ImageStyle.SKETCH: "sketchy illustration",
            ImageStyle.POSTER: "2D art poster",
            ImageStyle.POSTER_2: "alternate 2D art poster",
            ImageStyle.HANDMADE_3D: "handmade 3D illustration",
            ImageStyle.HAND_DRAWN_OUTLINE: "hand drawn outline illustration",
            ImageStyle.ENGRAVING_COLOR: "color engraving illustration",
        }

        style_text = style_map.get(style, "")
        return f"{style_text} of" if style_text else ""

    def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
        try:
            url = self.generate_image(input_data, credentials)
            if url:
                yield "image_url", url
            else:
                yield "error", "Image generation returned an empty result."
        except Exception as e:
            # Capture and return only the message of the exception, avoiding serialization of non-serializable objects
            yield "error", str(e)


# Test credentials stay the same
TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="replicate",
    api_key=SecretStr("mock-replicate-api-key"),
    title="Mock Replicate API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
@@ -1,224 +0,0 @@
import logging
import time
from enum import Enum
from typing import Literal

import replicate
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField

logger = logging.getLogger(__name__)

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="replicate",
    api_key=SecretStr("mock-replicate-api-key"),
    title="Mock Replicate API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.type,
}


# Model version enum
class MusicGenModelVersion(str, Enum):
    STEREO_LARGE = "stereo-large"
    MELODY_LARGE = "melody-large"
    LARGE = "large"


# Audio format enum
class AudioFormat(str, Enum):
    WAV = "wav"
    MP3 = "mp3"


# Normalization strategy enum
class NormalizationStrategy(str, Enum):
    LOUDNESS = "loudness"
    CLIP = "clip"
    PEAK = "peak"
    RMS = "rms"


class AIMusicGeneratorBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[Literal["replicate"], Literal["api_key"]] = (
            CredentialsField(
                provider="replicate",
                supported_credential_types={"api_key"},
                description="The Replicate integration can be used with "
                "any API key with sufficient permissions for the blocks it is used on.",
            )
        )
        prompt: str = SchemaField(
            description="A description of the music you want to generate",
            placeholder="e.g., 'An upbeat electronic dance track with heavy bass'",
            title="Prompt",
        )
        music_gen_model_version: MusicGenModelVersion = SchemaField(
            description="Model to use for generation",
            default=MusicGenModelVersion.STEREO_LARGE,
            title="Model Version",
        )
        duration: int = SchemaField(
            description="Duration of the generated audio in seconds",
            default=8,
            title="Duration",
        )
        temperature: float = SchemaField(
            description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity",
            default=1.0,
            title="Temperature",
        )
        top_k: int = SchemaField(
            description="Reduces sampling to the k most likely tokens",
            default=250,
            title="Top K",
        )
        top_p: float = SchemaField(
            description="Reduces sampling to tokens with cumulative probability of p. When set to 0 (default), top_k sampling is used",
            default=0.0,
            title="Top P",
        )
        classifier_free_guidance: int = SchemaField(
            description="Increases the influence of inputs on the output. Higher values produce lower-variance outputs that adhere more closely to inputs",
            default=3,
            title="Classifier Free Guidance",
        )
        output_format: AudioFormat = SchemaField(
            description="Output format for generated audio",
            default=AudioFormat.WAV,
            title="Output Format",
        )
        normalization_strategy: NormalizationStrategy = SchemaField(
            description="Strategy for normalizing audio",
            default=NormalizationStrategy.LOUDNESS,
            title="Normalization Strategy",
        )

    class Output(BlockSchema):
        result: str = SchemaField(description="URL of the generated audio file")
        error: str = SchemaField(description="Error message if the model run failed")

    def __init__(self):
        super().__init__(
            id="44f6c8ad-d75c-4ae1-8209-aad1c0326928",
            description="This block generates music using Meta's MusicGen model on Replicate.",
            categories={BlockCategory.AI},
            input_schema=AIMusicGeneratorBlock.Input,
            output_schema=AIMusicGeneratorBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "prompt": "An upbeat electronic dance track with heavy bass",
                "music_gen_model_version": MusicGenModelVersion.STEREO_LARGE,
                "duration": 8,
                "temperature": 1.0,
                "top_k": 250,
                "top_p": 0.0,
                "classifier_free_guidance": 3,
                "output_format": AudioFormat.WAV,
                "normalization_strategy": NormalizationStrategy.LOUDNESS,
            },
            test_output=[
                (
                    "result",
                    "https://replicate.com/output/generated-audio-url.wav",
                ),
            ],
            test_mock={
                "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: "https://replicate.com/output/generated-audio-url.wav",
            },
            test_credentials=TEST_CREDENTIALS,
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        max_retries = 3
        retry_delay = 5  # seconds
        last_error = None

        for attempt in range(max_retries):
            try:
                logger.debug(
                    f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
                )
                result = self.run_model(
                    api_key=credentials.api_key,
                    music_gen_model_version=input_data.music_gen_model_version,
                    prompt=input_data.prompt,
                    duration=input_data.duration,
                    temperature=input_data.temperature,
                    top_k=input_data.top_k,
                    top_p=input_data.top_p,
                    classifier_free_guidance=input_data.classifier_free_guidance,
                    output_format=input_data.output_format,
                    normalization_strategy=input_data.normalization_strategy,
                )
                if result and result != "No output received":
                    yield "result", result
                    return
                else:
                    last_error = "Model returned empty or invalid response"
                    raise ValueError(last_error)
            except Exception as e:
                last_error = f"Unexpected error: {str(e)}"
                logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
                if attempt < max_retries - 1:
                    time.sleep(retry_delay)
                    continue

        # If we've exhausted all retries, yield the error
        yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"

    def run_model(
        self,
        api_key: SecretStr,
        music_gen_model_version: MusicGenModelVersion,
        prompt: str,
        duration: int,
        temperature: float,
        top_k: int,
        top_p: float,
        classifier_free_guidance: int,
        output_format: AudioFormat,
        normalization_strategy: NormalizationStrategy,
    ):
        # Initialize Replicate client with the API key
        client = replicate.Client(api_token=api_key.get_secret_value())

        # Run the model with parameters
        output = client.run(
            "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
            input={
                "prompt": prompt,
                "music_gen_model_version": music_gen_model_version,
                "duration": duration,
                "temperature": temperature,
                "top_k": top_k,
                "top_p": top_p,
                "classifier_free_guidance": classifier_free_guidance,
                "output_format": output_format,
                "normalization_strategy": normalization_strategy,
            },
        )

        # Handle the output
        if isinstance(output, list) and len(output) > 0:
            result_url = output[0]  # If output is a list, get the first element
        elif isinstance(output, str):
            result_url = output  # If output is a string, use it directly
        else:
            result_url = (
                "No output received"  # Fallback message if output is not as expected
            )

        return result_url
@@ -3,12 +3,12 @@ import time
from enum import Enum
from typing import Literal

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
@@ -217,6 +217,7 @@ class AIShortformVideoCreatorBlock(Block):
        url = "https://webhook.site/token"
        headers = {"Accept": "application/json", "Content-Type": "application/json"}
        response = requests.post(url, headers=headers)
        response.raise_for_status()
        webhook_data = response.json()
        return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"

@@ -227,12 +228,14 @@ class AIShortformVideoCreatorBlock(Block):
        logger.debug(
            f"API Response Status Code: {response.status_code}, Content: {response.text}"
        )
        response.raise_for_status()
        return response.json()

    def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
        url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
        headers = {"key": api_key.get_secret_value()}
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        return response.json()

    def wait_for_video(
@@ -233,9 +233,7 @@ class AgentOutputBlock(Block):
        )
        name: str = SchemaField(description="The name of the output.")
        title: str | None = SchemaField(
            description="The title of the output.",
            default=None,
            advanced=True,
            description="The title of the input.", default=None, advanced=True
        )
        description: str | None = SchemaField(
            description="The description of the output.",
@@ -264,7 +262,7 @@ class AgentOutputBlock(Block):
    def __init__(self):
        super().__init__(
            id="363ae599-353e-4804-937e-b2ee3cef3da4",
            description="Stores the output of the graph for users to see.",
            description=("Stores the output of the graph for users to see."),
            input_schema=AgentOutputBlock.Input,
            output_schema=AgentOutputBlock.Output,
            test_input=[
@@ -315,26 +313,16 @@ class AgentOutputBlock(Block):

class AddToDictionaryBlock(Block):
    class Input(BlockSchema):
        dictionary: dict[Any, Any] = SchemaField(
            default={},
        dictionary: dict | None = SchemaField(
            default=None,
            description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
            placeholder='{"key1": "value1", "key2": "value2"}',
        )
        key: str = SchemaField(
            default="",
            description="The key for the new entry.",
            placeholder="new_key",
            advanced=False,
            description="The key for the new entry.", placeholder="new_key"
        )
        value: Any = SchemaField(
            default=None,
            description="The value for the new entry.",
            placeholder="new_value",
            advanced=False,
        )
        entries: dict[Any, Any] = SchemaField(
            default={},
            description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
            advanced=True,
            description="The value for the new entry.", placeholder="new_value"
        )

    class Output(BlockSchema):
@@ -357,10 +345,6 @@ class AddToDictionaryBlock(Block):
                    "value": "new_value",
                },
                {"key": "first_key", "value": "first_value"},
                {
                    "dictionary": {"existing_key": "existing_value"},
                    "entries": {"new_key": "new_value", "first_key": "first_value"},
                },
            ],
            test_output=[
                (
@@ -368,49 +352,38 @@ class AddToDictionaryBlock(Block):
                    {"existing_key": "existing_value", "new_key": "new_value"},
                ),
                ("updated_dictionary", {"first_key": "first_value"}),
                (
                    "updated_dictionary",
                    {
                        "existing_key": "existing_value",
                        "new_key": "new_value",
                        "first_key": "first_value",
                    },
                ),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        updated_dict = input_data.dictionary.copy()
        # If no dictionary is provided, create a new one
        if input_data.dictionary is None:
            updated_dict = {}
        else:
            # Create a copy of the input dictionary to avoid modifying the original
            updated_dict = input_data.dictionary.copy()

        if input_data.value is not None and input_data.key:
            updated_dict[input_data.key] = input_data.value

        for key, value in input_data.entries.items():
            updated_dict[key] = value
        # Add the new key-value pair
        updated_dict[input_data.key] = input_data.value

        yield "updated_dictionary", updated_dict


class AddToListBlock(Block):
    class Input(BlockSchema):
        list: List[Any] = SchemaField(
            default=[],
            advanced=False,
        list: List[Any] | None = SchemaField(
            default=None,
            description="The list to add the entry to. If not provided, a new list will be created.",
            placeholder='[1, "string", {"key": "value"}]',
        )
        entry: Any = SchemaField(
            description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
            advanced=False,
            default=None,
        )
        entries: List[Any] = SchemaField(
            default=[],
            description="The entries to add to the list. This is the batch version of the `entry` field.",
            advanced=True,
            placeholder='{"new_key": "new_value"}',
        )
        position: int | None = SchemaField(
            default=None,
            description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
            placeholder="0",
        )

    class Output(BlockSchema):
@@ -434,12 +407,6 @@ class AddToListBlock(Block):
                },
                {"entry": "first_entry"},
                {"list": ["a", "b", "c"], "entry": "d"},
                {
                    "entry": "e",
                    "entries": ["f", "g"],
                    "list": ["a", "b"],
                    "position": 1,
                },
            ],
            test_output=[
                (
@@ -453,20 +420,22 @@ class AddToListBlock(Block):
                ),
                ("updated_list", ["first_entry"]),
                ("updated_list", ["a", "b", "c", "d"]),
                ("updated_list", ["a", "f", "g", "e", "b"]),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        entries_added = input_data.entries.copy()
        if input_data.entry:
            entries_added.append(input_data.entry)

        updated_list = input_data.list.copy()
        if (pos := input_data.position) is not None:
            updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
        # If no list is provided, create a new one
        if input_data.list is None:
            updated_list = []
        else:
            updated_list += entries_added
            # Create a copy of the input list to avoid modifying the original
            updated_list = input_data.list.copy()

        # Add the new entry
        if input_data.position is None:
            updated_list.append(input_data.entry)
        else:
            updated_list.insert(input_data.position, input_data.entry)

        yield "updated_list", updated_list
@@ -1,43 +0,0 @@
from urllib.parse import urlparse

from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests


def _convert_to_api_url(url: str) -> str:
    """
    Converts a standard GitHub URL to the corresponding GitHub API URL.
    Handles repository URLs, issue URLs, pull request URLs, and more.
    """
    parsed_url = urlparse(url)
    path_parts = parsed_url.path.strip("/").split("/")

    if len(path_parts) >= 2:
        owner, repo = path_parts[0], path_parts[1]
        api_base = f"https://api.github.com/repos/{owner}/{repo}"

        if len(path_parts) > 2:
            additional_path = "/".join(path_parts[2:])
            api_url = f"{api_base}/{additional_path}"
        else:
            # Repository base URL
            api_url = api_base
    else:
        raise ValueError("Invalid GitHub URL format.")

    return api_url


def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
    return {
        "Authorization": credentials.bearer(),
        "Accept": "application/vnd.github.v3+json",
    }


def get_api(credentials: GithubCredentials) -> Requests:
    return Requests(
        trusted_origins=["https://api.github.com", "https://github.com"],
        extra_url_validator=_convert_to_api_url,
        extra_headers=_get_headers(credentials),
    )
@@ -1,11 +1,9 @@
from urllib.parse import urlparse

import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -15,10 +13,6 @@ from ._auth import (
)


def is_github_url(url: str) -> bool:
    return urlparse(url).netloc == "github.com"


# --8<-- [start:GithubCommentBlockExample]
class GithubCommentBlock(Block):
    class Input(BlockSchema):
@@ -68,10 +62,27 @@ class GithubCommentBlock(Block):
    def post_comment(
        credentials: GithubCredentials, issue_url: str, body_text: str
    ) -> tuple[int, str]:
        api = get_api(credentials)
        if "/pull/" in issue_url:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos").replace(
                    "/pull/", "/issues/"
                )
                + "/comments"
            )
        else:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos") + "/comments"
            )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        data = {"body": body_text}
        comments_url = issue_url + "/comments"
        response = api.post(comments_url, json=data)

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        comment = response.json()
        return comment["id"], comment["html_url"]

@@ -145,10 +156,16 @@ class GithubMakeIssueBlock(Block):
    def create_issue(
        credentials: GithubCredentials, repo_url: str, title: str, body: str
    ) -> tuple[int, str]:
        api = get_api(credentials)
        api_url = repo_url.replace("github.com", "api.github.com/repos") + "/issues"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        data = {"title": title, "body": body}
        issues_url = repo_url + "/issues"
        response = api.post(issues_url, json=data)

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        issue = response.json()
        return issue["number"], issue["html_url"]

@@ -215,12 +232,21 @@ class GithubReadIssueBlock(Block):
    def read_issue(
        credentials: GithubCredentials, issue_url: str
    ) -> tuple[str, str, str]:
        api = get_api(credentials)
        response = api.get(issue_url)
        api_url = issue_url.replace("github.com", "api.github.com/repos")

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        data = response.json()
        title = data.get("title", "No title found")
        body = data.get("body", "No body content found")
        user = data.get("user", {}).get("login", "No user found")

        return title, body, user

    def run(
@@ -292,13 +318,20 @@ class GithubListIssuesBlock(Block):
    def list_issues(
        credentials: GithubCredentials, repo_url: str
    ) -> list[Output.IssueItem]:
        api = get_api(credentials)
        issues_url = repo_url + "/issues"
        response = api.get(issues_url)
        api_url = repo_url.replace("github.com", "api.github.com/repos") + "/issues"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        data = response.json()
        issues: list[GithubListIssuesBlock.Output.IssueItem] = [
            {"title": issue["title"], "url": issue["html_url"]} for issue in data
        ]

        return issues

    def run(
@@ -352,10 +385,28 @@ class GithubAddLabelBlock(Block):

    @staticmethod
    def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
        api = get_api(credentials)
        # Convert the provided GitHub URL to the API URL
        if "/pull/" in issue_url:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos").replace(
                    "/pull/", "/issues/"
                )
                + "/labels"
            )
        else:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos") + "/labels"
            )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        data = {"labels": [label]}
        labels_url = issue_url + "/labels"
        api.post(labels_url, json=data)

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        return "Label added successfully"

    def run(
@@ -412,9 +463,31 @@ class GithubRemoveLabelBlock(Block):

    @staticmethod
    def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
        api = get_api(credentials)
        label_url = issue_url + f"/labels/{label}"
        api.delete(label_url)
        # Convert the provided GitHub URL to the API URL
        if "/pull/" in issue_url:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos").replace(
                    "/pull/", "/issues/"
                )
                + f"/labels/{label}"
            )
        else:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos")
                + f"/labels/{label}"
            )

        # Log the constructed API URL for debugging
        print(f"Constructed API URL: {api_url}")

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.delete(api_url, headers=headers)
        response.raise_for_status()

        return "Label removed successfully"

    def run(
@@ -477,10 +550,23 @@ class GithubAssignIssueBlock(Block):
        issue_url: str,
        assignee: str,
    ) -> str:
        api = get_api(credentials)
        assignees_url = issue_url + "/assignees"
        # Extracting repo path and issue number from the issue URL
        repo_path, issue_number = issue_url.replace("https://github.com/", "").split(
            "/issues/"
        )
        api_url = (
            f"https://api.github.com/repos/{repo_path}/issues/{issue_number}/assignees"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        data = {"assignees": [assignee]}
        api.post(assignees_url, json=data)

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        return "Issue assigned successfully"

    def run(
@@ -543,10 +629,23 @@ class GithubUnassignIssueBlock(Block):
        issue_url: str,
        assignee: str,
    ) -> str:
        api = get_api(credentials)
        assignees_url = issue_url + "/assignees"
        # Extracting repo path and issue number from the issue URL
        repo_path, issue_number = issue_url.replace("https://github.com/", "").split(
            "/issues/"
        )
        api_url = (
            f"https://api.github.com/repos/{repo_path}/issues/{issue_number}/assignees"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        data = {"assignees": [assignee]}
        api.delete(assignees_url, json=data)

        response = requests.delete(api_url, headers=headers, json=data)
        response.raise_for_status()

        return "Issue unassigned successfully"

    def run(
@@ -1,9 +1,9 @@
import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -64,13 +64,20 @@ class GithubListPullRequestsBlock(Block):

@staticmethod
def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]:
api = get_api(credentials)
pulls_url = repo_url + "/pulls"
response = api.get(pulls_url)
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/pulls"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

data = response.json()
pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [
{"title": pr["title"], "url": pr["html_url"]} for pr in data
]

return pull_requests

def run(
@@ -103,11 +110,7 @@ class GithubMakePullRequestBlock(Block):
placeholder="Enter the pull request body",
)
head: str = SchemaField(
description=(
"The name of the branch where your changes are implemented. "
"For cross-repository pull requests in the same network, "
"namespace head with a user like this: username:branch."
),
description="The name of the branch where your changes are implemented. For cross-repository pull requests in the same network, namespace head with a user like this: username:branch.",
placeholder="Enter the head branch",
)
base: str = SchemaField(
@@ -159,10 +162,17 @@ class GithubMakePullRequestBlock(Block):
head: str,
base: str,
) -> tuple[int, str]:
api = get_api(credentials)
pulls_url = repo_url + "/pulls"
repo_path = repo_url.replace("https://github.com/", "")
api_url = f"https://api.github.com/repos/{repo_path}/pulls"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
data = {"title": title, "body": body, "head": head, "base": base}
response = api.post(pulls_url, json=data)

response = requests.post(api_url, headers=headers, json=data)
response.raise_for_status()

pr_data = response.json()
return pr_data["number"], pr_data["html_url"]

@@ -184,8 +194,13 @@ class GithubMakePullRequestBlock(Block):
)
yield "number", number
yield "url", url
except Exception as e:
yield "error", str(e)
except requests.exceptions.HTTPError as http_err:
if http_err.response.status_code == 422:
error_details = http_err.response.json()
error_message = error_details.get("message", "Unknown error")
else:
error_message = str(http_err)
raise RuntimeError(f"Failed to create pull request: {error_message}")


class GithubReadPullRequestBlock(Block):
@@ -240,21 +255,42 @@ class GithubReadPullRequestBlock(Block):

@staticmethod
def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]:
api = get_api(credentials)
# Adjust the URL to access the issue endpoint for PR metadata
issue_url = pr_url.replace("/pull/", "/issues/")
response = api.get(issue_url)
api_url = pr_url.replace("github.com", "api.github.com/repos").replace(
"/pull/", "/issues/"
)

headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

data = response.json()
title = data.get("title", "No title found")
body = data.get("body", "No body content found")
author = data.get("user", {}).get("login", "No user found")

return title, body, author

@staticmethod
def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
api = get_api(credentials)
files_url = pr_url + "/files"
response = api.get(files_url)
api_url = (
pr_url.replace("github.com", "api.github.com/repos").replace(
"/pull/", "/pulls/"
)
+ "/files"
)

headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

files = response.json()
changes = []
for file in files:
@@ -262,6 +298,7 @@ class GithubReadPullRequestBlock(Block):
patch = file.get("patch")
if filename and patch:
changes.append(f"File: {filename}\n{patch}")

return "\n\n".join(changes)

def run(
@@ -330,10 +367,23 @@ class GithubAssignPRReviewerBlock(Block):
def assign_reviewer(
credentials: GithubCredentials, pr_url: str, reviewer: str
) -> str:
api = get_api(credentials)
reviewers_url = pr_url + "/requested_reviewers"
# Convert the PR URL to the appropriate API endpoint
api_url = (
pr_url.replace("github.com", "api.github.com/repos").replace(
"/pull/", "/pulls/"
)
+ "/requested_reviewers"
)

headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
data = {"reviewers": [reviewer]}
api.post(reviewers_url, json=data)

response = requests.post(api_url, headers=headers, json=data)
response.raise_for_status()

return "Reviewer assigned successfully"

def run(
@@ -350,8 +400,17 @@ class GithubAssignPRReviewerBlock(Block):
input_data.reviewer,
)
yield "status", status
except Exception as e:
yield "error", str(e)
except requests.exceptions.HTTPError as http_err:
if http_err.response.status_code == 422:
error_msg = (
"Failed to assign reviewer: "
f"The reviewer '{input_data.reviewer}' may not have permission "
"or the pull request is not in a valid state. "
f"Detailed error: {http_err.response.text}"
)
else:
error_msg = f"HTTP error: {http_err} - {http_err.response.text}"
raise RuntimeError(error_msg)


class GithubUnassignPRReviewerBlock(Block):
@@ -397,10 +456,21 @@ class GithubUnassignPRReviewerBlock(Block):
def unassign_reviewer(
credentials: GithubCredentials, pr_url: str, reviewer: str
) -> str:
api = get_api(credentials)
reviewers_url = pr_url + "/requested_reviewers"
api_url = (
pr_url.replace("github.com", "api.github.com/repos").replace(
"/pull/", "/pulls/"
)
+ "/requested_reviewers"
)
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
data = {"reviewers": [reviewer]}
api.delete(reviewers_url, json=data)

response = requests.delete(api_url, headers=headers, json=data)
response.raise_for_status()

return "Reviewer unassigned successfully"

def run(
@@ -410,15 +480,12 @@ class GithubUnassignPRReviewerBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
try:
status = self.unassign_reviewer(
credentials,
input_data.pr_url,
input_data.reviewer,
)
yield "status", status
except Exception as e:
yield "error", str(e)
status = self.unassign_reviewer(
credentials,
input_data.pr_url,
input_data.reviewer,
)
yield "status", status


class GithubListPRReviewersBlock(Block):
@@ -477,14 +544,26 @@ class GithubListPRReviewersBlock(Block):
def list_reviewers(
credentials: GithubCredentials, pr_url: str
) -> list[Output.ReviewerItem]:
api = get_api(credentials)
reviewers_url = pr_url + "/requested_reviewers"
response = api.get(reviewers_url)
api_url = (
pr_url.replace("github.com", "api.github.com/repos").replace(
"/pull/", "/pulls/"
)
+ "/requested_reviewers"
)
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

data = response.json()
reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [
{"username": reviewer["login"], "url": reviewer["html_url"]}
for reviewer in data.get("users", [])
]

return reviewers

def run(
@@ -1,11 +1,11 @@
import base64

import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -68,11 +68,17 @@ class GithubListTagsBlock(Block):
def list_tags(
credentials: GithubCredentials, repo_url: str
) -> list[Output.TagItem]:
api = get_api(credentials)
tags_url = repo_url + "/tags"
response = api.get(tags_url)
data = response.json()
repo_path = repo_url.replace("https://github.com/", "")
api_url = f"https://api.github.com/repos/{repo_path}/tags"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

data = response.json()
tags: list[GithubListTagsBlock.Output.TagItem] = [
{
"name": tag["name"],
@@ -80,6 +86,7 @@ class GithubListTagsBlock(Block):
}
for tag in data
]

return tags

def run(
@@ -150,18 +157,20 @@ class GithubListBranchesBlock(Block):
def list_branches(
credentials: GithubCredentials, repo_url: str
) -> list[Output.BranchItem]:
api = get_api(credentials)
branches_url = repo_url + "/branches"
response = api.get(branches_url)
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/branches"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

data = response.json()
repo_path = repo_url.replace("https://github.com/", "")
branches: list[GithubListBranchesBlock.Output.BranchItem] = [
{
"name": branch["name"],
"url": f"https://github.com/{repo_path}/tree/{branch['name']}",
}
for branch in data
{"name": branch["name"], "url": branch["commit"]["url"]} for branch in data
]

return branches

def run(
@@ -237,8 +246,6 @@ class GithubListDiscussionsBlock(Block):
def list_discussions(
credentials: GithubCredentials, repo_url: str, num_discussions: int
) -> list[Output.DiscussionItem]:
api = get_api(credentials)
# GitHub GraphQL API endpoint is different; we'll use api.post with custom URL
repo_path = repo_url.replace("https://github.com/", "")
owner, repo = repo_path.split("/")
query = """
@@ -254,15 +261,24 @@ class GithubListDiscussionsBlock(Block):
}
"""
variables = {"owner": owner, "repo": repo, "num": num_discussions}
response = api.post(
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.post(
"https://api.github.com/graphql",
json={"query": query, "variables": variables},
headers=headers,
)
response.raise_for_status()

data = response.json()
discussions: list[GithubListDiscussionsBlock.Output.DiscussionItem] = [
{"title": discussion["title"], "url": discussion["url"]}
for discussion in data["data"]["repository"]["discussions"]["nodes"]
]

return discussions

def run(
@@ -332,13 +348,21 @@ class GithubListReleasesBlock(Block):
def list_releases(
credentials: GithubCredentials, repo_url: str
) -> list[Output.ReleaseItem]:
api = get_api(credentials)
releases_url = repo_url + "/releases"
response = api.get(releases_url)
repo_path = repo_url.replace("https://github.com/", "")
api_url = f"https://api.github.com/repos/{repo_path}/releases"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

data = response.json()
releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [
{"name": release["name"], "url": release["html_url"]} for release in data
]

return releases

def run(
@@ -408,9 +432,16 @@ class GithubReadFileBlock(Block):
def read_file(
credentials: GithubCredentials, repo_url: str, file_path: str, branch: str
) -> tuple[str, int]:
api = get_api(credentials)
content_url = repo_url + f"/contents/{file_path}?ref={branch}"
response = api.get(content_url)
repo_path = repo_url.replace("https://github.com/", "")
api_url = f"https://api.github.com/repos/{repo_path}/contents/{file_path}?ref={branch}"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

content = response.json()

if isinstance(content, list):
@@ -518,33 +549,46 @@ class GithubReadFolderBlock(Block):
def read_folder(
credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str
) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{folder_path}?ref={branch}"
response = api.get(contents_url)
repo_path = repo_url.replace("https://github.com/", "")
api_url = f"https://api.github.com/repos/{repo_path}/contents/{folder_path}?ref={branch}"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(api_url, headers=headers)
response.raise_for_status()

content = response.json()

if not isinstance(content, list):
if isinstance(content, list):
# Multiple entries of different types exist at this path
if not (dir := next((d for d in content if d["type"] == "dir"), None)):
raise TypeError("Not a folder")
content = dir

if content["type"] != "dir":
raise TypeError("Not a folder")

files = [
GithubReadFolderBlock.Output.FileEntry(
name=entry["name"],
path=entry["path"],
size=entry["size"],
)
for entry in content
if entry["type"] == "file"
]
dirs = [
GithubReadFolderBlock.Output.DirEntry(
name=entry["name"],
path=entry["path"],
)
for entry in content
if entry["type"] == "dir"
]

return files, dirs
return (
[
GithubReadFolderBlock.Output.FileEntry(
name=entry["name"],
path=entry["path"],
size=entry["size"],
)
for entry in content["entries"]
if entry["type"] == "file"
],
[
GithubReadFolderBlock.Output.DirEntry(
name=entry["name"],
path=entry["path"],
)
for entry in content["entries"]
if entry["type"] == "dir"
],
)

def run(
self,
@@ -612,16 +656,26 @@ class GithubMakeBranchBlock(Block):
new_branch: str,
source_branch: str,
) -> str:
api = get_api(credentials)
# Get the SHA of the source branch
ref_url = repo_url + f"/git/refs/heads/{source_branch}"
response = api.get(ref_url)
repo_path = repo_url.replace("https://github.com/", "")
ref_api_url = (
f"https://api.github.com/repos/{repo_path}/git/refs/heads/{source_branch}"
)
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.get(ref_api_url, headers=headers)
response.raise_for_status()

sha = response.json()["object"]["sha"]

# Create the new branch
create_ref_url = repo_url + "/git/refs"
create_branch_api_url = f"https://api.github.com/repos/{repo_path}/git/refs"
data = {"ref": f"refs/heads/{new_branch}", "sha": sha}
response = api.post(create_ref_url, json=data)

response = requests.post(create_branch_api_url, headers=headers, json=data)
response.raise_for_status()

return "Branch created successfully"

def run(
@@ -681,9 +735,16 @@ class GithubDeleteBranchBlock(Block):
def delete_branch(
credentials: GithubCredentials, repo_url: str, branch: str
) -> str:
api = get_api(credentials)
ref_url = repo_url + f"/git/refs/heads/{branch}"
api.delete(ref_url)
repo_path = repo_url.replace("https://github.com/", "")
api_url = f"https://api.github.com/repos/{repo_path}/git/refs/heads/{branch}"
headers = {
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}

response = requests.delete(api_url, headers=headers)
response.raise_for_status()

return "Branch deleted successfully"

def run(
@@ -1,14 +0,0 @@
from typing import Any, Optional

from backend.util.request import requests


class GetRequest:
@classmethod
def get_request(
cls, url: str, headers: Optional[dict] = None, json: bool = False
) -> Any:
if headers is None:
headers = {}
response = requests.get(url, headers=headers)
return response.json() if json else response.text
@@ -1,10 +1,10 @@
import json
from enum import Enum
from typing import Any

import requests

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HttpMethod(Enum):
@@ -31,14 +31,9 @@ class SendWebRequestBlock(Block):
description="The headers to include in the request",
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="Whether to send and receive body as JSON",
default=True,
)
body: Any = SchemaField(
body: object = SchemaField(
description="The body of the request",
default=None,
default={},
)

class Output(BlockSchema):
@@ -63,16 +58,13 @@ class SendWebRequestBlock(Block):
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=input_data.body if input_data.json_format else None,
data=input_data.body if not input_data.json_format else None,
json=input_data.body,
)
result = response.json() if input_data.json_format else response.text

if response.status_code // 100 == 2:
yield "response", result
yield "response", response.json()
elif response.status_code // 100 == 4:
yield "client_error", result
yield "client_error", response.json()
elif response.status_code // 100 == 5:
yield "server_error", result
yield "server_error", response.json()
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
@@ -1,13 +1,12 @@
from enum import Enum
from typing import Any, Dict, Literal, Optional

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from requests.exceptions import RequestException

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -243,8 +242,9 @@ class IdeogramModelBlock(Block):

try:
response = requests.post(url, json=data, headers=headers)
response.raise_for_status()
return response.json()["data"][0]["url"]
except RequestException as e:
except requests.exceptions.RequestException as e:
raise Exception(f"Failed to fetch image: {str(e)}")

def upscale_image(self, api_key: SecretStr, image_url: str):
@@ -256,6 +256,7 @@ class IdeogramModelBlock(Block):
try:
# Step 1: Download the image from the provided URL
image_response = requests.get(image_url)
image_response.raise_for_status()

# Step 2: Send the downloaded image to the upscale API
files = {
@@ -271,7 +272,8 @@ class IdeogramModelBlock(Block):
files=files,
)

response.raise_for_status()
return response.json()["data"][0]["url"]

except RequestException as e:
except requests.exceptions.RequestException as e:
raise Exception(f"Failed to upscale image: {str(e)}")
@@ -2,28 +2,13 @@ from typing import Any

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json


class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
items: list | dict = SchemaField(
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default=[],
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default={},
)
items_str: str = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default="",
)

class Output(BlockSchema):
@@ -54,20 +39,14 @@ class StepThroughItemsBlock(Block):
)

def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
if isinstance(data, str):
items = json.loads(data)
else:
items = data
if isinstance(items, dict):
# If items is a dictionary, iterate over its values
for item in items.values():
yield "item", item
yield "key", item
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
items = input_data.items
if isinstance(items, dict):
# If items is a dictionary, iterate over its values
for item in items.values():
yield "item", item
yield "key", item
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
@@ -11,20 +11,6 @@ JinaCredentialsInput = CredentialsMetaInput[
Literal["api_key"],
]

TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="jina",
api_key=SecretStr("mock-jina-api-key"),
title="Mock Jina API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}


def JinaCredentialsField() -> JinaCredentialsInput:
"""
@@ -1,3 +1,5 @@
import requests

from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
@@ -5,7 +7,6 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class JinaChunkingBlock(Block):
@@ -56,6 +57,7 @@ class JinaChunkingBlock(Block):
}

response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
result = response.json()

all_chunks.extend(result.get("chunks", []))
@@ -1,3 +1,5 @@
import requests

from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
@@ -5,7 +7,6 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class JinaEmbeddingBlock(Block):
@@ -1,57 +0,0 @@
from groq._utils._utils import quote

from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")

class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")

def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)

def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}

# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = self.get_request(jina_search_url, headers=headers, json=False)

# Output the search results
yield "results", results
@@ -30,12 +30,11 @@ logger = logging.getLogger(__name__)
# "ollama": BlockSecret(value=""),
# }

LLMProviderName = Literal["anthropic", "groq", "openai", "ollama", "open_router"]
AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]]
AICredentials = CredentialsMetaInput[Literal["llm"], Literal["api_key"]]

TEST_CREDENTIALS = APIKeyCredentials(
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
provider="openai",
provider="llm",
api_key=SecretStr("mock-openai-api-key"),
title="Mock OpenAI API key",
expires_at=None,
@@ -51,18 +50,15 @@ TEST_CREDENTIALS_INPUT = {
def AICredentialsField() -> AICredentials:
return CredentialsField(
description="API key for the LLM provider.",
provider=["anthropic", "groq", "openai", "ollama", "open_router"],
provider="llm",
supported_credential_types={"api_key"},
discriminator="model",
discriminator_mapping={
model.value: model.metadata.provider for model in LlmModel
},
)


class ModelMetadata(NamedTuple):
provider: str
context_window: int
cost_factor: int


class LlmModelMeta(EnumMeta):
@@ -108,18 +104,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
# Ollama models
OLLAMA_LLAMA3_8B = "llama3"
OLLAMA_LLAMA3_405B = "llama3.1:405b"
# OpenRouter models
GEMINI_FLASH_1_5_8B = "google/gemini-flash-1.5"
GEMINI_FLASH_1_5_EXP = "google/gemini-flash-1.5-exp"
GROK_BETA = "x-ai/grok-beta"
MISTRAL_NEMO = "mistralai/mistral-nemo"
COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
COHERE_COMMAND_R_PLUS_08_2024 = "cohere/command-r-plus-08-2024"
EVA_QWEN_2_5_32B = "eva-unit-01/eva-qwen-2.5-32b"
DEEPSEEK_CHAT = "deepseek/deepseek-chat"
PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE = (
"perplexity/llama-3.1-sonar-large-128k-online"
)

@property
def metadata(self) -> ModelMetadata:
@@ -133,38 +117,31 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
def context_window(self) -> int:
return self.metadata.context_window

@property
def cost_factor(self) -> int:
return self.metadata.cost_factor


MODEL_METADATA = {
LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000),
LlmModel.O1_MINI: ModelMetadata("openai", 62000),
LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000),
LlmModel.GPT4O: ModelMetadata("openai", 128000),
LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000),
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385),
LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000),
LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000),
LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192),
LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192),
LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768),
LlmModel.GEMMA_7B: ModelMetadata("groq", 8192),
LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192),
LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192),
LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=16),
LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=4),
LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=1),
LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=3),
LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=10),
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=1),
LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=4),
LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=1),
LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=1),
LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=1),
LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=1),
LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=1),
LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=1),
LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=1),
# Limited to 16k during preview
LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072),
LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072),
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192),
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192),
LlmModel.GEMINI_FLASH_1_5_8B: ModelMetadata("open_router", 8192),
LlmModel.GEMINI_FLASH_1_5_EXP: ModelMetadata("open_router", 8192),
LlmModel.GROK_BETA: ModelMetadata("open_router", 8192),
LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 4000),
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 4000),
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 4000),
LlmModel.EVA_QWEN_2_5_32B: ModelMetadata("open_router", 4000),
LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 8192),
LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: ModelMetadata(
"open_router", 8192
),
LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=1),
LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=1),
LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=1),
LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=1),
}

for model in LlmModel:
@@ -377,34 +354,6 @@ class AIStructuredResponseGeneratorBlock(Block):
response.get("prompt_eval_count") or 0,
response.get("eval_count") or 0,
)
elif provider == "open_router":
client = openai.OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=credentials.api_key.get_secret_value(),
)

response = client.chat.completions.create(
extra_headers={
"HTTP-Referer": "https://agpt.co",
"X-Title": "AutoGPT",
},
model=llm_model.value,
messages=prompt,  # type: ignore
max_tokens=max_tokens,
)

# If there's no response, raise an error
if not response.choices:
if response:
raise ValueError(f"OpenRouter error: {response}")
else:
raise ValueError("No response from OpenRouter.")

return (
response.choices[0].message.content or "",
response.usage.prompt_tokens if response.usage else 0,
response.usage.completion_tokens if response.usage else 0,
)
else:
raise ValueError(f"Unsupported LLM provider: {provider}")
@@ -1,6 +1,7 @@
from enum import Enum
from typing import List, Literal

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

@@ -12,7 +13,6 @@ from backend.data.model import (
SchemaField,
SecretField,
)
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -1,5 +1,4 @@
import uuid
from typing import Any, Literal
from typing import Literal

from autogpt_libs.supabase_integration_credentials_store import APIKeyCredentials
from pinecone import Pinecone, ServerlessSpec
@@ -99,14 +98,10 @@ class PineconeQueryBlock(Block):
include_metadata: bool = SchemaField(
description="Whether to include metadata in the response", default=True
)
host: str = SchemaField(description="Host for pinecone", default="")
idx_name: str = SchemaField(description="Index name for pinecone")
host: str = SchemaField(description="Host for pinecone")

class Output(BlockSchema):
results: Any = SchemaField(description="Query results from Pinecone")
combined_results: Any = SchemaField(
description="Combined results from Pinecone"
)
results: dict = SchemaField(description="Query results from Pinecone")

def __init__(self):
super().__init__(
@@ -124,105 +119,13 @@ class PineconeQueryBlock(Block):
credentials: APIKeyCredentials,
**kwargs,
) -> BlockOutput:
try:
# Create a new client instance
pc = Pinecone(api_key=credentials.api_key.get_secret_value())

# Get the index
idx = pc.Index(input_data.idx_name)

# Ensure query_vector is in correct format
query_vector = input_data.query_vector
if isinstance(query_vector, list) and len(query_vector) > 0:
if isinstance(query_vector[0], list):
query_vector = query_vector[0]

results = idx.query(
namespace=input_data.namespace,
vector=query_vector,
top_k=input_data.top_k,
include_values=input_data.include_values,
include_metadata=input_data.include_metadata,
).to_dict()
combined_text = ""
if results["matches"]:
texts = [
match["metadata"]["text"]
for match in results["matches"]
if match.get("metadata", {}).get("text")
]
combined_text = "\n\n".join(texts)

# Return both the raw matches and combined text
yield "results", {
"matches": results["matches"],
"combined_text": combined_text,
}
yield "combined_results", combined_text

except Exception as e:
error_msg = f"Error querying Pinecone: {str(e)}"
raise RuntimeError(error_msg) from e


class PineconeInsertBlock(Block):
class Input(BlockSchema):
credentials: PineconeCredentialsInput = PineconeCredentialsField()
index: str = SchemaField(description="Initialized Pinecone index")
chunks: list = SchemaField(description="List of text chunks to ingest")
embeddings: list = SchemaField(
description="List of embeddings corresponding to the chunks"
pc = Pinecone(api_key=credentials.api_key.get_secret_value())
idx = pc.Index(host=input_data.host)
results = idx.query(
namespace=input_data.namespace,
vector=input_data.query_vector,
top_k=input_data.top_k,
include_values=input_data.include_values,
include_metadata=input_data.include_metadata,
)
namespace: str = SchemaField(
description="Namespace to use in Pinecone", default=""
)
metadata: dict = SchemaField(
description="Additional metadata to store with each vector", default={}
)

class Output(BlockSchema):
upsert_response: str = SchemaField(
description="Response from Pinecone upsert operation"
)

def __init__(self):
super().__init__(
id="477f2168-cd91-475a-8146-9499a5982434",
description="Upload data to a Pinecone index",
categories={BlockCategory.LOGIC},
input_schema=PineconeInsertBlock.Input,
output_schema=PineconeInsertBlock.Output,
)

def run(
self,
input_data: Input,
*,
credentials: APIKeyCredentials,
**kwargs,
) -> BlockOutput:
try:
# Create a new client instance
pc = Pinecone(api_key=credentials.api_key.get_secret_value())

# Get the index
idx = pc.Index(input_data.index)

vectors = []
for chunk, embedding in zip(input_data.chunks, input_data.embeddings):
vector_metadata = input_data.metadata.copy()
vector_metadata["text"] = chunk
vectors.append(
{
"id": str(uuid.uuid4()),
"values": embedding,
"metadata": vector_metadata,
}
)
idx.upsert(vectors=vectors, namespace=input_data.namespace)

yield "upsert_response", "successfully upserted"

except Exception as e:
error_msg = f"Error uploading to Pinecone: {str(e)}"
raise RuntimeError(error_msg) from e
yield "results", results
@@ -1,14 +1,22 @@
from typing import Literal
from typing import Any, Literal
from urllib.parse import quote

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

from backend.blocks.helpers.http import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField


class GetRequest:
@classmethod
def get_request(cls, url: str, json=False) -> Any:
response = requests.get(url)
response.raise_for_status()
return response.json() if json else response.text


class GetWikipediaSummaryBlock(Block, GetRequest):
class Input(BlockSchema):
topic: str = SchemaField(description="The topic to fetch the summary for")
@@ -40,6 +48,42 @@ class GetWikipediaSummaryBlock(Block, GetRequest):
yield "summary", response["extract"]


class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
query: str = SchemaField(description="The search query to search the web for")

class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")

def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={"query": "Artificial Intelligence"},
test_output=("results", "search content"),
test_mock={"get_request": lambda url, json: "search content"},
)

def run(self, input_data: Input, **kwargs) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)

# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"

# Make the request to Jina Search
response = self.get_request(jina_search_url, json=False)

# Output the search results
yield "results", response


class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
url: str = SchemaField(description="The URL to scrape the content from")
@@ -1,12 +1,12 @@
import time
from typing import Literal

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -118,6 +118,7 @@ class CreateTalkingAvatarVideoBlock(Block):
"authorization": f"Basic {api_key.get_secret_value()}",
}
response = requests.post(url, json=payload, headers=headers)
response.raise_for_status()
return response.json()

def get_clip_status(self, api_key: SecretStr, clip_id: str) -> dict:
@@ -127,6 +128,7 @@ class CreateTalkingAvatarVideoBlock(Block):
"authorization": f"Basic {api_key.get_secret_value()}",
}
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json()

def run(
@@ -1,11 +1,11 @@
from typing import Any, Literal

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -86,6 +86,7 @@ class UnrealTextToSpeechBlock(Block):
}

response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
return response.json()

def run(
@@ -140,25 +140,20 @@ class GetCurrentDateAndTimeBlock(Block):
class CountdownTimerBlock(Block):
class Input(BlockSchema):
input_message: Any = SchemaField(
advanced=False,
description="Message to output after the timer finishes",
default="timer finished",
)
seconds: Union[int, str] = SchemaField(
advanced=False, description="Duration in seconds", default=0
description="Duration in seconds", default=0
)
minutes: Union[int, str] = SchemaField(
advanced=False, description="Duration in minutes", default=0
)
hours: Union[int, str] = SchemaField(
advanced=False, description="Duration in hours", default=0
)
days: Union[int, str] = SchemaField(
advanced=False, description="Duration in days", default=0
description="Duration in minutes", default=0
)
hours: Union[int, str] = SchemaField(description="Duration in hours", default=0)
days: Union[int, str] = SchemaField(description="Duration in days", default=0)

class Output(BlockSchema):
output_message: Any = SchemaField(
output_message: str = SchemaField(
description="Message after the timer finishes"
)
@@ -62,22 +62,7 @@ class TranscribeYoutubeVideoBlock(Block):

@staticmethod
def get_transcript(video_id: str):
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)

if not transcript_list:
raise ValueError(f"No transcripts found for the video: {video_id}")

for transcript in transcript_list:
first_transcript = transcript_list.find_transcript(
[transcript.language_code]
)
return YouTubeTranscriptApi.get_transcript(
video_id, languages=[first_transcript.language_code]
)

except Exception:
raise ValueError(f"No transcripts found for the video: {video_id}")
return YouTubeTranscriptApi.get_transcript(video_id)

def run(self, input_data: Input, **kwargs) -> BlockOutput:
video_id = self.extract_video_id(input_data.youtube_url)
@@ -1,325 +0,0 @@
import logging
import uuid
from datetime import datetime, timezone
from typing import List, Optional

from autogpt_libs.api_key.key_manager import APIKeyManager
from prisma.enums import APIKeyPermission, APIKeyStatus
from prisma.errors import PrismaError
from prisma.models import APIKey as PrismaAPIKey
from prisma.types import (
APIKeyCreateInput,
APIKeyUpdateInput,
APIKeyWhereInput,
APIKeyWhereUniqueInput,
)
from pydantic import BaseModel

from backend.data.db import BaseDbModel

logger = logging.getLogger(__name__)


# Some basic exceptions
class APIKeyError(Exception):
"""Base exception for API key operations"""

pass


class APIKeyNotFoundError(APIKeyError):
"""Raised when an API key is not found"""

pass


class APIKeyPermissionError(APIKeyError):
"""Raised when there are permission issues with API key operations"""

pass


class APIKeyValidationError(APIKeyError):
"""Raised when API key validation fails"""

pass


class APIKey(BaseDbModel):
name: str
prefix: str
key: str
status: APIKeyStatus = APIKeyStatus.ACTIVE
permissions: List[APIKeyPermission]
postfix: str
created_at: datetime
last_used_at: Optional[datetime] = None
revoked_at: Optional[datetime] = None
description: Optional[str] = None
user_id: str

@staticmethod
def from_db(api_key: PrismaAPIKey):
try:
return APIKey(
id=api_key.id,
name=api_key.name,
prefix=api_key.prefix,
postfix=api_key.postfix,
key=api_key.key,
status=APIKeyStatus(api_key.status),
permissions=[APIKeyPermission(p) for p in api_key.permissions],
created_at=api_key.createdAt,
last_used_at=api_key.lastUsedAt,
revoked_at=api_key.revokedAt,
description=api_key.description,
user_id=api_key.userId,
)
except Exception as e:
logger.error(f"Error creating APIKey from db: {str(e)}")
raise APIKeyError(f"Failed to create API key object: {str(e)}")


class APIKeyWithoutHash(BaseModel):
id: str
name: str
prefix: str
postfix: str
status: APIKeyStatus
permissions: List[APIKeyPermission]
created_at: datetime
last_used_at: Optional[datetime]
revoked_at: Optional[datetime]
description: Optional[str]
user_id: str

@staticmethod
def from_db(api_key: PrismaAPIKey):
try:
return APIKeyWithoutHash(
id=api_key.id,
name=api_key.name,
prefix=api_key.prefix,
postfix=api_key.postfix,
status=APIKeyStatus(api_key.status),
permissions=[APIKeyPermission(p) for p in api_key.permissions],
created_at=api_key.createdAt,
last_used_at=api_key.lastUsedAt,
revoked_at=api_key.revokedAt,
description=api_key.description,
user_id=api_key.userId,
)
except Exception as e:
logger.error(f"Error creating APIKeyWithoutHash from db: {str(e)}")
raise APIKeyError(f"Failed to create API key object: {str(e)}")


async def generate_api_key(
name: str,
user_id: str,
permissions: List[APIKeyPermission],
description: Optional[str] = None,
) -> tuple[APIKeyWithoutHash, str]:
"""
Generate a new API key and store it in the database.
Returns the API key object (without hash) and the plain text key.
"""
try:
api_manager = APIKeyManager()
key = api_manager.generate_api_key()

api_key = await PrismaAPIKey.prisma().create(
data=APIKeyCreateInput(
id=str(uuid.uuid4()),
name=name,
prefix=key.prefix,
postfix=key.postfix,
key=key.hash,
permissions=[p for p in permissions],
description=description,
userId=user_id,
)
)

api_key_without_hash = APIKeyWithoutHash.from_db(api_key)
return api_key_without_hash, key.raw
except PrismaError as e:
logger.error(f"Database error while generating API key: {str(e)}")
raise APIKeyError(f"Failed to generate API key: {str(e)}")
except Exception as e:
logger.error(f"Unexpected error while generating API key: {str(e)}")
raise APIKeyError(f"Failed to generate API key: {str(e)}")


async def validate_api_key(plain_text_key: str) -> Optional[APIKey]:
"""
Validate an API key and return the API key object if valid.
"""
try:
if not plain_text_key.startswith(APIKeyManager.PREFIX):
logger.warning("Invalid API key format")
return None

prefix = plain_text_key[: APIKeyManager.PREFIX_LENGTH]
api_manager = APIKeyManager()

api_key = await PrismaAPIKey.prisma().find_first(
where=APIKeyWhereInput(prefix=prefix, status=(APIKeyStatus.ACTIVE))
)

if not api_key:
logger.warning(f"No active API key found with prefix {prefix}")
return None

is_valid = api_manager.verify_api_key(plain_text_key, api_key.key)
if not is_valid:
logger.warning("API key verification failed")
return None

return APIKey.from_db(api_key)
except Exception as e:
logger.error(f"Error validating API key: {str(e)}")
raise APIKeyValidationError(f"Failed to validate API key: {str(e)}")


async def revoke_api_key(key_id: str, user_id: str) -> Optional[APIKeyWithoutHash]:
try:
api_key = await PrismaAPIKey.prisma().find_unique(where={"id": key_id})

if not api_key:
raise APIKeyNotFoundError(f"API key with id {key_id} not found")

if api_key.userId != user_id:
raise APIKeyPermissionError(
"You do not have permission to revoke this API key."
)

where_clause: APIKeyWhereUniqueInput = {"id": key_id}
updated_api_key = await PrismaAPIKey.prisma().update(
where=where_clause,
data=APIKeyUpdateInput(
status=APIKeyStatus.REVOKED, revokedAt=datetime.now(timezone.utc)
),
)

if updated_api_key:
return APIKeyWithoutHash.from_db(updated_api_key)
return None
except (APIKeyNotFoundError, APIKeyPermissionError) as e:
raise e
except PrismaError as e:
logger.error(f"Database error while revoking API key: {str(e)}")
raise APIKeyError(f"Failed to revoke API key: {str(e)}")
except Exception as e:
logger.error(f"Unexpected error while revoking API key: {str(e)}")
raise APIKeyError(f"Failed to revoke API key: {str(e)}")


async def list_user_api_keys(user_id: str) -> List[APIKeyWithoutHash]:
try:
where_clause: APIKeyWhereInput = {"userId": user_id}

api_keys = await PrismaAPIKey.prisma().find_many(
where=where_clause, order={"createdAt": "desc"}
)

return [APIKeyWithoutHash.from_db(key) for key in api_keys]
except PrismaError as e:
logger.error(f"Database error while listing API keys: {str(e)}")
raise APIKeyError(f"Failed to list API keys: {str(e)}")
except Exception as e:
logger.error(f"Unexpected error while listing API keys: {str(e)}")
raise APIKeyError(f"Failed to list API keys: {str(e)}")


async def suspend_api_key(key_id: str, user_id: str) -> Optional[APIKeyWithoutHash]:
try:
api_key = await PrismaAPIKey.prisma().find_unique(where={"id": key_id})

if not api_key:
raise APIKeyNotFoundError(f"API key with id {key_id} not found")

if api_key.userId != user_id:
raise APIKeyPermissionError(
"You do not have permission to suspend this API key."
)

where_clause: APIKeyWhereUniqueInput = {"id": key_id}
updated_api_key = await PrismaAPIKey.prisma().update(
where=where_clause,
data=APIKeyUpdateInput(status=APIKeyStatus.SUSPENDED),
)

if updated_api_key:
return APIKeyWithoutHash.from_db(updated_api_key)
return None
except (APIKeyNotFoundError, APIKeyPermissionError) as e:
raise e
except PrismaError as e:
logger.error(f"Database error while suspending API key: {str(e)}")
raise APIKeyError(f"Failed to suspend API key: {str(e)}")
except Exception as e:
logger.error(f"Unexpected error while suspending API key: {str(e)}")
raise APIKeyError(f"Failed to suspend API key: {str(e)}")


def has_permission(api_key: APIKey, required_permission: APIKeyPermission) -> bool:
try:
return required_permission in api_key.permissions
except Exception as e:
logger.error(f"Error checking API key permissions: {str(e)}")
return False


async def get_api_key_by_id(key_id: str, user_id: str) -> Optional[APIKeyWithoutHash]:
try:
api_key = await PrismaAPIKey.prisma().find_first(
where=APIKeyWhereInput(id=key_id, userId=user_id)
)

if not api_key:
return None

return APIKeyWithoutHash.from_db(api_key)
except PrismaError as e:
logger.error(f"Database error while getting API key: {str(e)}")
raise APIKeyError(f"Failed to get API key: {str(e)}")
except Exception as e:
logger.error(f"Unexpected error while getting API key: {str(e)}")
raise APIKeyError(f"Failed to get API key: {str(e)}")


async def update_api_key_permissions(
key_id: str, user_id: str, permissions: List[APIKeyPermission]
) -> Optional[APIKeyWithoutHash]:
"""
Update the permissions of an API key.
"""
try:
api_key = await PrismaAPIKey.prisma().find_unique(where={"id": key_id})

if api_key is None:
raise APIKeyNotFoundError("No such API key found.")

if api_key.userId != user_id:
raise APIKeyPermissionError(
"You do not have permission to update this API key."
)

where_clause: APIKeyWhereUniqueInput = {"id": key_id}
updated_api_key = await PrismaAPIKey.prisma().update(
where=where_clause,
data=APIKeyUpdateInput(permissions=permissions),
)

if updated_api_key:
return APIKeyWithoutHash.from_db(updated_api_key)
return None
except (APIKeyNotFoundError, APIKeyPermissionError) as e:
raise e
except PrismaError as e:
logger.error(f"Database error while updating API key permissions: {str(e)}")
raise APIKeyError(f"Failed to update API key permissions: {str(e)}")
except Exception as e:
logger.error(f"Unexpected error while updating API key permissions: {str(e)}")
raise APIKeyError(f"Failed to update API key permissions: {str(e)}")
@@ -34,7 +34,6 @@ class BlockType(Enum):
INPUT = "Input"
OUTPUT = "Output"
NOTE = "Note"
AGENT = "Agent"


class BlockCategory(Enum):
@@ -49,7 +48,6 @@ class BlockCategory(Enum):
COMMUNICATION = "Block that interacts with communication platforms."
DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
DATA = "Block that interacts with structured data."
AGENT = "Block that interacts with other agents."

def dict(self) -> dict[str, str]:
return {"category": self.name, "description": self.value}
@@ -301,9 +299,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
):
if output_name == "error":
raise RuntimeError(output_data)
if self.block_type == BlockType.STANDARD and (
error := self.output_schema.validate_field(output_name, output_data)
):
if error := self.output_schema.validate_field(output_name, output_data):
raise ValueError(f"Block produced an invalid output data: {error}")
yield output_name, output_data
@@ -1,263 +0,0 @@
from typing import Type

from autogpt_libs.supabase_integration_credentials_store.store import (
    anthropic_credentials,
    did_credentials,
    groq_credentials,
    ideogram_credentials,
    jina_credentials,
    open_router_credentials,
    openai_credentials,
    replicate_credentials,
    revid_credentials,
    unreal_credentials,
)

from backend.blocks.ai_music_generator import AIMusicGeneratorBlock
from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock
from backend.blocks.ideogram import IdeogramModelBlock
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
from backend.blocks.jina.search import SearchTheWebBlock
from backend.blocks.llm import (
    MODEL_METADATA,
    AIConversationBlock,
    AIListGeneratorBlock,
    AIStructuredResponseGeneratorBlock,
    AITextGeneratorBlock,
    AITextSummarizerBlock,
    LlmModel,
)
from backend.blocks.replicate_flux_advanced import ReplicateFluxAdvancedModelBlock
from backend.blocks.search import ExtractWebsiteContentBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.blocks.text_to_speech_block import UnrealTextToSpeechBlock
from backend.data.block import Block
from backend.data.cost import BlockCost, BlockCostType

# =============== Configure the cost for each LLM Model call =============== #

MODEL_COST: dict[LlmModel, int] = {
    LlmModel.O1_PREVIEW: 16,
    LlmModel.O1_MINI: 4,
    LlmModel.GPT4O_MINI: 1,
    LlmModel.GPT4O: 3,
    LlmModel.GPT4_TURBO: 10,
    LlmModel.GPT3_5_TURBO: 1,
    LlmModel.CLAUDE_3_5_SONNET: 4,
    LlmModel.CLAUDE_3_HAIKU: 1,
    LlmModel.LLAMA3_8B: 1,
    LlmModel.LLAMA3_70B: 1,
    LlmModel.MIXTRAL_8X7B: 1,
    LlmModel.GEMMA_7B: 1,
    LlmModel.GEMMA2_9B: 1,
    LlmModel.LLAMA3_1_405B: 1,
    LlmModel.LLAMA3_1_70B: 1,
    LlmModel.LLAMA3_1_8B: 1,
    LlmModel.OLLAMA_LLAMA3_8B: 1,
    LlmModel.OLLAMA_LLAMA3_405B: 1,
    LlmModel.GEMINI_FLASH_1_5_8B: 1,
    LlmModel.GEMINI_FLASH_1_5_EXP: 1,
    LlmModel.GROK_BETA: 5,
    LlmModel.MISTRAL_NEMO: 1,
    LlmModel.COHERE_COMMAND_R_08_2024: 1,
    LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,
    LlmModel.EVA_QWEN_2_5_32B: 1,
    LlmModel.DEEPSEEK_CHAT: 2,
    LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: 1,
}

for model in LlmModel:
    if model not in MODEL_COST:
        raise ValueError(f"Missing MODEL_COST for model: {model}")


LLM_COST = (
    [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "api_key": None,  # Running LLM with user own API key is free.
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
    ]
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": anthropic_credentials.id,
                    "provider": anthropic_credentials.provider,
                    "type": anthropic_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "anthropic"
    ]
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": openai_credentials.id,
                    "provider": openai_credentials.provider,
                    "type": openai_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "openai"
    ]
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {"id": groq_credentials.id},
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "groq"
    ]
    + [
        BlockCost(
            # Default cost is running LlmModel.GPT4O.
            cost_amount=MODEL_COST[LlmModel.GPT4O],
            cost_filter={"api_key": None},
        ),
    ]
    # Open Router Models
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": open_router_credentials.id,
                    "provider": open_router_credentials.provider,
                    "type": open_router_credentials.type,
                },
            },
            cost_amount=cost,
        )
        for model, cost in MODEL_COST.items()
        if MODEL_METADATA[model].provider == "open_router"
    ]
)

# =============== This is the exhaustive list of cost for each Block =============== #

BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
    AIConversationBlock: LLM_COST,
    AITextGeneratorBlock: LLM_COST,
    AIStructuredResponseGeneratorBlock: LLM_COST,
    AITextSummarizerBlock: LLM_COST,
    AIListGeneratorBlock: LLM_COST,
    CreateTalkingAvatarVideoBlock: [
        BlockCost(
            cost_amount=15,
            cost_filter={
                "credentials": {
                    "id": did_credentials.id,
                    "provider": did_credentials.provider,
                    "type": did_credentials.type,
                }
            },
        )
    ],
    SearchTheWebBlock: [
        BlockCost(
            cost_amount=1,
            cost_filter={
                "credentials": {
                    "id": jina_credentials.id,
                    "provider": jina_credentials.provider,
                    "type": jina_credentials.type,
                }
            },
        )
    ],
    ExtractWebsiteContentBlock: [
        BlockCost(cost_amount=1, cost_filter={"raw_content": False})
    ],
    IdeogramModelBlock: [
        BlockCost(
            cost_amount=16,
            cost_filter={
                "credentials": {
                    "id": ideogram_credentials.id,
                    "provider": ideogram_credentials.provider,
                    "type": ideogram_credentials.type,
                }
            },
        )
    ],
    AIShortformVideoCreatorBlock: [
        BlockCost(
            cost_amount=50,
            cost_filter={
                "credentials": {
                    "id": revid_credentials.id,
                    "provider": revid_credentials.provider,
                    "type": revid_credentials.type,
                }
            },
        )
    ],
    ReplicateFluxAdvancedModelBlock: [
        BlockCost(
            cost_amount=10,
            cost_filter={
                "credentials": {
                    "id": replicate_credentials.id,
                    "provider": replicate_credentials.provider,
                    "type": replicate_credentials.type,
                }
            },
        )
    ],
    AIMusicGeneratorBlock: [
        BlockCost(
            cost_amount=11,
            cost_filter={
                "credentials": {
                    "id": replicate_credentials.id,
                    "provider": replicate_credentials.provider,
                    "type": replicate_credentials.type,
                }
            },
        )
    ],
    JinaEmbeddingBlock: [
        BlockCost(
            cost_amount=12,
            cost_filter={
                "credentials": {
                    "id": jina_credentials.id,
                    "provider": jina_credentials.provider,
                    "type": jina_credentials.type,
                }
            },
        )
    ],
    UnrealTextToSpeechBlock: [
        BlockCost(
            cost_amount=5,
            cost_filter={
                "credentials": {
                    "id": unreal_credentials.id,
                    "provider": unreal_credentials.provider,
                    "type": unreal_credentials.type,
                }
            },
        )
    ],
}
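`BLOCK_COSTS` maps each block class to a list of `BlockCost` entries, and a cost applies only when its `cost_filter` matches the block's input data. The actual matching code lives in the credit module and is not part of this diff, so the following is only a sketch under the assumption that matching is a recursive subset check (note how `"api_key": None` then matches an absent key):

```python
# Sketch of how a cost_filter might be matched against a block's input data
# to pick the applicable BlockCost. Assumption: the filter is a recursive
# subset of the input; the real logic may differ.
def filter_matches(cost_filter: dict, input_data: dict) -> bool:
    for key, expected in cost_filter.items():
        actual = input_data.get(key)  # absent keys read as None
        if isinstance(expected, dict):
            if not isinstance(actual, dict) or not filter_matches(expected, actual):
                return False
        elif actual != expected:
            return False
    return True
```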
@@ -1,32 +0,0 @@
from enum import Enum
from typing import Any, Optional

from pydantic import BaseModel

from backend.data.block import BlockInput


class BlockCostType(str, Enum):
    RUN = "run"  # cost X credits per run
    BYTE = "byte"  # cost X credits per byte
    SECOND = "second"  # cost X credits per second


class BlockCost(BaseModel):
    cost_amount: int
    cost_filter: BlockInput
    cost_type: BlockCostType

    def __init__(
        self,
        cost_amount: int,
        cost_type: BlockCostType = BlockCostType.RUN,
        cost_filter: Optional[BlockInput] = None,
        **data: Any,
    ) -> None:
        super().__init__(
            cost_amount=cost_amount,
            cost_filter=cost_filter or {},
            cost_type=cost_type,
            **data,
        )
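The custom `__init__` above mostly supplies ergonomic defaults: a positional amount, `RUN` as the default cost type, and an empty filter. A small sketch of what that enables, relying only on the definitions shown above:

```python
# Sketch only: uses the BlockCost/BlockCostType definitions from the diff above.
cost = BlockCost(5)
assert cost.cost_type == BlockCostType.RUN
assert cost.cost_filter == {}

# Equivalent fully explicit construction:
cost = BlockCost(cost_amount=5, cost_type=BlockCostType.RUN, cost_filter={})
```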
@@ -1,17 +1,191 @@
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Optional, Type

import prisma.errors
from autogpt_libs.supabase_integration_credentials_store.store import (
    anthropic_credentials,
    did_credentials,
    groq_credentials,
    ideogram_credentials,
    openai_credentials,
    replicate_credentials,
    revid_credentials,
)
from prisma import Json
from prisma.enums import UserBlockCreditType
from prisma.errors import UniqueViolationError
from prisma.models import UserBlockCredit
from pydantic import BaseModel

from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock
from backend.blocks.ideogram import IdeogramModelBlock
from backend.blocks.llm import (
    MODEL_METADATA,
    AIConversationBlock,
    AIStructuredResponseGeneratorBlock,
    AITextGeneratorBlock,
    AITextSummarizerBlock,
    LlmModel,
)
from backend.blocks.replicate_flux_advanced import ReplicateFluxAdvancedModelBlock
from backend.blocks.search import ExtractWebsiteContentBlock, SearchTheWebBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
from backend.data.block import Block, BlockInput, get_block
from backend.data.block_cost_config import BLOCK_COSTS
from backend.data.cost import BlockCost, BlockCostType
from backend.util.settings import Config


class BlockCostType(str, Enum):
    RUN = "run"  # cost X credits per run
    BYTE = "byte"  # cost X credits per byte
    SECOND = "second"  # cost X credits per second


class BlockCost(BaseModel):
    cost_amount: int
    cost_filter: BlockInput
    cost_type: BlockCostType

    def __init__(
        self,
        cost_amount: int,
        cost_type: BlockCostType = BlockCostType.RUN,
        cost_filter: Optional[BlockInput] = None,
        **data: Any,
    ) -> None:
        super().__init__(
            cost_amount=cost_amount,
            cost_filter=cost_filter or {},
            cost_type=cost_type,
            **data,
        )


llm_cost = (
    [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "api_key": None,  # Running LLM with user own API key is free.
            },
            cost_amount=metadata.cost_factor,
        )
        for model, metadata in MODEL_METADATA.items()
    ]
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": anthropic_credentials.id,
                    "provider": anthropic_credentials.provider,
                    "type": anthropic_credentials.type,
                },
            },
            cost_amount=metadata.cost_factor,
        )
        for model, metadata in MODEL_METADATA.items()
        if metadata.provider == "anthropic"
    ]
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {
                    "id": openai_credentials.id,
                    "provider": openai_credentials.provider,
                    "type": openai_credentials.type,
                },
            },
            cost_amount=metadata.cost_factor,
        )
        for model, metadata in MODEL_METADATA.items()
        if metadata.provider == "openai"
    ]
    + [
        BlockCost(
            cost_type=BlockCostType.RUN,
            cost_filter={
                "model": model,
                "credentials": {"id": groq_credentials.id},
            },
            cost_amount=metadata.cost_factor,
        )
        for model, metadata in MODEL_METADATA.items()
        if metadata.provider == "groq"
    ]
    + [
        BlockCost(
            # Default cost is running LlmModel.GPT4O.
            cost_amount=MODEL_METADATA[LlmModel.GPT4O].cost_factor,
            cost_filter={"api_key": None},
        ),
    ]
)

BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
    AIConversationBlock: llm_cost,
    AITextGeneratorBlock: llm_cost,
    AIStructuredResponseGeneratorBlock: llm_cost,
    AITextSummarizerBlock: llm_cost,
    CreateTalkingAvatarVideoBlock: [
        BlockCost(
            cost_amount=15,
            cost_filter={
                "credentials": {
                    "id": did_credentials.id,
                    "provider": did_credentials.provider,
                    "type": did_credentials.type,
                }
            },
        )
    ],
    SearchTheWebBlock: [BlockCost(cost_amount=1)],
    ExtractWebsiteContentBlock: [
        BlockCost(cost_amount=1, cost_filter={"raw_content": False})
    ],
    IdeogramModelBlock: [
        BlockCost(
            cost_amount=1,
            cost_filter={
                "credentials": {
                    "id": ideogram_credentials.id,
                    "provider": ideogram_credentials.provider,
                    "type": ideogram_credentials.type,
                }
            },
        )
    ],
    AIShortformVideoCreatorBlock: [
        BlockCost(
            cost_amount=10,
            cost_filter={
                "credentials": {
                    "id": revid_credentials.id,
                    "provider": revid_credentials.provider,
                    "type": revid_credentials.type,
                }
            },
        )
    ],
    ReplicateFluxAdvancedModelBlock: [
        BlockCost(
            cost_amount=10,
            cost_filter={
                "credentials": {
                    "id": replicate_credentials.id,
                    "provider": replicate_credentials.provider,
                    "type": replicate_credentials.type,
                }
            },
        )
    ],
}


class UserCreditBase(ABC):
    def __init__(self, num_user_credits_refill: int):
        self.num_user_credits_refill = num_user_credits_refill
@@ -96,7 +270,7 @@ class UserCredit(UserCreditBase):
                    "createdAt": self.time_now(),
                }
            )
        except UniqueViolationError:
        except prisma.errors.UniqueViolationError:
            pass  # Already refilled this month

        return self.num_user_credits_refill

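The hunk above relies on an insert-and-catch pattern for the monthly credit refill: write a refill row whose key encodes the user and month, and treat a unique-constraint violation as "already refilled". A minimal sketch, assuming hypothetical field names and an assumed `UserBlockCreditType` member (the real row shape is not shown in this diff):

```python
# Sketch of the refill-once-per-month idempotency pattern. The data fields
# and TOP_UP member are assumptions for illustration only.
import prisma.errors
from prisma.enums import UserBlockCreditType
from prisma.models import UserBlockCredit


async def refill_if_needed(user_id: str, month_key: str, amount: int) -> None:
    try:
        await UserBlockCredit.prisma().create(
            data={
                "userId": user_id,
                "blockId": month_key,  # assumed unique key per (user, month)
                "amount": amount,
                "type": UserBlockCreditType.TOP_UP,  # assumed enum member
            }
        )
    except prisma.errors.UniqueViolationError:
        pass  # Already refilled this month
```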
@@ -64,7 +64,6 @@ class ExecutionResult(BaseModel):
    graph_exec_id: str
    node_exec_id: str
    node_id: str
    block_id: str
    status: ExecutionStatus
    input_data: BlockInput
    output_data: CompletedBlockOutput
@@ -73,31 +72,11 @@ class ExecutionResult(BaseModel):
    start_time: datetime | None
    end_time: datetime | None

    @staticmethod
    def from_graph(graph: AgentGraphExecution):
        return ExecutionResult(
            graph_id=graph.agentGraphId,
            graph_version=graph.agentGraphVersion,
            graph_exec_id=graph.id,
            node_exec_id="",
            node_id="",
            block_id="",
            status=graph.executionStatus,
            # TODO: Populate input_data & output_data from AgentNodeExecutions
            # Input & Output comes AgentInputBlock & AgentOutputBlock.
            input_data={},
            output_data={},
            add_time=graph.createdAt,
            queue_time=graph.createdAt,
            start_time=graph.startedAt,
            end_time=graph.updatedAt,
        )

    @staticmethod
    def from_db(execution: AgentNodeExecution):
        if execution.executionData:
            # Execution that has been queued for execution will persist its data.
            input_data = json.loads(execution.executionData, target_type=dict[str, Any])
            input_data = json.loads(execution.executionData)
        else:
            # For incomplete execution, executionData will not be yet available.
            input_data: BlockInput = defaultdict()
@@ -114,10 +93,9 @@ class ExecutionResult(BaseModel):
            graph_id=graph_execution.agentGraphId if graph_execution else "",
            graph_version=graph_execution.agentGraphVersion if graph_execution else 0,
            graph_exec_id=execution.agentGraphExecutionId,
            block_id=execution.AgentNode.agentBlockId if execution.AgentNode else "",
            node_exec_id=execution.id,
            node_id=execution.agentNodeId,
            status=execution.executionStatus,
            status=ExecutionStatus(execution.executionStatus),
            input_data=input_data,
            output_data=output_data,
            add_time=execution.addedTime,
@@ -270,20 +248,15 @@ async def update_graph_execution_start_time(graph_exec_id: str):
async def update_graph_execution_stats(
    graph_exec_id: str,
    stats: dict[str, Any],
) -> ExecutionResult:

):
    status = ExecutionStatus.FAILED if stats.get("error") else ExecutionStatus.COMPLETED
    res = await AgentGraphExecution.prisma().update(
    await AgentGraphExecution.prisma().update(
        where={"id": graph_exec_id},
        data={
            "executionStatus": status,
            "stats": json.dumps(stats),
        },
    )
    if not res:
        raise ValueError(f"Execution {graph_exec_id} not found.")

    return ExecutionResult.from_graph(res)


async def update_node_execution_stats(node_exec_id: str, stats: dict[str, Any]):

@@ -5,12 +5,10 @@ from collections import defaultdict
from datetime import datetime, timezone
from typing import Any, Literal, Type

import prisma
from prisma.models import AgentGraph, AgentGraphExecution, AgentNode, AgentNodeLink
from prisma.types import AgentGraphWhereInput
from pydantic.fields import computed_field

from backend.blocks.agent import AgentExecutorBlock
from backend.blocks.basic import AgentInputBlock, AgentOutputBlock
from backend.data.block import BlockInput, BlockType, get_block, get_blocks
from backend.data.db import BaseDbModel, transaction
@@ -57,8 +55,8 @@ class Node(BaseDbModel):
        obj = Node(
            id=node.id,
            block_id=node.AgentBlock.id,
            input_default=json.loads(node.constantInput, target_type=dict[str, Any]),
            metadata=json.loads(node.metadata, target_type=dict[str, Any]),
            input_default=json.loads(node.constantInput),
            metadata=json.loads(node.metadata),
        )
        obj.input_links = [Link.from_db(link) for link in node.Input or []]
        obj.output_links = [Link.from_db(link) for link in node.Output or []]
@@ -81,13 +79,10 @@ class GraphExecution(BaseDbModel):
        duration = (end_time - start_time).total_seconds()
        total_run_time = duration

        try:
            stats = json.loads(execution.stats or "{}", target_type=dict[str, Any])
        except ValueError:
            stats = {}

        duration = stats.get("walltime", duration)
        total_run_time = stats.get("nodes_walltime", total_run_time)
        if execution.stats:
            stats = json.loads(execution.stats)
            duration = stats.get("walltime", duration)
            total_run_time = stats.get("nodes_walltime", total_run_time)

        return GraphExecution(
            id=execution.id,
@@ -179,35 +174,24 @@ class Graph(BaseDbModel):
            if node.id not in outbound_nodes or node.id in input_nodes
        ]

    def reassign_ids(self, user_id: str, reassign_graph_id: bool = False):
    def reassign_ids(self, reassign_graph_id: bool = False):
        """
        Reassigns all IDs in the graph to new UUIDs.
        This method can be used before storing a new graph to the database.
        """
        self.validate_graph()

        # Reassign Graph ID
        id_map = {node.id: str(uuid.uuid4()) for node in self.nodes}
        if reassign_graph_id:
            self.id = str(uuid.uuid4())

        # Reassign Node IDs
        for node in self.nodes:
            node.id = id_map[node.id]

        # Reassign Link IDs
        for link in self.links:
            link.source_id = id_map[link.source_id]
            link.sink_id = id_map[link.sink_id]

        # Reassign User IDs for agent blocks
        for node in self.nodes:
            if node.block_id != AgentExecutorBlock().id:
                continue
            node.input_default["user_id"] = user_id
            node.input_default.setdefault("data", {})

        self.validate_graph()

    def validate_graph(self, for_run: bool = False):
        def sanitize(name):
            return name.split("_#_")[0].split("_@_")[0].split("_$_")[0]
@@ -231,7 +215,6 @@ class Graph(BaseDbModel):
                    for_run  # Skip input completion validation, unless when executing.
                    or block.block_type == BlockType.INPUT
                    or block.block_type == BlockType.OUTPUT
                    or block.block_type == BlockType.AGENT
                ):
                    raise ValueError(
                        f"Node {block.name} #{node.id} required input missing: `{name}`"
@@ -265,26 +248,18 @@ class Graph(BaseDbModel):
                )

            sanitized_name = sanitize(name)
            vals = node.input_default
            if i == 0:
                fields = (
                    block.output_schema.get_fields()
                    if block.block_type != BlockType.AGENT
                    else vals.get("output_schema", {}).get("properties", {}).keys()
                )
                fields = f"Valid output fields: {block.output_schema.get_fields()}"
            else:
                fields = (
                    block.input_schema.get_fields()
                    if block.block_type != BlockType.AGENT
                    else vals.get("input_schema", {}).get("properties", {}).keys()
                )
                fields = f"Valid input fields: {block.input_schema.get_fields()}"
            if sanitized_name not in fields:
                fields_msg = f"Allowed fields: {fields}"
                raise ValueError(f"{suffix}, `{name}` invalid, {fields_msg}")
                raise ValueError(f"{suffix}, `{name}` invalid, {fields}")

        if is_static_output_block(link.source_id):
            link.is_static = True  # Each value block output should be static.

        # TODO: Add type compatibility check here.

    @staticmethod
    def from_db(graph: AgentGraph, hide_credentials: bool = False):
        executions = [
@@ -315,9 +290,7 @@ class Graph(BaseDbModel):
    def _process_node(node: AgentNode, hide_credentials: bool) -> Node:
        node_dict = node.model_dump()
        if hide_credentials and "constantInput" in node_dict:
            constant_input = json.loads(
                node_dict["constantInput"], target_type=dict[str, Any]
            )
            constant_input = json.loads(node_dict["constantInput"])
            constant_input = Graph._hide_credentials_in_input(constant_input)
            node_dict["constantInput"] = json.dumps(constant_input)
        return Node.from_db(AgentNode(**node_dict))
@@ -529,84 +502,3 @@ async def __create_graph(tx, graph: Graph, user_id: str):
            for link in graph.links
        ]
    )


# ------------------------ UTILITIES ------------------------ #


async def fix_llm_provider_credentials():
    """Fix node credentials with provider `llm`"""
    from autogpt_libs.supabase_integration_credentials_store import (
        SupabaseIntegrationCredentialsStore,
    )

    from .redis import get_redis
    from .user import get_user_integrations

    store = SupabaseIntegrationCredentialsStore(get_redis())

    broken_nodes = await prisma.get_client().query_raw(
        """
        SELECT "User".id       user_id,
               node.id         node_id,
               node."constantInput" node_preset_input
        FROM platform."AgentNode" node
        LEFT JOIN platform."AgentGraph" graph
            ON node."agentGraphId" = graph.id
        LEFT JOIN platform."User" "User"
            ON graph."userId" = "User".id
        WHERE node."constantInput"::jsonb->'credentials'->>'provider' = 'llm'
        ORDER BY user_id;
        """
    )
    logger.info(f"Fixing LLM credential inputs on {len(broken_nodes)} nodes")

    user_id: str = ""
    user_integrations = None
    for node in broken_nodes:
        if node["user_id"] != user_id:
            # Save queries by only fetching once per user
            user_id = node["user_id"]
            user_integrations = await get_user_integrations(user_id)
        elif not user_integrations:
            raise RuntimeError(f"Impossible state while processing node {node}")

        node_id: str = node["node_id"]
        node_preset_input: dict = json.loads(node["node_preset_input"])
        credentials_meta: dict = node_preset_input["credentials"]

        credentials = next(
            (
                c
                for c in user_integrations.credentials
                if c.id == credentials_meta["id"]
            ),
            None,
        )
        if not credentials:
            continue
        if credentials.type != "api_key":
            logger.warning(
                f"User {user_id} credentials {credentials.id} with provider 'llm' "
                f"has invalid type '{credentials.type}'"
            )
            continue

        api_key = credentials.api_key.get_secret_value()
        if api_key.startswith("sk-ant-api03-"):
            credentials.provider = credentials_meta["provider"] = "anthropic"
        elif api_key.startswith("sk-"):
            credentials.provider = credentials_meta["provider"] = "openai"
        elif api_key.startswith("gsk_"):
            credentials.provider = credentials_meta["provider"] = "groq"
        else:
            logger.warning(
                f"Could not identify provider from key prefix {api_key[:13]}*****"
            )
            continue

        store.update_creds(user_id, credentials)
        await AgentNode.prisma().update(
            where={"id": node_id},
            data={"constantInput": json.dumps(node_preset_input)},
        )

@@ -151,22 +151,11 @@ class CredentialsMetaInput(BaseModel, Generic[CP, CT]):
    type: CT


class CredentialsFieldSchemaExtra(BaseModel, Generic[CP, CT]):
    # TODO: move discrimination mechanism out of CredentialsField (frontend + backend)
    credentials_provider: list[CP]
    credentials_scopes: Optional[list[str]]
    credentials_types: list[CT]
    discriminator: Optional[str] = None
    discriminator_mapping: Optional[dict[str, CP]] = None


def CredentialsField(
    provider: CP | list[CP],
    provider: CP,
    supported_credential_types: set[CT],
    required_scopes: set[str] = set(),
    *,
    discriminator: Optional[str] = None,
    discriminator_mapping: Optional[dict[str, Any]] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    **kwargs,
@@ -175,21 +164,20 @@ def CredentialsField(
    `CredentialsField` must and can only be used on fields named `credentials`.
    This is enforced by the `BlockSchema` base class.
    """
    if not isinstance(provider, str) and len(provider) > 1 and not discriminator:
        raise TypeError("Multi-provider CredentialsField requires discriminator!")

    field_schema_extra = CredentialsFieldSchemaExtra[CP, CT](
        credentials_provider=[provider] if isinstance(provider, str) else provider,
        credentials_scopes=list(required_scopes) or None,  # omit if empty
        credentials_types=list(supported_credential_types),
        discriminator=discriminator,
        discriminator_mapping=discriminator_mapping,
    )
    json_extra = {
        k: v
        for k, v in {
            "credentials_provider": provider,
            "credentials_scopes": list(required_scopes) or None,  # omit if empty
            "credentials_types": list(supported_credential_types),
        }.items()
        if v is not None
    }

    return Field(
        title=title,
        description=description,
        json_schema_extra=field_schema_extra.model_dump(exclude_none=True),
        json_schema_extra=json_extra,
        **kwargs,
    )


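Both variants of `CredentialsField` above ultimately pass metadata through pydantic's `json_schema_extra`, which copies the extra keys into the generated JSON schema so the frontend can discover allowed providers and credential types. A minimal standalone sketch of that mechanism (field values are illustrative, not taken from the diff), using the pydantic v2 API:

```python
# Sketch: json_schema_extra keys surface verbatim in the generated schema.
from pydantic import BaseModel, Field


class ExampleInput(BaseModel):
    credentials: str = Field(
        json_schema_extra={
            "credentials_provider": ["github"],  # illustrative values
            "credentials_types": ["api_key"],
        }
    )


schema = ExampleInput.model_json_schema()["properties"]["credentials"]
print(schema["credentials_provider"])  # -> ['github']
```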
81
autogpt_platform/backend/backend/data/schedule.py
Normal file
@@ -0,0 +1,81 @@
from datetime import datetime
from typing import Optional

from prisma.models import AgentGraphExecutionSchedule

from backend.data.block import BlockInput
from backend.data.db import BaseDbModel
from backend.util import json


class ExecutionSchedule(BaseDbModel):
    graph_id: str
    user_id: str
    graph_version: int
    schedule: str
    is_enabled: bool
    input_data: BlockInput
    last_updated: Optional[datetime] = None

    def __init__(self, is_enabled: Optional[bool] = None, **kwargs):
        kwargs["is_enabled"] = (is_enabled is None) or is_enabled
        super().__init__(**kwargs)

    @staticmethod
    def from_db(schedule: AgentGraphExecutionSchedule):
        return ExecutionSchedule(
            id=schedule.id,
            graph_id=schedule.agentGraphId,
            user_id=schedule.userId,
            graph_version=schedule.agentGraphVersion,
            schedule=schedule.schedule,
            is_enabled=schedule.isEnabled,
            last_updated=schedule.lastUpdated.replace(tzinfo=None),
            input_data=json.loads(schedule.inputData),
        )


async def get_active_schedules(last_fetch_time: datetime) -> list[ExecutionSchedule]:
    query = AgentGraphExecutionSchedule.prisma().find_many(
        where={"isEnabled": True, "lastUpdated": {"gt": last_fetch_time}},
        order={"lastUpdated": "asc"},
    )
    return [ExecutionSchedule.from_db(schedule) for schedule in await query]


async def disable_schedule(schedule_id: str):
    await AgentGraphExecutionSchedule.prisma().update(
        where={"id": schedule_id}, data={"isEnabled": False}
    )


async def get_schedules(graph_id: str, user_id: str) -> list[ExecutionSchedule]:
    query = AgentGraphExecutionSchedule.prisma().find_many(
        where={
            "isEnabled": True,
            "agentGraphId": graph_id,
            "userId": user_id,
        },
    )
    return [ExecutionSchedule.from_db(schedule) for schedule in await query]


async def add_schedule(schedule: ExecutionSchedule) -> ExecutionSchedule:
    obj = await AgentGraphExecutionSchedule.prisma().create(
        data={
            "id": schedule.id,
            "userId": schedule.user_id,
            "agentGraphId": schedule.graph_id,
            "agentGraphVersion": schedule.graph_version,
            "schedule": schedule.schedule,
            "isEnabled": schedule.is_enabled,
            "inputData": json.dumps(schedule.input_data),
        }
    )
    return ExecutionSchedule.from_db(obj)


async def update_schedule(schedule_id: str, is_enabled: bool, user_id: str):
    await AgentGraphExecutionSchedule.prisma().update(
        where={"id": schedule_id}, data={"isEnabled": is_enabled}
    )
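A hedged usage sketch of the new schedule helpers above, assuming `BaseDbModel` supplies a default `id` (the custom `__init__` already defaults `is_enabled` to true); the cron string and IDs are illustrative:

```python
# Sketch: create a schedule, then poll for schedules changed since a cutoff.
from datetime import datetime


async def demo():
    created = await add_schedule(
        ExecutionSchedule(
            graph_id="graph-123",   # illustrative IDs
            user_id="user-456",
            graph_version=1,
            schedule="0 * * * *",   # hourly cron expression
            input_data={},
        )
    )
    # Fetch everything updated since the beginning of time (first sync).
    active = await get_active_schedules(datetime.min)
    print(created.id, len(active))
```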
@@ -1,7 +1,6 @@
import logging
from typing import Optional, cast

from autogpt_libs.auth.models import DEFAULT_USER_ID
from autogpt_libs.supabase_integration_credentials_store.types import (
    UserIntegrations,
    UserMetadata,
@@ -16,6 +15,9 @@ from backend.util.encryption import JSONCryptor

logger = logging.getLogger(__name__)

DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"


async def get_or_create_user(user_data: dict) -> User:
    user_id = user_data.get("sub")

@@ -41,8 +41,8 @@ class DatabaseManager(AppService):
        return Config().database_api_port

    @expose
    def send_execution_update(self, execution_result: ExecutionResult):
        self.event_queue.publish(execution_result)
    def send_execution_update(self, execution_result_dict: dict[Any, Any]):
        self.event_queue.publish(ExecutionResult(**execution_result_dict))

    @staticmethod
    def exposed_run_and_wait(

@@ -125,7 +125,7 @@ def execute_node(

    def update_execution(status: ExecutionStatus) -> ExecutionResult:
        exec_update = db_client.update_execution_status(node_exec_id, status)
        db_client.send_execution_update(exec_update)
        db_client.send_execution_update(exec_update.model_dump())
        return exec_update

    node = db_client.get_node(node_id)
@@ -251,7 +251,7 @@ def _enqueue_next_nodes(
        exec_update = db_client.update_execution_status(
            node_exec_id, ExecutionStatus.QUEUED, data
        )
        db_client.send_execution_update(exec_update)
        db_client.send_execution_update(exec_update.model_dump())
        return NodeExecution(
            user_id=user_id,
            graph_exec_id=graph_exec_id,
@@ -572,11 +572,10 @@ class Executor:
        exec_stats["walltime"] = timing_info.wall_time
        exec_stats["cputime"] = timing_info.cpu_time
        exec_stats["error"] = str(error) if error else None
        result = cls.db_client.update_graph_execution_stats(
        cls.db_client.update_graph_execution_stats(
            graph_exec_id=graph_exec.graph_exec_id,
            stats=exec_stats,
        )
        cls.db_client.send_execution_update(result)

    @classmethod
    @time_measured
@@ -730,7 +729,7 @@ class ExecutionManager(AppService):
        )
        self.active_graph_runs[graph_exec_id] = (future, cancel_event)
        future.add_done_callback(
            lambda _: self.active_graph_runs.pop(graph_exec_id, None)
            lambda _: self.active_graph_runs.pop(graph_exec_id)
        )

    def cleanup(self):
@@ -745,17 +744,11 @@ class ExecutionManager(AppService):

    @expose
    def add_execution(
        self,
        graph_id: str,
        data: BlockInput,
        user_id: str,
        graph_version: int | None = None,
    ) -> GraphExecution:
        graph: Graph | None = self.db_client.get_graph(
            graph_id=graph_id, user_id=user_id, version=graph_version
        )
        self, graph_id: str, data: BlockInput, user_id: str
    ) -> dict[str, Any]:
        graph: Graph | None = self.db_client.get_graph(graph_id, user_id=user_id)
        if not graph:
            raise ValueError(f"Graph #{graph_id} not found.")
            raise Exception(f"Graph #{graph_id} not found.")

        graph.validate_graph(for_run=True)
        self._validate_node_input_credentials(graph, user_id)
@@ -777,7 +770,7 @@ class ExecutionManager(AppService):

            input_data, error = validate_exec(node, input_data)
            if input_data is None:
                raise ValueError(error)
                raise Exception(error)
            else:
                nodes_input.append((node.id, input_data))

@@ -803,7 +796,7 @@ class ExecutionManager(AppService):
            exec_update = self.db_client.update_execution_status(
                node_exec.node_exec_id, ExecutionStatus.QUEUED, node_exec.input_data
            )
            self.db_client.send_execution_update(exec_update)
            self.db_client.send_execution_update(exec_update.model_dump())

        graph_exec = GraphExecution(
            user_id=user_id,
@@ -813,7 +806,7 @@ class ExecutionManager(AppService):
        )
        self.queue.add(graph_exec)

        return graph_exec
        return graph_exec.model_dump()

    @expose
    def cancel_execution(self, graph_exec_id: str) -> None:
@@ -850,7 +843,7 @@ class ExecutionManager(AppService):
            exec_update = self.db_client.update_execution_status(
                node_exec.node_exec_id, ExecutionStatus.FAILED
            )
            self.db_client.send_execution_update(exec_update)
            self.db_client.send_execution_update(exec_update.model_dump())

    def _validate_node_input_credentials(self, graph: Graph, user_id: str):
        """Checks all credentials for all nodes of the graph"""

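The recurring change in these hunks is the service boundary: instead of passing pydantic models over the RPC layer, the sender serializes with `.model_dump()` and the receiver reconstructs the model from the dict. A minimal standalone sketch of that round-trip (the model here is a stand-in, not the real `ExecutionResult`):

```python
# Sketch of the model_dump() round-trip across a service boundary (pydantic v2).
from pydantic import BaseModel


class ExecUpdate(BaseModel):  # stand-in for ExecutionResult
    node_exec_id: str
    status: str


payload = ExecUpdate(node_exec_id="n1", status="QUEUED").model_dump()
# ...payload crosses the RPC boundary as a plain, JSON-serializable dict...
received = ExecUpdate(**payload)
assert received.status == "QUEUED"
```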
@@ -1,98 +1,37 @@
import logging
import os
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
import time
from datetime import datetime

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.job import Job as JobObj
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from autogpt_libs.utils.cache import thread_cached
from dotenv import load_dotenv
from pydantic import BaseModel
from sqlalchemy import MetaData, create_engine

from backend.data.block import BlockInput
from backend.data.schedule import (
    ExecutionSchedule,
    add_schedule,
    get_active_schedules,
    get_schedules,
    update_schedule,
)
from backend.executor.manager import ExecutionManager
from backend.util.service import AppService, expose, get_service_client
from backend.util.settings import Config


def _extract_schema_from_url(database_url) -> tuple[str, str]:
    """
    Extracts the schema from the DATABASE_URL and returns the schema and cleaned URL.
    """
    parsed_url = urlparse(database_url)
    query_params = parse_qs(parsed_url.query)

    # Extract the 'schema' parameter
    schema_list = query_params.pop("schema", None)
    schema = schema_list[0] if schema_list else "public"

    # Reconstruct the query string without the 'schema' parameter
    new_query = urlencode(query_params, doseq=True)
    new_parsed_url = parsed_url._replace(query=new_query)
    database_url_clean = str(urlunparse(new_parsed_url))

    return schema, database_url_clean


logger = logging.getLogger(__name__)


def log(msg, **kwargs):
    logger.info("[ExecutionScheduler] " + msg, **kwargs)


def job_listener(event):
    """Logs job execution outcomes for better monitoring."""
    if event.exception:
        log(f"Job {event.job_id} failed.")
    else:
        log(f"Job {event.job_id} completed successfully.")


@thread_cached
def get_execution_client() -> ExecutionManager:
    return get_service_client(ExecutionManager)


def execute_graph(**kwargs):
    args = JobArgs(**kwargs)
    try:
        log(f"Executing recurring job for graph #{args.graph_id}")
        get_execution_client().add_execution(
            args.graph_id, args.input_data, args.user_id
        )
    except Exception as e:
        logger.exception(f"Error executing graph {args.graph_id}: {e}")


class JobArgs(BaseModel):
    graph_id: str
    input_data: BlockInput
    user_id: str
    graph_version: int
    cron: str


class JobInfo(JobArgs):
    id: str
    name: str
    next_run_time: str

    @staticmethod
    def from_db(job_args: JobArgs, job_obj: JobObj) -> "JobInfo":
        return JobInfo(
            id=job_obj.id,
            name=job_obj.name,
            next_run_time=job_obj.next_run_time.isoformat(),
            **job_args.model_dump(),
        )
    logger.warning("[ExecutionScheduler] " + msg, **kwargs)


class ExecutionScheduler(AppService):
    scheduler: BlockingScheduler

    def __init__(self, refresh_interval=10):
        super().__init__()
        self.use_db = True
        self.last_check = datetime.min
        self.refresh_interval = refresh_interval

    @classmethod
    def get_port(cls) -> int:
@@ -104,18 +43,43 @@ class ExecutionScheduler(AppService):
        return get_service_client(ExecutionManager)

    def run_service(self):
        load_dotenv()
        db_schema, db_url = _extract_schema_from_url(os.getenv("DATABASE_URL"))
        self.scheduler = BlockingScheduler(
            jobstores={
                "default": SQLAlchemyJobStore(
                    engine=create_engine(db_url),
                    metadata=MetaData(schema=db_schema),
                )
            }
        )
        self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler.start()
        scheduler = BackgroundScheduler()
        scheduler.start()
        while True:
            self.__refresh_jobs_from_db(scheduler)
            time.sleep(self.refresh_interval)

    def __refresh_jobs_from_db(self, scheduler: BackgroundScheduler):
        schedules = self.run_and_wait(get_active_schedules(self.last_check))
        for schedule in schedules:
            if schedule.last_updated:
                self.last_check = max(self.last_check, schedule.last_updated)

            if not schedule.is_enabled:
                log(f"Removing recurring job {schedule.id}: {schedule.schedule}")
                scheduler.remove_job(schedule.id)
                continue

            log(f"Adding recurring job {schedule.id}: {schedule.schedule}")
            scheduler.add_job(
                self.__execute_graph,
                CronTrigger.from_crontab(schedule.schedule),
                id=schedule.id,
                args=[schedule.graph_id, schedule.input_data, schedule.user_id],
                replace_existing=True,
            )

    def __execute_graph(self, graph_id: str, input_data: dict, user_id: str):
        try:
            log(f"Executing recurring job for graph #{graph_id}")
            self.execution_client.add_execution(graph_id, input_data, user_id)
        except Exception as e:
            logger.exception(f"Error executing graph {graph_id}: {e}")

    @expose
    def update_schedule(self, schedule_id: str, is_enabled: bool, user_id: str) -> str:
        self.run_and_wait(update_schedule(schedule_id, is_enabled, user_id))
        return schedule_id

    @expose
    def add_execution_schedule(
@@ -125,50 +89,17 @@ class ExecutionScheduler(AppService):
        cron: str,
        input_data: BlockInput,
        user_id: str,
    ) -> JobInfo:
        job_args = JobArgs(
    ) -> str:
        schedule = ExecutionSchedule(
            graph_id=graph_id,
            input_data=input_data,
            user_id=user_id,
            graph_version=graph_version,
            cron=cron,
            schedule=cron,
            input_data=input_data,
        )
        job = self.scheduler.add_job(
            execute_graph,
            CronTrigger.from_crontab(cron),
            kwargs=job_args.model_dump(),
            replace_existing=True,
        )
        log(f"Added job {job.id} with cron schedule '{cron}' input data: {input_data}")
        return JobInfo.from_db(job_args, job)
        return self.run_and_wait(add_schedule(schedule)).id

    @expose
    def delete_schedule(self, schedule_id: str, user_id: str) -> JobInfo:
        job = self.scheduler.get_job(schedule_id)
        if not job:
            log(f"Job {schedule_id} not found.")
            raise ValueError(f"Job #{schedule_id} not found.")

        job_args = JobArgs(**job.kwargs)
        if job_args.user_id != user_id:
            raise ValueError("User ID does not match the job's user ID.")

        log(f"Deleting job {schedule_id}")
        job.remove()

        return JobInfo.from_db(job_args, job)

    @expose
    def get_execution_schedules(
        self, graph_id: str | None = None, user_id: str | None = None
    ) -> list[JobInfo]:
        schedules = []
        for job in self.scheduler.get_jobs():
            job_args = JobArgs(**job.kwargs)
            if (
                job.next_run_time is not None
                and (graph_id is None or job_args.graph_id == graph_id)
                and (user_id is None or job_args.user_id == user_id)
            ):
                schedules.append(JobInfo.from_db(job_args, job))
        return schedules
    def get_execution_schedules(self, graph_id: str, user_id: str) -> dict[str, str]:
        schedules = self.run_and_wait(get_schedules(graph_id, user_id=user_id))
        return {v.id: v.schedule for v in schedules}

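The `run_service` rewrite above swaps a DB-backed `BlockingScheduler` job store for a `BackgroundScheduler` plus a polling loop that mirrors database rows into cron jobs. A minimal standalone sketch of that pattern, using only APScheduler APIs that appear in the diff; the job body and IDs are placeholders:

```python
# Sketch of the poll-and-sync scheduling pattern adopted above.
import time

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger


def run_job(graph_id: str):
    print(f"would execute graph {graph_id}")  # placeholder job body


scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(
    run_job,
    CronTrigger.from_crontab("*/5 * * * *"),  # every 5 minutes
    id="schedule-1",             # replace_existing makes re-syncs idempotent
    args=["graph-123"],
    replace_existing=True,
)

while True:
    # In the real service this is where changed DB rows would be re-synced
    # into scheduler jobs before sleeping for the refresh interval.
    time.sleep(10)
```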
@@ -2,10 +2,9 @@ import time
from typing import Optional
from urllib.parse import urlencode

import requests
from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials

from backend.util.request import requests

from .base import BaseOAuthHandler


@@ -57,12 +56,13 @@ class GitHubOAuthHandler(BaseOAuthHandler):
            "X-GitHub-Api-Version": "2022-11-28",
        }

        requests.delete(
        response = requests.delete(
            url=self.revoke_url.format(client_id=self.client_id),
            auth=(self.client_id, self.client_secret),
            headers=headers,
            json={"access_token": credentials.access_token.get_secret_value()},
        )
        response.raise_for_status()
        return True

    def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
@@ -88,6 +88,7 @@ class GitHubOAuthHandler(BaseOAuthHandler):
        }
        headers = {"Accept": "application/json"}
        response = requests.post(self.token_url, data=request_body, headers=headers)
        response.raise_for_status()
        token_data: dict = response.json()

        username = self._request_username(token_data["access_token"])

@@ -103,11 +103,12 @@ class GoogleOAuthHandler(BaseOAuthHandler):

    def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
        session = AuthorizedSession(credentials)
        session.post(
        response = session.post(
            self.revoke_uri,
            params={"token": credentials.access_token.get_secret_value()},
            headers={"content-type": "application/x-www-form-urlencoded"},
        )
        response.raise_for_status()
        return True

    def _request_email(

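The common thread in these OAuth hunks is capturing the revocation response and calling `raise_for_status()` so HTTP failures surface instead of being silently swallowed. A standalone illustration with the plain `requests` library (the URL is a placeholder):

```python
# raise_for_status() turns any 4xx/5xx response into requests.HTTPError.
import requests

response = requests.delete("https://api.example.com/revoke")  # placeholder URL
response.raise_for_status()  # raises on HTTP error; otherwise execution continues
```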
@@ -1,10 +1,9 @@
from base64 import b64encode
from urllib.parse import urlencode

import requests
from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials

from backend.util.request import requests

from .base import BaseOAuthHandler


@@ -50,6 +49,7 @@ class NotionOAuthHandler(BaseOAuthHandler):
            "Accept": "application/json",
        }
        response = requests.post(self.token_url, json=request_body, headers=headers)
        response.raise_for_status()
        token_data = response.json()
        # Email is only available for non-bot users
        email = (

@@ -1,10 +1,9 @@
import enum
from typing import Any, List, Optional, Union
import typing

import pydantic

import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash


class Methods(enum.Enum):
@@ -12,12 +11,11 @@ class Methods(enum.Enum):
    UNSUBSCRIBE = "unsubscribe"
    EXECUTION_EVENT = "execution_event"
    ERROR = "error"
    HEARTBEAT = "heartbeat"


class WsMessage(pydantic.BaseModel):
    method: Methods
    data: Optional[Union[dict[str, Any], list[Any], str]] = None
    data: typing.Dict[str, typing.Any] | list[typing.Any] | None = None
    success: bool | None = None
    channel: str | None = None
    error: str | None = None
@@ -39,20 +37,5 @@ class CreateGraph(pydantic.BaseModel):
    graph: backend.data.graph.Graph | None = None


class CreateAPIKeyRequest(pydantic.BaseModel):
    name: str
    permissions: List[APIKeyPermission]
    description: Optional[str] = None


class CreateAPIKeyResponse(pydantic.BaseModel):
    api_key: APIKeyWithoutHash
    plain_text_key: str


class SetGraphActiveVersion(pydantic.BaseModel):
    active_graph_version: int


class UpdatePermissionsRequest(pydantic.BaseModel):
    permissions: List[APIKeyPermission]

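A hedged sketch of the request model above in use, as a FastAPI handler would receive it after JSON parsing; the permission string is an assumed value, since the actual `APIKeyPermission` members are defined in `backend.data.api_key` and are not shown in this diff:

```python
# Sketch only: "execute_graph" is an assumed APIKeyPermission value and may
# not validate against the real enum.
payload = {
    "name": "ci-bot",
    "permissions": ["execute_graph"],
    "description": "Key for CI runs",
}
request = CreateAPIKeyRequest(**payload)
print(request.name, request.permissions)
```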
@@ -9,7 +9,6 @@ import uvicorn

import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
@@ -24,7 +23,6 @@ async def lifespan_context(app: fastapi.FastAPI):
    await backend.data.db.connect()
    await backend.data.block.initialize_blocks()
    await backend.data.user.migrate_and_encrypt_user_integrations()
    await backend.data.graph.fix_llm_provider_credentials()
    yield
    await backend.data.db.disconnect()


@@ -1,42 +1,23 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
from typing import Annotated, Any, List
|
||||
from typing import Annotated, Any, Dict
|
||||
|
||||
import pydantic
|
||||
from autogpt_libs.auth.middleware import auth_middleware
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from typing_extensions import Optional, TypedDict
|
||||
from fastapi import APIRouter, Body, Depends, HTTPException
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
import backend.data.block
|
||||
import backend.server.integrations.router
|
||||
import backend.server.routers.analytics
|
||||
from backend.data import execution as execution_db
|
||||
from backend.data import graph as graph_db
|
||||
from backend.data.api_key import (
|
||||
APIKeyError,
|
||||
APIKeyNotFoundError,
|
||||
APIKeyPermissionError,
|
||||
APIKeyWithoutHash,
|
||||
generate_api_key,
|
||||
get_api_key_by_id,
|
||||
list_user_api_keys,
|
||||
revoke_api_key,
|
||||
suspend_api_key,
|
||||
update_api_key_permissions,
|
||||
)
|
||||
from backend.data.block import BlockInput, CompletedBlockOutput
|
||||
from backend.data.credit import get_block_costs, get_user_credit_model
|
||||
from backend.data.user import get_or_create_user
|
||||
from backend.executor import ExecutionManager, ExecutionScheduler, scheduler
|
||||
from backend.server.model import (
|
||||
CreateAPIKeyRequest,
|
||||
CreateAPIKeyResponse,
|
||||
CreateGraph,
|
||||
SetGraphActiveVersion,
|
||||
UpdatePermissionsRequest,
|
||||
)
|
||||
from backend.executor import ExecutionManager, ExecutionScheduler
|
||||
from backend.server.model import CreateGraph, SetGraphActiveVersion
|
||||
from backend.server.utils import get_user_id
|
||||
from backend.util.service import get_service_client
|
||||
from backend.util.settings import Settings
|
||||
@@ -124,8 +105,7 @@ def execute_graph_block(block_id: str, data: BlockInput) -> CompletedBlockOutput
|
||||
async def get_user_credits(
|
||||
user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> dict[str, int]:
|
||||
# Credits can go negative, so ensure it's at least 0 for user to see.
|
||||
return {"credits": max(await _user_credit_model.get_or_refill_credit(user_id), 0)}
|
||||
return {"credits": await _user_credit_model.get_or_refill_credit(user_id)}
|
||||
|
||||
|
||||
########################################################
|
||||
@@ -229,7 +209,7 @@ async def update_graph(
|
||||
400, detail="Changing is_template on an existing graph is forbidden"
|
||||
)
|
||||
graph.is_active = not graph.is_template
|
||||
graph.reassign_ids(user_id=user_id)
|
||||
graph.reassign_ids()
|
||||
|
||||
new_graph_version = await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
@@ -285,7 +265,7 @@ async def execute_graph(
|
||||
graph_exec = execution_manager_client().add_execution(
|
||||
graph_id, node_input, user_id=user_id
|
||||
)
|
||||
return {"id": graph_exec.graph_exec_id}
|
||||
return {"id": graph_exec["graph_exec_id"]}
|
||||
except Exception as e:
|
||||
msg = e.__str__().encode().decode("unicode_escape")
|
||||
raise HTTPException(status_code=400, detail=msg)
|
||||
@@ -423,7 +403,7 @@ async def do_create_graph(
|
||||
|
||||
graph.is_template = is_template
|
||||
graph.is_active = not is_template
|
||||
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
|
||||
graph.reassign_ids(reassign_graph_id=True)
|
||||
|
||||
return await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
@@ -444,191 +424,100 @@ async def create_new_template(
|
||||
########################################################
|
||||
|
||||
|
||||
class ScheduleCreationRequest(pydantic.BaseModel):
|
||||
cron: str
|
||||
input_data: dict[Any, Any]
|
||||
graph_id: str
|
||||
|
||||
|
||||
@v1_router.post(
|
||||
path="/schedules",
|
||||
tags=["schedules"],
|
||||
path="/graphs/{graph_id}/schedules",
|
||||
tags=["graphs"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
async def create_schedule(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
schedule: ScheduleCreationRequest,
|
||||
) -> scheduler.JobInfo:
|
||||
graph = await graph_db.get_graph(schedule.graph_id, user_id=user_id)
|
||||
if not graph:
|
||||
raise HTTPException(
|
||||
status_code=404, detail=f"Graph #{schedule.graph_id} not found."
|
||||
)
|
||||
|
||||
return await asyncio.to_thread(
|
||||
lambda: execution_scheduler_client().add_execution_schedule(
|
||||
graph_id=schedule.graph_id,
|
||||
graph_version=graph.version,
|
||||
cron=schedule.cron,
|
||||
input_data=schedule.input_data,
|
||||
user_id=user_id,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@v1_router.delete(
|
||||
path="/schedules/{schedule_id}",
|
||||
tags=["schedules"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
async def delete_schedule(
|
||||
schedule_id: str,
|
||||
graph_id: str,
|
||||
cron: str,
|
||||
input_data: dict[Any, Any],
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> dict[Any, Any]:
|
||||
execution_scheduler_client().delete_schedule(schedule_id, user_id=user_id)
|
||||
graph = await graph_db.get_graph(graph_id, user_id=user_id)
|
||||
if not graph:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
|
||||
return {
|
||||
"id": await asyncio.to_thread(
|
||||
lambda: execution_scheduler_client().add_execution_schedule(
|
||||
graph_id=graph_id,
|
||||
graph_version=graph.version,
|
||||
cron=cron,
|
||||
input_data=input_data,
|
||||
user_id=user_id,
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@v1_router.put(
|
||||
path="/graphs/schedules/{schedule_id}",
|
||||
tags=["graphs"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
async def update_schedule(
|
||||
schedule_id: str,
|
||||
input_data: dict[Any, Any],
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> dict[Any, Any]:
|
||||
is_enabled = input_data.get("is_enabled", False)
|
||||
execution_scheduler_client().update_schedule(
|
||||
schedule_id, is_enabled, user_id=user_id
|
||||
)
|
||||
return {"id": schedule_id}
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
path="/schedules",
|
||||
tags=["schedules"],
|
||||
path="/graphs/{graph_id}/schedules",
|
||||
tags=["graphs"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
async def get_execution_schedules(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
graph_id: str | None = None,
|
||||
) -> list[scheduler.JobInfo]:
|
||||
return execution_scheduler_client().get_execution_schedules(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
)
|
||||
graph_id: str, user_id: Annotated[str, Depends(get_user_id)]
|
||||
) -> dict[str, str]:
|
||||
return execution_scheduler_client().get_execution_schedules(graph_id, user_id)
|
||||
|
||||
|
||||
########################################################
##################### API KEY ##########################
##################### Settings ########################
########################################################


@v1_router.post(
    "/api-keys",
    response_model=CreateAPIKeyResponse,
    tags=["api-keys"],
    dependencies=[Depends(auth_middleware)],
)
async def create_api_key(
    request: CreateAPIKeyRequest, user_id: Annotated[str, Depends(get_user_id)]
) -> CreateAPIKeyResponse:
    """Create a new API key"""
    try:
        api_key, plain_text = await generate_api_key(
            name=request.name,
            user_id=user_id,
            permissions=request.permissions,
            description=request.description,
        )
        return CreateAPIKeyResponse(api_key=api_key, plain_text_key=plain_text)
    except APIKeyError as e:
        logger.error(f"Failed to create API key: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@v1_router.get(
    "/api-keys",
    response_model=List[APIKeyWithoutHash],
    tags=["api-keys"],
    dependencies=[Depends(auth_middleware)],
)
async def get_api_keys(
    user_id: Annotated[str, Depends(get_user_id)]
) -> List[APIKeyWithoutHash]:
    """List all API keys for the user"""
    try:
        return await list_user_api_keys(user_id)
    except APIKeyError as e:
        logger.error(f"Failed to list API keys: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@v1_router.get(
    "/api-keys/{key_id}",
    response_model=APIKeyWithoutHash,
    tags=["api-keys"],
    dependencies=[Depends(auth_middleware)],
)
async def get_api_key(
    key_id: str, user_id: Annotated[str, Depends(get_user_id)]
) -> APIKeyWithoutHash:
    """Get a specific API key"""
    try:
        api_key = await get_api_key_by_id(key_id, user_id)
        if not api_key:
            raise HTTPException(status_code=404, detail="API key not found")
        return api_key
    except APIKeyError as e:
        logger.error(f"Failed to get API key: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@v1_router.delete(
    "/api-keys/{key_id}",
    response_model=APIKeyWithoutHash,
    tags=["api-keys"],
    dependencies=[Depends(auth_middleware)],
)
async def delete_api_key(
    key_id: str, user_id: Annotated[str, Depends(get_user_id)]
) -> Optional[APIKeyWithoutHash]:
    """Revoke an API key"""
    try:
        return await revoke_api_key(key_id, user_id)
    except APIKeyNotFoundError:
        raise HTTPException(status_code=404, detail="API key not found")
    except APIKeyPermissionError:
        raise HTTPException(status_code=403, detail="Permission denied")
    except APIKeyError as e:
        logger.error(f"Failed to revoke API key: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@v1_router.post(
    "/api-keys/{key_id}/suspend",
    response_model=APIKeyWithoutHash,
    tags=["api-keys"],
    dependencies=[Depends(auth_middleware)],
)
async def suspend_key(
    key_id: str, user_id: Annotated[str, Depends(get_user_id)]
) -> Optional[APIKeyWithoutHash]:
    """Suspend an API key"""
    try:
        return await suspend_api_key(key_id, user_id)
    except APIKeyNotFoundError:
        raise HTTPException(status_code=404, detail="API key not found")
    except APIKeyPermissionError:
        raise HTTPException(status_code=403, detail="Permission denied")
    except APIKeyError as e:
        logger.error(f"Failed to suspend API key: {str(e)}")
        raise HTTPException(status_code=400, detail=str(e))


@v1_router.put(
    "/api-keys/{key_id}/permissions",
    response_model=APIKeyWithoutHash,
    tags=["api-keys"],
    dependencies=[Depends(auth_middleware)],
)
async def update_permissions(
    key_id: str,
    request: UpdatePermissionsRequest,
    user_id: Annotated[str, Depends(get_user_id)],
) -> Optional[APIKeyWithoutHash]:
    """Update API key permissions"""
    try:
        return await update_api_key_permissions(key_id, user_id, request.permissions)
    except APIKeyNotFoundError:
        raise HTTPException(status_code=404, detail="API key not found")
    except APIKeyPermissionError:
        raise HTTPException(status_code=403, detail="Permission denied")
    except APIKeyError as e:
        logger.error(f"Failed to update API key permissions: {str(e)}")


@v1_router.post(
    path="/settings", tags=["settings"], dependencies=[Depends(auth_middleware)]
)
async def update_configuration(
    updated_settings: Annotated[
        Dict[str, Any],
        Body(
            examples=[
                {
                    "config": {
                        "num_graph_workers": 10,
                        "num_node_workers": 10,
                    }
                }
            ]
        ),
    ],
):
    settings = Settings()
    try:
        updated_fields: dict[Any, Any] = {"config": [], "secrets": []}
        for key, value in updated_settings.get("config", {}).items():
            if hasattr(settings.config, key):
                setattr(settings.config, key, value)
                updated_fields["config"].append(key)
        for key, value in updated_settings.get("secrets", {}).items():
            if hasattr(settings.secrets, key):
                setattr(settings.secrets, key, value)
                updated_fields["secrets"].append(key)
        settings.save()
        return {
            "message": "Settings updated successfully",
            "updated_fields": updated_fields,
        }
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
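A small self-contained sketch of the merge rule update_configuration applies: only keys that already exist on the target are written, and the applied keys are reported back. The Config stand-in below is illustrative, not the real Settings class:

class Config:
    num_graph_workers = 10
    num_node_workers = 5

def apply_updates(target, updates: dict) -> list:
    applied = []
    for key, value in updates.items():
        if hasattr(target, key):  # unknown keys are silently ignored
            setattr(target, key, value)
            applied.append(key)
    return applied

cfg = Config()
print(apply_updates(cfg, {"num_node_workers": 12, "bogus": 1}))  # ['num_node_workers']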
@@ -1,11 +1,18 @@
from autogpt_libs.auth.depends import requires_user
from autogpt_libs.auth.models import User
from fastapi import Depends
from autogpt_libs.auth.middleware import auth_middleware
from fastapi import Depends, HTTPException

from backend.data.user import DEFAULT_USER_ID
from backend.util.settings import Settings

settings = Settings()


def get_user_id(user: User = Depends(requires_user)) -> str:
    return user.user_id


def get_user_id(payload: dict = Depends(auth_middleware)) -> str:
    if not payload:
        # This handles the case when authentication is disabled
        return DEFAULT_USER_ID

    user_id = payload.get("sub")
    if not user_id:
        raise HTTPException(status_code=401, detail="User ID not found in token")
    return user_id
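A stand-alone sketch of the payload-based variant's behavior; the DEFAULT_USER_ID value and the payload shape are assumptions for illustration, not the project's wiring:

DEFAULT_USER_ID = "default-user"  # hypothetical placeholder value

class AuthError(Exception):
    pass

def get_user_id_from_payload(payload: dict) -> str:
    if not payload:  # auth disabled -> fall back to the default user
        return DEFAULT_USER_ID
    user_id = payload.get("sub")  # the JWT "subject" claim carries the user id
    if not user_id:
        raise AuthError("User ID not found in token")
    return user_id

print(get_user_id_from_payload({}))                   # "default-user"
print(get_user_id_from_payload({"sub": "user-123"}))  # "user-123"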
@@ -53,24 +53,24 @@ async def event_broadcaster(manager: ConnectionManager):


async def authenticate_websocket(websocket: WebSocket) -> str:
    if not settings.config.enable_auth:
        return DEFAULT_USER_ID

    token = websocket.query_params.get("token")
    if not token:
        await websocket.close(code=4001, reason="Missing authentication token")
        return ""

    try:
        payload = parse_jwt_token(token)
        user_id = payload.get("sub")
        if not user_id:
            await websocket.close(code=4002, reason="Invalid token")
            return ""
        return user_id
    except ValueError:
        await websocket.close(code=4003, reason="Invalid token")
        return ""


async def authenticate_websocket(websocket: WebSocket) -> str:
    if settings.config.enable_auth:
        token = websocket.query_params.get("token")
        if not token:
            await websocket.close(code=4001, reason="Missing authentication token")
            return ""

        try:
            payload = parse_jwt_token(token)
            user_id = payload.get("sub")
            if not user_id:
                await websocket.close(code=4002, reason="Invalid token")
                return ""
            return user_id
        except ValueError:
            await websocket.close(code=4003, reason="Invalid token")
            return ""
    else:
        return DEFAULT_USER_ID


async def handle_subscribe(
@@ -138,13 +138,6 @@ async def websocket_router(
    while True:
        data = await websocket.receive_text()
        message = WsMessage.model_validate_json(data)

        if message.method == Methods.HEARTBEAT:
            await websocket.send_json(
                {"method": Methods.HEARTBEAT.value, "data": "pong", "success": True}
            )
            continue

        if message.method == Methods.SUBSCRIBE:
            await handle_subscribe(websocket, manager, message)
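A hypothetical client exercising the heartbeat path above, assuming the third-party websockets package, a local server URL, and that Methods.HEARTBEAT.value serializes to "heartbeat" (the exact WsMessage schema is not shown in this diff):

import asyncio
import json

import websockets

async def ping(url: str = "ws://localhost:8001/ws?token=<jwt>") -> None:
    async with websockets.connect(url) as ws:
        # Send a heartbeat message; the server answers in-line with "pong".
        await ws.send(json.dumps({"method": "heartbeat", "data": "ping"}))
        reply = json.loads(await ws.recv())
        assert reply["data"] == "pong"

asyncio.run(ping())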
@@ -3,6 +3,14 @@ import pathlib
import sys


def get_secrets_path() -> pathlib.Path:
    return get_data_path() / "secrets"


def get_config_path() -> pathlib.Path:
    return get_data_path()


def get_frontend_path() -> pathlib.Path:
    if getattr(sys, "frozen", False):
        # The application is frozen
@@ -1,10 +1,7 @@
import json
from typing import Any, Type, TypeVar, overload

from fastapi.encoders import jsonable_encoder

from .type import type_match


def to_dict(data) -> dict:
    return jsonable_encoder(data)
@@ -14,19 +11,4 @@ def dumps(data) -> str:
    return json.dumps(jsonable_encoder(data))


T = TypeVar("T")


@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...


@overload
def loads(data: str, *args, **kwargs) -> Any: ...


def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
    parsed = json.loads(data, *args, **kwargs)
    if target_type:
        return type_match(parsed, target_type)
    return parsed

loads = json.loads
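What the typed loads() overload enables, as a minimal sketch; the type_match stand-in here only handles keyword-constructible targets and is not the real backend.util.type implementation:

import json
from dataclasses import dataclass
from typing import Any, Type, TypeVar

T = TypeVar("T")

def type_match(parsed: Any, target_type: Type[T]) -> T:
    return target_type(**parsed)  # assume a dict payload and keyword init

def loads(data: str, *, target_type: Type[T] | None = None) -> Any:
    parsed = json.loads(data)
    return type_match(parsed, target_type) if target_type else parsed

@dataclass
class Job:
    id: str
    cron: str

job = loads('{"id": "j1", "cron": "* * * * *"}', target_type=Job)
print(job.cron)  # "* * * * *"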
@@ -1,134 +0,0 @@
import ipaddress
import socket
from typing import Callable
from urllib.parse import urlparse

import requests as req

from backend.util.settings import Config

# List of IP networks to block
BLOCKED_IP_NETWORKS = [
    # --8<-- [start:BLOCKED_IP_NETWORKS]
    ipaddress.ip_network("0.0.0.0/8"),  # "This" Network
    ipaddress.ip_network("10.0.0.0/8"),  # Private-Use
    ipaddress.ip_network("127.0.0.0/8"),  # Loopback
    ipaddress.ip_network("169.254.0.0/16"),  # Link Local
    ipaddress.ip_network("172.16.0.0/12"),  # Private-Use
    ipaddress.ip_network("192.168.0.0/16"),  # Private-Use
    ipaddress.ip_network("224.0.0.0/4"),  # Multicast
    ipaddress.ip_network("240.0.0.0/4"),  # Reserved for Future Use
    # --8<-- [end:BLOCKED_IP_NETWORKS]
]


def is_ip_blocked(ip: str) -> bool:
    """
    Checks if the IP address is in a blocked network.
    """
    ip_addr = ipaddress.ip_address(ip)
    return any(ip_addr in network for network in BLOCKED_IP_NETWORKS)


def validate_url(url: str, trusted_origins: list[str]) -> str:
    """
    Validates the URL to prevent SSRF attacks by ensuring it does not point to a private
    or untrusted IP address, unless whitelisted.
    """
    url = url.strip().strip("/")
    if not url.startswith(("http://", "https://")):
        url = "http://" + url

    parsed_url = urlparse(url)
    hostname = parsed_url.hostname

    if not hostname:
        raise ValueError(f"Invalid URL: Unable to determine hostname from {url}")

    if any(hostname == origin for origin in trusted_origins):
        return url

    # Resolve all IP addresses for the hostname
    ip_addresses = {result[4][0] for result in socket.getaddrinfo(hostname, None)}
    if not ip_addresses:
        raise ValueError(f"Unable to resolve IP address for {hostname}")

    # Check if all IP addresses are global
    for ip in ip_addresses:
        if is_ip_blocked(ip):
            raise ValueError(
                f"Access to private IP address at {hostname}: {ip} is not allowed."
            )

    return url


class Requests:
    """
    A wrapper around the requests library that validates URLs before making requests.
    """

    def __init__(
        self,
        trusted_origins: list[str] | None = None,
        raise_for_status: bool = True,
        extra_url_validator: Callable[[str], str] | None = None,
        extra_headers: dict[str, str] | None = None,
    ):
        self.trusted_origins = []
        for url in trusted_origins or []:
            hostname = urlparse(url).hostname
            if not hostname:
                raise ValueError(f"Invalid URL: Unable to determine hostname of {url}")
            self.trusted_origins.append(hostname)

        self.raise_for_status = raise_for_status
        self.extra_url_validator = extra_url_validator
        self.extra_headers = extra_headers

    def request(
        self, method, url, headers=None, allow_redirects=False, *args, **kwargs
    ) -> req.Response:
        if self.extra_headers is not None:
            headers = {**(headers or {}), **self.extra_headers}

        url = validate_url(url, self.trusted_origins)
        if self.extra_url_validator is not None:
            url = self.extra_url_validator(url)

        response = req.request(
            method,
            url,
            headers=headers,
            allow_redirects=allow_redirects,
            *args,
            **kwargs,
        )
        if self.raise_for_status:
            response.raise_for_status()

        return response

    def get(self, url, *args, **kwargs) -> req.Response:
        return self.request("GET", url, *args, **kwargs)

    def post(self, url, *args, **kwargs) -> req.Response:
        return self.request("POST", url, *args, **kwargs)

    def put(self, url, *args, **kwargs) -> req.Response:
        return self.request("PUT", url, *args, **kwargs)

    def delete(self, url, *args, **kwargs) -> req.Response:
        return self.request("DELETE", url, *args, **kwargs)

    def head(self, url, *args, **kwargs) -> req.Response:
        return self.request("HEAD", url, *args, **kwargs)

    def options(self, url, *args, **kwargs) -> req.Response:
        return self.request("OPTIONS", url, *args, **kwargs)

    def patch(self, url, *args, **kwargs) -> req.Response:
        return self.request("PATCH", url, *args, **kwargs)


requests = Requests(trusted_origins=Config().trust_endpoints_for_requests)
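A condensed demonstration of the SSRF guard this file provided, using only the standard library (the BLOCKED list is abbreviated here):

import ipaddress

BLOCKED = [ipaddress.ip_network("127.0.0.0/8"), ipaddress.ip_network("10.0.0.0/8")]

def is_ip_blocked(ip: str) -> bool:
    addr = ipaddress.ip_address(ip)
    return any(addr in net for net in BLOCKED)

print(is_ip_blocked("127.0.0.1"))      # True  -> request would be rejected
print(is_ip_blocked("93.184.216.34"))  # False -> public address passes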
@@ -11,7 +11,7 @@ from pydantic_settings import (
    SettingsConfigDict,
)

from backend.util.data import get_data_path
from backend.util.data import get_config_path, get_data_path, get_secrets_path

T = TypeVar("T", bound=BaseSettings)

@@ -92,6 +92,10 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
    # Add more configuration fields as needed

    model_config = SettingsConfigDict(
        json_file=[
            get_config_path() / "config.default.json",
            get_config_path() / "config.json",
        ],
        env_file=".env",
        extra="allow",
    )
@@ -157,11 +161,6 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
        description="Name of the event bus",
    )

    trust_endpoints_for_requests: List[str] = Field(
        default_factory=list,
        description="A whitelist of trusted internal endpoints for the backend to make requests to.",
    )

    backend_cors_allow_origins: List[str] = Field(default_factory=list)

    @field_validator("backend_cors_allow_origins")
@@ -238,7 +237,6 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    openai_api_key: str = Field(default="", description="OpenAI API key")
    anthropic_api_key: str = Field(default="", description="Anthropic API key")
    groq_api_key: str = Field(default="", description="Groq API key")
    open_router_api_key: str = Field(default="", description="Open Router API Key")

    reddit_client_id: str = Field(default="", description="Reddit client ID")
    reddit_client_secret: str = Field(default="", description="Reddit client secret")
@@ -267,12 +265,11 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
    replicate_api_key: str = Field(default="", description="Replicate API Key")
    unreal_speech_api_key: str = Field(default="", description="Unreal Speech API Key")
    ideogram_api_key: str = Field(default="", description="Ideogram API Key")
    jina_api_key: str = Field(default="", description="Jina API Key")
    unreal_speech_api_key: str = Field(default="", description="Unreal Speech API Key")

    # Add more secret fields as needed

    model_config = SettingsConfigDict(
        secrets_dir=get_secrets_path(),
        env_file=".env",
        env_file_encoding="utf-8",
        extra="allow",
@@ -299,3 +296,11 @@ class Settings(BaseModel):
        with open(config_path, "w") as f:
            json.dump(config_to_save, f, indent=2)
        self.config.clear_updates()

        # Save updated secrets to individual files
        secrets_dir = get_secrets_path()
        for key in self.secrets.updated_fields:
            secret_file = os.path.join(secrets_dir, key)
            with open(secret_file, "w") as f:
                f.write(str(getattr(self.secrets, key)))
        self.secrets.clear_updates()
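A sketch of the one-secret-per-file layout Settings.save() writes above (and which a secrets_dir-style loader can read back); the paths and secret names are illustrative:

import os
import tempfile

secrets_dir = tempfile.mkdtemp()
updated = {"openai_api_key": "sk-...", "groq_api_key": "gq-..."}

for name, value in updated.items():  # write: one file per secret
    with open(os.path.join(secrets_dir, name), "w") as f:
        f.write(value)

with open(os.path.join(secrets_dir, "openai_api_key")) as f:  # read back
    print(f.read())  # "sk-..."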
@@ -1,8 +1,8 @@
import json
from typing import Any, Type, TypeVar, cast, get_args, get_origin
from typing import Any, Type, TypeVar, get_args, get_origin


class ConversionError(ValueError):
class ConversionError(Exception):
    pass


@@ -102,7 +102,7 @@ def __convert_bool(value: Any) -> bool:
    return bool(value)


def _try_convert(value: Any, target_type: Type, raise_on_mismatch: bool) -> Any:
def convert(value: Any, target_type: Type):
    origin = get_origin(target_type)
    args = get_args(target_type)
    if origin is None:
@@ -133,8 +133,6 @@ def _try_convert(value: Any, target_type: Type, raise_on_mismatch: bool) -> Any:
            return {convert(v, args[0]) for v in value}
        else:
            return value
    elif raise_on_mismatch:
        raise TypeError(f"Value {value} is not of expected type {target_type}")
    else:
        # Need to convert value to the origin type
        if origin is list:
@@ -177,17 +175,3 @@ def _try_convert(value: Any, target_type: Type, raise_on_mismatch: bool) -> Any:
        return __convert_bool(value)
    else:
        return value


T = TypeVar("T")


def type_match(value: Any, target_type: Type[T]) -> T:
    return cast(T, _try_convert(value, target_type, raise_on_mismatch=True))


def convert(value: Any, target_type: Type[T]) -> T:
    try:
        return cast(T, _try_convert(value, target_type, raise_on_mismatch=False))
    except Exception as e:
        raise ConversionError(f"Failed to convert {value} to {target_type}") from e
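A toy illustration of the strict type_match vs. lenient convert split introduced above; this simplified _try_convert handles only plain types and is an illustration, not the module's real logic:

from typing import Any, Type, TypeVar, cast

T = TypeVar("T")

class ConversionError(ValueError):
    pass

def _try_convert(value: Any, target_type: Type, raise_on_mismatch: bool) -> Any:
    if isinstance(value, target_type):
        return value
    if raise_on_mismatch:
        raise TypeError(f"{value!r} is not {target_type}")
    return target_type(value)  # lenient path coerces

def type_match(value: Any, target_type: Type[T]) -> T:
    return cast(T, _try_convert(value, target_type, raise_on_mismatch=True))

def convert(value: Any, target_type: Type[T]) -> T:
    try:
        return cast(T, _try_convert(value, target_type, raise_on_mismatch=False))
    except Exception as e:
        raise ConversionError(f"Failed to convert {value!r}") from e

print(convert("42", int))   # 42 - coerced
print(type_match(42, int))  # 42 - already matches
# type_match("42", int) would raise TypeError: strict mode refuses to coerce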
5
autogpt_platform/backend/config.default.json
Normal file
@@ -0,0 +1,5 @@
{
  "num_graph_workers": 10,
  "num_node_workers": 5,
  "num_user_credits_refill": 1500
}
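The json_file list in Config's model_config implies default-then-override layering, with config.json winning over config.default.json. A plain-json sketch of that merge, under the assumption that later files take precedence:

import json

defaults = json.loads('{"num_graph_workers": 10, "num_node_workers": 5}')
overrides = json.loads('{"num_node_workers": 12}')  # stand-in for config.json

effective = {**defaults, **overrides}  # later layer wins on key collisions
print(effective)  # {'num_graph_workers': 10, 'num_node_workers': 12}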
314
autogpt_platform/backend/graph_templates/Financial Analyst.json
Normal file
@@ -0,0 +1,314 @@
{
  "id": "64c67d2c-f8c0-4a7d-b060-c50739a612cc",
  "name": "Financial Analysis Agent: Your Personalized Financial Insights Tool",
  "links": [
    {
      "id": "4aaee596-8ee9-40ab-85e0-9b3e7bddcd02",
      "sink_id": "0a5c749c-06ae-4c1c-a0b5-82c3e0f7b3b1",
      "is_static": false,
      "sink_name": "prompt",
      "source_id": "44492504-939e-4e07-bb8d-97ca5c9e4ef8",
      "source_name": "output"
    },
    {
      "id": "67c96e5b-97ec-4a9e-86ac-2b0b97d1e749",
      "sink_id": "44492504-939e-4e07-bb8d-97ca5c9e4ef8",
      "is_static": false,
      "sink_name": "values",
      "source_id": "9e67d259-99c5-4309-ba6e-f69da44fe223",
      "source_name": "updated_dictionary"
    },
    {
      "id": "03ed5a41-3b91-46f5-9e75-9fba055b2d6a",
      "sink_id": "44adde8c-9355-4608-b7fc-fbbeee7b2e15",
      "is_static": false,
      "sink_name": "dictionary",
      "source_id": "bbca8c10-6070-4c1b-ac05-0a6a96b16364",
      "source_name": "updated_dictionary"
    },
    {
      "id": "0a8a1d66-f463-48f4-8a6c-5703aba10e5e",
      "sink_id": "9e67d259-99c5-4309-ba6e-f69da44fe223",
      "is_static": false,
      "sink_name": "value",
      "source_id": "2977c6aa-f36f-4d19-b8a9-a0811549c1b2",
      "source_name": "result"
    },
    {
      "id": "23425de8-2923-4abd-812c-739d5544be00",
      "sink_id": "d98e97a1-36d0-4a6f-9b46-add0e6a1c9c5",
      "is_static": false,
      "sink_name": "value",
      "source_id": "0820fe90-1e94-4c61-8727-1ea50ab4cd2d",
      "source_name": "result"
    },
    {
      "id": "d6e82048-ec97-435b-90fa-7d8716a2fa8d",
      "sink_id": "d98e97a1-36d0-4a6f-9b46-add0e6a1c9c5",
      "is_static": false,
      "sink_name": "dictionary",
      "source_id": "d798fec4-609f-4bbf-aca5-63a493d5cfa8",
      "source_name": "updated_dictionary"
    },
    {
      "id": "e075a983-6b3c-4dc0-80c1-7cc704870aec",
      "sink_id": "44adde8c-9355-4608-b7fc-fbbeee7b2e15",
      "is_static": false,
      "sink_name": "value",
      "source_id": "aaf1d08f-d9ea-4fcf-bf29-82f5971f3899",
      "source_name": "result"
    },
    {
      "id": "c3ef5e32-aa8b-4bca-93f7-1cc1dd20eb6e",
      "sink_id": "d798fec4-609f-4bbf-aca5-63a493d5cfa8",
      "is_static": false,
      "sink_name": "value",
      "source_id": "729d37e9-51d6-49c6-9c5a-747cd1ccd698",
      "source_name": "result"
    },
    {
      "id": "2f3bb8ca-6e3f-4c4d-92bc-68b56fac3324",
      "sink_id": "a468d8d4-cb5e-4735-bdc4-bbcae796d47d",
      "is_static": false,
      "sink_name": "value",
      "source_id": "44492504-939e-4e07-bb8d-97ca5c9e4ef8",
      "source_name": "output"
    },
    {
      "id": "d4bcffa3-e562-49f9-82c1-6fb8c6d5e29c",
      "sink_id": "bbca8c10-6070-4c1b-ac05-0a6a96b16364",
      "is_static": false,
      "sink_name": "value",
      "source_id": "7e7c227c-6243-496d-8d8b-9f00ee4b4b92",
      "source_name": "result"
    },
    {
      "id": "8023edfc-f4f6-4e28-8a30-e23156990b0d",
      "sink_id": "d798fec4-609f-4bbf-aca5-63a493d5cfa8",
      "is_static": false,
      "sink_name": "dictionary",
      "source_id": "44adde8c-9355-4608-b7fc-fbbeee7b2e15",
      "source_name": "updated_dictionary"
    },
    {
      "id": "6e632dd8-bcd3-41c5-be61-9aed46e67908",
      "sink_id": "9e67d259-99c5-4309-ba6e-f69da44fe223",
      "is_static": false,
      "sink_name": "dictionary",
      "source_id": "d98e97a1-36d0-4a6f-9b46-add0e6a1c9c5",
      "source_name": "updated_dictionary"
    },
    {
      "id": "49ba9ecf-cd7b-49ec-850e-b7376e31d5ba",
      "sink_id": "4dfc13d8-c855-47c7-b608-bfd61998b849",
      "is_static": false,
      "sink_name": "value",
      "source_id": "0a5c749c-06ae-4c1c-a0b5-82c3e0f7b3b1",
      "source_name": "response"
    }
  ],
  "nodes": [
    {
      "id": "4dfc13d8-c855-47c7-b608-bfd61998b849",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "metadata": {
        "position": {
          "x": 3533.995840713809,
          "y": 949.1258855834958
        }
      },
      "input_default": {
        "name": "Advice"
      }
    },
    {
      "id": "0a5c749c-06ae-4c1c-a0b5-82c3e0f7b3b1",
      "block_id": "1f292d4a-41a4-4977-9684-7c8d560b9f91",
      "metadata": {
        "position": {
          "x": 2415.719369966296,
          "y": 1348.2399127545737
        }
      },
      "input_default": {
        "model": "gpt-4o",
        "api_key": "...",
        "sys_prompt": "Instructions for Financial Analyst: Scope of Work: - Analyze the client's current financial situation based on the provided briefing. - Develop a comprehensive financial plan to help the client achieve their stated financial goal. - Provide recommendations on investment strategies utilizing the client's available capital. - Assess the client's income and expenses to identify opportunities for increasing savings or reducing costs. Considerations: - Take into account the economic conditions and financial regulations specific to {{ country }}. - Consider the client's risk tolerance and investment preferences when proposing strategies. - Ensure that any recommendations are compliant with legal and ethical standards within the financial industry. - Factor in any potential tax implications related to investment or financial planning. Expected Outputs: - A detailed financial plan outlining steps to achieve the client's goal. - An analysis report of the client's current financial health, including strengths and areas for improvement. - Specific recommendations for investment opportunities suitable for the client's capital and goals. - Actionable advice on managing income and expenses to enhance savings potential. Please ensure that all advice is tailored to the client's unique situation and aligns with their financial objectives.",
        "prompt_values": {}
      }
    },
    {
      "id": "9e67d259-99c5-4309-ba6e-f69da44fe223",
      "block_id": "31d1064e-7446-4693-a7d4-65e5ca1180d1",
      "metadata": {
        "position": {
          "x": 899.9313906355574,
          "y": 1457.9364531936767
        }
      },
      "input_default": {
        "key": "capital"
      }
    },
    {
      "id": "44adde8c-9355-4608-b7fc-fbbeee7b2e15",
      "block_id": "31d1064e-7446-4693-a7d4-65e5ca1180d1",
      "metadata": {
        "position": {
          "x": 877.350070306956,
          "y": 93.79000505901234
        }
      },
      "input_default": {
        "key": "income"
      }
    },
    {
      "id": "44492504-939e-4e07-bb8d-97ca5c9e4ef8",
      "block_id": "db7d8f02-2f44-4c55-ab7a-eae0941f0c30",
      "metadata": {
        "position": {
          "x": 1695.5805808682992,
          "y": 918.2363585019502
        }
      },
      "input_default": {
        "format": "Financial Analyst Briefing:\n\nClient Country: {{ country }}\n\nClient's Financial Goal: {{ goal }}\n\nCurrent Capital Available for Investment: {{ capital }}\n\nIncome and Expenses Summary:\n- Income: {{ income }}\n- Expenses: {{ expenses }}\n\nAdvisor Notes:\nThe client aims to achieve the above financial goal with their current capital. Please evaluate their income and expense structure to recommend an optimal strategy for reaching their goal. Focus on ways to maximize the client's investment potential while considering their current financial standing. \n\nEnsure that their expenses are manageable and provide actionable steps to help them achieve better savings or investment returns to align with their goal.\n\nGive clear advice on specifically what their investment capital could be invested in to help them achieve their goals. Consider stocks, rental properties, savings accounts, gold, crypto, pension funds and every other possible investment."
      }
    },
    {
      "id": "aaf1d08f-d9ea-4fcf-bf29-82f5971f3899",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "metadata": {
        "position": {
          "x": 171.2942539715716,
          "y": -30.512927444086387
        }
      },
      "input_default": {
        "name": "Income",
        "value": "10000",
        "description": "What is your annual income in your local currency?"
      }
    },
    {
      "id": "0820fe90-1e94-4c61-8727-1ea50ab4cd2d",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "metadata": {
        "position": {
          "x": 175.43249130331668,
          "y": 790.7562658250306
        }
      },
      "input_default": {
        "name": "Goal",
        "value": "I want to maximise the passive income I can gain from my investment capital",
        "description": "What are your financial goals?"
      }
    },
    {
      "id": "729d37e9-51d6-49c6-9c5a-747cd1ccd698",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "metadata": {
        "position": {
          "x": 180.77722789594975,
          "y": 384.2712961685803
        }
      },
      "input_default": {
        "name": "Expenses",
        "value": "6000",
        "description": "What are your current fixed expenses?"
      }
    },
    {
      "id": "d98e97a1-36d0-4a6f-9b46-add0e6a1c9c5",
      "block_id": "31d1064e-7446-4693-a7d4-65e5ca1180d1",
      "metadata": {
        "position": {
          "x": 884.1360010186997,
          "y": 929.5088732842511
        }
      },
      "input_default": {
        "key": "goal"
      }
    },
    {
      "id": "7e7c227c-6243-496d-8d8b-9f00ee4b4b92",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "metadata": {
        "position": {
          "x": 168.72576263452936,
          "y": -436.53322204035607
        }
      },
      "input_default": {
        "name": "Country",
        "value": "USA",
        "description": "What country do you live in?"
      }
    },
    {
      "id": "2977c6aa-f36f-4d19-b8a9-a0811549c1b2",
      "block_id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
      "metadata": {
        "position": {
          "x": 174.4312987379065,
          "y": 1209.83336705979
        }
      },
      "input_default": {
        "name": "Investment Capital",
        "value": "10000",
        "description": "How much money do you have to invest?"
      }
    },
    {
      "id": "bbca8c10-6070-4c1b-ac05-0a6a96b16364",
      "block_id": "31d1064e-7446-4693-a7d4-65e5ca1180d1",
      "metadata": {
        "position": {
          "x": 901.5508526475596,
          "y": -263.1782978759104
        }
      },
      "input_default": {
        "key": "country"
      }
    },
    {
      "id": "d798fec4-609f-4bbf-aca5-63a493d5cfa8",
      "block_id": "31d1064e-7446-4693-a7d4-65e5ca1180d1",
      "metadata": {
        "position": {
          "x": 878.495446124934,
          "y": 500.2646877565711
        }
      },
      "input_default": {
        "key": "expenses"
      }
    },
    {
      "id": "a468d8d4-cb5e-4735-bdc4-bbcae796d47d",
      "block_id": "363ae599-353e-4804-937e-b2ee3cef3da4",
      "metadata": {
        "position": {
          "x": 2465.9560523193677,
          "y": 448.60897681592746
        }
      },
      "input_default": {
        "name": "Your Situation",
        "description": "Explanation of your current situation"
      }
    }
  ],
  "version": 10,
  "is_active": true,
  "subgraphs": {},
  "executions": null,
  "description": "**Disclaimer: The information provided by this agent is for informational purposes only and should not be considered financial advice. Please consult a licensed financial professional for personalized advice.**\n\nThe Financial Guidance Agent is a comprehensive digital solution designed to provide personalized financial insights to users. This agent specializes in analyzing your financial data—including income, expenses, and available capital—to offer tailored suggestions that help you work towards your financial objectives.\n\n**Value Delivered:**\n1. **Personalized Financial Insights**: The agent provides customized insights based on your individual needs and circumstances. By carefully analyzing your financial situation, it offers actionable suggestions to help you optimize your income, manage expenses, and make informed financial decisions.\n\n2. **Comprehensive Analysis**: With an in-depth review of income and expenditure, the agent highlights areas for potential improvement, helping users make informed decisions about their financial situation. It aims to provide efficient use of capital to support your financial objectives.\n\n3. **Investment Considerations**: The agent identifies potential investment opportunities that align with your risk tolerance and financial objectives. However, any final decisions should be made in consultation with a qualified financial advisor.\n\n4. **Goal-Oriented Approach**: Focused on your stated financial goals, the agent provides a clear outline of strategies and tactics that could help you work towards achieving financial success.",
  "is_template": false
}
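A quick integrity check one might run on a graph template like the one above: every link's source_id and sink_id should name an existing node. The file path is illustrative:

import json

with open("autogpt_platform/backend/graph_templates/Financial Analyst.json") as f:
    graph = json.load(f)

node_ids = {node["id"] for node in graph["nodes"]}
for link in graph["links"]:
    # A dangling endpoint means the template would fail to wire up.
    assert link["source_id"] in node_ids, link["id"]
    assert link["sink_id"] in node_ids, link["id"]
print(f"{len(graph['nodes'])} nodes, {len(graph['links'])} links - all wired")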
@@ -1,44 +0,0 @@
-- CreateEnum
CREATE TYPE "APIKeyPermission" AS ENUM ('EXECUTE_GRAPH', 'READ_GRAPH', 'EXECUTE_BLOCK', 'READ_BLOCK');

-- CreateEnum
CREATE TYPE "APIKeyStatus" AS ENUM ('ACTIVE', 'REVOKED', 'SUSPENDED');

-- CreateTable
CREATE TABLE "APIKey" (
    "id" TEXT NOT NULL,
    "name" TEXT NOT NULL,
    "prefix" TEXT NOT NULL,
    "postfix" TEXT NOT NULL,
    "key" TEXT NOT NULL,
    "status" "APIKeyStatus" NOT NULL DEFAULT 'ACTIVE',
    "permissions" "APIKeyPermission"[],
    "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "lastUsedAt" TIMESTAMP(3),
    "revokedAt" TIMESTAMP(3),
    "description" TEXT,
    "userId" TEXT NOT NULL,

    CONSTRAINT "APIKey_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "APIKey_key_key" ON "APIKey"("key");

-- CreateIndex
CREATE INDEX "APIKey_key_idx" ON "APIKey"("key");

-- CreateIndex
CREATE INDEX "APIKey_prefix_idx" ON "APIKey"("prefix");

-- CreateIndex
CREATE INDEX "APIKey_userId_idx" ON "APIKey"("userId");

-- CreateIndex
CREATE INDEX "APIKey_status_idx" ON "APIKey"("status");

-- CreateIndex
CREATE INDEX "APIKey_userId_status_idx" ON "APIKey"("userId", "status");

-- AddForeignKey
ALTER TABLE "APIKey" ADD CONSTRAINT "APIKey_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
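A hedged sketch of how the prefix/postfix/key columns above could be populated — display fragments plus a hash, never the raw key. This is an illustration, not the platform's actual generate_api_key implementation:

import hashlib
import secrets

raw = "agpt_" + secrets.token_urlsafe(32)  # plaintext shown to the user once
row = {
    "prefix": raw[:8],    # short fragment for key listings
    "postfix": raw[-8:],
    "key": hashlib.sha256(raw.encode()).hexdigest(),  # what gets persisted
}
print(row["prefix"], "...", row["postfix"])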
@@ -1,14 +0,0 @@
/*
  Warnings:

  - You are about to drop the `AgentGraphExecutionSchedule` table. If the table is not empty, all the data it contains will be lost.

*/
-- DropForeignKey
ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey";

-- DropForeignKey
ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey";

-- DropTable
DROP TABLE "AgentGraphExecutionSchedule";
@@ -1,13 +0,0 @@
-- Correct credentials.provider field on all nodes with 'llm' provider credentials
UPDATE "AgentNode"
SET "constantInput" = JSONB_SET(
    "constantInput"::jsonb,
    '{credentials,provider}',
    CASE
        WHEN "constantInput"::jsonb->'credentials'->>'id' = '53c25cb8-e3ee-465c-a4d1-e75a4c899c2a' THEN '"openai"'::jsonb
        WHEN "constantInput"::jsonb->'credentials'->>'id' = '24e5d942-d9e3-4798-8151-90143ee55629' THEN '"anthropic"'::jsonb
        WHEN "constantInput"::jsonb->'credentials'->>'id' = '4ec22295-8f97-4dd1-b42b-2c6957a02545' THEN '"groq"'::jsonb
        ELSE "constantInput"::jsonb->'credentials'->'provider'
    END
)::text
WHERE "constantInput"::jsonb->'credentials'->>'provider' = 'llm';
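The same provider fix-up expressed in plain Python for readability; the id-to-provider map mirrors the CASE arms of the migration above:

ID_TO_PROVIDER = {
    "53c25cb8-e3ee-465c-a4d1-e75a4c899c2a": "openai",
    "24e5d942-d9e3-4798-8151-90143ee55629": "anthropic",
    "4ec22295-8f97-4dd1-b42b-2c6957a02545": "groq",
}

def fix_provider(constant_input: dict) -> dict:
    creds = constant_input.get("credentials", {})
    if creds.get("provider") == "llm":
        # Fall back to the existing value when the id is unknown.
        creds["provider"] = ID_TO_PROVIDER.get(creds.get("id"), creds["provider"])
    return constant_input

node = {"credentials": {"id": "4ec22295-8f97-4dd1-b42b-2c6957a02545", "provider": "llm"}}
print(fix_provider(node))  # provider becomes "groq"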
358
autogpt_platform/backend/poetry.lock
generated
@@ -1079,105 +1079,19 @@ requests = ">=2.20.0,<3.0"

[[package]]
name = "gotrue"
version = "2.10.0"
version = "2.9.0"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = "<4.0,>=3.9"
python-versions = "<4.0,>=3.8"
files = [
    {file = "gotrue-2.10.0-py3-none-any.whl", hash = "sha256:768e58207488e5184ffbdc4351b7280d913daf97962f4e9f2cca05c80004b042"},
    {file = "gotrue-2.10.0.tar.gz", hash = "sha256:4edf4c251da3535f2b044e23deba221e848ca1210c17d0c7a9b19f79a1e3f3c0"},
    {file = "gotrue-2.9.0-py3-none-any.whl", hash = "sha256:9a6448479329771752cb93be65bc95f06f17d9262e814a95d03b218cf5dce87a"},
    {file = "gotrue-2.9.0.tar.gz", hash = "sha256:c50e75bd01b82a388eed6a921a1c373a7157fd405df2221a8532193a39df4159"},
]

[package.dependencies]
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
pydantic = ">=1.10,<3"

[[package]]
name = "greenlet"
version = "3.1.1"
description = "Lightweight in-process concurrent programming"
optional = false
python-versions = ">=3.7"
files = [
    {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"},
    {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"},
    {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"},
    {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"},
    {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"},
    {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"},
    {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"},
    {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"},
    {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"},
    {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"},
    {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"},
    {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"},
    {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"},
    {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"},
    {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"},
    {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"},
    {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"},
    {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"},
    {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"},
    {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"},
    {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"},
    {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"},
    {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"},
    {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"},
    {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"},
    {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"},
    {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"},
    {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"},
    {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"},
    {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"},
    {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"},
    {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"},
    {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"},
    {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"},
    {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"},
    {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"},
    {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"},
    {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"},
    {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"},
    {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"},
    {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"},
    {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"},
    {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"},
    {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"},
    {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"},
    {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"},
    {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"},
    {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"},
    {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"},
    {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"},
    {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"},
    {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"},
    {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"},
    {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"},
    {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"},
    {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"},
    {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"},
    {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"},
    {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"},
    {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"},
    {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"},
    {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"},
    {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"},
    {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"},
    {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"},
    {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"},
    {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"},
    {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"},
    {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"},
    {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"},
    {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"},
    {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"},
    {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"},
]

[package.extras]
docs = ["Sphinx", "furo"]
test = ["objgraph", "psutil"]

[[package]]
name = "groq"
version = "0.11.0"
@@ -1882,13 +1796,13 @@ httpx = ">=0.27.0,<0.28.0"

[[package]]
name = "openai"
version = "1.54.3"
version = "1.54.1"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
    {file = "openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"},
    {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"},
    {file = "openai-1.54.1-py3-none-any.whl", hash = "sha256:3cb49ccb6bfdc724ad01cc397d323ef8314fc7d45e19e9de2afdd6484a533324"},
    {file = "openai-1.54.1.tar.gz", hash = "sha256:5b832bf82002ba8c4f6e5e25c1c0f5d468c22f043711544c716eaffdb30dd6f1"},
]

[package.dependencies]
@@ -2051,13 +1965,13 @@ testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "poethepoet"
version = "0.30.0"
version = "0.29.0"
description = "A task runner that works well with poetry."
optional = false
python-versions = ">=3.8"
files = [
    {file = "poethepoet-0.30.0-py3-none-any.whl", hash = "sha256:bf875741407a98da9e96f2f2d0b2c4c34f56d89939a7f53a4b6b3a64b546ec4e"},
    {file = "poethepoet-0.30.0.tar.gz", hash = "sha256:9f7ccda2d6525616ce989ca8ef973739fd668f50bef0b9d3631421d504d9ae4a"},
    {file = "poethepoet-0.29.0-py3-none-any.whl", hash = "sha256:f8dfe55006dcfb5cf31bcb1904e1262e1c642a4502fee3688cbf1bddfe5c7601"},
    {file = "poethepoet-0.29.0.tar.gz", hash = "sha256:676842302f2304a86b31ac56398dd672fae8471128d2086896393384dbafc095"},
]

[package.dependencies]
@@ -2070,13 +1984,13 @@ poetry-plugin = ["poetry (>=1.0,<2.0)"]

[[package]]
name = "postgrest"
version = "0.18.0"
version = "0.17.2"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = "<4.0,>=3.9"
files = [
    {file = "postgrest-0.18.0-py3-none-any.whl", hash = "sha256:200baad0d23fee986b3a0ffd3e07bfe0cdd40e09760f11e8e13a6c0c2376d5fa"},
    {file = "postgrest-0.18.0.tar.gz", hash = "sha256:29c1a94801a17eb9ad590189993fe5a7a6d8c1bfc11a3c9d0ce7ba146454ebb3"},
    {file = "postgrest-0.17.2-py3-none-any.whl", hash = "sha256:f7c4f448e5a5e2d4c1dcf192edae9d1007c4261e9a6fb5116783a0046846ece2"},
    {file = "postgrest-0.17.2.tar.gz", hash = "sha256:445cd4e4a191e279492549df0c4e827d32f9d01d0852599bb8a6efb0f07fcf78"},
]

[package.dependencies]
@@ -2221,82 +2135,6 @@ files = [
dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"]
test = ["pytest", "pytest-xdist", "setuptools"]

[[package]]
name = "psycopg2-binary"
version = "2.9.10"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = false
python-versions = ">=3.8"
files = [
    {file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:3e9c76f0ac6f92ecfc79516a8034a544926430f7b080ec5a0537bca389ee0906"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad26b467a405c798aaa1458ba09d7e2b6e5f96b1ce0ac15d82fd9f95dc38a92"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:270934a475a0e4b6925b5f804e3809dd5f90f8613621d062848dd82f9cd62007"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48b338f08d93e7be4ab2b5f1dbe69dc5e9ef07170fe1f86514422076d9c010d0"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4152f8f76d2023aac16285576a9ecd2b11a9895373a1f10fd9db54b3ff06b4"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:32581b3020c72d7a421009ee1c6bf4a131ef5f0a968fab2e2de0c9d2bb4577f1"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2ce3e21dc3437b1d960521eca599d57408a695a0d3c26797ea0f72e834c7ffe5"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e984839e75e0b60cfe75e351db53d6db750b00de45644c5d1f7ee5d1f34a1ce5"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c4745a90b78e51d9ba06e2088a2fe0c693ae19cc8cb051ccda44e8df8a6eb53"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-win32.whl", hash = "sha256:e5720a5d25e3b99cd0dc5c8a440570469ff82659bb09431c1439b92caf184d3b"},
    {file = "psycopg2_binary-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:3c18f74eb4386bf35e92ab2354a12c17e5eb4d9798e4c0ad3a00783eae7cd9f1"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:04392983d0bb89a8717772a193cfaac58871321e3ec69514e1c4e0d4957b5aff"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1a6784f0ce3fec4edc64e985865c17778514325074adf5ad8f80636cd029ef7c"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f86c56eeb91dc3135b3fd8a95dc7ae14c538a2f3ad77a19645cf55bab1799c"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b3d2491d4d78b6b14f76881905c7a8a8abcf974aad4a8a0b065273a0ed7a2cb"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2286791ececda3a723d1910441c793be44625d86d1a4e79942751197f4d30341"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:512d29bb12608891e349af6a0cccedce51677725a921c07dba6342beaf576f9a"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5a507320c58903967ef7384355a4da7ff3f28132d679aeb23572753cbf2ec10b"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d4fa1079cab9018f4d0bd2db307beaa612b0d13ba73b5c6304b9fe2fb441ff7"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:851485a42dbb0bdc1edcdabdb8557c09c9655dfa2ca0460ff210522e073e319e"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:35958ec9e46432d9076286dda67942ed6d968b9c3a6a2fd62b48939d1d78bf68"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-win32.whl", hash = "sha256:ecced182e935529727401b24d76634a357c71c9275b356efafd8a2a91ec07392"},
    {file = "psycopg2_binary-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:ee0e8c683a7ff25d23b55b11161c2663d4b099770f6085ff0a20d4505778d6b4"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64"},
    {file = "psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:26540d4a9a4e2b096f1ff9cce51253d0504dca5a85872c7f7be23be5a53eb18d"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e217ce4d37667df0bc1c397fdcd8de5e81018ef305aed9415c3b093faaeb10fb"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:245159e7ab20a71d989da00f280ca57da7641fa2cdcf71749c193cea540a74f7"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c4ded1a24b20021ebe677b7b08ad10bf09aac197d6943bfe6fec70ac4e4690d"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3abb691ff9e57d4a93355f60d4f4c1dd2d68326c968e7db17ea96df3c023ef73"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8608c078134f0b3cbd9f89b34bd60a943b23fd33cc5f065e8d5f840061bd0673"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:230eeae2d71594103cd5b93fd29d1ace6420d0b86f4778739cb1a5a32f607d1f"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:056470c3dc57904bbf63d6f534988bafc4e970ffd50f6271fc4ee7daad9498a5"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aa0e31fa4bb82578f3a6c74a73c273367727de397a7a0f07bd83cbea696baa"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8de718c0e1c4b982a54b41779667242bc630b2197948405b7bd8ce16bcecac92"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5c370b1e4975df846b0277b4deba86419ca77dbc25047f535b0bb03d1a544d44"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ffe8ed017e4ed70f68b7b371d84b7d4a790368db9203dfc2d222febd3a9c8863"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8aecc5e80c63f7459a1a2ab2c64df952051df196294d9f739933a9f6687e86b3"},
    {file = "psycopg2_binary-2.9.10-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:7a813c8bdbaaaab1f078014b9b0b13f5de757e2b5d9be6403639b298a04d218b"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00924255d7fc916ef66e4bf22f354a940c67179ad3fd7067d7a0a9c84d2fbfc"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7559bce4b505762d737172556a4e6ea8a9998ecac1e39b5233465093e8cee697"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8b58f0a96e7a1e341fc894f62c1177a7c83febebb5ff9123b579418fdc8a481"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b269105e59ac96aba877c1707c600ae55711d9dcd3fc4b5012e4af68e30c648"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:79625966e176dc97ddabc142351e0409e28acf4660b88d1cf6adb876d20c490d"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8aabf1c1a04584c168984ac678a668094d831f152859d06e055288fa515e4d30"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:19721ac03892001ee8fdd11507e6a2e01f4e37014def96379411ca99d78aeb2c"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7f5d859928e635fa3ce3477704acee0f667b3a3d3e4bb109f2b18d4005f38287"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-win32.whl", hash = "sha256:3216ccf953b3f267691c90c6fe742e45d890d8272326b4a8b20850a03d05b7b8"},
|
||||
{file = "psycopg2_binary-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:30e34c4e97964805f715206c7b789d54a78b70f3ff19fbe590104b71c45600e5"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pyasn1"
|
||||
version = "0.6.1"
|
||||
@@ -2532,13 +2370,13 @@ diagrams = ["jinja2", "railroad-diagrams"]

[[package]]
name = "pyright"
version = "1.1.389"
version = "1.1.387"
description = "Command line wrapper for pyright"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyright-1.1.389-py3-none-any.whl", hash = "sha256:41e9620bba9254406dc1f621a88ceab5a88af4c826feb4f614d95691ed243a60"},
{file = "pyright-1.1.389.tar.gz", hash = "sha256:716bf8cc174ab8b4dcf6828c3298cac05c5ed775dda9910106a5dcfe4c7fe220"},
{file = "pyright-1.1.387-py3-none-any.whl", hash = "sha256:6a1f495a261a72e12ad17e20d1ae3df4511223c773b19407cfa006229b1b08a5"},
{file = "pyright-1.1.387.tar.gz", hash = "sha256:577de60224f7fe36505d5b181231e3a395d427b7873be0bbcaa962a29ea93a60"},
]

[package.dependencies]
@@ -2954,29 +2792,29 @@ pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.7.4"
version = "0.7.2"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"},
{file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"},
{file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"},
{file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"},
{file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"},
{file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"},
{file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"},
{file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"},
{file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"},
{file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"},
{file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"},
{file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"},
{file = "ruff-0.7.2-py3-none-linux_armv6l.whl", hash = "sha256:b73f873b5f52092e63ed540adefc3c36f1f803790ecf2590e1df8bf0a9f72cb8"},
{file = "ruff-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5b813ef26db1015953daf476202585512afd6a6862a02cde63f3bafb53d0b2d4"},
{file = "ruff-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:853277dbd9675810c6826dad7a428d52a11760744508340e66bf46f8be9701d9"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21aae53ab1490a52bf4e3bf520c10ce120987b047c494cacf4edad0ba0888da2"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc7e0fc6e0cb3168443eeadb6445285abaae75142ee22b2b72c27d790ab60ba"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd77877a4e43b3a98e5ef4715ba3862105e299af0c48942cc6d51ba3d97dc859"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e00163fb897d35523c70d71a46fbaa43bf7bf9af0f4534c53ea5b96b2e03397b"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3c54b538633482dc342e9b634d91168fe8cc56b30a4b4f99287f4e339103e88"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b792468e9804a204be221b14257566669d1db5c00d6bb335996e5cd7004ba80"},
{file = "ruff-0.7.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dba53ed84ac19ae4bfb4ea4bf0172550a2285fa27fbb13e3746f04c80f7fa088"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b19fafe261bf741bca2764c14cbb4ee1819b67adb63ebc2db6401dcd652e3748"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:28bd8220f4d8f79d590db9e2f6a0674f75ddbc3847277dd44ac1f8d30684b828"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9fd67094e77efbea932e62b5d2483006154794040abb3a5072e659096415ae1e"},
{file = "ruff-0.7.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:576305393998b7bd6c46018f8104ea3a9cb3fa7908c21d8580e3274a3b04b691"},
{file = "ruff-0.7.2-py3-none-win32.whl", hash = "sha256:fa993cfc9f0ff11187e82de874dfc3611df80852540331bc85c75809c93253a8"},
{file = "ruff-0.7.2-py3-none-win_amd64.whl", hash = "sha256:dd8800cbe0254e06b8fec585e97554047fb82c894973f7ff18558eee33d1cb88"},
{file = "ruff-0.7.2-py3-none-win_arm64.whl", hash = "sha256:bb8368cd45bba3f57bb29cbb8d64b4a33f8415d0149d2655c5c8539452ce7760"},
{file = "ruff-0.7.2.tar.gz", hash = "sha256:2b14e77293380e475b4e3a7a368e14549288ed2931fce259a6f99978669e844f"},
]

[[package]]
@@ -3076,101 +2914,6 @@ files = [
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]

[[package]]
name = "sqlalchemy"
version = "2.0.36"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
{file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"},
{file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"},
{file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"},
{file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"},
{file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"},
{file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"},
{file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"},
{file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"},
{file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"},
{file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"},
]

[package.dependencies]
greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"}
typing-extensions = ">=4.6.0"

[package.extras]
aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"]
aioodbc = ["aioodbc", "greenlet (!=0.4.17)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"]
asyncio = ["greenlet (!=0.4.17)"]
asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"]
mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"]
mssql = ["pyodbc"]
mssql-pymssql = ["pymssql"]
mssql-pyodbc = ["pyodbc"]
mypy = ["mypy (>=0.910)"]
mysql = ["mysqlclient (>=1.4.0)"]
mysql-connector = ["mysql-connector-python"]
oracle = ["cx_oracle (>=8)"]
oracle-oracledb = ["oracledb (>=1.0.1)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
postgresql-pg8000 = ["pg8000 (>=1.29.1)"]
postgresql-psycopg = ["psycopg (>=3.0.7)"]
postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3_binary"]

[[package]]
name = "starlette"
version = "0.41.2"
@@ -3190,18 +2933,19 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7

[[package]]
name = "storage3"
version = "0.9.0"
version = "0.8.2"
description = "Supabase Storage client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "storage3-0.9.0-py3-none-any.whl", hash = "sha256:8b2fb91f0c61583a2f4eac74a8bae67e00d41ff38095c8a6cd3f2ce5e0ab76e7"},
{file = "storage3-0.9.0.tar.gz", hash = "sha256:e16697f60894c94e1d9df0d2e4af783c1b3f7dd08c9013d61978825c624188c4"},
{file = "storage3-0.8.2-py3-none-any.whl", hash = "sha256:f2e995b18c77a2a9265d1a33047d43e4d6abb11eb3ca5067959f68281c305de3"},
{file = "storage3-0.8.2.tar.gz", hash = "sha256:db05d3fe8fb73bd30c814c4c4749664f37a5dfc78b629e8c058ef558c2b89f5a"},
]

[package.dependencies]
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
python-dateutil = ">=2.8.2,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"

[[package]]
name = "strenum"
@@ -3221,32 +2965,32 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]

[[package]]
name = "supabase"
version = "2.10.0"
version = "2.9.1"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "supabase-2.10.0-py3-none-any.whl", hash = "sha256:183fb23c04528593f8f81c24ceb8178f3a56bff40fec7ed873b6c55ebc2e420a"},
{file = "supabase-2.10.0.tar.gz", hash = "sha256:9ac095f8947bf60780e67c0edcbab53e2db3f6f3f022329397b093500bf2607c"},
{file = "supabase-2.9.1-py3-none-any.whl", hash = "sha256:a96f857a465712cb551679c1df66ba772c834f861756ce4aa2aa4cb703f6aeb7"},
{file = "supabase-2.9.1.tar.gz", hash = "sha256:51fce39c9eb50573126dabb342541ec5e1f13e7476938768f4b0ccfdb8c522cd"},
]

[package.dependencies]
gotrue = ">=2.10.0,<3.0.0"
gotrue = ">=2.9.0,<3.0.0"
httpx = ">=0.26,<0.28"
postgrest = ">=0.18,<0.19"
postgrest = ">=0.17.0,<0.18.0"
realtime = ">=2.0.0,<3.0.0"
storage3 = ">=0.9.0,<0.10.0"
supafunc = ">=0.7.0,<0.8.0"
storage3 = ">=0.8.0,<0.9.0"
supafunc = ">=0.6.0,<0.7.0"

[[package]]
name = "supafunc"
version = "0.7.0"
version = "0.6.2"
description = "Library for Supabase Functions"
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "supafunc-0.7.0-py3-none-any.whl", hash = "sha256:4160260dc02bdd906be1e2ffd7cb3ae8b74ae437c892bb475352b6a99d9ff8eb"},
{file = "supafunc-0.7.0.tar.gz", hash = "sha256:5b1c415fba1395740b2b4eedd1d786384bd58b98f6333a11ba7889820a48b6a7"},
{file = "supafunc-0.6.2-py3-none-any.whl", hash = "sha256:101b30616b0a1ce8cf938eca1df362fa4cf1deacb0271f53ebbd674190fb0da5"},
{file = "supafunc-0.6.2.tar.gz", hash = "sha256:c7dfa20db7182f7fe4ae436e94e05c06cd7ed98d697fed75d68c7b9792822adc"},
]

[package.dependencies]
@@ -3933,4 +3677,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "e33b0da31247495e8704fee5224f7b0cf53859cd0ce8bafb39889548a649f5fb"
content-hash = "b761f200e8ad7560321fca7bbefbe79377740952ace5dcbecf0371bb8aa16df1"

@@ -1,6 +1,6 @@
[tool.poetry]
name = "autogpt-platform-backend"
version = "0.3.3"
version = "0.1.0"
description = "A platform for building AI-powered agentic workflows"
authors = ["AutoGPT <info@agpt.co>"]
readme = "README.md"
@@ -26,7 +26,7 @@ jinja2 = "^3.1.4"
jsonref = "^1.1.0"
jsonschema = "^4.22.0"
ollama = "^0.3.0"
openai = "^1.54.3"
openai = "^1.54.1"
praw = "~7.8.1"
prisma = "^0.15.0"
psutil = "^6.1.0"
@@ -38,7 +38,7 @@ pytest-asyncio = "^0.24.0"
python-dotenv = "^1.0.1"
redis = "^5.2.0"
sentry-sdk = 2.18.0
supabase = "^2.10.0"
supabase = "^2.7.2"
tenacity = "^9.0.0"
uvicorn = { extras = ["standard"], version = "^0.32.0" }
websockets = "^13.1"
@@ -47,15 +47,13 @@ googlemaps = "^4.10.0"
replicate = "^1.0.3"
pinecone = "^5.3.1"
cryptography = "^43.0.3"
sqlalchemy = "^2.0.36"
psycopg2-binary = "^2.9.10"
[tool.poetry.group.dev.dependencies]
poethepoet = "^0.30.0"
poethepoet = "^0.29.0"
httpx = "^0.27.0"
pytest-watcher = "^0.4.2"
requests = "^2.32.3"
ruff = "^0.7.4"
pyright = "^1.1.389"
ruff = "^0.7.2"
pyright = "^1.1.387"
isort = "^5.13.2"
black = "^24.10.0"
aiohappyeyeballs = "^2.4.3"
@@ -74,9 +72,6 @@ format = "linter:format"
lint = "linter:lint"
test = "run_tests:test"

[tool.isort]
profile = "black"

[tool.pytest-watcher]
now = false
clear = true

@@ -23,10 +23,10 @@ model User {
// Relations
AgentGraphs AgentGraph[]
AgentGraphExecutions AgentGraphExecution[]
AgentGraphExecutionSchedules AgentGraphExecutionSchedule[]
AnalyticsDetails AnalyticsDetails[]
AnalyticsMetrics AnalyticsMetrics[]
UserBlockCredit UserBlockCredit[]
APIKeys APIKey[]

@@index([id])
@@index([email])
@@ -50,6 +50,7 @@ model AgentGraph {

AgentNodes AgentNode[]
AgentGraphExecution AgentGraphExecution[]
AgentGraphExecutionSchedule AgentGraphExecutionSchedule[]

@@id(name: "graphVersionId", [id, version])
}
@@ -186,6 +187,30 @@ model AgentNodeExecutionInputOutput {
@@unique([referencedByInputExecId, referencedByOutputExecId, name])
}

// This model describes the recurring execution schedule of an Agent.
model AgentGraphExecutionSchedule {
id String @id
createdAt DateTime @default(now())
updatedAt DateTime? @updatedAt

agentGraphId String
agentGraphVersion Int @default(1)
AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version], onDelete: Cascade)

schedule String // cron expression
isEnabled Boolean @default(true)
inputData String // JSON serialized object

// default and set the value on each update, lastUpdated field has no time zone.
lastUpdated DateTime @updatedAt

// Link to User model
userId String
user User @relation(fields: [userId], references: [id], onDelete: Cascade)

@@index([isEnabled])
}

model AnalyticsDetails {
// PK uses gen_random_uuid() to allow the db inserts to happen outside of prisma
// typical uuid() inserts are handled by prisma
@@ -252,42 +277,3 @@ model UserBlockCredit {

@@id(name: "creditTransactionIdentifier", [transactionKey, userId])
}

enum APIKeyPermission {
EXECUTE_GRAPH // Can execute agent graphs
READ_GRAPH // Can get graph versions and details
EXECUTE_BLOCK // Can execute individual blocks
READ_BLOCK // Can get block information
}

model APIKey {
id String @id @default(uuid())
name String
prefix String // First 8 chars for identification
postfix String
key String @unique // Hashed key
status APIKeyStatus @default(ACTIVE)
permissions APIKeyPermission[]

createdAt DateTime @default(now())
lastUsedAt DateTime?
revokedAt DateTime?

description String?

// Relation to user
userId String
user User @relation(fields: [userId], references: [id], onDelete: Cascade)

@@index([key])
@@index([prefix])
@@index([userId])
@@index([status])
@@index([userId, status])
}

enum APIKeyStatus {
ACTIVE
REVOKED
SUSPENDED
}

@@ -22,19 +22,19 @@ async def test_agent_schedule(server: SpinTestServer):
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0

schedule = scheduler.add_execution_schedule(
schedule_id = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
assert schedule_id

schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
assert schedules[schedule_id] == "0 0 * * *"

scheduler.delete_schedule(schedule.id, user_id=test_user.id)
scheduler.update_schedule(schedule_id, is_enabled=False, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0

@@ -72,7 +72,6 @@ async def test_send_execution_result(
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},
@@ -103,7 +102,6 @@ async def test_send_execution_result_no_subscribers(
graph_exec_id="test_exec_id",
node_exec_id="test_node_exec_id",
node_id="test_node_id",
block_id="test_block_id",
status=ExecutionStatus.COMPLETED,
input_data={"input1": "value1"},
output_data={"output1": ["result1"]},

@@ -1,21 +0,0 @@
import pytest

from backend.util.request import validate_url


def test_validate_url():
with pytest.raises(ValueError):
validate_url("localhost", [])

with pytest.raises(ValueError):
validate_url("192.168.1.1", [])

with pytest.raises(ValueError):
validate_url("127.0.0.1", [])

with pytest.raises(ValueError):
validate_url("0.0.0.0", [])

validate_url("google.com", [])
validate_url("github.com", [])
validate_url("http://github.com", [])
@@ -65,7 +65,6 @@ services:
- REDIS_PASSWORD=password
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- EXECUTIONSCHEDULER_HOST=rest_server
- EXECUTIONMANAGER_HOST=executor
- DATABASEMANAGER_HOST=executor
- FRONTEND_BASE_URL=http://localhost:3000
@@ -107,7 +106,6 @@ services:
- PYRO_HOST=0.0.0.0
- AGENTSERVER_HOST=rest_server
- DATABASEMANAGER_HOST=0.0.0.0
- EXECUTIONMANAGER_HOST=0.0.0.0
- ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!!
ports:
- "8002:8000"

@@ -2,7 +2,7 @@
FROM node:21-alpine AS base
WORKDIR /app
COPY autogpt_platform/frontend/package.json autogpt_platform/frontend/yarn.lock ./
RUN --mount=type=cache,target=/usr/local/share/.cache yarn install --frozen-lockfile
RUN yarn install --frozen-lockfile

# Dev stage
FROM base AS dev
@@ -16,23 +16,17 @@ FROM base AS build
COPY autogpt_platform/frontend/ .
RUN yarn build

# Prod stage - based on NextJS reference Dockerfile https://github.com/vercel/next.js/blob/64271354533ed16da51be5dce85f0dbd15f17517/examples/with-docker/Dockerfile
# Prod stage
FROM node:21-alpine AS prod
ENV NODE_ENV=production
WORKDIR /app

RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs

RUN mkdir .next
RUN chown nextjs:nodejs .next

COPY --from=build --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=build --chown=nextjs:nodejs /app/.next/static ./.next/static
COPY --from=build /app/package.json /app/yarn.lock ./
RUN yarn install --frozen-lockfile

COPY --from=build /app/.next ./.next
COPY --from=build /app/public ./public

USER nextjs
COPY --from=build /app/next.config.mjs ./next.config.mjs

EXPOSE 3000
CMD ["node", "server.js"]
CMD ["yarn", "start"]

@@ -14,7 +14,6 @@ const nextConfig = {
},
];
},
output: "standalone",
// TODO: Re-enable TypeScript checks once current issues are resolved
typescript: {
ignoreBuildErrors: true,

@@ -1,6 +1,6 @@
{
"name": "frontend",
"version": "0.3.3",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
@@ -10,7 +10,6 @@
"start": "next start",
"lint": "next lint && prettier --check .",
"format": "prettier --write .",
"type-check": "tsc --noEmit",
"test": "playwright test",
"test-ui": "playwright test --ui",
"gentests": "playwright codegen http://localhost:3000",
@@ -23,47 +22,45 @@
"defaults"
],
"dependencies": {
"@faker-js/faker": "^9.2.0",
"@hookform/resolvers": "^3.9.1",
"@next/third-parties": "^15.0.3",
"@next/third-parties": "^15.0.2",
"@radix-ui/react-avatar": "^1.1.1",
"@radix-ui/react-checkbox": "^1.1.2",
"@radix-ui/react-collapsible": "^1.1.1",
"@radix-ui/react-context-menu": "^2.2.1",
"@radix-ui/react-dialog": "^1.1.2",
"@radix-ui/react-dropdown-menu": "^2.1.2",
"@radix-ui/react-icons": "^1.3.2",
"@radix-ui/react-icons": "^1.3.1",
"@radix-ui/react-label": "^2.1.0",
"@radix-ui/react-popover": "^1.1.2",
"@radix-ui/react-radio-group": "^1.2.1",
"@radix-ui/react-scroll-area": "^1.2.1",
"@radix-ui/react-scroll-area": "^1.2.0",
"@radix-ui/react-select": "^2.1.2",
"@radix-ui/react-separator": "^1.1.0",
"@radix-ui/react-slot": "^1.1.0",
"@radix-ui/react-switch": "^1.1.1",
"@radix-ui/react-toast": "^1.2.2",
"@radix-ui/react-tooltip": "^1.1.4",
"@radix-ui/react-tooltip": "^1.1.3",
"@sentry/nextjs": "^8",
"@supabase/ssr": "^0.5.2",
"@supabase/ssr": "^0.5.1",
"@supabase/supabase-js": "^2.46.1",
"@tanstack/react-table": "^8.20.5",
"@xyflow/react": "^12.3.5",
"@xyflow/react": "^12.3.4",
"ajv": "^8.17.1",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"cmdk": "1.0.4",
"cookie": "1.0.1",
"cookie": "0.7.0",
"date-fns": "^4.1.0",
"dotenv": "^16.4.5",
"elliptic": "6.6.1",
"lucide-react": "^0.460.0",
"elliptic": "6.6.0",
"lucide-react": "^0.454.0",
"moment": "^2.30.1",
"next": "^14.2.13",
"next-themes": "^0.4.3",
"react": "^18",
"react-day-picker": "^9.3.2",
"react-day-picker": "^9.2.1",
"react-dom": "^18",
"react-hook-form": "^7.53.2",
"react-hook-form": "^7.53.1",
"react-icons": "^5.3.0",
"react-markdown": "^9.0.1",
"react-modal": "^3.16.1",
@@ -71,33 +68,33 @@
"recharts": "^2.13.3",
"tailwind-merge": "^2.5.4",
"tailwindcss-animate": "^1.0.7",
"uuid": "^11.0.3",
"uuid": "^11.0.2",
"zod": "^3.23.8"
},
"devDependencies": {
"@chromatic-com/storybook": "^3.2.2",
"@playwright/test": "^1.48.2",
"@storybook/addon-essentials": "^8.4.2",
"@storybook/addon-interactions": "^8.4.2",
"@storybook/addon-links": "^8.4.2",
"@storybook/addon-onboarding": "^8.4.2",
"@storybook/blocks": "^8.4.2",
"@storybook/nextjs": "^8.4.2",
"@storybook/addon-essentials": "^8.4.1",
"@storybook/addon-interactions": "^8.4.1",
"@storybook/addon-links": "^8.4.1",
"@storybook/addon-onboarding": "^8.4.1",
"@storybook/blocks": "^8.4.1",
"@storybook/nextjs": "^8.4.1",
"@storybook/react": "^8.3.5",
"@storybook/test": "^8.3.5",
"@storybook/test-runner": "^0.19.1",
"@types/node": "^22.9.0",
"@types/node": "^22.8.7",
"@types/react": "^18",
"@types/react-dom": "^18",
"@types/react-modal": "^3.16.3",
"concurrently": "^9.1.0",
"eslint": "^8",
"eslint-config-next": "15.0.3",
"eslint-config-next": "15.0.2",
"eslint-plugin-storybook": "^0.11.0",
"postcss": "^8",
"prettier": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.8",
"storybook": "^8.4.2",
"storybook": "^8.4.1",
"tailwindcss": "^3.4.14",
"typescript": "^5"
},

@@ -4,10 +4,10 @@ import { defineConfig, devices } from "@playwright/test";
* Read environment variables from file.
* https://github.com/motdotla/dotenv
*/
import dotenv from "dotenv";
import path from "path";
dotenv.config({ path: path.resolve(__dirname, ".env") });
dotenv.config({ path: path.resolve(__dirname, "../backend/.env") });
// import dotenv from 'dotenv';
// import path from 'path';
// dotenv.config({ path: path.resolve(__dirname, '.env') });

/**
* See https://playwright.dev/docs/test-configuration.
*/

@@ -4,7 +4,6 @@ import React, { useCallback, useEffect, useMemo, useState } from "react";
import AutoGPTServerAPI, {
GraphMetaWithRuns,
ExecutionMeta,
Schedule,
} from "@/lib/autogpt-server-api";

import { Card } from "@/components/ui/card";
@@ -16,33 +15,17 @@ import {
FlowRunsList,
FlowRunsStats,
} from "@/components/monitor";
import { SchedulesTable } from "@/components/monitor/scheduleTable";

const Monitor = () => {
const [flows, setFlows] = useState<GraphMetaWithRuns[]>([]);
const [flowRuns, setFlowRuns] = useState<FlowRun[]>([]);
const [schedules, setSchedules] = useState<Schedule[]>([]);
const [selectedFlow, setSelectedFlow] = useState<GraphMetaWithRuns | null>(
null,
);
const [selectedRun, setSelectedRun] = useState<FlowRun | null>(null);
const [sortColumn, setSortColumn] = useState<keyof Schedule>("id");
const [sortDirection, setSortDirection] = useState<"asc" | "desc">("asc");

const api = useMemo(() => new AutoGPTServerAPI(), []);

const fetchSchedules = useCallback(async () => {
setSchedules(await api.listSchedules());
}, [api]);

const removeSchedule = useCallback(
async (scheduleId: string) => {
const removedSchedule = await api.deleteSchedule(scheduleId);
setSchedules(schedules.filter((s) => s.id !== removedSchedule.id));
},
[schedules, api],
);

const fetchAgents = useCallback(() => {
api.listGraphsWithRuns().then((agent) => {
setFlows(agent);
@@ -59,11 +42,7 @@ const Monitor = () => {

useEffect(() => {
fetchAgents();
}, [fetchAgents]);

useEffect(() => {
fetchSchedules();
}, [fetchSchedules]);
}, [api, fetchAgents]);

useEffect(() => {
const intervalId = setInterval(() => fetchAgents(), 5000);
@@ -71,18 +50,9 @@ const Monitor = () => {
}, [fetchAgents, flows]);

const column1 = "md:col-span-2 xl:col-span-3 xxl:col-span-2";
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3";
const column2 = "md:col-span-3 lg:col-span-2 xl:col-span-3 space-y-4";
const column3 = "col-span-full xl:col-span-4 xxl:col-span-5";

const handleSort = (column: keyof Schedule) => {
if (sortColumn === column) {
setSortDirection(sortDirection === "asc" ? "desc" : "asc");
} else {
setSortColumn(column);
setSortDirection("asc");
}
};

return (
<div className="grid grid-cols-1 gap-4 md:grid-cols-5 lg:grid-cols-4 xl:grid-cols-10">
<AgentFlowList
@@ -131,16 +101,6 @@ const Monitor = () => {
<FlowRunsStats flows={flows} flowRuns={flowRuns} />
</Card>
)}
<div className="col-span-full xl:col-span-6">
<SchedulesTable
schedules={schedules} // all schedules
agents={flows} // for filtering purpose
onRemoveSchedule={removeSchedule}
sortColumn={sortColumn}
sortDirection={sortDirection}
onSort={handleSort}
/>
</div>
</div>
);
};

@@ -4,14 +4,17 @@ import { useSupabase } from "@/components/SupabaseProvider";
import { Button } from "@/components/ui/button";
import useUser from "@/hooks/useUser";
import { useRouter } from "next/navigation";
import { useCallback, useContext, useMemo } from "react";
import { useCallback, useContext } from "react";
import { FaSpinner } from "react-icons/fa";
import { Separator } from "@/components/ui/separator";
import { useToast } from "@/components/ui/use-toast";
import { IconKey, IconUser } from "@/components/ui/icons";
import { LogOutIcon, Trash2Icon } from "lucide-react";
import { providerIcons } from "@/components/integrations/credentials-input";
import { CredentialsProvidersContext } from "@/components/integrations/credentials-provider";
import {
CredentialsProviderName,
CredentialsProvidersContext,
} from "@/components/integrations/credentials-provider";
import {
Table,
TableBody,
@@ -20,7 +23,6 @@ import {
TableHeader,
TableRow,
} from "@/components/ui/table";
import { CredentialsProviderName } from "@/lib/autogpt-server-api";

export default function PrivatePage() {
const { user, isLoading, error } = useUser();
@@ -60,25 +62,7 @@ export default function PrivatePage() {
[providers, toast],
);

//TODO: remove when the way system credentials are handled is updated
// This contains ids for built-in "Use Credits for X" credentials
const hiddenCredentials = useMemo(
() => [
"fdb7f412-f519-48d1-9b5f-d2f73d0e01fe", // Revid
"760f84fc-b270-42de-91f6-08efe1b512d0", // Ideogram
"6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate
"53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", // OpenAI
"24e5d942-d9e3-4798-8151-90143ee55629", // Anthropic
"4ec22295-8f97-4dd1-b42b-2c6957a02545", // Groq
"7f7b0654-c36b-4565-8fa7-9a52575dfae2", // D-ID
"7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina
"66f20754-1b81-48e4-91d0-f4f0dd82145f", // Unreal Speech
"b5a0e27d-0c98-4df3-a4b9-10193e1f3c40", // Open Router
],
[],
);

if (isLoading || !providers) {
if (isLoading || !providers || !providers) {
return (
<div className="flex h-[80vh] items-center justify-center">
<FaSpinner className="mr-2 h-16 w-16 animate-spin" />
@@ -92,15 +76,15 @@ export default function PrivatePage() {
}

const allCredentials = Object.values(providers).flatMap((provider) =>
[...provider.savedOAuthCredentials, ...provider.savedApiKeys]
.filter((cred) => !hiddenCredentials.includes(cred.id))
.map((credentials) => ({
[...provider.savedOAuthCredentials, ...provider.savedApiKeys].map(
(credentials) => ({
...credentials,
provider: provider.provider,
providerName: provider.providerName,
ProviderIcon: providerIcons[provider.provider],
TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type],
})),
}),
),
);

return (

@@ -18,14 +18,7 @@ import {
BlockUIType,
BlockCost,
} from "@/lib/autogpt-server-api/types";
import {
beautifyString,
cn,
getValue,
hasNonNullNonObjectValue,
parseKeys,
setNestedProperty,
} from "@/lib/utils";
import { beautifyString, cn, setNestedProperty } from "@/lib/utils";
import { Button } from "@/components/ui/button";
import { Switch } from "@/components/ui/switch";
import { history } from "./history";
@@ -41,12 +34,9 @@ import NodeOutputs from "./NodeOutputs";
import { IconCoin } from "./ui/icons";
import * as Separator from "@radix-ui/react-separator";
import * as ContextMenu from "@radix-ui/react-context-menu";
import {
DotsVerticalIcon,
TrashIcon,
CopyIcon,
ExitIcon,
} from "@radix-ui/react-icons";
import { DotsVerticalIcon, TrashIcon, CopyIcon } from "@radix-ui/react-icons";

type ParsedKey = { key: string; index?: number };

export type ConnectionData = Array<{
edge_id: string;
@@ -102,14 +92,6 @@ export function CustomNode({
>();
const isInitialSetup = useRef(true);
const flowContext = useContext(FlowContext);
let nodeFlowId = "";

if (data.uiType === BlockUIType.AGENT) {
// Display the graph's schema instead AgentExecutorBlock's schema.
data.inputSchema = data.hardcodedValues?.input_schema || {};
data.outputSchema = data.hardcodedValues?.output_schema || {};
nodeFlowId = data.hardcodedValues?.graph_id || nodeFlowId;
}

if (!flowContext) {
throw new Error("FlowContext consumer must be inside FlowEditor component");
@@ -181,6 +163,38 @@ export function CustomNode({
if (!schema?.properties) return null;
let keys = Object.entries(schema.properties);
switch (nodeType) {
case BlockUIType.INPUT:
// For INPUT blocks, dont include connection handles
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || !isAdvanced) && (
<div key={propKey} data-id={`input-handle-${propKey}`}>
<span className="text-m green mb-0 text-gray-900">
{propSchema.title || beautifyString(propKey)}
</span>
<div key={propKey}>
{!isConnected && (
<NodeGenericInputField
nodeId={id}
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
</div>
)
);
});

case BlockUIType.NOTE:
// For NOTE blocks, don't render any input handles
const [noteKey, noteSchema] = keys[0];
@@ -190,7 +204,7 @@ export function CustomNode({
className=""
selfKey={noteKey}
schema={noteSchema as BlockIOStringSubSchema}
value={getValue(noteKey, data.hardcodedValues)}
value={getValue(noteKey)}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
error={data.errors?.[noteKey] ?? ""}
@@ -199,25 +213,20 @@ export function CustomNode({
</div>
);

default:
const getInputPropKey = (key: string) =>
nodeType == BlockUIType.AGENT ? `data.${key}` : key;

case BlockUIType.OUTPUT:
// For OUTPUT blocks, only show the 'value' property
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
const isConnectable =
// No input connection handles for credentials
propKey !== "credentials" &&
// No input connection handles on INPUT blocks
nodeType !== BlockUIType.INPUT &&
// For OUTPUT blocks, only show the 'value' (hides 'name') input connection handle
!(nodeType == BlockUIType.OUTPUT && propKey == "name");
return (
(isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
<div key={propKey} data-id={`input-handle-${propKey}`}>
{isConnectable ? (
(isRequired || isAdvancedOpen || !isAdvanced) && (
<div key={propKey} data-id={`output-handle-${propKey}`}>
{propKey !== "value" ? (
<span className="text-m green mb-0 text-gray-900">
{propSchema.title || beautifyString(propKey)}
</span>
) : (
<NodeHandle
keyName={propKey}
isConnected={isConnected}
@@ -225,25 +234,52 @@ export function CustomNode({
schema={propSchema}
side="left"
/>
) : (
propKey != "credentials" && (
<span
className="text-m green mb-0 text-gray-900"
title={propSchema.description}
>
{propSchema.title || beautifyString(propKey)}
</span>
)
)}
{!isConnected && (
<NodeGenericInputField
nodeId={id}
propKey={getInputPropKey(propKey)}
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(
getInputPropKey(propKey),
data.hardcodedValues,
)}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
errors={data.errors ?? {}}
displayName={propSchema.title || beautifyString(propKey)}
/>
)}
</div>
)
);
});

default:
return keys.map(([propKey, propSchema]) => {
const isRequired = data.inputSchema.required?.includes(propKey);
const isConnected = isHandleConnected(propKey);
const isAdvanced = propSchema.advanced;
return (
(isRequired || isAdvancedOpen || isConnected || !isAdvanced) && (
<div key={propKey} data-id={`input-handle-${propKey}`}>
{"credentials_provider" in propSchema ? (
<span className="text-m green mb-0 text-gray-900">
Credentials
</span>
) : (
<NodeHandle
keyName={propKey}
isConnected={isConnected}
isRequired={isRequired}
schema={propSchema}
side="left"
/>
)}
{!isConnected && (
<NodeGenericInputField
nodeId={id}
propKey={propKey}
propSchema={propSchema}
currentValue={getValue(propKey)}
connections={data.connections}
handleInputChange={handleInputChange}
handleInputClick={handleInputClick}
@@ -282,6 +318,8 @@ export function CustomNode({
current[lastKey.key] = value;
}

// console.log(`Updating hardcoded values for node ${id}:`, newValues);

if (!isInitialSetup.current) {
history.push({
type: "UPDATE_INPUT",
@@ -298,6 +336,48 @@ export function CustomNode({
setErrors({ ...errors });
};

// Helper function to parse keys with array indices
//TODO move to utils
const parseKeys = (key: string): ParsedKey[] => {
const splits = key.split(/_@_|_#_|_\$_|\./);
const keys: ParsedKey[] = [];
let currentKey: string | null = null;

splits.forEach((split) => {
const isInteger = /^\d+$/.test(split);
if (!isInteger) {
if (currentKey !== null) {
keys.push({ key: currentKey });
}
currentKey = split;
} else {
if (currentKey !== null) {
keys.push({ key: currentKey, index: parseInt(split, 10) });
currentKey = null;
} else {
throw new Error("Invalid key format: array index without a key");
}
}
});

if (currentKey !== null) {
keys.push({ key: currentKey });
}

return keys;
};

const getValue = (key: string) => {
const keys = parseKeys(key);
return keys.reduce((acc, k) => {
if (acc === undefined) return undefined;
if (k.index !== undefined) {
return Array.isArray(acc[k.key]) ? acc[k.key][k.index] : undefined;
}
return acc[k.key];
}, data.hardcodedValues as any);
};

const isHandleConnected = (key: string) => {
return (
data.connections &&
@@ -320,7 +400,7 @@ export function CustomNode({
const handleInputClick = (key: string) => {
console.debug(`Opening modal for key: ${key}`);
setActiveKey(key);
const value = getValue(key, data.hardcodedValues);
const value = getValue(key);
setInputModalValue(
typeof value === "object" ? JSON.stringify(value, null, 2) : value,
);
@@ -388,7 +468,9 @@ export function CustomNode({
});
}, [id, data, height, addNodes, deleteElements, getNode, getNextNodeId]);

const hasConfigErrors = data.errors && hasNonNullNonObjectValue(data.errors);
const hasConfigErrors =
data.errors &&
Object.entries(data.errors).some(([_, value]) => value !== null);
const outputData = data.executionResults?.at(-1)?.data;
const hasOutputError =
typeof outputData === "object" &&
@@ -398,8 +480,8 @@ export function CustomNode({
useEffect(() => {
if (hasConfigErrors) {
const filteredErrors = Object.fromEntries(
Object.entries(data.errors || {}).filter(([, value]) =>
hasNonNullNonObjectValue(value),
Object.entries(data.errors || {}).filter(
([_, value]) => value !== null,
),
);
console.error(
@@ -505,15 +587,6 @@ export function CustomNode({
<CopyIcon className="mr-2 h-5 w-5" />
<span>Copy</span>
</ContextMenu.Item>
{nodeFlowId && (
<ContextMenu.Item
onSelect={() => window.open(`/build?flowID=${nodeFlowId}`)}
className="flex cursor-pointer items-center rounded-md px-3 py-2 hover:bg-gray-100"
>
<ExitIcon className="mr-2 h-5 w-5" />
<span>Open agent</span>
</ContextMenu.Item>
)}
<ContextMenu.Separator className="my-1 h-px bg-gray-300" />
<ContextMenu.Item
onSelect={deleteNode}

@@ -27,7 +27,11 @@ import "@xyflow/react/dist/style.css";
|
||||
import { CustomNode } from "./CustomNode";
|
||||
import "./flow.css";
|
||||
import { BlockUIType, Link } from "@/lib/autogpt-server-api";
|
||||
import { getTypeColor, findNewlyAddedBlockCoordinates } from "@/lib/utils";
|
||||
import {
|
||||
getTypeColor,
|
||||
filterBlocksByType,
|
||||
findNewlyAddedBlockCoordinates,
|
||||
} from "@/lib/utils";
|
||||
import { history } from "./history";
|
||||
import { CustomEdge } from "./CustomEdge";
|
||||
import ConnectionLine from "./ConnectionLine";
|
||||
@@ -44,8 +48,8 @@ import RunnerUIWrapper, {
|
||||
} from "@/components/RunnerUIWrapper";
|
||||
import PrimaryActionBar from "@/components/PrimaryActionButton";
|
||||
import { useToast } from "@/components/ui/use-toast";
|
||||
import { forceLoad } from "@sentry/nextjs";
|
||||
import { useCopyPaste } from "../hooks/useCopyPaste";
|
||||
import { CronScheduler } from "./cronScheduler";
|
||||
|
||||
// This is for the history, this is the minimum distance a block must move before it is logged
|
||||
// It helps to prevent spamming the history with small movements especially when pressing on a input in a block
|
||||
@@ -82,6 +86,8 @@ const FlowEditor: React.FC<{
|
||||
setViewport,
|
||||
} = useReactFlow<CustomNode, CustomEdge>();
|
||||
const [nodeId, setNodeId] = useState<number>(1);
|
||||
const [copiedNodes, setCopiedNodes] = useState<CustomNode[]>([]);
|
||||
const [copiedEdges, setCopiedEdges] = useState<CustomEdge[]>([]);
|
||||
const [isAnyModalOpen, setIsAnyModalOpen] = useState(false);
|
||||
const [visualizeBeads, setVisualizeBeads] = useState<
|
||||
"no" | "static" | "animate"
|
||||
@@ -93,15 +99,11 @@ const FlowEditor: React.FC<{
|
||||
setAgentDescription,
|
||||
savedAgent,
|
||||
availableNodes,
|
||||
availableFlows,
|
||||
getOutputType,
|
||||
requestSave,
|
||||
requestSaveAndRun,
|
||||
requestStopRun,
|
||||
scheduleRunner,
|
||||
isRunning,
|
||||
isScheduling,
|
||||
setIsScheduling,
|
||||
nodes,
|
||||
setNodes,
|
||||
edges,
|
||||
@@ -123,8 +125,6 @@ const FlowEditor: React.FC<{
|
||||
|
||||
const runnerUIRef = useRef<RunnerUIWrapperRef>(null);
|
||||
|
||||
const [openCron, setOpenCron] = useState(false);
|
||||
|
||||
const { toast } = useToast();
|
||||
|
||||
const TUTORIAL_STORAGE_KEY = "shepherd-tour";
|
||||
@@ -152,12 +152,6 @@ const FlowEditor: React.FC<{
|
||||
nodes.length,
|
||||
]);
|
||||
|
||||
useEffect(() => {
|
||||
if (params.get("open_scheduling") === "true") {
|
||||
setOpenCron(true);
|
||||
}
|
||||
}, [params]);
|
||||
|
||||
useEffect(() => {
|
||||
const handleKeyDown = (event: KeyboardEvent) => {
|
||||
const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0;
|
||||
@@ -422,37 +416,8 @@ const FlowEditor: React.FC<{
|
||||
|
||||
const { x, y, zoom } = useViewport();
|
||||
|
||||
// Set the initial view port to center the canvas.
|
||||
useEffect(() => {
|
||||
if (nodes.length <= 0 || x !== 0 || y !== 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const topLeft = { x: Infinity, y: Infinity };
|
||||
const bottomRight = { x: -Infinity, y: -Infinity };
|
||||
|
||||
nodes.forEach((node) => {
|
||||
const { x, y } = node.position;
|
||||
topLeft.x = Math.min(topLeft.x, x);
|
||||
topLeft.y = Math.min(topLeft.y, y);
|
||||
// Rough estimate of the width and height of the node: 500x400.
|
||||
bottomRight.x = Math.max(bottomRight.x, x + 500);
|
||||
bottomRight.y = Math.max(bottomRight.y, y + 400);
|
||||
});
|
||||
|
||||
const centerX = (topLeft.x + bottomRight.x) / 2;
|
||||
const centerY = (topLeft.y + bottomRight.y) / 2;
|
||||
const zoom = 0.8;
|
||||
|
||||
setViewport({
|
||||
x: window.innerWidth / 2 - centerX * zoom,
|
||||
y: window.innerHeight / 2 - centerY * zoom,
|
||||
zoom: zoom,
|
||||
});
|
||||
}, [nodes, setViewport, x, y]);
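  // Illustrative arithmetic with assumed values (not part of the diff): if
  // the computed bounding box is (0,0)-(1000,800), then centerX = 500 and
  // centerY = 400; with zoom 0.8 on a 1920x1080 window the viewport becomes
  // x = 960 - 500 * 0.8 = 560 and y = 540 - 400 * 0.8 = 220.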

  const addNode = useCallback(
    (blockId: string, nodeType: string, hardcodedValues: any = {}) => {
    (blockId: string, nodeType: string) => {
      const nodeSchema = availableNodes.find((node) => node.id === blockId);
      if (!nodeSchema) {
        console.error(`Schema not found for block ID: ${blockId}`);
@@ -475,14 +440,14 @@ const FlowEditor: React.FC<{
          ? // we collect the dimensions of all nodes, then store
            findNewlyAddedBlockCoordinates(
              nodeDimensions,
              nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500,
              60,
              1.0,
              (nodeSchema.uiType == BlockUIType.NOTE ? 300 : 500) / zoom,
              60 / zoom,
              zoom,
            )
          : // we collect the dimensions of all nodes, then store
            {
              x: window.innerWidth / 2 - x,
              y: window.innerHeight / 2 - y,
              x: (window.innerWidth / 2 - x) / zoom,
              y: (window.innerHeight / 2 - y) / zoom,
            };
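  // Illustrative conversion, not part of the diff: React Flow pans by (x, y)
  // and scales by zoom, so a screen point maps to flow coordinates as
  // (screen - pan) / zoom. E.g. with pan x = -200 and zoom = 0.5 on a
  // 1920px-wide window: (960 - (-200)) / 0.5 = 2320.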

      const newNode: CustomNode = {
@@ -497,7 +462,7 @@ const FlowEditor: React.FC<{
          categories: nodeSchema.categories,
          inputSchema: nodeSchema.inputSchema,
          outputSchema: nodeSchema.outputSchema,
          hardcodedValues: hardcodedValues,
          hardcodedValues: {},
          connections: [],
          isOutputOpen: false,
          block_id: blockId,
@@ -512,10 +477,8 @@ const FlowEditor: React.FC<{

      setViewport(
        {
          // Rough estimate of the node's dimensions: 500x400px.
          // We skip shifting the X to account for the block menu sidebar.
          x: -viewportCoordinates.x * 0.8 + (window.innerWidth - 0.0) / 2,
          y: -viewportCoordinates.y * 0.8 + (window.innerHeight - 400) / 2,
          x: -viewportCoordinates.x * zoom + window.innerWidth / 2,
          y: -viewportCoordinates.y * zoom + window.innerHeight / 2 - 100,
          zoom: 0.8,
        },
        { duration: 500 },
@@ -538,6 +501,7 @@ const FlowEditor: React.FC<{
      clearNodesStatusAndOutput,
      x,
      y,
      zoom,
    ],
  );

@@ -623,24 +587,6 @@ const FlowEditor: React.FC<{
    },
  ];

  // Called after a cron expression is created, so inputs can be collected
  // for scheduling.
  const afterCronCreation = (cronExpression: string) => {
    runnerUIRef.current?.collectInputsForScheduling(cronExpression);
  };

  // Opens the form for creating a cron expression.
  const handleScheduleButton = () => {
    if (!savedAgent) {
      toast({
        title: `Please save the agent using the button in the left sidebar before running it.`,
        duration: 2000,
      });
      return;
    }
    setOpenCron(true);
  };

  return (
    <FlowContext.Provider
      value={{ visualizeBeads, setIsAnyModalOpen, getNextNodeId }}
@@ -672,7 +618,6 @@ const FlowEditor: React.FC<{
            pinBlocksPopover={pinBlocksPopover} // Pass the state to BlocksControl
            blocks={availableNodes}
            addBlock={addNode}
            flows={availableFlows}
          />
        }
        botChildren={
@@ -703,28 +648,18 @@ const FlowEditor: React.FC<{
                requestStopRun();
              }
            }}
            onClickScheduleButton={handleScheduleButton}
            isScheduling={isScheduling}
            isDisabled={!savedAgent}
            isRunning={isRunning}
            requestStopRun={requestStopRun}
            runAgentTooltip={!isRunning ? "Run Agent" : "Stop Agent"}
          />
          <CronScheduler
            afterCronCreation={afterCronCreation}
            open={openCron}
            setOpen={setOpenCron}
          />
        </ReactFlow>
      </div>
      <RunnerUIWrapper
        ref={runnerUIRef}
        nodes={nodes}
        setNodes={setNodes}
        setIsScheduling={setIsScheduling}
        isScheduling={isScheduling}
        isRunning={isRunning}
        scheduleRunner={scheduleRunner}
        requestSaveAndRun={requestSaveAndRun}
      />
    </FlowContext.Provider>

@@ -1,21 +1,18 @@
import React, { useState } from "react";
import React from "react";
import { Button } from "./ui/button";
import { Clock, LogOut, ChevronLeft } from "lucide-react";
import { LogOut } from "lucide-react";
import { IconPlay, IconSquare } from "@/components/ui/icons";
import {
  Tooltip,
  TooltipContent,
  TooltipTrigger,
} from "@/components/ui/tooltip";
import { FaSpinner } from "react-icons/fa";

interface PrimaryActionBarProps {
  onClickAgentOutputs: () => void;
  onClickRunAgent: () => void;
  onClickScheduleButton: () => void;
  isRunning: boolean;
  isDisabled: boolean;
  isScheduling: boolean;
  requestStopRun: () => void;
  runAgentTooltip: string;
}
@@ -23,10 +20,8 @@ interface PrimaryActionBarProps {
const PrimaryActionBar: React.FC<PrimaryActionBarProps> = ({
  onClickAgentOutputs,
  onClickRunAgent,
  onClickScheduleButton,
  isRunning,
  isDisabled,
  isScheduling,
  requestStopRun,
  runAgentTooltip,
}) => {
@@ -79,30 +74,6 @@ const PrimaryActionBar: React.FC<PrimaryActionBarProps> = ({
            <p>{runAgentTooltip}</p>
          </TooltipContent>
        </Tooltip>
        <Tooltip key="ScheduleAgent" delayDuration={500}>
          <TooltipTrigger asChild>
            <Button
              className="flex items-center gap-2"
              onClick={onClickScheduleButton}
              size="primary"
              disabled={isScheduling}
              variant="outline"
              data-id="primary-action-schedule-agent"
            >
              {isScheduling ? (
                <FaSpinner className="animate-spin" />
              ) : (
                <Clock className="hidden h-5 w-5 md:flex" />
              )}
              <span className="text-sm font-medium md:text-lg">
                Schedule Run
              </span>
            </Button>
          </TooltipTrigger>
          <TooltipContent>
            <p>Schedule this Agent</p>
          </TooltipContent>
        </Tooltip>
      </div>
    </div>
  );

@@ -10,55 +10,24 @@ import { Node } from "@xyflow/react";
import { filterBlocksByType } from "@/lib/utils";
import { BlockIORootSchema, BlockUIType } from "@/lib/autogpt-server-api/types";

interface HardcodedValues {
  name: any;
  description: any;
  value: any;
  placeholder_values: any;
  limit_to_placeholder_values: any;
}

export interface InputItem {
  id: string;
  type: "input";
  inputSchema: BlockIORootSchema;
  hardcodedValues: HardcodedValues;
}

interface RunnerUIWrapperProps {
  nodes: Node[];
  setNodes: React.Dispatch<React.SetStateAction<Node[]>>;
  setIsScheduling: React.Dispatch<React.SetStateAction<boolean>>;
  isRunning: boolean;
  isScheduling: boolean;
  requestSaveAndRun: () => void;
  scheduleRunner: (cronExpression: string, input: InputItem[]) => Promise<void>;
}

export interface RunnerUIWrapperRef {
  openRunnerInput: () => void;
  openRunnerOutput: () => void;
  runOrOpenInput: () => void;
  collectInputsForScheduling: (cronExpression: string) => void;
}

const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
  (
    {
      nodes,
      setIsScheduling,
      setNodes,
      isScheduling,
      isRunning,
      requestSaveAndRun,
      scheduleRunner,
    },
    ref,
  ) => {
  ({ nodes, setNodes, isRunning, requestSaveAndRun }, ref) => {
    const [isRunnerInputOpen, setIsRunnerInputOpen] = useState(false);
    const [isRunnerOutputOpen, setIsRunnerOutputOpen] = useState(false);
    const [scheduledInput, setScheduledInput] = useState(false);
    const [cronExpression, setCronExpression] = useState("");

    const getBlockInputsAndOutputs = useCallback(() => {
      const inputBlocks = filterBlocksByType(
        nodes,
@@ -89,6 +58,7 @@ const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
      const outputs = outputBlocks.map((node) => ({
        id: node.id,
        type: "output" as const,
        outputSchema: node.data.outputSchema as BlockIORootSchema,
        hardcodedValues: {
          name: (node.data.hardcodedValues as any).name || "Output",
          description:
@@ -137,23 +107,10 @@ const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
      }
    };

    const collectInputsForScheduling = (cron_exp: string) => {
      const { inputs } = getBlockInputsAndOutputs();
      setCronExpression(cron_exp);

      if (inputs.length > 0) {
        setScheduledInput(true);
        setIsRunnerInputOpen(true);
      } else {
        scheduleRunner(cron_exp, []);
      }
    };
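    // Flow sketch, not part of the diff: if the graph has input blocks, the
    // runner-input dialog opens in "scheduled" mode and the stored cron
    // expression is used on submit; otherwise the run is scheduled
    // immediately with an empty input list.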

    useImperativeHandle(ref, () => ({
      openRunnerInput,
      openRunnerOutput,
      runOrOpenInput,
      collectInputsForScheduling,
    }));

    return (
@@ -167,18 +124,6 @@ const RunnerUIWrapper = forwardRef<RunnerUIWrapperRef, RunnerUIWrapperProps>(
            setIsRunnerInputOpen(false);
            requestSaveAndRun();
          }}
          scheduledInput={scheduledInput}
          isScheduling={isScheduling}
          onSchedule={async () => {
            setIsScheduling(true);
            await scheduleRunner(
              cronExpression,
              getBlockInputsAndOutputs().inputs,
            );
            setIsScheduling(false);
            setIsRunnerInputOpen(false);
            setScheduledInput(false);
          }}
          isRunning={isRunning}
        />
        <RunnerOutputUI

@@ -37,9 +37,7 @@ export default function SupabaseProvider({
      if (event === "SIGNED_IN") {
        api.createUser();
      }
      if (event === "SIGNED_OUT") {
        router.refresh();
      }
      router.refresh();
    });

    return () => {

@@ -3,9 +3,6 @@ import MarketplaceAPI from "@/lib/marketplace-api";
import ServerSideMarketplaceAPI from "@/lib/marketplace-api/server-client";
import { revalidatePath } from "next/cache";
import * as Sentry from "@sentry/nextjs";
import { checkAuth, createServerClient } from "@/lib/supabase/server";
import { redirect } from "next/navigation";
import { createClient } from "@/lib/supabase/client";

export async function approveAgent(
  agentId: string,
@@ -16,8 +13,6 @@ export async function approveAgent(
    "approveAgent",
    {},
    async () => {
      await checkAuth();

      const api = new ServerSideMarketplaceAPI();
      await api.approveAgentSubmission(agentId, version, comment);
      console.debug(`Approving agent ${agentId}`);
@@ -35,7 +30,6 @@ export async function rejectAgent(
    "rejectAgent",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      await api.rejectAgentSubmission(agentId, version, comment);
      console.debug(`Rejecting agent ${agentId}`);
@@ -49,7 +43,6 @@ export async function getReviewableAgents() {
    "getReviewableAgents",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      return api.getAgentSubmissions();
    },
@@ -64,7 +57,6 @@ export async function getFeaturedAgents(
    "getFeaturedAgents",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      const featured = await api.getFeaturedAgents(page, pageSize);
      console.debug(`Getting featured agents ${featured.items.length}`);
@@ -78,7 +70,6 @@ export async function getFeaturedAgent(agentId: string) {
    "getFeaturedAgent",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      const featured = await api.getFeaturedAgent(agentId);
      console.debug(`Getting featured agent ${featured.agentId}`);
@@ -95,7 +86,6 @@ export async function addFeaturedAgent(
    "addFeaturedAgent",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      await api.addFeaturedAgent(agentId, categories);
      console.debug(`Adding featured agent ${agentId}`);
@@ -112,7 +102,6 @@ export async function removeFeaturedAgent(
    "removeFeaturedAgent",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      await api.removeFeaturedAgent(agentId, categories);
      console.debug(`Removing featured agent ${agentId}`);
@@ -126,7 +115,6 @@ export async function getCategories() {
    "getCategories",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      const categories = await api.getCategories();
      console.debug(
@@ -145,7 +133,6 @@ export async function getNotFeaturedAgents(
    "getNotFeaturedAgents",
    {},
    async () => {
      await checkAuth();
      const api = new ServerSideMarketplaceAPI();
      const agents = await api.getNotFeaturedAgents(page, pageSize);
      console.debug(`Getting not featured agents ${agents.items.length}`);

@@ -145,7 +145,7 @@ export const AgentImportForm: React.FC<
  );
  if (
    !["name", "description", "nodes", "links"].every(
      (key) => key in obj && obj[key] != null,
      (key) => !!obj[key],
    )
  ) {
    throw new Error(

@@ -1,425 +0,0 @@
import { useState } from "react";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select";
import { Label } from "@/components/ui/label";
import { Input } from "@/components/ui/input";
import { Button } from "@/components/ui/button";
import {
  Dialog,
  DialogContent,
  DialogTitle,
  DialogTrigger,
} from "@/components/ui/dialog";
import { Separator } from "./ui/separator";
import { CronExpressionManager } from "@/lib/monitor/cronExpressionManager";

interface CronSchedulerProps {
  setOpen: React.Dispatch<React.SetStateAction<boolean>>;
  open: boolean;
  afterCronCreation: (cronExpression: string) => void;
}

export function CronScheduler({
  setOpen,
  open,
  afterCronCreation,
}: CronSchedulerProps) {
  const [frequency, setFrequency] = useState<
    "minute" | "hour" | "daily" | "weekly" | "monthly" | "yearly" | "custom"
  >("minute");
  const [selectedDays, setSelectedDays] = useState<number[]>([]);
  const [selectedTime, setSelectedTime] = useState<string>("00:00");
  const [showCustomDays, setShowCustomDays] = useState<boolean>(false);
  const [selectedMinute, setSelectedMinute] = useState<string>("0");
  const [customInterval, setCustomInterval] = useState<{
    value: number;
    unit: "minutes" | "hours" | "days";
  }>({ value: 1, unit: "minutes" });

  // const [endType, setEndType] = useState<"never" | "on" | "after">("never");
  // const [endDate, setEndDate] = useState<Date | undefined>();
  // const [occurrences, setOccurrences] = useState<number>(1);

  const weekDays = [
    { label: "Su", value: 0 },
    { label: "Mo", value: 1 },
    { label: "Tu", value: 2 },
    { label: "We", value: 3 },
    { label: "Th", value: 4 },
    { label: "Fr", value: 5 },
    { label: "Sa", value: 6 },
  ];

  const months = [
    { label: "Jan", value: "January" },
    { label: "Feb", value: "February" },
    { label: "Mar", value: "March" },
    { label: "Apr", value: "April" },
    { label: "May", value: "May" },
    { label: "Jun", value: "June" },
    { label: "Jul", value: "July" },
    { label: "Aug", value: "August" },
    { label: "Sep", value: "September" },
    { label: "Oct", value: "October" },
    { label: "Nov", value: "November" },
    { label: "Dec", value: "December" },
  ];

  const cron_manager = new CronExpressionManager();
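  // Illustrative sketch, not part of the diff: assuming generateCronExpression
  // emits the standard five-field cron format referenced in the comment below,
  // a "weekly" frequency with selectedTime "09:30" and selectedDays [1, 3]
  // would come out as "30 9 * * 1,3" (minute, hour, day-of-month, month,
  // day-of-week).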

  return (
    <Dialog open={open} onOpenChange={setOpen}>
      <DialogTrigger asChild>
        <Button variant="outline">Schedule</Button>
      </DialogTrigger>
      <DialogContent>
        <DialogTitle>Schedule Task</DialogTitle>
        <div className="max-w-md space-y-6 p-2">
          <div className="space-y-4">
            <Label className="text-base font-medium">Repeat</Label>

            <Select
              onValueChange={(value: any) => setFrequency(value)}
              defaultValue="minute"
            >
              <SelectTrigger>
                <SelectValue placeholder="Select frequency" />
              </SelectTrigger>
              <SelectContent>
                <SelectItem value="minute">Every Minute</SelectItem>
                <SelectItem value="hour">Every Hour</SelectItem>
                <SelectItem value="daily">Daily</SelectItem>
                <SelectItem value="weekly">Weekly</SelectItem>
                <SelectItem value="monthly">Monthly</SelectItem>
                <SelectItem value="yearly">Yearly</SelectItem>
                <SelectItem value="custom">Custom</SelectItem>
              </SelectContent>
            </Select>

            {frequency === "hour" && (
              <div className="flex items-center gap-2">
                <Label>At minute</Label>
                <Select
                  value={selectedMinute}
                  onValueChange={setSelectedMinute}
                >
                  <SelectTrigger className="w-24">
                    <SelectValue placeholder="Select minute" />
                  </SelectTrigger>
                  <SelectContent>
                    {[0, 15, 30, 45].map((min) => (
                      <SelectItem key={min} value={min.toString()}>
                        {min}
                      </SelectItem>
                    ))}
                  </SelectContent>
                </Select>
              </div>
            )}

            {frequency === "custom" && (
              <div className="flex items-center gap-2">
                <Label>Every</Label>
                <Input
                  type="number"
                  min="1"
                  className="w-20"
                  value={customInterval.value}
                  onChange={(e) =>
                    setCustomInterval({
                      ...customInterval,
                      value: parseInt(e.target.value),
                    })
                  }
                />
                <Select
                  value={customInterval.unit}
                  onValueChange={(value: any) =>
                    setCustomInterval({ ...customInterval, unit: value })
                  }
                >
                  <SelectTrigger className="w-32">
                    <SelectValue />
                  </SelectTrigger>
                  <SelectContent>
                    <SelectItem value="minutes">Minutes</SelectItem>
                    <SelectItem value="hours">Hours</SelectItem>
                    <SelectItem value="days">Days</SelectItem>
                  </SelectContent>
                </Select>
              </div>
            )}
          </div>

          {frequency === "weekly" && (
            <div className="space-y-2">
              <div className="flex items-center gap-2">
                <Label>On</Label>
                <Button
                  variant="outline"
                  className="h-8 px-2 py-1 text-xs"
                  onClick={() => {
                    if (selectedDays.length === weekDays.length) {
                      setSelectedDays([]);
                    } else {
                      setSelectedDays(weekDays.map((day) => day.value));
                    }
                  }}
                >
                  {selectedDays.length === weekDays.length
                    ? "Deselect All"
                    : "Select All"}
                </Button>
                <Button
                  variant="outline"
                  className="h-8 px-2 py-1 text-xs"
                  onClick={() => setSelectedDays([1, 2, 3, 4, 5])}
                >
                  Weekdays
                </Button>
                <Button
                  variant="outline"
                  className="h-8 px-2 py-1 text-xs"
                  onClick={() => setSelectedDays([0, 6])}
                >
                  Weekends
                </Button>
              </div>
              <div className="flex flex-wrap gap-2">
                {weekDays.map((day) => (
                  <Button
                    key={day.value}
                    variant={
                      selectedDays.includes(day.value) ? "default" : "outline"
                    }
                    className="h-10 w-10 p-0"
                    onClick={() => {
                      setSelectedDays((prev) =>
                        prev.includes(day.value)
                          ? prev.filter((d) => d !== day.value)
                          : [...prev, day.value],
                      );
                    }}
                  >
                    {day.label}
                  </Button>
                ))}
              </div>
            </div>
          )}
          {frequency === "monthly" && (
            <div className="space-y-4">
              <Label>Days of Month</Label>
              <div className="flex gap-2">
                <Button
                  variant={!showCustomDays ? "default" : "outline"}
                  onClick={() => {
                    setShowCustomDays(false);
                    setSelectedDays(
                      Array.from({ length: 31 }, (_, i) => i + 1),
                    );
                  }}
                >
                  All Days
                </Button>
                <Button
                  variant={showCustomDays ? "default" : "outline"}
                  onClick={() => {
                    setShowCustomDays(true);
                    setSelectedDays([]);
                  }}
                >
                  Customize
                </Button>
                <Button variant="outline" onClick={() => setSelectedDays([15])}>
                  15th
                </Button>
                <Button variant="outline" onClick={() => setSelectedDays([31])}>
                  Last Day
                </Button>
              </div>
              {showCustomDays && (
                <div className="flex flex-wrap gap-2">
                  {Array.from({ length: 31 }, (_, i) => (
                    <Button
                      key={i + 1}
                      variant={
                        selectedDays.includes(i + 1) ? "default" : "outline"
                      }
                      className="h-10 w-10 p-0"
                      onClick={() => {
                        setSelectedDays((prev) =>
                          prev.includes(i + 1)
                            ? prev.filter((d) => d !== i + 1)
                            : [...prev, i + 1],
                        );
                      }}
                    >
                      {i + 1}
                    </Button>
                  ))}
                </div>
              )}
            </div>
          )}
          {frequency === "yearly" && (
            <div className="space-y-4">
              <Label>Months</Label>
              <div className="flex gap-2">
                <Button
                  variant="outline"
                  className="h-8 px-2 py-1 text-xs"
                  onClick={() => {
                    if (selectedDays.length === months.length) {
                      setSelectedDays([]);
                    } else {
                      setSelectedDays(Array.from({ length: 12 }, (_, i) => i));
                    }
                  }}
                >
                  {selectedDays.length === months.length
                    ? "Deselect All"
                    : "Select All"}
                </Button>
              </div>
              <div className="flex flex-wrap gap-2">
                {months.map((month, index) => (
                  <Button
                    key={index}
                    variant={
                      selectedDays.includes(index) ? "default" : "outline"
                    }
                    className="px-2 py-1"
                    onClick={() => {
                      setSelectedDays((prev) =>
                        prev.includes(index)
                          ? prev.filter((m) => m !== index)
                          : [...prev, index],
                      );
                    }}
                  >
                    {month.label}
                  </Button>
                ))}
              </div>
            </div>
          )}

          {frequency !== "minute" && frequency !== "hour" && (
            <div className="flex items-center gap-4 space-y-2">
              <Label className="pt-2">At</Label>
              <Input
                type="time"
                value={selectedTime}
                onChange={(e) => setSelectedTime(e.target.value)}
              />
            </div>
          )}

          <Separator />
          {/*

          On the backend, we use standard cron expressions, which makes it
          challenging to add an end date or stop execution after a certain
          time using only cron expressions (standard cron expressions have
          limitations, such as the lack of a year field).

          We could also use ranges in cron expressions for end dates, but
          that doesn't cover all cases (it sometimes breaks).

          To end the scheduler automatically, we would need to store the end
          date and the occurrence count in the database and modify
          scheduler.add_job. Currently, the scheduler can only be stopped
          manually from the monitor tab.

          */}

          {/* <div className="space-y-6">
            <Label className="text-lg font-medium">Ends</Label>
            <RadioGroup
              value={endType}
              onValueChange={(value: "never" | "on" | "after") =>
                setEndType(value)
              }
            >
              <div className="flex items-center space-x-2">
                <RadioGroupItem value="never" id="never" />
                <Label htmlFor="never">Never</Label>
              </div>

              <div className="flex items-center space-x-2">
                <RadioGroupItem value="on" id="on" />
                <Label htmlFor="on" className="w-[50px]">
                  On
                </Label>
                <Popover>
                  <PopoverTrigger asChild>
                    <Button
                      variant="outline"
                      className="w-full"
                      disabled={endType !== "on"}
                    >
                      <CalendarIcon className="mr-2 h-4 w-4" />
                      {endDate ? format(endDate, "PPP") : "Pick a date"}
                    </Button>
                  </PopoverTrigger>
                  <PopoverContent className="w-auto p-0">
                    <Calendar
                      mode="single"
                      selected={endDate}
                      onSelect={setEndDate}
                      disabled={(date) => date < new Date()}
                      fromDate={new Date()}
                    />
                  </PopoverContent>
                </Popover>
              </div>
              <div className="flex items-center space-x-2">
                <RadioGroupItem value="after" id="after" />
                <Label htmlFor="after" className="ml-2 w-[50px]">
                  After
                </Label>
                <Input
                  type="number"
                  className="ml-2 w-[100px]"
                  value={occurrences}
                  onChange={(e) => setOccurrences(Number(e.target.value))}
                />
                <span>times</span>
              </div>
            </RadioGroup>
          </div> */}

          <div className="flex justify-end space-x-2">
            <Button variant="outline" onClick={() => setOpen(false)}>
              Cancel
            </Button>
            <Button
              onClick={() => {
                const cronExpr = cron_manager.generateCronExpression(
                  frequency,
                  selectedTime,
                  selectedDays,
                  selectedMinute,
                  customInterval,
                );
                setFrequency("minute");
                setSelectedDays([]);
                setSelectedTime("00:00");
                setShowCustomDays(false);
                setSelectedMinute("0");
                setCustomInterval({ value: 1, unit: "minutes" });
                setOpen(false);
                afterCronCreation(cronExpr);
              }}
            >
              Done
            </Button>
          </div>
        </div>
      </DialogContent>
    </Dialog>
  );
}
@@ -1,4 +1,4 @@
import React, { useState, useCallback } from "react";
import React, { useState } from "react";
import { Card, CardContent, CardHeader } from "@/components/ui/card";
import { Label } from "@/components/ui/label";
import { Button } from "@/components/ui/button";
@@ -10,7 +10,7 @@ import {
  PopoverContent,
  PopoverTrigger,
} from "@/components/ui/popover";
import { Block, BlockUIType, SpecialBlockID } from "@/lib/autogpt-server-api";
import { Block } from "@/lib/autogpt-server-api";
import { MagnifyingGlassIcon, PlusIcon } from "@radix-ui/react-icons";
import { IconToyBrick } from "@/components/ui/icons";
import { getPrimaryCategoryColor } from "@/lib/utils";
@@ -19,17 +19,11 @@ import {
  TooltipContent,
  TooltipTrigger,
} from "@/components/ui/tooltip";
import { GraphMeta } from "@/lib/autogpt-server-api";

interface BlocksControlProps {
  blocks: Block[];
  addBlock: (
    id: string,
    name: string,
    hardcodedValues: Record<string, any>,
  ) => void;
  addBlock: (id: string, name: string) => void;
  pinBlocksPopover: boolean;
  flows: GraphMeta[];
}

/**
@@ -45,42 +39,29 @@ export const BlocksControl: React.FC<BlocksControlProps> = ({
  blocks,
  addBlock,
  pinBlocksPopover,
  flows,
}) => {
  const blockList = blocks.sort((a, b) => a.name.localeCompare(b.name));
  const [searchQuery, setSearchQuery] = useState("");
  const [selectedCategory, setSelectedCategory] = useState<string | null>(null);
  const [filteredBlocks, setFilteredBlocks] = useState<Block[]>(blockList);

  const getFilteredBlockList = (): Block[] => {
    const blockList = blocks
      .filter((b) => b.uiType !== BlockUIType.AGENT)
      .sort((a, b) => a.name.localeCompare(b.name));
    const agentList = flows.map(
      (flow) =>
        ({
          id: SpecialBlockID.AGENT,
          name: flow.name,
          description:
            `Ver.${flow.version}` +
            (flow.description ? ` | ${flow.description}` : ""),
          categories: [{ category: "AGENT", description: "" }],
          inputSchema: flow.input_schema,
          outputSchema: flow.output_schema,
          staticOutput: false,
          uiType: BlockUIType.AGENT,
          uiKey: flow.id,
          costs: [],
          hardcodedValues: {
            graph_id: flow.id,
            graph_version: flow.version,
            input_schema: flow.input_schema,
            output_schema: flow.output_schema,
          },
        }) as Block,
    );
  const resetFilters = React.useCallback(() => {
    setSearchQuery("");
    setSelectedCategory(null);
    setFilteredBlocks(blockList);
  }, [blockList]);

    return blockList
      .concat(agentList)
      .filter(
  // Extract unique categories from blocks
  const categories = Array.from(
    new Set([
      null,
      ...blocks.flatMap((block) => block.categories.map((cat) => cat.category)),
    ]),
  );

  React.useEffect(() => {
    setFilteredBlocks(
      blockList.filter(
        (block: Block) =>
          (block.name.toLowerCase().includes(searchQuery.toLowerCase()) ||
            beautifyString(block.name)
@@ -88,23 +69,9 @@ export const BlocksControl: React.FC<BlocksControlProps> = ({
              .includes(searchQuery.toLowerCase())) &&
          (!selectedCategory ||
            block.categories.some((cat) => cat.category === selectedCategory)),
      );
  };

  const resetFilters = React.useCallback(() => {
    setSearchQuery("");
    setSelectedCategory(null);
  }, []);

  // Extract unique categories from blocks
  const categories = Array.from(
    new Set([
      null,
      ...blocks
        .flatMap((block) => block.categories.map((cat) => cat.category))
        .sort(),
    ]),
  );
      ),
    );
  }, [blockList, searchQuery, selectedCategory]);

  return (
    <Popover
@@ -183,14 +150,12 @@ export const BlocksControl: React.FC<BlocksControlProps> = ({
          className="h-[60vh] w-fit w-full"
          data-id="blocks-control-scroll-area"
        >
          {getFilteredBlockList().map((block) => (
          {filteredBlocks.map((block) => (
            <Card
              key={block.uiKey || block.id}
              key={block.id}
              className="m-2 my-4 flex h-20 cursor-pointer shadow-none hover:shadow-lg"
              data-id={`block-card-${block.id}`}
              onClick={() =>
                addBlock(block.id, block.name, block?.hardcodedValues || {})
              }
              onClick={() => addBlock(block.id, block.name)}
            >
              <div
                className={`-ml-px h-full w-3 rounded-l-xl ${getPrimaryCategoryColor(block.categories)}`}
@@ -205,10 +170,7 @@ export const BlocksControl: React.FC<BlocksControlProps> = ({
                  {beautifyString(block.name).replace(/ Block$/, "")}
                </span>
                <span className="block break-words text-xs font-normal text-gray-500">
                  {/* Cap description at 100 characters max */}
                  {block.description?.length > 100
                    ? block.description.slice(0, 100) + "..."
                    : block.description}
                  {block.description}
                </span>
              </div>
              <div

Some files were not shown because too many files have changed in this diff.