diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 98a69ae40b..2895f601ba 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -21,3 +21,16 @@ Here is a list of our critical paths, if you need some inspiration on what and h
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly
+
+### Configuration Changes 📝
+> [!NOTE]
+> Only applies to the new AutoGPT Platform, currently in autogpt_platform/
+
+If you're making configuration or infrastructure changes, please remember to check that you've also updated the related infrastructure code in the autogpt_platform/infra folder.
+
+Examples of such changes might include:
+
+- Changing ports
+- Adding new services that need to communicate with each other
+- Secrets or environment variable changes
+- New or changed infrastructure, such as databases
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..68b6fc2b7c
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,179 @@
+version: 2
+updates:
+ # autogpt_libs (Poetry project)
+ - package-ecosystem: "pip"
+ directory: "autogpt_platform/autogpt_libs"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 10
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+ # backend (Poetry project)
+ - package-ecosystem: "pip"
+ directory: "autogpt_platform/backend"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 10
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # frontend (Next.js project)
+ - package-ecosystem: "npm"
+ directory: "autogpt_platform/frontend"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 10
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # infra (Terraform)
+ - package-ecosystem: "terraform"
+ directory: "autogpt_platform/infra"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 5
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # market (Poetry project)
+ - package-ecosystem: "pip"
+ directory: "autogpt_platform/market"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 10
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 5
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # Docker
+ - package-ecosystem: "docker"
+ directory: "autogpt_platform/"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 5
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # Submodules
+ - package-ecosystem: "gitsubmodule"
+ directory: "autogpt_platform/supabase"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 1
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
+
+
+ # Docs
+ - package-ecosystem: 'pip'
+ directory: "docs/"
+ schedule:
+ interval: "weekly"
+ open-pull-requests-limit: 1
+ target-branch: "dev"
+ groups:
+ production-dependencies:
+ dependency-type: "production"
+ update-types:
+ - "minor"
+ - "patch"
+ development-dependencies:
+ dependency-type: "development"
+ update-types:
+ - "minor"
+ - "patch"
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 9b843c2670..8d23469838 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -25,3 +25,8 @@ platform/frontend:
platform/backend:
- changed-files:
- any-glob-to-any-file: autogpt_platform/backend/**
+ - all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
+
+platform/blocks:
+- changed-files:
+ - any-glob-to-any-file: autogpt_platform/backend/backend/blocks/**
diff --git a/.github/workflows/classic-autogpt-ci.yml b/.github/workflows/classic-autogpt-ci.yml
index be19992e34..e549da8ae0 100644
--- a/.github/workflows/classic-autogpt-ci.yml
+++ b/.github/workflows/classic-autogpt-ci.yml
@@ -2,12 +2,12 @@ name: Classic - AutoGPT CI
on:
push:
- branches: [ master, development, ci-test* ]
+ branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
pull_request:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpt-ci.yml'
- 'classic/original_autogpt/**'
diff --git a/.github/workflows/classic-autogpt-docker-ci.yml b/.github/workflows/classic-autogpt-docker-ci.yml
index ae7279cf13..31689ebcb7 100644
--- a/.github/workflows/classic-autogpt-docker-ci.yml
+++ b/.github/workflows/classic-autogpt-docker-ci.yml
@@ -8,7 +8,7 @@ on:
- 'classic/original_autogpt/**'
- 'classic/forge/**'
pull_request:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
diff --git a/.github/workflows/classic-autogpts-ci.yml b/.github/workflows/classic-autogpts-ci.yml
index 4aff154e76..21bc945c00 100644
--- a/.github/workflows/classic-autogpts-ci.yml
+++ b/.github/workflows/classic-autogpts-ci.yml
@@ -5,7 +5,7 @@ on:
schedule:
- cron: '0 8 * * *'
push:
- branches: [ master, development, ci-test* ]
+ branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
@@ -16,7 +16,7 @@ on:
- 'classic/setup.py'
- '!**/*.md'
pull_request:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-autogpts-ci.yml'
- 'classic/original_autogpt/**'
diff --git a/.github/workflows/classic-benchmark-ci.yml b/.github/workflows/classic-benchmark-ci.yml
index d6914083ef..03ebe10fdd 100644
--- a/.github/workflows/classic-benchmark-ci.yml
+++ b/.github/workflows/classic-benchmark-ci.yml
@@ -2,13 +2,13 @@ name: Classic - AGBenchmark CI
on:
push:
- branches: [ master, development, ci-test* ]
+ branches: [ master, dev, ci-test* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
- .github/workflows/classic-benchmark-ci.yml
pull_request:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths:
- 'classic/benchmark/**'
- '!classic/benchmark/reports/**'
diff --git a/.github/workflows/classic-forge-ci.yml b/.github/workflows/classic-forge-ci.yml
index 6b81214e78..4642f57521 100644
--- a/.github/workflows/classic-forge-ci.yml
+++ b/.github/workflows/classic-forge-ci.yml
@@ -2,13 +2,13 @@ name: Classic - Forge CI
on:
push:
- branches: [ master, development, ci-test* ]
+ branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-forge-ci.yml'
- 'classic/forge/**'
diff --git a/.github/workflows/classic-frontend-ci.yml b/.github/workflows/classic-frontend-ci.yml
index f84259a262..fca919ee2c 100644
--- a/.github/workflows/classic-frontend-ci.yml
+++ b/.github/workflows/classic-frontend-ci.yml
@@ -49,7 +49,7 @@ jobs:
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
- uses: peter-evans/create-pull-request@v6
+ uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
diff --git a/.github/workflows/classic-python-checks.yml b/.github/workflows/classic-python-checks.yml
index b6519348d3..59d90a7fae 100644
--- a/.github/workflows/classic-python-checks.yml
+++ b/.github/workflows/classic-python-checks.yml
@@ -2,7 +2,7 @@ name: Classic - Python checks
on:
push:
- branches: [ master, development, ci-test* ]
+ branches: [ master, dev, ci-test* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
@@ -11,7 +11,7 @@ on:
- '**.py'
- '!classic/forge/tests/vcr_cassettes'
pull_request:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths:
- '.github/workflows/classic-python-checks-ci.yml'
- 'classic/original_autogpt/**'
diff --git a/.github/workflows/platform-autgpt-deploy-prod.yml b/.github/workflows/platform-autgpt-deploy-prod.yml
new file mode 100644
index 0000000000..ffd87eacf8
--- /dev/null
+++ b/.github/workflows/platform-autgpt-deploy-prod.yml
@@ -0,0 +1,182 @@
+name: AutoGPT Platform - Build, Push, and Deploy Prod Environment
+
+on:
+ release:
+ types: [published]
+
+permissions:
+ contents: 'read'
+ id-token: 'write'
+
+env:
+ PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
+ GKE_CLUSTER: prod-gke-cluster
+ GKE_ZONE: us-central1-a
+ NAMESPACE: prod-agpt
+
+jobs:
+ migrate:
+ environment: production
+ name: Run migrations for AutoGPT Platform
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install prisma
+
+ - name: Run Backend Migrations
+ working-directory: ./autogpt_platform/backend
+ run: |
+ python -m prisma migrate deploy
+ env:
+ DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
+
+ - name: Run Market Migrations
+ working-directory: ./autogpt_platform/market
+ run: |
+ python -m prisma migrate deploy
+ env:
+ DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}
+
+ build-push-deploy:
+ environment: production
+ name: Build, Push, and Deploy
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+
+ - id: 'auth'
+ uses: 'google-github-actions/auth@v2'
+ with:
+ workload_identity_provider: 'projects/1021527134101/locations/global/workloadIdentityPools/prod-pool/providers/github'
+ service_account: 'prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com'
+ token_format: 'access_token'
+ create_credentials_file: true
+
+ - name: 'Set up Cloud SDK'
+ uses: 'google-github-actions/setup-gcloud@v2'
+
+ - name: 'Configure Docker'
+ run: |
+ gcloud auth configure-docker us-east1-docker.pkg.dev
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Cache Docker layers
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.buildx-cache
+ key: ${{ runner.os }}-buildx-${{ github.sha }}
+ restore-keys: |
+ ${{ runner.os }}-buildx-
+
+ - name: Check for changes
+ id: check_changes
+ run: |
+ git fetch origin master
+ BACKEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false")
+ FRONTEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false")
+ MARKET_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false")
+ echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT
+ echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT
+ echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT
+
+ - name: Get GKE credentials
+ uses: 'google-github-actions/get-gke-credentials@v2'
+ with:
+ cluster_name: ${{ env.GKE_CLUSTER }}
+ location: ${{ env.GKE_ZONE }}
+
+ - name: Build and Push Backend
+ if: steps.check_changes.outputs.backend_changed == 'true'
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: ./autogpt_platform/backend/Dockerfile
+ push: true
+ tags: us-east1-docker.pkg.dev/agpt-prod/agpt-backend-prod/agpt-backend-prod:${{ github.sha }}
+ cache-from: type=local,src=/tmp/.buildx-cache
+ cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+ - name: Build and Push Frontend
+ if: steps.check_changes.outputs.frontend_changed == 'true'
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: ./autogpt_platform/frontend/Dockerfile
+ push: true
+ tags: us-east1-docker.pkg.dev/agpt-prod/agpt-frontend-prod/agpt-frontend-prod:${{ github.sha }}
+ cache-from: type=local,src=/tmp/.buildx-cache
+ cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+ - name: Build and Push Market
+ if: steps.check_changes.outputs.market_changed == 'true'
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: ./autogpt_platform/market/Dockerfile
+ push: true
+ tags: us-east1-docker.pkg.dev/agpt-prod/agpt-market-prod/agpt-market-prod:${{ github.sha }}
+ cache-from: type=local,src=/tmp/.buildx-cache
+ cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+ - name: Move cache
+ run: |
+ rm -rf /tmp/.buildx-cache
+ mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+
+ - name: Set up Helm
+ uses: azure/setup-helm@v4
+ with:
+ version: v3.4.0
+
+ - name: Deploy Backend
+ if: steps.check_changes.outputs.backend_changed == 'true'
+ run: |
+ helm upgrade autogpt-server ./autogpt-server \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-server/values.yaml \
+ -f autogpt-server/values.prod.yaml \
+ --set image.tag=${{ github.sha }}
+
+ - name: Deploy Websocket
+ if: steps.check_changes.outputs.backend_changed == 'true'
+ run: |
+ helm upgrade autogpt-websocket-server ./autogpt-websocket-server \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-websocket-server/values.yaml \
+ -f autogpt-websocket-server/values.prod.yaml \
+ --set image.tag=${{ github.sha }}
+
+ - name: Deploy Market
+ if: steps.check_changes.outputs.market_changed == 'true'
+ run: |
+ helm upgrade autogpt-market ./autogpt-market \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-market/values.yaml \
+ -f autogpt-market/values.prod.yaml \
+ --set image.tag=${{ github.sha }}
+
+ - name: Deploy Frontend
+ if: steps.check_changes.outputs.frontend_changed == 'true'
+ run: |
+ helm upgrade autogpt-builder ./autogpt-builder \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-builder/values.yaml \
+ -f autogpt-builder/values.prod.yaml \
+ --set image.tag=${{ github.sha }}
diff --git a/.github/workflows/platform-autogpt-deploy.yaml b/.github/workflows/platform-autogpt-deploy.yaml
new file mode 100644
index 0000000000..e05d442986
--- /dev/null
+++ b/.github/workflows/platform-autogpt-deploy.yaml
@@ -0,0 +1,186 @@
+name: AutoGPT Platform - Build, Push, and Deploy Dev Environment
+
+on:
+ push:
+ branches: [ dev ]
+ paths:
+ - 'autogpt_platform/backend/**'
+ - 'autogpt_platform/frontend/**'
+ - 'autogpt_platform/market/**'
+
+permissions:
+ contents: 'read'
+ id-token: 'write'
+
+env:
+ PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
+ GKE_CLUSTER: dev-gke-cluster
+ GKE_ZONE: us-central1-a
+ NAMESPACE: dev-agpt
+
+jobs:
+ migrate:
+ environment: develop
+ name: Run migrations for AutoGPT Platform
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install prisma
+
+ - name: Run Backend Migrations
+ working-directory: ./autogpt_platform/backend
+ run: |
+ python -m prisma migrate deploy
+ env:
+ DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
+
+ - name: Run Market Migrations
+ working-directory: ./autogpt_platform/market
+ run: |
+ python -m prisma migrate deploy
+ env:
+ DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}
+
+ build-push-deploy:
+ name: Build, Push, and Deploy
+ needs: migrate
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+
+ - id: 'auth'
+ uses: 'google-github-actions/auth@v2'
+ with:
+ workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/dev-pool/providers/github'
+ service_account: 'dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com'
+ token_format: 'access_token'
+ create_credentials_file: true
+
+ - name: 'Set up Cloud SDK'
+ uses: 'google-github-actions/setup-gcloud@v2'
+
+ - name: 'Configure Docker'
+ run: |
+ gcloud auth configure-docker us-east1-docker.pkg.dev
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Cache Docker layers
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.buildx-cache
+ key: ${{ runner.os }}-buildx-${{ github.sha }}
+ restore-keys: |
+ ${{ runner.os }}-buildx-
+
+ - name: Check for changes
+ id: check_changes
+ run: |
+ git fetch origin dev
+ BACKEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false")
+ FRONTEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false")
+ MARKET_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false")
+ echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT
+ echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT
+ echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT
+
+ - name: Get GKE credentials
+ uses: 'google-github-actions/get-gke-credentials@v2'
+ with:
+ cluster_name: ${{ env.GKE_CLUSTER }}
+ location: ${{ env.GKE_ZONE }}
+
+ - name: Build and Push Backend
+ if: steps.check_changes.outputs.backend_changed == 'true'
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: ./autogpt_platform/backend/Dockerfile
+ push: true
+ tags: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev:${{ github.sha }}
+ cache-from: type=local,src=/tmp/.buildx-cache
+ cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+ - name: Build and Push Frontend
+ if: steps.check_changes.outputs.frontend_changed == 'true'
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: ./autogpt_platform/frontend/Dockerfile
+ push: true
+ tags: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev:${{ github.sha }}
+ cache-from: type=local,src=/tmp/.buildx-cache
+ cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+ - name: Build and Push Market
+ if: steps.check_changes.outputs.market_changed == 'true'
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: ./autogpt_platform/market/Dockerfile
+ push: true
+ tags: us-east1-docker.pkg.dev/agpt-dev/agpt-market-dev/agpt-market-dev:${{ github.sha }}
+ cache-from: type=local,src=/tmp/.buildx-cache
+ cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max
+
+ - name: Move cache
+ run: |
+ rm -rf /tmp/.buildx-cache
+ mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+
+ - name: Set up Helm
+ uses: azure/setup-helm@v4
+ with:
+ version: v3.4.0
+
+ - name: Deploy Backend
+ if: steps.check_changes.outputs.backend_changed == 'true'
+ run: |
+ helm upgrade autogpt-server ./autogpt-server \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-server/values.yaml \
+ -f autogpt-server/values.dev.yaml \
+ --set image.tag=${{ github.sha }}
+
+ - name: Deploy Websocket
+ if: steps.check_changes.outputs.backend_changed == 'true'
+ run: |
+ helm upgrade autogpt-websocket-server ./autogpt-websocket-server \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-websocket-server/values.yaml \
+ -f autogpt-websocket-server/values.dev.yaml \
+ --set image.tag=${{ github.sha }}
+
+ - name: Deploy Market
+ if: steps.check_changes.outputs.market_changed == 'true'
+ run: |
+ helm upgrade autogpt-market ./autogpt-market \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-market/values.yaml \
+ -f autogpt-market/values.dev.yaml \
+ --set image.tag=${{ github.sha }}
+
+ - name: Deploy Frontend
+ if: steps.check_changes.outputs.frontend_changed == 'true'
+ run: |
+ helm upgrade autogpt-builder ./autogpt-builder \
+ --namespace ${{ env.NAMESPACE }} \
+ -f autogpt-builder/values.yaml \
+ -f autogpt-builder/values.dev.yaml \
+ --set image.tag=${{ github.sha }}
\ No newline at end of file
diff --git a/.github/workflows/platform-autogpt-infra-ci.yml b/.github/workflows/platform-autogpt-infra-ci.yml
index c61094582f..b2d34ac0ab 100644
--- a/.github/workflows/platform-autogpt-infra-ci.yml
+++ b/.github/workflows/platform-autogpt-infra-ci.yml
@@ -2,7 +2,7 @@ name: AutoGPT Platform - Infra
on:
push:
- branches: [ master ]
+ branches: [ master, dev ]
paths:
- '.github/workflows/platform-autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'
@@ -36,12 +36,12 @@ jobs:
tflint_changed_only: false
- name: Set up Helm
- uses: azure/setup-helm@v4.2.0
+ uses: azure/setup-helm@v4
with:
version: v3.14.4
- name: Set up chart-testing
- uses: helm/chart-testing-action@v2.6.0
+ uses: helm/chart-testing-action@v2.6.1
- name: Run chart-testing (list-changed)
id: list-changed
diff --git a/.github/workflows/platform-backend-ci.yml b/.github/workflows/platform-backend-ci.yml
index eb5a3481c7..63fe8b118f 100644
--- a/.github/workflows/platform-backend-ci.yml
+++ b/.github/workflows/platform-backend-ci.yml
@@ -2,12 +2,12 @@ name: AutoGPT Platform - Backend CI
on:
push:
- branches: [master, development, ci-test*]
+ branches: [master, dev, ci-test*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
pull_request:
- branches: [master, development, release-*]
+ branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
@@ -32,6 +32,14 @@ jobs:
python-version: ["3.10"]
runs-on: ubuntu-latest
+ services:
+ redis:
+ image: bitnami/redis:6.2
+ env:
+ REDIS_PASSWORD: testpassword
+ ports:
+ - 6379:6379
+
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -96,9 +104,9 @@ jobs:
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
- poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
+ poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
else
- poetry run pytest -vv test
+ poetry run pytest -s -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
@@ -107,6 +115,10 @@ jobs:
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
+ REDIS_HOST: 'localhost'
+ REDIS_PORT: '6379'
+ REDIS_PASSWORD: 'testpassword'
+
env:
CI: true
PLAIN_OUTPUT: True
diff --git a/.github/workflows/platform-frontend-ci.yml b/.github/workflows/platform-frontend-ci.yml
index 72c3888c1d..ce3633013b 100644
--- a/.github/workflows/platform-frontend-ci.yml
+++ b/.github/workflows/platform-frontend-ci.yml
@@ -2,7 +2,7 @@ name: AutoGPT Platform - Frontend CI
on:
push:
- branches: [master]
+ branches: [master, dev]
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
@@ -29,24 +29,37 @@ jobs:
- name: Install dependencies
run: |
- npm install
-
- - name: Check formatting with Prettier
- run: |
- npx prettier --check .
+ yarn install --frozen-lockfile
- name: Run lint
run: |
- npm run lint
+ yarn lint
test:
runs-on: ubuntu-latest
steps:
+ - name: Free Disk Space (Ubuntu)
+ uses: jlumbroso/free-disk-space@main
+ with:
+ # this might remove tools that are actually needed,
+ # if set to "true" but frees about 6 GB
+ tool-cache: false
+
+ # all of these default to true, but feel free to set to
+ # "false" if necessary for your workflow
+ android: false
+ dotnet: false
+ haskell: false
+ large-packages: true
+ docker-images: true
+ swap-storage: true
+
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
+
- name: Set up Node.js
uses: actions/setup-node@v4
with:
@@ -62,18 +75,18 @@ jobs:
- name: Install dependencies
run: |
- npm install
+ yarn install --frozen-lockfile
- name: Setup Builder .env
run: |
cp .env.example .env
- name: Install Playwright Browsers
- run: npx playwright install --with-deps
+ run: yarn playwright install --with-deps
- name: Run tests
run: |
- npm run test
+ yarn test
- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}
diff --git a/.github/workflows/platform-market-ci.yml b/.github/workflows/platform-market-ci.yml
new file mode 100644
index 0000000000..560c05d64a
--- /dev/null
+++ b/.github/workflows/platform-market-ci.yml
@@ -0,0 +1,125 @@
+name: AutoGPT Platform - Market CI
+
+on:
+ push:
+ branches: [master, dev, ci-test*]
+ paths:
+ - ".github/workflows/platform-market-ci.yml"
+ - "autogpt_platform/market/**"
+ pull_request:
+ branches: [master, dev, release-*]
+ paths:
+ - ".github/workflows/platform-market-ci.yml"
+ - "autogpt_platform/market/**"
+
+concurrency:
+  group: ${{ format('market-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
+
+defaults:
+ run:
+ shell: bash
+ working-directory: autogpt_platform/market
+
+jobs:
+ test:
+ permissions:
+ contents: read
+ timeout-minutes: 30
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.10"]
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ submodules: true
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Setup Supabase
+ uses: supabase/setup-cli@v1
+ with:
+ version: latest
+
+ - id: get_date
+ name: Get date
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Set up Python dependency cache
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/pypoetry
+ key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/market/poetry.lock') }}
+
+ - name: Install Poetry (Unix)
+ run: |
+ curl -sSL https://install.python-poetry.org | python3 -
+
+ if [ "${{ runner.os }}" = "macOS" ]; then
+ PATH="$HOME/.local/bin:$PATH"
+ echo "$HOME/.local/bin" >> $GITHUB_PATH
+ fi
+
+ - name: Install Python dependencies
+ run: poetry install
+
+ - name: Generate Prisma Client
+ run: poetry run prisma generate
+
+ - id: supabase
+ name: Start Supabase
+ working-directory: .
+ run: |
+ supabase init
+ supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
+ supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
+ # outputs:
+ # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
+
+ - name: Run Database Migrations
+ run: poetry run prisma migrate dev --name updates
+ env:
+ DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
+
+ - id: lint
+ name: Run Linter
+ run: poetry run lint
+
+  # Tests are commented out because they do not work with the Prisma mock and have not been updated since they were created
+ # - name: Run pytest with coverage
+ # run: |
+ # if [[ "${{ runner.debug }}" == "1" ]]; then
+ # poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
+ # else
+ # poetry run pytest -s -vv test
+ # fi
+ # if: success() || (failure() && steps.lint.outcome == 'failure')
+ # env:
+ # LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
+ # DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
+ # SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
+ # SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
+ # SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
+ # REDIS_HOST: 'localhost'
+ # REDIS_PORT: '6379'
+ # REDIS_PASSWORD: 'testpassword'
+
+ env:
+ CI: true
+ PLAIN_OUTPUT: True
+ RUN_ENV: local
+ PORT: 8080
+
+ # - name: Upload coverage reports to Codecov
+ # uses: codecov/codecov-action@v4
+ # with:
+ # token: ${{ secrets.CODECOV_TOKEN }}
+ # flags: backend,${{ runner.os }}
diff --git a/.github/workflows/repo-pr-enforce-base-branch.yml b/.github/workflows/repo-pr-enforce-base-branch.yml
new file mode 100644
index 0000000000..3d4bd9096a
--- /dev/null
+++ b/.github/workflows/repo-pr-enforce-base-branch.yml
@@ -0,0 +1,21 @@
+name: Repo - Enforce dev as base branch
+on:
+ pull_request_target:
+ branches: [ master ]
+ types: [ opened ]
+
+jobs:
+ check_pr_target:
+ runs-on: ubuntu-latest
+ permissions:
+ pull-requests: write
+ steps:
+ - name: Check if PR is from dev or hotfix
+ if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
+ run: |
+ gh pr comment ${{ github.event.number }} --repo "$REPO" \
+ --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
+ gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
+ env:
+ GITHUB_TOKEN: ${{ github.token }}
+ REPO: ${{ github.repository }}
diff --git a/.github/workflows/repo-pr-label.yml b/.github/workflows/repo-pr-label.yml
index cbd34d639d..eef928ef16 100644
--- a/.github/workflows/repo-pr-label.yml
+++ b/.github/workflows/repo-pr-label.yml
@@ -3,7 +3,7 @@ name: Repo - Pull Request auto-label
on:
# So that PRs touching the same files as the push are updated
push:
- branches: [ master, development, release-* ]
+ branches: [ master, dev, release-* ]
paths-ignore:
- 'classic/forge/tests/vcr_cassettes'
- 'classic/benchmark/reports/**'
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fb29d72d38..5144d6b6bc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,7 +11,7 @@ Also check out our [🚀 Roadmap][roadmap] for information about our priorities
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
## Contributing to the AutoGPT Platform Folder
-All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution.
+All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.
## In short
1. Avoid duplicate work, issues, PRs etc.
diff --git a/LICENSE b/LICENSE
index 601935b85e..52c6e9a8d5 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,7 +1,13 @@
+All portions of this repository are under one of two licenses. The majority of the AutoGPT repository is under the MIT License below. The autogpt_platform folder is under the
+Polyform Shield License.
+
+
MIT License
+
Copyright (c) 2023 Toran Bruce Richards
+
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -9,9 +15,11 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
+
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
diff --git a/README.md b/README.md
index c6c603988e..a323729ad5 100644
--- a/README.md
+++ b/README.md
@@ -65,6 +65,7 @@ Here are two examples of what you can do with AutoGPT:
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.
---
+### Mission and Licensing
Our mission is to provide the tools, so that you can focus on what matters:
- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -77,6 +78,13 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
|
**🚀 [Contributing](CONTRIBUTING.md)**
+**Licensing:**
+
+MIT License: The majority of the AutoGPT repository is under the MIT License.
+
+Polyform Shield License: This license applies to the autogpt_platform folder.
+
+For more information, see https://agpt.co/blog/introducing-the-autogpt-platform
---
## 🤖 AutoGPT Classic
@@ -101,7 +109,7 @@ This guide will walk you through the process of creating your own agent and usin
📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
|
-📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
+📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark
### 💻 UI
@@ -150,6 +158,8 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
---
+## Star Stats
+
@@ -159,3 +169,10 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
+
+
+## ⚡ Contributors
+
+
+
+
diff --git a/autogpt_platform/README.md b/autogpt_platform/README.md
index db64280932..02cbe68149 100644
--- a/autogpt_platform/README.md
+++ b/autogpt_platform/README.md
@@ -149,6 +149,3 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
3. Save the file and run `docker compose up -d` to apply the changes.
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
-
-
-
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/__init__.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/__init__.py
index 546bc5f5b4..f957198eb7 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/__init__.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/__init__.py
@@ -1,8 +1,9 @@
from .store import SupabaseIntegrationCredentialsStore
-from .types import APIKeyCredentials, OAuth2Credentials
+from .types import Credentials, APIKeyCredentials, OAuth2Credentials
__all__ = [
"SupabaseIntegrationCredentialsStore",
+ "Credentials",
"APIKeyCredentials",
"OAuth2Credentials",
]
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py
index 13e1e69c83..6a4bb354fc 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/store.py
@@ -1,8 +1,13 @@
import secrets
from datetime import datetime, timedelta, timezone
-from typing import cast
+from typing import TYPE_CHECKING
-from supabase import Client
+if TYPE_CHECKING:
+ from redis import Redis
+ from backend.executor.database import DatabaseManager
+
+from autogpt_libs.utils.cache import thread_cached
+from autogpt_libs.utils.synchronize import RedisKeyedMutex
from .types import (
Credentials,
@@ -14,26 +19,36 @@ from .types import (
class SupabaseIntegrationCredentialsStore:
- def __init__(self, supabase: Client):
- self.supabase = supabase
+ def __init__(self, redis: "Redis"):
+ self.locks = RedisKeyedMutex(redis)
+
+ @property
+ @thread_cached
+ def db_manager(self) -> "DatabaseManager":
+ from backend.executor.database import DatabaseManager
+ from backend.util.service import get_service_client
+ return get_service_client(DatabaseManager)
def add_creds(self, user_id: str, credentials: Credentials) -> None:
- if self.get_creds_by_id(user_id, credentials.id):
- raise ValueError(
- f"Can not re-create existing credentials with ID {credentials.id} "
- f"for user with ID {user_id}"
+ with self.locked_user_metadata(user_id):
+ if self.get_creds_by_id(user_id, credentials.id):
+ raise ValueError(
+ f"Can not re-create existing credentials #{credentials.id} "
+ f"for user #{user_id}"
+ )
+ self._set_user_integration_creds(
+ user_id, [*self.get_all_creds(user_id), credentials]
)
- self._set_user_integration_creds(
- user_id, [*self.get_all_creds(user_id), credentials]
- )
def get_all_creds(self, user_id: str) -> list[Credentials]:
user_metadata = self._get_user_metadata(user_id)
- return UserMetadata.model_validate(user_metadata).integration_credentials
+ return UserMetadata.model_validate(
+ user_metadata.model_dump()
+ ).integration_credentials
def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
- credentials = self.get_all_creds(user_id)
- return next((c for c in credentials if c.id == credentials_id), None)
+ all_credentials = self.get_all_creds(user_id)
+ return next((c for c in all_credentials if c.id == credentials_id), None)
def get_creds_by_provider(self, user_id: str, provider: str) -> list[Credentials]:
credentials = self.get_all_creds(user_id)
@@ -44,65 +59,81 @@ class SupabaseIntegrationCredentialsStore:
return list(set(c.provider for c in credentials))
def update_creds(self, user_id: str, updated: Credentials) -> None:
- current = self.get_creds_by_id(user_id, updated.id)
- if not current:
- raise ValueError(
- f"Credentials with ID {updated.id} "
- f"for user with ID {user_id} not found"
- )
- if type(current) is not type(updated):
- raise TypeError(
- f"Can not update credentials with ID {updated.id} "
- f"from type {type(current)} "
- f"to type {type(updated)}"
- )
+ with self.locked_user_metadata(user_id):
+ current = self.get_creds_by_id(user_id, updated.id)
+ if not current:
+ raise ValueError(
+ f"Credentials with ID {updated.id} "
+ f"for user with ID {user_id} not found"
+ )
+ if type(current) is not type(updated):
+ raise TypeError(
+ f"Can not update credentials with ID {updated.id} "
+ f"from type {type(current)} "
+ f"to type {type(updated)}"
+ )
- # Ensure no scopes are removed when updating credentials
- if (
- isinstance(updated, OAuth2Credentials)
- and isinstance(current, OAuth2Credentials)
- and not set(updated.scopes).issuperset(current.scopes)
- ):
- raise ValueError(
- f"Can not update credentials with ID {updated.id} "
- f"and scopes {current.scopes} "
- f"to more restrictive set of scopes {updated.scopes}"
- )
+ # Ensure no scopes are removed when updating credentials
+ if (
+ isinstance(updated, OAuth2Credentials)
+ and isinstance(current, OAuth2Credentials)
+ and not set(updated.scopes).issuperset(current.scopes)
+ ):
+ raise ValueError(
+ f"Can not update credentials with ID {updated.id} "
+ f"and scopes {current.scopes} "
+ f"to more restrictive set of scopes {updated.scopes}"
+ )
- # Update the credentials
- updated_credentials_list = [
- updated if c.id == updated.id else c for c in self.get_all_creds(user_id)
- ]
- self._set_user_integration_creds(user_id, updated_credentials_list)
+ # Update the credentials
+ updated_credentials_list = [
+ updated if c.id == updated.id else c
+ for c in self.get_all_creds(user_id)
+ ]
+ self._set_user_integration_creds(user_id, updated_credentials_list)
def delete_creds_by_id(self, user_id: str, credentials_id: str) -> None:
- filtered_credentials = [
- c for c in self.get_all_creds(user_id) if c.id != credentials_id
- ]
- self._set_user_integration_creds(user_id, filtered_credentials)
+ with self.locked_user_metadata(user_id):
+ filtered_credentials = [
+ c for c in self.get_all_creds(user_id) if c.id != credentials_id
+ ]
+ self._set_user_integration_creds(user_id, filtered_credentials)
- async def store_state_token(self, user_id: str, provider: str) -> str:
+ def store_state_token(self, user_id: str, provider: str, scopes: list[str]) -> str:
token = secrets.token_urlsafe(32)
expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)
state = OAuthState(
- token=token, provider=provider, expires_at=int(expires_at.timestamp())
+ token=token,
+ provider=provider,
+ expires_at=int(expires_at.timestamp()),
+ scopes=scopes,
)
- user_metadata = self._get_user_metadata(user_id)
- oauth_states = user_metadata.get("integration_oauth_states", [])
- oauth_states.append(state.model_dump())
- user_metadata["integration_oauth_states"] = oauth_states
+ with self.locked_user_metadata(user_id):
+ user_metadata = self._get_user_metadata(user_id)
+ oauth_states = user_metadata.integration_oauth_states
+ oauth_states.append(state.model_dump())
+ user_metadata.integration_oauth_states = oauth_states
- self.supabase.auth.admin.update_user_by_id(
- user_id, {"user_metadata": user_metadata}
- )
+ self.db_manager.update_user_metadata(
+ user_id=user_id, metadata=user_metadata
+ )
return token
- async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
+ def get_any_valid_scopes_from_state_token(
+ self, user_id: str, token: str, provider: str
+ ) -> list[str]:
+ """
+        Get the valid scopes from the OAuth state token. This returns the scopes of any
+        valid (unexpired) state token for the given provider; if none is found, an empty
+        list is returned. DO NOT RELY ON THIS TOKEN TO AUTHENTICATE A USER; IT ONLY
+        CHECKS WHETHER THE USER HAS GRANTED THE APPLICATION PERMISSIONS BEFORE THE CODE
+        IS EXCHANGED FOR TOKENS.
+ """
user_metadata = self._get_user_metadata(user_id)
- oauth_states = user_metadata.get("integration_oauth_states", [])
+ oauth_states = user_metadata.integration_oauth_states
now = datetime.now(timezone.utc)
valid_state = next(
@@ -117,13 +148,33 @@ class SupabaseIntegrationCredentialsStore:
)
if valid_state:
- # Remove the used state
- oauth_states.remove(valid_state)
- user_metadata["integration_oauth_states"] = oauth_states
- self.supabase.auth.admin.update_user_by_id(
- user_id, {"user_metadata": user_metadata}
+ return valid_state.get("scopes", [])
+
+ return []
+
+ def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
+ with self.locked_user_metadata(user_id):
+ user_metadata = self._get_user_metadata(user_id)
+ oauth_states = user_metadata.integration_oauth_states
+
+ now = datetime.now(timezone.utc)
+ valid_state = next(
+ (
+ state
+ for state in oauth_states
+ if state["token"] == token
+ and state["provider"] == provider
+ and state["expires_at"] > now.timestamp()
+ ),
+ None,
)
- return True
+
+ if valid_state:
+ # Remove the used state
+ oauth_states.remove(valid_state)
+ user_metadata.integration_oauth_states = oauth_states
+ self.db_manager.update_user_metadata(user_id, user_metadata)
+ return True
return False
@@ -131,15 +182,13 @@ class SupabaseIntegrationCredentialsStore:
self, user_id: str, credentials: list[Credentials]
) -> None:
raw_metadata = self._get_user_metadata(user_id)
- raw_metadata.update(
- {"integration_credentials": [c.model_dump() for c in credentials]}
- )
- self.supabase.auth.admin.update_user_by_id(
- user_id, {"user_metadata": raw_metadata}
- )
+ raw_metadata.integration_credentials = [c.model_dump() for c in credentials]
+ self.db_manager.update_user_metadata(user_id, raw_metadata)
def _get_user_metadata(self, user_id: str) -> UserMetadataRaw:
- response = self.supabase.auth.admin.get_user_by_id(user_id)
- if not response.user:
- raise ValueError(f"User with ID {user_id} not found")
- return cast(UserMetadataRaw, response.user.user_metadata)
+ metadata: UserMetadataRaw = self.db_manager.get_user_metadata(user_id=user_id)
+ return metadata
+
+ def locked_user_metadata(self, user_id: str):
+ key = (self.db_manager, f"user:{user_id}", "metadata")
+ return self.locks.locked(key)
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py
index da39f6a842..0f973bb524 100644
--- a/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/supabase_integration_credentials_store/types.py
@@ -56,6 +56,7 @@ class OAuthState(BaseModel):
token: str
provider: str
expires_at: int
+ scopes: list[str]
"""Unix timestamp (seconds) indicating when this OAuth state expires"""
@@ -64,6 +65,6 @@ class UserMetadata(BaseModel):
integration_oauth_states: list[OAuthState] = Field(default_factory=list)
-class UserMetadataRaw(TypedDict, total=False):
- integration_credentials: list[dict]
- integration_oauth_states: list[dict]
+class UserMetadataRaw(BaseModel):
+ integration_credentials: list[dict] = Field(default_factory=list)
+ integration_oauth_states: list[dict] = Field(default_factory=list)
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py
new file mode 100644
index 0000000000..9c69da9411
--- /dev/null
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py
@@ -0,0 +1,20 @@
+from typing import Callable, TypeVar, ParamSpec
+import threading
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
+ thread_local = threading.local()
+
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ cache = getattr(thread_local, "cache", None)
+ if cache is None:
+ cache = thread_local.cache = {}
+ key = (args, tuple(sorted(kwargs.items())))
+ if key not in cache:
+ cache[key] = func(*args, **kwargs)
+ return cache[key]
+
+ return wrapper
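A minimal usage sketch of the thread_cached decorator added above (illustrative only; build_client is a hypothetical example function, not part of this patch):

    from autogpt_libs.utils.cache import thread_cached

    @thread_cached
    def build_client(name: str) -> str:
        # Runs once per thread for each distinct argument set; subsequent
        # calls on the same thread return the cached value.
        print(f"building client for {name}")
        return f"client:{name}"

    build_client("database")  # computes and caches
    build_client("database")  # served from the per-thread cache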
diff --git a/autogpt_platform/autogpt_libs/autogpt_libs/utils/synchronize.py b/autogpt_platform/autogpt_libs/autogpt_libs/utils/synchronize.py
new file mode 100644
index 0000000000..bdd0aa79e6
--- /dev/null
+++ b/autogpt_platform/autogpt_libs/autogpt_libs/utils/synchronize.py
@@ -0,0 +1,56 @@
+from contextlib import contextmanager
+from threading import Lock
+from typing import TYPE_CHECKING, Any
+
+from expiringdict import ExpiringDict
+
+if TYPE_CHECKING:
+ from redis import Redis
+ from redis.lock import Lock as RedisLock
+
+
+class RedisKeyedMutex:
+ """
+ This class provides a mutex that can be locked and unlocked by a specific key,
+ using Redis as a distributed locking provider.
+    It uses an ExpiringDict to automatically clear a mutex that has not been unlocked
+    within the specified timeout, to prevent memory leaks.
+ """
+
+ def __init__(self, redis: "Redis", timeout: int | None = 60):
+ self.redis = redis
+ self.timeout = timeout
+ self.locks: dict[Any, "RedisLock"] = ExpiringDict(
+ max_len=6000, max_age_seconds=self.timeout
+ )
+ self.locks_lock = Lock()
+
+ @contextmanager
+ def locked(self, key: Any):
+ lock = self.acquire(key)
+ try:
+ yield
+ finally:
+ lock.release()
+
+ def acquire(self, key: Any) -> "RedisLock":
+ """Acquires and returns a lock with the given key"""
+ with self.locks_lock:
+ if key not in self.locks:
+ self.locks[key] = self.redis.lock(
+ str(key), self.timeout, thread_local=False
+ )
+ lock = self.locks[key]
+ lock.acquire()
+ return lock
+
+ def release(self, key: Any):
+ if lock := self.locks.get(key):
+ lock.release()
+
+ def release_all_locks(self):
+ """Call this on process termination to ensure all locks are released"""
+ self.locks_lock.acquire(blocking=False)
+ for lock in self.locks.values():
+ if lock.locked() and lock.owned():
+ lock.release()
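A minimal usage sketch of the RedisKeyedMutex added above (illustrative only; the Redis connection settings are placeholders mirroring the CI values, and the key is an arbitrary example):

    from redis import Redis
    from autogpt_libs.utils.synchronize import RedisKeyedMutex

    redis = Redis(host="localhost", port=6379, password="testpassword")
    mutex = RedisKeyedMutex(redis, timeout=60)

    # Serializes concurrent read-modify-write cycles on a shared resource across
    # processes; the key can be any value with a stable str() representation.
    with mutex.locked(("user:1234", "metadata")):
        ...  # e.g. update the user's stored metadata safely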
diff --git a/autogpt_platform/autogpt_libs/poetry.lock b/autogpt_platform/autogpt_libs/poetry.lock
index 9b71e6ccf1..2041187ec4 100644
--- a/autogpt_platform/autogpt_libs/poetry.lock
+++ b/autogpt_platform/autogpt_libs/poetry.lock
@@ -377,6 +377,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "expiringdict"
+version = "1.2.2"
+description = "Dictionary with auto-expiring values for caching purposes"
+optional = false
+python-versions = "*"
+files = [
+ {file = "expiringdict-1.2.2-py3-none-any.whl", hash = "sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8"},
+ {file = "expiringdict-1.2.2.tar.gz", hash = "sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09"},
+]
+
+[package.extras]
+tests = ["coverage", "coveralls", "dill", "mock", "nose"]
+
[[package]]
name = "frozenlist"
version = "1.4.1"
@@ -569,13 +583,13 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]
[[package]]
name = "google-cloud-logging"
-version = "3.11.2"
+version = "3.11.3"
description = "Stackdriver Logging API client library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google_cloud_logging-3.11.2-py2.py3-none-any.whl", hash = "sha256:0a755f04f184fbe77ad608258dc283a032485ebb4d0e2b2501964059ee9c898f"},
- {file = "google_cloud_logging-3.11.2.tar.gz", hash = "sha256:4897441c2b74f6eda9181c23a8817223b6145943314a821d64b729d30766cb2b"},
+ {file = "google_cloud_logging-3.11.3-py2.py3-none-any.whl", hash = "sha256:b8ec23f2998f76a58f8492db26a0f4151dd500425c3f08448586b85972f3c494"},
+ {file = "google_cloud_logging-3.11.3.tar.gz", hash = "sha256:0a73cd94118875387d4535371d9e9426861edef8e44fba1261e86782d5b8d54f"},
]
[package.dependencies]
@@ -612,17 +626,17 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]
[[package]]
name = "gotrue"
-version = "2.8.1"
+version = "2.9.3"
description = "Python Client Library for Supabase Auth"
optional = false
-python-versions = "<4.0,>=3.8"
+python-versions = "<4.0,>=3.9"
files = [
- {file = "gotrue-2.8.1-py3-none-any.whl", hash = "sha256:97dff077d71cca629f046c35ba34fae132b69c55fe271651766ddcf6d8132468"},
- {file = "gotrue-2.8.1.tar.gz", hash = "sha256:644d0096c4c390f7e36d9cb05271a7091c01e7dc6d506eb117b8fe8fc48eb8d9"},
+ {file = "gotrue-2.9.3-py3-none-any.whl", hash = "sha256:9d2e9c74405d879f4828e0a7b94daf167a6e109c10ae6e5c59a0e21446f6e423"},
+ {file = "gotrue-2.9.3.tar.gz", hash = "sha256:051551d80e642bdd2ab42cac78207745d89a2a08f429a1512d82624e675d8255"},
]
[package.dependencies]
-httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
+httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
pydantic = ">=1.10,<3"
[[package]]
@@ -972,20 +986,20 @@ files = [
[[package]]
name = "postgrest"
-version = "0.16.11"
+version = "0.17.2"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
-python-versions = "<4.0,>=3.8"
+python-versions = "<4.0,>=3.9"
files = [
- {file = "postgrest-0.16.11-py3-none-any.whl", hash = "sha256:22fb6b817ace1f68aa648fd4ce0f56d2786c9260fa4ed2cb9046191231a682b8"},
- {file = "postgrest-0.16.11.tar.gz", hash = "sha256:10af51b4c39e288ad7df2db92d6a61fb3c4683131b40561f473e3de116e83fa5"},
+ {file = "postgrest-0.17.2-py3-none-any.whl", hash = "sha256:f7c4f448e5a5e2d4c1dcf192edae9d1007c4261e9a6fb5116783a0046846ece2"},
+ {file = "postgrest-0.17.2.tar.gz", hash = "sha256:445cd4e4a191e279492549df0c4e827d32f9d01d0852599bb8a6efb0f07fcf78"},
]
[package.dependencies]
deprecation = ">=2.1.0,<3.0.0"
-httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
+httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
pydantic = ">=1.9,<3.0"
-strenum = ">=0.4.9,<0.5.0"
+strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""}
[[package]]
name = "proto-plus"
@@ -1031,6 +1045,7 @@ description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs
optional = false
python-versions = ">=3.8"
files = [
+ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
{file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
]
@@ -1041,6 +1056,7 @@ description = "A collection of ASN.1-based protocols modules"
optional = false
python-versions = ">=3.8"
files = [
+ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
{file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
]
@@ -1049,18 +1065,18 @@ pyasn1 = ">=0.4.6,<0.7.0"
[[package]]
name = "pydantic"
-version = "2.9.1"
+version = "2.9.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
- {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
+ {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
+ {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.23.3"
+pydantic-core = "2.23.4"
typing-extensions = [
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
@@ -1072,100 +1088,100 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.23.3"
+version = "2.23.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
- {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
- {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
- {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
- {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
- {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
- {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
- {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
- {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
- {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
- {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
- {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
- {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
- {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
- {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
- {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
- {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
- {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
- {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
- {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
- {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
- {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
- {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
- {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
- {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
- {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
- {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
- {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
- {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
- {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
- {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
- {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
- {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
- {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
- {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
- {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
- {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
+ {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
+ {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
+ {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
+ {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
+ {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
+ {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
+ {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
+ {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
+ {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
+ {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
+ {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
+ {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
+ {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
]
[package.dependencies]
@@ -1173,13 +1189,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pydantic-settings"
-version = "2.5.2"
+version = "2.6.0"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_settings-2.5.2-py3-none-any.whl", hash = "sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907"},
- {file = "pydantic_settings-2.5.2.tar.gz", hash = "sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0"},
+ {file = "pydantic_settings-2.6.0-py3-none-any.whl", hash = "sha256:4a819166f119b74d7f8c765196b165f95cc7487ce58ea27dec8a5a26be0970e0"},
+ {file = "pydantic_settings-2.6.0.tar.gz", hash = "sha256:44a1804abffac9e6a30372bb45f6cafab945ef5af25e66b1c634c01dd39e0188"},
]
[package.dependencies]
@@ -1253,6 +1269,24 @@ python-dateutil = ">=2.8.1,<3.0.0"
typing-extensions = ">=4.12.2,<5.0.0"
websockets = ">=11,<13"
+[[package]]
+name = "redis"
+version = "5.1.1"
+description = "Python client for Redis database and key-value store"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "redis-5.1.1-py3-none-any.whl", hash = "sha256:f8ea06b7482a668c6475ae202ed8d9bcaa409f6e87fb77ed1043d912afd62e24"},
+ {file = "redis-5.1.1.tar.gz", hash = "sha256:f6c997521fedbae53387307c5d0bf784d9acc28d9f1d058abeac566ec4dbed72"},
+]
+
+[package.dependencies]
+async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}
+
+[package.extras]
+hiredis = ["hiredis (>=3.0.0)"]
+ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"]
+
[[package]]
name = "requests"
version = "2.32.3"
@@ -1312,17 +1346,17 @@ files = [
[[package]]
name = "storage3"
-version = "0.7.7"
+version = "0.8.2"
description = "Supabase Storage client for Python."
optional = false
-python-versions = "<4.0,>=3.8"
+python-versions = "<4.0,>=3.9"
files = [
- {file = "storage3-0.7.7-py3-none-any.whl", hash = "sha256:ed80a2546cd0b5c22e2c30ea71096db6c99268daf2958c603488e7d72efb8426"},
- {file = "storage3-0.7.7.tar.gz", hash = "sha256:9fba680cf761d139ad764f43f0e91c245d1ce1af2cc3afe716652f835f48f83e"},
+ {file = "storage3-0.8.2-py3-none-any.whl", hash = "sha256:f2e995b18c77a2a9265d1a33047d43e4d6abb11eb3ca5067959f68281c305de3"},
+ {file = "storage3-0.8.2.tar.gz", hash = "sha256:db05d3fe8fb73bd30c814c4c4749664f37a5dfc78b629e8c058ef558c2b89f5a"},
]
[package.dependencies]
-httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
+httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
python-dateutil = ">=2.8.2,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"
@@ -1344,36 +1378,36 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]
[[package]]
name = "supabase"
-version = "2.7.4"
+version = "2.9.1"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
- {file = "supabase-2.7.4-py3-none-any.whl", hash = "sha256:01815fbc30cac753933d4a44a2529fd13cb7634b56c705c65b12a02c8e75982b"},
- {file = "supabase-2.7.4.tar.gz", hash = "sha256:5a979c7711b3c5ce688514fa0afc015780522569494e1a9a9d25d03b7c3d654b"},
+ {file = "supabase-2.9.1-py3-none-any.whl", hash = "sha256:a96f857a465712cb551679c1df66ba772c834f861756ce4aa2aa4cb703f6aeb7"},
+ {file = "supabase-2.9.1.tar.gz", hash = "sha256:51fce39c9eb50573126dabb342541ec5e1f13e7476938768f4b0ccfdb8c522cd"},
]
[package.dependencies]
-gotrue = ">=1.3,<3.0"
-httpx = ">=0.24,<0.28"
-postgrest = ">=0.14,<0.17.0"
+gotrue = ">=2.9.0,<3.0.0"
+httpx = ">=0.26,<0.28"
+postgrest = ">=0.17.0,<0.18.0"
realtime = ">=2.0.0,<3.0.0"
-storage3 = ">=0.5.3,<0.8.0"
-supafunc = ">=0.3.1,<0.6.0"
+storage3 = ">=0.8.0,<0.9.0"
+supafunc = ">=0.6.0,<0.7.0"
[[package]]
name = "supafunc"
-version = "0.5.1"
+version = "0.6.2"
description = "Library for Supabase Functions"
optional = false
-python-versions = "<4.0,>=3.8"
+python-versions = "<4.0,>=3.9"
files = [
- {file = "supafunc-0.5.1-py3-none-any.whl", hash = "sha256:b05e99a2b41270211a3f90ec843c04c5f27a5618f2d2d2eb8e07f41eb962a910"},
- {file = "supafunc-0.5.1.tar.gz", hash = "sha256:1ae9dce6bd935939c561650e86abb676af9665ecf5d4ffc1c7ec3c4932c84334"},
+ {file = "supafunc-0.6.2-py3-none-any.whl", hash = "sha256:101b30616b0a1ce8cf938eca1df362fa4cf1deacb0271f53ebbd674190fb0da5"},
+ {file = "supafunc-0.6.2.tar.gz", hash = "sha256:c7dfa20db7182f7fe4ae436e94e05c06cd7ed98d697fed75d68c7b9792822adc"},
]
[package.dependencies]
-httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
+httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
[[package]]
name = "typing-extensions"
@@ -1690,4 +1724,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
-content-hash = "e9b6e5d877eeb9c9f1ebc69dead1985d749facc160afbe61f3bf37e9a6e35aa5"
+content-hash = "44af7722ca3d2788fc817129ac43477b71eea9921d51502a63f755cb04e3f254"
diff --git a/autogpt_platform/autogpt_libs/pyproject.toml b/autogpt_platform/autogpt_libs/pyproject.toml
index 81ee51fb80..caf4ac1e35 100644
--- a/autogpt_platform/autogpt_libs/pyproject.toml
+++ b/autogpt_platform/autogpt_libs/pyproject.toml
@@ -8,13 +8,17 @@ packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
colorama = "^0.4.6"
-google-cloud-logging = "^3.8.0"
-pydantic = "^2.8.2"
-pydantic-settings = "^2.5.2"
+expiringdict = "^1.2.2"
+google-cloud-logging = "^3.11.3"
+pydantic = "^2.9.2"
+pydantic-settings = "^2.6.0"
pyjwt = "^2.8.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
-supabase = "^2.7.2"
+supabase = "^2.9.1"
+
+[tool.poetry.group.dev.dependencies]
+redis = "^5.0.8"
[build-system]
requires = ["poetry-core"]
diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example
index fed9a309d2..0ec84ca83e 100644
--- a/autogpt_platform/backend/.env.example
+++ b/autogpt_platform/backend/.env.example
@@ -12,18 +12,21 @@ REDIS_PORT=6379
REDIS_PASSWORD=password
ENABLE_CREDIT=false
-APP_ENV="local"
+# Which environment to log things under: "local", "dev" or "prod"
+APP_ENV=local
+# Which environment to behave as: "local" or "cloud"
+BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=
## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
-ENABLE_AUTH=false
+ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
# For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow for integrations to work.
-# FRONTEND_BASE_URL=http://localhost:3000
+FRONTEND_BASE_URL=http://localhost:3000
## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
@@ -36,6 +39,15 @@ SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
+# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials; enable the Gmail API and set the required scopes
+# https://console.cloud.google.com/apis/credentials/consent ?project=
+
+# You'll need to add/enable the following scopes (minimum):
+# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview ?project=
+# https://console.cloud.google.com/apis/library/sheets.googleapis.com/ ?project=
+GOOGLE_CLIENT_ID=
+GOOGLE_CLIENT_SECRET=
+
## ===== OPTIONAL API KEYS ===== ##
# LLM
@@ -74,6 +86,14 @@ SMTP_PASSWORD=
MEDIUM_API_KEY=
MEDIUM_AUTHOR_ID=
+# Google Maps
+GOOGLE_MAPS_API_KEY=
+
+# Replicate
+REPLICATE_API_KEY=
+
+# Ideogram
+IDEOGRAM_API_KEY=
# Logging Configuration
LOG_LEVEL=INFO
diff --git a/autogpt_platform/backend/Dockerfile b/autogpt_platform/backend/Dockerfile
index f697db1198..5795398d1f 100644
--- a/autogpt_platform/backend/Dockerfile
+++ b/autogpt_platform/backend/Dockerfile
@@ -8,7 +8,7 @@ WORKDIR /app
# Install build dependencies
RUN apt-get update \
- && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev postgresql-client git \
+ && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev libpq5 gettext libz-dev libssl-dev postgresql-client git \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
diff --git a/autogpt_platform/backend/README.advanced.md b/autogpt_platform/backend/README.advanced.md
index 829a3d7926..09e0f90fcc 100644
--- a/autogpt_platform/backend/README.advanced.md
+++ b/autogpt_platform/backend/README.advanced.md
@@ -37,7 +37,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
5. Generate the Prisma client
```sh
- poetry run prisma generate --schema postgres/schema.prisma
+ poetry run prisma generate
```
@@ -61,7 +61,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
```sh
cd ../backend
- prisma migrate dev --schema postgres/schema.prisma
+ prisma migrate deploy
```
## Running The Server
diff --git a/autogpt_platform/backend/README.md b/autogpt_platform/backend/README.md
index fc0c6b3944..ab91027df2 100644
--- a/autogpt_platform/backend/README.md
+++ b/autogpt_platform/backend/README.md
@@ -58,17 +58,18 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
6. Migrate the database. Be careful because this deletes current data in the database.
```sh
- docker compose up db redis -d
- poetry run prisma migrate dev
+ docker compose up db -d
+ poetry run prisma migrate deploy
```
## Running The Server
### Starting the server without Docker
-Run the following command to build the dockerfiles:
+Run the following commands to run the database in Docker and the application locally:
```sh
+docker compose --profile local up deps --build --detach
poetry run app
```
diff --git a/autogpt_platform/backend/backend/app.py b/autogpt_platform/backend/backend/app.py
index c265c7a224..5d77ea9632 100644
--- a/autogpt_platform/backend/backend/app.py
+++ b/autogpt_platform/backend/backend/app.py
@@ -24,10 +24,12 @@ def main(**kwargs):
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
- from backend.executor import ExecutionManager, ExecutionScheduler
- from backend.server import AgentServer, WebsocketServer
+ from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
+ from backend.server.rest_api import AgentServer
+ from backend.server.ws_api import WebsocketServer
run_processes(
+ DatabaseManager(),
ExecutionManager(),
ExecutionScheduler(),
WebsocketServer(),
diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py
index d090aa41be..4fb89e3957 100644
--- a/autogpt_platform/backend/backend/blocks/__init__.py
+++ b/autogpt_platform/backend/backend/blocks/__init__.py
@@ -2,6 +2,7 @@ import importlib
import os
import re
from pathlib import Path
+from typing import Type, TypeVar
from backend.data.block import Block
@@ -24,28 +25,31 @@ for module in modules:
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
-AVAILABLE_BLOCKS = {}
+AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
-def all_subclasses(clz):
- subclasses = clz.__subclasses__()
+T = TypeVar("T")
+
+
+def all_subclasses(cls: Type[T]) -> list[Type[T]]:
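+    # Recursively collect every direct and indirect subclass of cls.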
+ subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
-for cls in all_subclasses(Block):
- name = cls.__name__
+for block_cls in all_subclasses(Block):
+ name = block_cls.__name__
- if cls.__name__.endswith("Base"):
+ if block_cls.__name__.endswith("Base"):
continue
- if not cls.__name__.endswith("Block"):
+ if not block_cls.__name__.endswith("Block"):
raise ValueError(
- f"Block class {cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
+            f"Block class {block_cls.__name__} does not end with 'Block'. If you are creating an abstract class, please name the class with 'Base' at the end"
)
- block = cls()
+ block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
@@ -53,15 +57,33 @@ for cls in all_subclasses(Block):
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
+ input_schema = block.input_schema.model_fields
+ output_schema = block.output_schema.model_fields
+
# Prevent duplicate field name in input_schema and output_schema
- duplicate_field_names = set(block.input_schema.model_fields.keys()) & set(
- block.output_schema.model_fields.keys()
- )
+ duplicate_field_names = set(input_schema.keys()) & set(output_schema.keys())
if duplicate_field_names:
raise ValueError(
f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
)
+ # Make sure `error` field is a string in the output schema
+ if "error" in output_schema and output_schema["error"].annotation is not str:
+ raise ValueError(
+ f"{block.name} `error` field in output_schema must be a string"
+ )
+
+    # Make sure all fields in input_schema and output_schema are annotated and have a value
+ for field_name, field in [*input_schema.items(), *output_schema.items()]:
+ if field.annotation is None:
+ raise ValueError(
+ f"{block.name} has a field {field_name} that is not annotated"
+ )
+ if field.json_schema_extra is None:
+ raise ValueError(
+ f"{block.name} has a field {field_name} not defined as SchemaField"
+ )
+
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
@@ -69,6 +91,6 @@ for cls in all_subclasses(Block):
if block.disabled:
continue
- AVAILABLE_BLOCKS[block.id] = block
+ AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py
new file mode 100644
index 0000000000..3fe92950c1
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py
@@ -0,0 +1,298 @@
+import logging
+import time
+from enum import Enum
+
+import requests
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField
+
+
+class AudioTrack(str, Enum):
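+    # The trailing-comma tuples are unpacked by the str mixin, so each member's value is the plain string (e.g. "Observer").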
+ OBSERVER = ("Observer",)
+ FUTURISTIC_BEAT = ("Futuristic Beat",)
+ SCIENCE_DOCUMENTARY = ("Science Documentary",)
+ HOTLINE = ("Hotline",)
+ BLADERUNNER_2049 = ("Bladerunner 2049",)
+ A_FUTURE = ("A Future",)
+ ELYSIAN_EMBERS = ("Elysian Embers",)
+ INSPIRING_CINEMATIC = ("Inspiring Cinematic",)
+ BLADERUNNER_REMIX = ("Bladerunner Remix",)
+ IZZAMUZZIC = ("Izzamuzzic",)
+ NAS = ("Nas",)
+ PARIS_ELSE = ("Paris - Else",)
+ SNOWFALL = ("Snowfall",)
+ BURLESQUE = ("Burlesque",)
+ CORNY_CANDY = ("Corny Candy",)
+ HIGHWAY_NOCTURNE = ("Highway Nocturne",)
+ I_DONT_THINK_SO = ("I Don't Think So",)
+ LOSING_YOUR_MARBLES = ("Losing Your Marbles",)
+ REFRESHER = ("Refresher",)
+ TOURIST = ("Tourist",)
+ TWIN_TYCHES = ("Twin Tyches",)
+
+ @property
+ def audio_url(self):
+ audio_urls = {
+ AudioTrack.OBSERVER: "https://cdn.tfrv.xyz/audio/observer.mp3",
+ AudioTrack.FUTURISTIC_BEAT: "https://cdn.tfrv.xyz/audio/_futuristic-beat.mp3",
+ AudioTrack.SCIENCE_DOCUMENTARY: "https://cdn.tfrv.xyz/audio/_science-documentary.mp3",
+ AudioTrack.HOTLINE: "https://cdn.tfrv.xyz/audio/_hotline.mp3",
+ AudioTrack.BLADERUNNER_2049: "https://cdn.tfrv.xyz/audio/_bladerunner-2049.mp3",
+ AudioTrack.A_FUTURE: "https://cdn.tfrv.xyz/audio/a-future.mp3",
+ AudioTrack.ELYSIAN_EMBERS: "https://cdn.tfrv.xyz/audio/elysian-embers.mp3",
+ AudioTrack.INSPIRING_CINEMATIC: "https://cdn.tfrv.xyz/audio/inspiring-cinematic-ambient.mp3",
+ AudioTrack.BLADERUNNER_REMIX: "https://cdn.tfrv.xyz/audio/bladerunner-remix.mp3",
+ AudioTrack.IZZAMUZZIC: "https://cdn.tfrv.xyz/audio/_izzamuzzic.mp3",
+ AudioTrack.NAS: "https://cdn.tfrv.xyz/audio/_nas.mp3",
+ AudioTrack.PARIS_ELSE: "https://cdn.tfrv.xyz/audio/_paris-else.mp3",
+ AudioTrack.SNOWFALL: "https://cdn.tfrv.xyz/audio/_snowfall.mp3",
+ AudioTrack.BURLESQUE: "https://cdn.tfrv.xyz/audio/burlesque.mp3",
+ AudioTrack.CORNY_CANDY: "https://cdn.tfrv.xyz/audio/corny-candy.mp3",
+ AudioTrack.HIGHWAY_NOCTURNE: "https://cdn.tfrv.xyz/audio/highway-nocturne.mp3",
+ AudioTrack.I_DONT_THINK_SO: "https://cdn.tfrv.xyz/audio/i-dont-think-so.mp3",
+ AudioTrack.LOSING_YOUR_MARBLES: "https://cdn.tfrv.xyz/audio/losing-your-marbles.mp3",
+ AudioTrack.REFRESHER: "https://cdn.tfrv.xyz/audio/refresher.mp3",
+ AudioTrack.TOURIST: "https://cdn.tfrv.xyz/audio/tourist.mp3",
+ AudioTrack.TWIN_TYCHES: "https://cdn.tfrv.xyz/audio/twin-tynches.mp3",
+ }
+ return audio_urls[self]
+
+
+class GenerationPreset(str, Enum):
+ LEONARDO = ("Default",)
+ ANIME = ("Anime",)
+ REALISM = ("Realist",)
+ ILLUSTRATION = ("Illustration",)
+ SKETCH_COLOR = ("Sketch Color",)
+ SKETCH_BW = ("Sketch B&W",)
+ PIXAR = ("Pixar",)
+ INK = ("Japanese Ink",)
+ RENDER_3D = ("3D Render",)
+ LEGO = ("Lego",)
+ SCIFI = ("Sci-Fi",)
+ RECRO_CARTOON = ("Retro Cartoon",)
+ PIXEL_ART = ("Pixel Art",)
+ CREATIVE = ("Creative",)
+ PHOTOGRAPHY = ("Photography",)
+ RAYTRACED = ("Raytraced",)
+ ENVIRONMENT = ("Environment",)
+ FANTASY = ("Fantasy",)
+ ANIME_SR = ("Anime Realism",)
+ MOVIE = ("Movie",)
+ STYLIZED_ILLUSTRATION = ("Stylized Illustration",)
+ MANGA = ("Manga",)
+
+
+class Voice(str, Enum):
+ LILY = "Lily"
+ DANIEL = "Daniel"
+ BRIAN = "Brian"
+ JESSICA = "Jessica"
+ CHARLOTTE = "Charlotte"
+ CALLUM = "Callum"
+
+ @property
+ def voice_id(self):
+ voice_id_map = {
+ Voice.LILY: "pFZP5JQG7iQjIQuC4Bku",
+ Voice.DANIEL: "onwK4e9ZLuTAKqWW03F9",
+ Voice.BRIAN: "nPczCjzI2devNBz1zQrb",
+ Voice.JESSICA: "cgSgspJ2msm6clMCkdW9",
+ Voice.CHARLOTTE: "XB0fDUnXU5powFXDhCwa",
+ Voice.CALLUM: "N2lVS1w4EtoT3dr4eOWO",
+ }
+ return voice_id_map[self]
+
+ def __str__(self):
+ return self.value
+
+
+class VisualMediaType(str, Enum):
+ STOCK_VIDEOS = ("stockVideo",)
+ MOVING_AI_IMAGES = ("movingImage",)
+ AI_VIDEO = ("aiVideo",)
+
+
+logger = logging.getLogger(__name__)
+
+
+class AIShortformVideoCreatorBlock(Block):
+ class Input(BlockSchema):
+ api_key: BlockSecret = SecretField(
+ key="revid_api_key",
+ description="Your revid.ai API key",
+ placeholder="Enter your revid.ai API key",
+ )
+ script: str = SchemaField(
+ description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. For example, [close-up of a cat] will show a close-up of a cat.""",
+ placeholder="[close-up of a cat] Meow!",
+ )
+ ratio: str = SchemaField(
+ description="Aspect ratio of the video", default="9 / 16"
+ )
+ resolution: str = SchemaField(
+ description="Resolution of the video", default="720p"
+ )
+ frame_rate: int = SchemaField(description="Frame rate of the video", default=60)
+ generation_preset: GenerationPreset = SchemaField(
+            description="Generation preset for visual style - only affects AI-generated visuals",
+ default=GenerationPreset.LEONARDO,
+ placeholder=GenerationPreset.LEONARDO,
+ )
+ background_music: AudioTrack = SchemaField(
+ description="Background music track",
+ default=AudioTrack.HIGHWAY_NOCTURNE,
+ placeholder=AudioTrack.HIGHWAY_NOCTURNE,
+ )
+ voice: Voice = SchemaField(
+ description="AI voice to use for narration",
+ default=Voice.LILY,
+ placeholder=Voice.LILY,
+ )
+ video_style: VisualMediaType = SchemaField(
+ description="Type of visual media to use for the video",
+ default=VisualMediaType.STOCK_VIDEOS,
+ placeholder=VisualMediaType.STOCK_VIDEOS,
+ )
+
+ class Output(BlockSchema):
+ video_url: str = SchemaField(description="The URL of the created video")
+ error: str = SchemaField(description="Error message if the request failed")
+
+ def __init__(self):
+ super().__init__(
+ id="361697fb-0c4f-4feb-aed3-8320c88c771b",
+ description="Creates a shortform video using revid.ai",
+ categories={BlockCategory.SOCIAL, BlockCategory.AI},
+ input_schema=AIShortformVideoCreatorBlock.Input,
+ output_schema=AIShortformVideoCreatorBlock.Output,
+ test_input={
+ "api_key": "test_api_key",
+ "script": "[close-up of a cat] Meow!",
+ "ratio": "9 / 16",
+ "resolution": "720p",
+ "frame_rate": 60,
+ "generation_preset": GenerationPreset.LEONARDO,
+ "background_music": AudioTrack.HIGHWAY_NOCTURNE,
+ "voice": Voice.LILY,
+ "video_style": VisualMediaType.STOCK_VIDEOS,
+ },
+ test_output=(
+ "video_url",
+ "https://example.com/video.mp4",
+ ),
+ test_mock={
+ "create_webhook": lambda: (
+ "test_uuid",
+ "https://webhook.site/test_uuid",
+ ),
+ "create_video": lambda api_key, payload: {"pid": "test_pid"},
+ "wait_for_video": lambda api_key, pid, webhook_token, max_wait_time=1000: "https://example.com/video.mp4",
+ },
+ )
+
+ def create_webhook(self):
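+        # Create a throwaway webhook.site endpoint; its URL is passed to revid.ai as the render callback.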
+ url = "https://webhook.site/token"
+ headers = {"Accept": "application/json", "Content-Type": "application/json"}
+ response = requests.post(url, headers=headers)
+ response.raise_for_status()
+ webhook_data = response.json()
+ return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
+
+ def create_video(self, api_key: str, payload: dict) -> dict:
+ url = "https://www.revid.ai/api/public/v2/render"
+ headers = {"key": api_key}
+ response = requests.post(url, json=payload, headers=headers)
+ logger.debug(
+ f"API Response Status Code: {response.status_code}, Content: {response.text}"
+ )
+ response.raise_for_status()
+ return response.json()
+
+ def check_video_status(self, api_key: str, pid: str) -> dict:
+ url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
+ headers = {"key": api_key}
+ response = requests.get(url, headers=headers)
+ response.raise_for_status()
+ return response.json()
+
+ def wait_for_video(
+ self, api_key: str, pid: str, webhook_token: str, max_wait_time: int = 1000
+ ) -> str:
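+        # Poll the status endpoint every 10 seconds until the video is ready, an error is reported, or max_wait_time elapses.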
+ start_time = time.time()
+ while time.time() - start_time < max_wait_time:
+ status = self.check_video_status(api_key, pid)
+ logger.debug(f"Video status: {status}")
+
+ if status.get("status") == "ready" and "videoUrl" in status:
+ return status["videoUrl"]
+ elif status.get("status") == "error":
+ error_message = status.get("error", "Unknown error occurred")
+ logger.error(f"Video creation failed: {error_message}")
+ raise ValueError(f"Video creation failed: {error_message}")
+ elif status.get("status") in ["FAILED", "CANCELED"]:
+ logger.error(f"Video creation failed: {status.get('message')}")
+ raise ValueError(f"Video creation failed: {status.get('message')}")
+
+ time.sleep(10)
+
+ logger.error("Video creation timed out")
+ raise TimeoutError("Video creation timed out")
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ # Create a new Webhook.site URL
+ webhook_token, webhook_url = self.create_webhook()
+ logger.debug(f"Webhook URL: {webhook_url}")
+
+ audio_url = input_data.background_music.audio_url
+
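+        # Build the revid.ai render request; AI video generation is only enabled when the style is not stock videos.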
+ payload = {
+ "frameRate": input_data.frame_rate,
+ "resolution": input_data.resolution,
+ "frameDurationMultiplier": 18,
+ "webhook": webhook_url,
+ "creationParams": {
+ "mediaType": input_data.video_style,
+ "captionPresetName": "Wrap 1",
+ "selectedVoice": input_data.voice.voice_id,
+ "hasEnhancedGeneration": True,
+ "generationPreset": input_data.generation_preset.name,
+ "selectedAudio": input_data.background_music,
+ "origin": "/create",
+ "inputText": input_data.script,
+ "flowType": "text-to-video",
+ "slug": "create-tiktok-video",
+ "hasToGenerateVoice": True,
+ "hasToTranscript": False,
+ "hasToSearchMedia": True,
+ "hasAvatar": False,
+ "hasWebsiteRecorder": False,
+ "hasTextSmallAtBottom": False,
+ "ratio": input_data.ratio,
+ "sourceType": "contentScraping",
+ "selectedStoryStyle": {"value": "custom", "label": "Custom"},
+ "hasToGenerateVideos": input_data.video_style
+ != VisualMediaType.STOCK_VIDEOS,
+ "audioUrl": audio_url,
+ },
+ }
+
+ logger.debug("Creating video...")
+ response = self.create_video(input_data.api_key.get_secret_value(), payload)
+ pid = response.get("pid")
+
+ if not pid:
+ logger.error(
+ f"Failed to create video: No project ID returned. API Response: {response}"
+ )
+ raise RuntimeError("Failed to create video: No project ID returned")
+ else:
+ logger.debug(
+ f"Video created with project ID: {pid}. Waiting for completion..."
+ )
+ video_url = self.wait_for_video(
+ input_data.api_key.get_secret_value(), pid, webhook_token
+ )
+ logger.debug(f"Video ready: {video_url}")
+ yield "video_url", video_url
diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py
index b86d8d872b..391d6b615a 100644
--- a/autogpt_platform/backend/backend/blocks/basic.py
+++ b/autogpt_platform/backend/backend/blocks/basic.py
@@ -2,7 +2,6 @@ import re
from typing import Any, List
from jinja2 import BaseLoader, Environment
-from pydantic import Field
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
@@ -19,18 +18,18 @@ class StoreValueBlock(Block):
"""
class Input(BlockSchema):
- input: Any = Field(
+ input: Any = SchemaField(
description="Trigger the block to produce the output. "
"The value is only used when `data` is None."
)
- data: Any = Field(
+ data: Any = SchemaField(
description="The constant data to be retained in the block. "
"This value is passed as `output`.",
default=None,
)
class Output(BlockSchema):
- output: Any
+ output: Any = SchemaField(description="The stored data retained in the block.")
def __init__(self):
super().__init__(
@@ -56,10 +55,10 @@ class StoreValueBlock(Block):
class PrintToConsoleBlock(Block):
class Input(BlockSchema):
- text: str
+ text: str = SchemaField(description="The text to print to the console.")
class Output(BlockSchema):
- status: str
+ status: str = SchemaField(description="The status of the print operation.")
def __init__(self):
super().__init__(
@@ -79,16 +78,18 @@ class PrintToConsoleBlock(Block):
class FindInDictionaryBlock(Block):
class Input(BlockSchema):
- input: Any = Field(description="Dictionary to lookup from")
- key: str | int = Field(description="Key to lookup in the dictionary")
+ input: Any = SchemaField(description="Dictionary to lookup from")
+ key: str | int = SchemaField(description="Key to lookup in the dictionary")
class Output(BlockSchema):
- output: Any = Field(description="Value found for the given key")
- missing: Any = Field(description="Value of the input that missing the key")
+ output: Any = SchemaField(description="Value found for the given key")
+ missing: Any = SchemaField(
+            description="Value of the input that is missing the key"
+ )
def __init__(self):
super().__init__(
- id="b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6",
+ id="0e50422c-6dee-4145-83d6-3a5a392f65de",
description="Lookup the given key in the input dictionary/object/list and return the value.",
input_schema=FindInDictionaryBlock.Input,
output_schema=FindInDictionaryBlock.Output,
@@ -330,20 +331,17 @@ class AddToDictionaryBlock(Block):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- # If no dictionary is provided, create a new one
- if input_data.dictionary is None:
- updated_dict = {}
- else:
- # Create a copy of the input dictionary to avoid modifying the original
- updated_dict = input_data.dictionary.copy()
+ # If no dictionary is provided, create a new one
+ if input_data.dictionary is None:
+ updated_dict = {}
+ else:
+ # Create a copy of the input dictionary to avoid modifying the original
+ updated_dict = input_data.dictionary.copy()
- # Add the new key-value pair
- updated_dict[input_data.key] = input_data.value
+ # Add the new key-value pair
+ updated_dict[input_data.key] = input_data.value
- yield "updated_dictionary", updated_dict
- except Exception as e:
- yield "error", f"Failed to add entry to dictionary: {str(e)}"
+ yield "updated_dictionary", updated_dict
class AddToListBlock(Block):
@@ -401,23 +399,20 @@ class AddToListBlock(Block):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- # If no list is provided, create a new one
- if input_data.list is None:
- updated_list = []
- else:
- # Create a copy of the input list to avoid modifying the original
- updated_list = input_data.list.copy()
+ # If no list is provided, create a new one
+ if input_data.list is None:
+ updated_list = []
+ else:
+ # Create a copy of the input list to avoid modifying the original
+ updated_list = input_data.list.copy()
- # Add the new entry
- if input_data.position is None:
- updated_list.append(input_data.entry)
- else:
- updated_list.insert(input_data.position, input_data.entry)
+ # Add the new entry
+ if input_data.position is None:
+ updated_list.append(input_data.entry)
+ else:
+ updated_list.insert(input_data.position, input_data.entry)
- yield "updated_list", updated_list
- except Exception as e:
- yield "error", f"Failed to add entry to list: {str(e)}"
+ yield "updated_list", updated_list
class NoteBlock(Block):
@@ -429,7 +424,7 @@ class NoteBlock(Block):
def __init__(self):
super().__init__(
- id="31d1064e-7446-4693-o7d4-65e5ca9110d1",
+ id="cc10ff7b-7753-4ff2-9af6-9399b1a7eddc",
description="This block is used to display a sticky note with the given text.",
categories={BlockCategory.BASIC},
input_schema=NoteBlock.Input,
diff --git a/autogpt_platform/backend/backend/blocks/block.py b/autogpt_platform/backend/backend/blocks/block.py
index a4bea7aee7..01e8af7238 100644
--- a/autogpt_platform/backend/backend/blocks/block.py
+++ b/autogpt_platform/backend/backend/blocks/block.py
@@ -3,6 +3,7 @@ import re
from typing import Type
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
class BlockInstallationBlock(Block):
@@ -15,11 +16,17 @@ class BlockInstallationBlock(Block):
"""
class Input(BlockSchema):
- code: str
+ code: str = SchemaField(
+ description="Python code of the block to be installed",
+ )
class Output(BlockSchema):
- success: str
- error: str
+ success: str = SchemaField(
+ description="Success message if the block is installed successfully",
+ )
+ error: str = SchemaField(
+ description="Error message if the block installation fails",
+ )
def __init__(self):
super().__init__(
@@ -37,14 +44,12 @@ class BlockInstallationBlock(Block):
if search := re.search(r"class (\w+)\(Block\):", code):
class_name = search.group(1)
else:
- yield "error", "No class found in the code."
- return
+ raise RuntimeError("No class found in the code.")
if search := re.search(r"id=\"(\w+-\w+-\w+-\w+-\w+)\"", code):
file_name = search.group(1)
else:
- yield "error", "No UUID found in the code."
- return
+ raise RuntimeError("No UUID found in the code.")
block_dir = os.path.dirname(__file__)
file_path = f"{block_dir}/{file_name}.py"
@@ -63,4 +68,4 @@ class BlockInstallationBlock(Block):
yield "success", "Block installed successfully."
except Exception as e:
os.remove(file_path)
- yield "error", f"[Code]\n{code}\n\n[Error]\n{str(e)}"
+ raise RuntimeError(f"[Code]\n{code}\n\n[Error]\n{str(e)}")
diff --git a/autogpt_platform/backend/backend/blocks/csv.py b/autogpt_platform/backend/backend/blocks/csv.py
index b19a65f24e..e78c899473 100644
--- a/autogpt_platform/backend/backend/blocks/csv.py
+++ b/autogpt_platform/backend/backend/blocks/csv.py
@@ -1,21 +1,49 @@
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import ContributorDetails
+from backend.data.model import ContributorDetails, SchemaField
class ReadCsvBlock(Block):
class Input(BlockSchema):
- contents: str
- delimiter: str = ","
- quotechar: str = '"'
- escapechar: str = "\\"
- has_header: bool = True
- skip_rows: int = 0
- strip: bool = True
- skip_columns: list[str] = []
+ contents: str = SchemaField(
+ description="The contents of the CSV file to read",
+ placeholder="a, b, c\n1,2,3\n4,5,6",
+ )
+ delimiter: str = SchemaField(
+ description="The delimiter used in the CSV file",
+ default=",",
+ )
+ quotechar: str = SchemaField(
+ description="The character used to quote fields",
+ default='"',
+ )
+ escapechar: str = SchemaField(
+ description="The character used to escape the delimiter",
+ default="\\",
+ )
+ has_header: bool = SchemaField(
+ description="Whether the CSV file has a header row",
+ default=True,
+ )
+ skip_rows: int = SchemaField(
+ description="The number of rows to skip from the start of the file",
+ default=0,
+ )
+ strip: bool = SchemaField(
+ description="Whether to strip whitespace from the values",
+ default=True,
+ )
+ skip_columns: list[str] = SchemaField(
+ description="The columns to skip from the start of the row",
+ default=[],
+ )
class Output(BlockSchema):
- row: dict[str, str]
- all_data: list[dict[str, str]]
+ row: dict[str, str] = SchemaField(
+ description="The data produced from each row in the CSV file"
+ )
+ all_data: list[dict[str, str]] = SchemaField(
+ description="All the data in the CSV file as a list of rows"
+ )
def __init__(self):
super().__init__(
@@ -24,7 +52,7 @@ class ReadCsvBlock(Block):
output_schema=ReadCsvBlock.Output,
description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.",
contributors=[ContributorDetails(name="Nicholas Tindle")],
- categories={BlockCategory.TEXT},
+ categories={BlockCategory.TEXT, BlockCategory.DATA},
test_input={
"contents": "a, b, c\n1,2,3\n4,5,6",
},
diff --git a/autogpt_platform/backend/backend/blocks/decoder_block.py b/autogpt_platform/backend/backend/blocks/decoder_block.py
new file mode 100644
index 0000000000..033cdfb0b3
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/decoder_block.py
@@ -0,0 +1,39 @@
+import codecs
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class TextDecoderBlock(Block):
+ class Input(BlockSchema):
+ text: str = SchemaField(
+ description="A string containing escaped characters to be decoded",
+ placeholder='Your entire text block with \\n and \\" escaped characters',
+ )
+
+ class Output(BlockSchema):
+ decoded_text: str = SchemaField(
+ description="The decoded text with escape sequences processed"
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="2570e8fe-8447-43ed-84c7-70d657923231",
+ description="Decodes a string containing escape sequences into actual text",
+ categories={BlockCategory.TEXT},
+ input_schema=TextDecoderBlock.Input,
+ output_schema=TextDecoderBlock.Output,
+ test_input={"text": """Hello\nWorld!\nThis is a \"quoted\" string."""},
+ test_output=[
+ (
+ "decoded_text",
+ """Hello
+World!
+This is a "quoted" string.""",
+ )
+ ],
+ )
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
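+        # codecs.decode with "unicode_escape" interprets backslash escape sequences
+        # (e.g. \n, \t, \") in the input string and returns the resulting text.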
+ decoded_text = codecs.decode(input_data.text, "unicode_escape")
+ yield "decoded_text", decoded_text
diff --git a/autogpt_platform/backend/backend/blocks/discord.py b/autogpt_platform/backend/backend/blocks/discord.py
index 8565684a07..e5414cd327 100644
--- a/autogpt_platform/backend/backend/blocks/discord.py
+++ b/autogpt_platform/backend/backend/blocks/discord.py
@@ -2,10 +2,9 @@ import asyncio
import aiohttp
import discord
-from pydantic import Field
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SecretField
+from backend.data.model import BlockSecret, SchemaField, SecretField
class ReadDiscordMessagesBlock(Block):
@@ -13,22 +12,24 @@ class ReadDiscordMessagesBlock(Block):
discord_bot_token: BlockSecret = SecretField(
key="discord_bot_token", description="Discord bot token"
)
- continuous_read: bool = Field(
+ continuous_read: bool = SchemaField(
description="Whether to continuously read messages", default=True
)
class Output(BlockSchema):
- message_content: str = Field(description="The content of the message received")
- channel_name: str = Field(
+ message_content: str = SchemaField(
+ description="The content of the message received"
+ )
+ channel_name: str = SchemaField(
description="The name of the channel the message was received from"
)
- username: str = Field(
+ username: str = SchemaField(
description="The username of the user who sent the message"
)
def __init__(self):
super().__init__(
- id="d3f4g5h6-1i2j-3k4l-5m6n-7o8p9q0r1s2t", # Unique ID for the node
+ id="df06086a-d5ac-4abb-9996-2ad0acb2eff7",
input_schema=ReadDiscordMessagesBlock.Input, # Assign input schema
output_schema=ReadDiscordMessagesBlock.Output, # Assign output schema
description="Reads messages from a Discord channel using a bot token.",
@@ -134,19 +135,21 @@ class SendDiscordMessageBlock(Block):
discord_bot_token: BlockSecret = SecretField(
key="discord_bot_token", description="Discord bot token"
)
- message_content: str = Field(description="The content of the message received")
- channel_name: str = Field(
+ message_content: str = SchemaField(
+ description="The content of the message received"
+ )
+ channel_name: str = SchemaField(
description="The name of the channel the message was received from"
)
class Output(BlockSchema):
- status: str = Field(
+ status: str = SchemaField(
description="The status of the operation (e.g., 'Message sent', 'Error')"
)
def __init__(self):
super().__init__(
- id="h1i2j3k4-5l6m-7n8o-9p0q-r1s2t3u4v5w6", # Unique ID for the node
+ id="d0822ab5-9f8a-44a3-8971-531dd0178b6b",
input_schema=SendDiscordMessageBlock.Input, # Assign input schema
output_schema=SendDiscordMessageBlock.Output, # Assign output schema
description="Sends a message to a Discord channel using a bot token.",
diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py
index edfb2f391b..79accb6d7d 100644
--- a/autogpt_platform/backend/backend/blocks/email_block.py
+++ b/autogpt_platform/backend/backend/blocks/email_block.py
@@ -2,17 +2,17 @@ import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
class EmailCredentials(BaseModel):
- smtp_server: str = Field(
+ smtp_server: str = SchemaField(
default="smtp.gmail.com", description="SMTP server address"
)
- smtp_port: int = Field(default=25, description="SMTP port number")
+ smtp_port: int = SchemaField(default=25, description="SMTP port number")
smtp_username: BlockSecret = SecretField(key="smtp_username")
smtp_password: BlockSecret = SecretField(key="smtp_password")
@@ -30,7 +30,7 @@ class SendEmailBlock(Block):
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
- creds: EmailCredentials = Field(
+ creds: EmailCredentials = SchemaField(
description="SMTP credentials",
default=EmailCredentials(),
)
@@ -43,7 +43,7 @@ class SendEmailBlock(Block):
def __init__(self):
super().__init__(
- id="a1234567-89ab-cdef-0123-456789abcdef",
+ id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
@@ -67,35 +67,28 @@ class SendEmailBlock(Block):
def send_email(
creds: EmailCredentials, to_email: str, subject: str, body: str
) -> str:
- try:
- smtp_server = creds.smtp_server
- smtp_port = creds.smtp_port
- smtp_username = creds.smtp_username.get_secret_value()
- smtp_password = creds.smtp_password.get_secret_value()
+ smtp_server = creds.smtp_server
+ smtp_port = creds.smtp_port
+ smtp_username = creds.smtp_username.get_secret_value()
+ smtp_password = creds.smtp_password.get_secret_value()
- msg = MIMEMultipart()
- msg["From"] = smtp_username
- msg["To"] = to_email
- msg["Subject"] = subject
- msg.attach(MIMEText(body, "plain"))
+ msg = MIMEMultipart()
+ msg["From"] = smtp_username
+ msg["To"] = to_email
+ msg["Subject"] = subject
+ msg.attach(MIMEText(body, "plain"))
- with smtplib.SMTP(smtp_server, smtp_port) as server:
- server.starttls()
- server.login(smtp_username, smtp_password)
- server.sendmail(smtp_username, to_email, msg.as_string())
+ with smtplib.SMTP(smtp_server, smtp_port) as server:
+ server.starttls()
+ server.login(smtp_username, smtp_password)
+ server.sendmail(smtp_username, to_email, msg.as_string())
- return "Email sent successfully"
- except Exception as e:
- return f"Failed to send email: {str(e)}"
+ return "Email sent successfully"
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- status = self.send_email(
+ yield "status", self.send_email(
input_data.creds,
input_data.to_email,
input_data.subject,
input_data.body,
)
- if "successfully" in status:
- yield "status", status
- else:
- yield "error", status
diff --git a/autogpt_platform/backend/backend/blocks/github/issues.py b/autogpt_platform/backend/backend/blocks/github/issues.py
index 97a4694340..ee9391545d 100644
--- a/autogpt_platform/backend/backend/blocks/github/issues.py
+++ b/autogpt_platform/backend/backend/blocks/github/issues.py
@@ -13,6 +13,7 @@ from ._auth import (
)
+# --8<-- [start:GithubCommentBlockExample]
class GithubCommentBlock(Block):
class Input(BlockSchema):
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
@@ -92,16 +93,16 @@ class GithubCommentBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- id, url = self.post_comment(
- credentials,
- input_data.issue_url,
- input_data.comment,
- )
- yield "id", id
- yield "url", url
- except Exception as e:
- yield "error", f"Failed to post comment: {str(e)}"
+ id, url = self.post_comment(
+ credentials,
+ input_data.issue_url,
+ input_data.comment,
+ )
+ yield "id", id
+ yield "url", url
+
+
+# --8<-- [end:GithubCommentBlockExample]
class GithubMakeIssueBlock(Block):
@@ -175,17 +176,14 @@ class GithubMakeIssueBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- number, url = self.create_issue(
- credentials,
- input_data.repo_url,
- input_data.title,
- input_data.body,
- )
- yield "number", number
- yield "url", url
- except Exception as e:
- yield "error", f"Failed to create issue: {str(e)}"
+ number, url = self.create_issue(
+ credentials,
+ input_data.repo_url,
+ input_data.title,
+ input_data.body,
+ )
+ yield "number", number
+ yield "url", url
class GithubReadIssueBlock(Block):
@@ -258,16 +256,13 @@ class GithubReadIssueBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- title, body, user = self.read_issue(
- credentials,
- input_data.issue_url,
- )
- yield "title", title
- yield "body", body
- yield "user", user
- except Exception as e:
- yield "error", f"Failed to read issue: {str(e)}"
+ title, body, user = self.read_issue(
+ credentials,
+ input_data.issue_url,
+ )
+ yield "title", title
+ yield "body", body
+ yield "user", user
class GithubListIssuesBlock(Block):
@@ -346,14 +341,11 @@ class GithubListIssuesBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- issues = self.list_issues(
- credentials,
- input_data.repo_url,
- )
- yield from (("issue", issue) for issue in issues)
- except Exception as e:
- yield "error", f"Failed to list issues: {str(e)}"
+ issues = self.list_issues(
+ credentials,
+ input_data.repo_url,
+ )
+ yield from (("issue", issue) for issue in issues)
class GithubAddLabelBlock(Block):
@@ -424,15 +416,12 @@ class GithubAddLabelBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.add_label(
- credentials,
- input_data.issue_url,
- input_data.label,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to add label: {str(e)}"
+ status = self.add_label(
+ credentials,
+ input_data.issue_url,
+ input_data.label,
+ )
+ yield "status", status
class GithubRemoveLabelBlock(Block):
@@ -508,15 +497,12 @@ class GithubRemoveLabelBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.remove_label(
- credentials,
- input_data.issue_url,
- input_data.label,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to remove label: {str(e)}"
+ status = self.remove_label(
+ credentials,
+ input_data.issue_url,
+ input_data.label,
+ )
+ yield "status", status
class GithubAssignIssueBlock(Block):
@@ -590,15 +576,12 @@ class GithubAssignIssueBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.assign_issue(
- credentials,
- input_data.issue_url,
- input_data.assignee,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to assign issue: {str(e)}"
+ status = self.assign_issue(
+ credentials,
+ input_data.issue_url,
+ input_data.assignee,
+ )
+ yield "status", status
class GithubUnassignIssueBlock(Block):
@@ -672,12 +655,9 @@ class GithubUnassignIssueBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.unassign_issue(
- credentials,
- input_data.issue_url,
- input_data.assignee,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to unassign issue: {str(e)}"
+ status = self.unassign_issue(
+ credentials,
+ input_data.issue_url,
+ input_data.assignee,
+ )
+ yield "status", status
diff --git a/autogpt_platform/backend/backend/blocks/github/pull_requests.py b/autogpt_platform/backend/backend/blocks/github/pull_requests.py
index 87540b66df..b04c730dc3 100644
--- a/autogpt_platform/backend/backend/blocks/github/pull_requests.py
+++ b/autogpt_platform/backend/backend/blocks/github/pull_requests.py
@@ -87,14 +87,11 @@ class GithubListPullRequestsBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- pull_requests = self.list_prs(
- credentials,
- input_data.repo_url,
- )
- yield from (("pull_request", pr) for pr in pull_requests)
- except Exception as e:
- yield "error", f"Failed to list pull requests: {str(e)}"
+ pull_requests = self.list_prs(
+ credentials,
+ input_data.repo_url,
+ )
+ yield from (("pull_request", pr) for pr in pull_requests)
class GithubMakePullRequestBlock(Block):
@@ -203,9 +200,7 @@ class GithubMakePullRequestBlock(Block):
error_message = error_details.get("message", "Unknown error")
else:
error_message = str(http_err)
- yield "error", f"Failed to create pull request: {error_message}"
- except Exception as e:
- yield "error", f"Failed to create pull request: {str(e)}"
+ raise RuntimeError(f"Failed to create pull request: {error_message}")
class GithubReadPullRequestBlock(Block):
@@ -313,23 +308,20 @@ class GithubReadPullRequestBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- title, body, author = self.read_pr(
+ title, body, author = self.read_pr(
+ credentials,
+ input_data.pr_url,
+ )
+ yield "title", title
+ yield "body", body
+ yield "author", author
+
+ if input_data.include_pr_changes:
+ changes = self.read_pr_changes(
credentials,
input_data.pr_url,
)
- yield "title", title
- yield "body", body
- yield "author", author
-
- if input_data.include_pr_changes:
- changes = self.read_pr_changes(
- credentials,
- input_data.pr_url,
- )
- yield "changes", changes
- except Exception as e:
- yield "error", f"Failed to read pull request: {str(e)}"
+ yield "changes", changes
class GithubAssignPRReviewerBlock(Block):
@@ -418,9 +410,7 @@ class GithubAssignPRReviewerBlock(Block):
)
else:
error_msg = f"HTTP error: {http_err} - {http_err.response.text}"
- yield "error", error_msg
- except Exception as e:
- yield "error", f"Failed to assign reviewer: {str(e)}"
+ raise RuntimeError(error_msg)
class GithubUnassignPRReviewerBlock(Block):
@@ -490,15 +480,12 @@ class GithubUnassignPRReviewerBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.unassign_reviewer(
- credentials,
- input_data.pr_url,
- input_data.reviewer,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to unassign reviewer: {str(e)}"
+ status = self.unassign_reviewer(
+ credentials,
+ input_data.pr_url,
+ input_data.reviewer,
+ )
+ yield "status", status
class GithubListPRReviewersBlock(Block):
@@ -586,11 +573,8 @@ class GithubListPRReviewersBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- reviewers = self.list_reviewers(
- credentials,
- input_data.pr_url,
- )
- yield from (("reviewer", reviewer) for reviewer in reviewers)
- except Exception as e:
- yield "error", f"Failed to list reviewers: {str(e)}"
+ reviewers = self.list_reviewers(
+ credentials,
+ input_data.pr_url,
+ )
+ yield from (("reviewer", reviewer) for reviewer in reviewers)
diff --git a/autogpt_platform/backend/backend/blocks/github/repo.py b/autogpt_platform/backend/backend/blocks/github/repo.py
index 63dcc7e1a1..29eeb757e2 100644
--- a/autogpt_platform/backend/backend/blocks/github/repo.py
+++ b/autogpt_platform/backend/backend/blocks/github/repo.py
@@ -96,14 +96,11 @@ class GithubListTagsBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- tags = self.list_tags(
- credentials,
- input_data.repo_url,
- )
- yield from (("tag", tag) for tag in tags)
- except Exception as e:
- yield "error", f"Failed to list tags: {str(e)}"
+ tags = self.list_tags(
+ credentials,
+ input_data.repo_url,
+ )
+ yield from (("tag", tag) for tag in tags)
class GithubListBranchesBlock(Block):
@@ -183,14 +180,11 @@ class GithubListBranchesBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- branches = self.list_branches(
- credentials,
- input_data.repo_url,
- )
- yield from (("branch", branch) for branch in branches)
- except Exception as e:
- yield "error", f"Failed to list branches: {str(e)}"
+ branches = self.list_branches(
+ credentials,
+ input_data.repo_url,
+ )
+ yield from (("branch", branch) for branch in branches)
class GithubListDiscussionsBlock(Block):
@@ -294,13 +288,10 @@ class GithubListDiscussionsBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- discussions = self.list_discussions(
- credentials, input_data.repo_url, input_data.num_discussions
- )
- yield from (("discussion", discussion) for discussion in discussions)
- except Exception as e:
- yield "error", f"Failed to list discussions: {str(e)}"
+ discussions = self.list_discussions(
+ credentials, input_data.repo_url, input_data.num_discussions
+ )
+ yield from (("discussion", discussion) for discussion in discussions)
class GithubListReleasesBlock(Block):
@@ -381,14 +372,11 @@ class GithubListReleasesBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- releases = self.list_releases(
- credentials,
- input_data.repo_url,
- )
- yield from (("release", release) for release in releases)
- except Exception as e:
- yield "error", f"Failed to list releases: {str(e)}"
+ releases = self.list_releases(
+ credentials,
+ input_data.repo_url,
+ )
+ yield from (("release", release) for release in releases)
class GithubReadFileBlock(Block):
@@ -474,18 +462,15 @@ class GithubReadFileBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- raw_content, size = self.read_file(
- credentials,
- input_data.repo_url,
- input_data.file_path.lstrip("/"),
- input_data.branch,
- )
- yield "raw_content", raw_content
- yield "text_content", base64.b64decode(raw_content).decode("utf-8")
- yield "size", size
- except Exception as e:
- yield "error", f"Failed to read file: {str(e)}"
+ raw_content, size = self.read_file(
+ credentials,
+ input_data.repo_url,
+ input_data.file_path.lstrip("/"),
+ input_data.branch,
+ )
+ yield "raw_content", raw_content
+ yield "text_content", base64.b64decode(raw_content).decode("utf-8")
+ yield "size", size
class GithubReadFolderBlock(Block):
@@ -612,17 +597,14 @@ class GithubReadFolderBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- files, dirs = self.read_folder(
- credentials,
- input_data.repo_url,
- input_data.folder_path.lstrip("/"),
- input_data.branch,
- )
- yield from (("file", file) for file in files)
- yield from (("dir", dir) for dir in dirs)
- except Exception as e:
- yield "error", f"Failed to read folder: {str(e)}"
+ files, dirs = self.read_folder(
+ credentials,
+ input_data.repo_url,
+ input_data.folder_path.lstrip("/"),
+ input_data.branch,
+ )
+ yield from (("file", file) for file in files)
+ yield from (("dir", dir) for dir in dirs)
class GithubMakeBranchBlock(Block):
@@ -703,16 +685,13 @@ class GithubMakeBranchBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.create_branch(
- credentials,
- input_data.repo_url,
- input_data.new_branch,
- input_data.source_branch,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to create branch: {str(e)}"
+ status = self.create_branch(
+ credentials,
+ input_data.repo_url,
+ input_data.new_branch,
+ input_data.source_branch,
+ )
+ yield "status", status
class GithubDeleteBranchBlock(Block):
@@ -775,12 +754,9 @@ class GithubDeleteBranchBlock(Block):
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
- try:
- status = self.delete_branch(
- credentials,
- input_data.repo_url,
- input_data.branch,
- )
- yield "status", status
- except Exception as e:
- yield "error", f"Failed to delete branch: {str(e)}"
+ status = self.delete_branch(
+ credentials,
+ input_data.repo_url,
+ input_data.branch,
+ )
+ yield "status", status
diff --git a/autogpt_platform/backend/backend/blocks/google/_auth.py b/autogpt_platform/backend/backend/blocks/google/_auth.py
new file mode 100644
index 0000000000..742fcb36fa
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/google/_auth.py
@@ -0,0 +1,54 @@
+from typing import Literal
+
+from autogpt_libs.supabase_integration_credentials_store.types import OAuth2Credentials
+from pydantic import SecretStr
+
+from backend.data.model import CredentialsField, CredentialsMetaInput
+from backend.util.settings import Secrets
+
+# --8<-- [start:GoogleOAuthIsConfigured]
+secrets = Secrets()
+GOOGLE_OAUTH_IS_CONFIGURED = bool(
+ secrets.google_client_id and secrets.google_client_secret
+)
+# --8<-- [end:GoogleOAuthIsConfigured]
+GoogleCredentials = OAuth2Credentials
+GoogleCredentialsInput = CredentialsMetaInput[Literal["google"], Literal["oauth2"]]
+
+
+def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput:
+ """
+ Creates a Google credentials input on a block.
+
+ Params:
+ scopes: The authorization scopes needed for the block to work.
+ """
+ return CredentialsField(
+ provider="google",
+ supported_credential_types={"oauth2"},
+ required_scopes=set(scopes),
+ description="The Google integration requires OAuth2 authentication.",
+ )
+
+
+TEST_CREDENTIALS = OAuth2Credentials(
+ id="01234567-89ab-cdef-0123-456789abcdef",
+ provider="google",
+ access_token=SecretStr("mock-google-access-token"),
+ refresh_token=SecretStr("mock-google-refresh-token"),
+ access_token_expires_at=1234567890,
+ scopes=[
+ "https://www.googleapis.com/auth/gmail.readonly",
+ "https://www.googleapis.com/auth/gmail.send",
+ ],
+ title="Mock Google OAuth2 Credentials",
+ username="mock-google-username",
+ refresh_token_expires_at=1234567890,
+)
+
+TEST_CREDENTIALS_INPUT = {
+ "provider": TEST_CREDENTIALS.provider,
+ "id": TEST_CREDENTIALS.id,
+ "type": TEST_CREDENTIALS.type,
+ "title": TEST_CREDENTIALS.title,
+}
diff --git a/autogpt_platform/backend/backend/blocks/google/gmail.py b/autogpt_platform/backend/backend/blocks/google/gmail.py
new file mode 100644
index 0000000000..beb96f3439
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/google/gmail.py
@@ -0,0 +1,503 @@
+import base64
+from email.utils import parseaddr
+from typing import List
+
+from google.oauth2.credentials import Credentials
+from googleapiclient.discovery import build
+from pydantic import BaseModel
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+from ._auth import (
+ GOOGLE_OAUTH_IS_CONFIGURED,
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ GoogleCredentials,
+ GoogleCredentialsField,
+ GoogleCredentialsInput,
+)
+
+
+class Attachment(BaseModel):
+ filename: str
+ content_type: str
+ size: int
+ attachment_id: str
+
+
+class Email(BaseModel):
+ id: str
+ subject: str
+ snippet: str
+ from_: str
+ to: str
+ date: str
+ body: str = "" # Default to an empty string
+ sizeEstimate: int
+ attachments: List[Attachment]
+
+
+class GmailReadBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/gmail.readonly"]
+ )
+ query: str = SchemaField(
+ description="Search query for reading emails",
+ default="is:unread",
+ )
+ max_results: int = SchemaField(
+ description="Maximum number of emails to retrieve",
+ default=10,
+ )
+
+ class Output(BlockSchema):
+ email: Email = SchemaField(
+ description="Email data",
+ )
+ emails: list[Email] = SchemaField(
+ description="List of email data",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="25310c70-b89b-43ba-b25c-4dfa7e2a481c",
+ description="This block reads emails from Gmail.",
+ categories={BlockCategory.COMMUNICATION},
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ input_schema=GmailReadBlock.Input,
+ output_schema=GmailReadBlock.Output,
+ test_input={
+ "query": "is:unread",
+ "max_results": 5,
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ [
+ {
+ "id": "1",
+ "subject": "Test Email",
+ "snippet": "This is a test email",
+ }
+ ],
+ ),
+ ],
+ test_mock={
+ "_read_emails": lambda *args, **kwargs: [
+ {
+ "id": "1",
+ "subject": "Test Email",
+ "snippet": "This is a test email",
+ }
+ ],
+ "_send_email": lambda *args, **kwargs: {"id": "1", "status": "sent"},
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = self._build_service(credentials, **kwargs)
+ messages = self._read_emails(service, input_data.query, input_data.max_results)
+ for email in messages:
+ yield "email", email
+ yield "emails", messages
+
+ @staticmethod
+ def _build_service(credentials: GoogleCredentials, **kwargs):
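+        # Rebuild google.oauth2 Credentials from the stored OAuth2 tokens so that
+        # googleapiclient can authenticate requests; client_id/client_secret are
+        # taken from kwargs if provided.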
+ creds = Credentials(
+ token=(
+ credentials.access_token.get_secret_value()
+ if credentials.access_token
+ else None
+ ),
+ refresh_token=(
+ credentials.refresh_token.get_secret_value()
+ if credentials.refresh_token
+ else None
+ ),
+ token_uri="https://oauth2.googleapis.com/token",
+ client_id=kwargs.get("client_id"),
+ client_secret=kwargs.get("client_secret"),
+ scopes=credentials.scopes,
+ )
+ return build("gmail", "v1", credentials=creds)
+
+ def _read_emails(
+ self, service, query: str | None, max_results: int | None
+ ) -> list[Email]:
+ results = (
+ service.users()
+ .messages()
+ .list(userId="me", q=query or "", maxResults=max_results or 10)
+ .execute()
+ )
+ messages = results.get("messages", [])
+
+ email_data = []
+ for message in messages:
+ msg = (
+ service.users()
+ .messages()
+ .get(userId="me", id=message["id"], format="full")
+ .execute()
+ )
+
+ headers = {
+ header["name"].lower(): header["value"]
+ for header in msg["payload"]["headers"]
+ }
+
+ attachments = self._get_attachments(service, msg)
+
+ email = Email(
+ id=msg["id"],
+ subject=headers.get("subject", "No Subject"),
+ snippet=msg["snippet"],
+ from_=parseaddr(headers.get("from", ""))[1],
+ to=parseaddr(headers.get("to", ""))[1],
+ date=headers.get("date", ""),
+ body=self._get_email_body(msg),
+ sizeEstimate=msg["sizeEstimate"],
+ attachments=attachments,
+ )
+ email_data.append(email)
+
+ return email_data
+
+ def _get_email_body(self, msg):
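+        # Multipart messages keep the plain-text body in a "text/plain" part, while
+        # single-part messages store it directly in the payload; in both cases the
+        # Gmail API returns the data base64url-encoded.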
+ if "parts" in msg["payload"]:
+ for part in msg["payload"]["parts"]:
+ if part["mimeType"] == "text/plain":
+ return base64.urlsafe_b64decode(part["body"]["data"]).decode(
+ "utf-8"
+ )
+ elif msg["payload"]["mimeType"] == "text/plain":
+ return base64.urlsafe_b64decode(msg["payload"]["body"]["data"]).decode(
+ "utf-8"
+ )
+
+ return "This email does not contain a text body."
+
+ def _get_attachments(self, service, message):
+ attachments = []
+ if "parts" in message["payload"]:
+ for part in message["payload"]["parts"]:
+ if part["filename"]:
+ attachment = Attachment(
+ filename=part["filename"],
+ content_type=part["mimeType"],
+ size=int(part["body"].get("size", 0)),
+ attachment_id=part["body"]["attachmentId"],
+ )
+ attachments.append(attachment)
+ return attachments
+
+    # Download the raw content of an attachment, given its message and attachment ID
+ def download_attachment(self, service, message_id: str, attachment_id: str):
+ attachment = (
+ service.users()
+ .messages()
+ .attachments()
+ .get(userId="me", messageId=message_id, id=attachment_id)
+ .execute()
+ )
+ file_data = base64.urlsafe_b64decode(attachment["data"].encode("UTF-8"))
+ return file_data
+
+
+class GmailSendBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/gmail.send"]
+ )
+ to: str = SchemaField(
+ description="Recipient email address",
+ )
+ subject: str = SchemaField(
+ description="Email subject",
+ )
+ body: str = SchemaField(
+ description="Email body",
+ )
+
+ class Output(BlockSchema):
+ result: dict = SchemaField(
+ description="Send confirmation",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="6c27abc2-e51d-499e-a85f-5a0041ba94f0",
+ description="This block sends an email using Gmail.",
+ categories={BlockCategory.COMMUNICATION},
+ input_schema=GmailSendBlock.Input,
+ output_schema=GmailSendBlock.Output,
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ test_input={
+ "to": "recipient@example.com",
+ "subject": "Test Email",
+ "body": "This is a test email sent from GmailSendBlock.",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ ("result", {"id": "1", "status": "sent"}),
+ ],
+ test_mock={
+ "_send_email": lambda *args, **kwargs: {"id": "1", "status": "sent"},
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = GmailReadBlock._build_service(credentials, **kwargs)
+ send_result = self._send_email(
+ service, input_data.to, input_data.subject, input_data.body
+ )
+ yield "result", send_result
+
+ def _send_email(self, service, to: str, subject: str, body: str) -> dict:
+ if not to or not subject or not body:
+ raise ValueError("To, subject, and body are required for sending an email")
+ message = self._create_message(to, subject, body)
+ sent_message = (
+ service.users().messages().send(userId="me", body=message).execute()
+ )
+ return {"id": sent_message["id"], "status": "sent"}
+
+ def _create_message(self, to: str, subject: str, body: str) -> dict:
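+        # The Gmail API expects the MIME message base64url-encoded in the "raw" field.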
+ import base64
+ from email.mime.text import MIMEText
+
+ message = MIMEText(body)
+ message["to"] = to
+ message["subject"] = subject
+ raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")
+ return {"raw": raw_message}
+
+
+class GmailListLabelsBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/gmail.labels"]
+ )
+
+ class Output(BlockSchema):
+ result: list[dict] = SchemaField(
+ description="List of labels",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7",
+ description="This block lists all labels in Gmail.",
+ categories={BlockCategory.COMMUNICATION},
+ input_schema=GmailListLabelsBlock.Input,
+ output_schema=GmailListLabelsBlock.Output,
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ test_input={
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ [
+ {"id": "Label_1", "name": "Important"},
+ {"id": "Label_2", "name": "Work"},
+ ],
+ ),
+ ],
+ test_mock={
+ "_list_labels": lambda *args, **kwargs: [
+ {"id": "Label_1", "name": "Important"},
+ {"id": "Label_2", "name": "Work"},
+ ],
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = GmailReadBlock._build_service(credentials, **kwargs)
+ labels = self._list_labels(service)
+ yield "result", labels
+
+ def _list_labels(self, service) -> list[dict]:
+ results = service.users().labels().list(userId="me").execute()
+ labels = results.get("labels", [])
+ return [{"id": label["id"], "name": label["name"]} for label in labels]
+
+
+class GmailAddLabelBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/gmail.modify"]
+ )
+ message_id: str = SchemaField(
+ description="Message ID to add label to",
+ )
+ label_name: str = SchemaField(
+ description="Label name to add",
+ )
+
+ class Output(BlockSchema):
+ result: dict = SchemaField(
+ description="Label addition result",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="f884b2fb-04f4-4265-9658-14f433926ac9",
+ description="This block adds a label to a Gmail message.",
+ categories={BlockCategory.COMMUNICATION},
+ input_schema=GmailAddLabelBlock.Input,
+ output_schema=GmailAddLabelBlock.Output,
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ test_input={
+ "message_id": "12345",
+ "label_name": "Important",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ {"status": "Label added successfully", "label_id": "Label_1"},
+ ),
+ ],
+ test_mock={
+ "_add_label": lambda *args, **kwargs: {
+ "status": "Label added successfully",
+ "label_id": "Label_1",
+ },
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = GmailReadBlock._build_service(credentials, **kwargs)
+ result = self._add_label(service, input_data.message_id, input_data.label_name)
+ yield "result", result
+
+ def _add_label(self, service, message_id: str, label_name: str) -> dict:
+ label_id = self._get_or_create_label(service, label_name)
+ service.users().messages().modify(
+ userId="me", id=message_id, body={"addLabelIds": [label_id]}
+ ).execute()
+ return {"status": "Label added successfully", "label_id": label_id}
+
+ def _get_or_create_label(self, service, label_name: str) -> str:
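+        # Look the label up by name first and create it only if it does not exist yet.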
+ label_id = self._get_label_id(service, label_name)
+ if not label_id:
+ label = (
+ service.users()
+ .labels()
+ .create(userId="me", body={"name": label_name})
+ .execute()
+ )
+ label_id = label["id"]
+ return label_id
+
+ def _get_label_id(self, service, label_name: str) -> str | None:
+ results = service.users().labels().list(userId="me").execute()
+ labels = results.get("labels", [])
+ for label in labels:
+ if label["name"] == label_name:
+ return label["id"]
+ return None
+
+
+class GmailRemoveLabelBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/gmail.modify"]
+ )
+ message_id: str = SchemaField(
+ description="Message ID to remove label from",
+ )
+ label_name: str = SchemaField(
+ description="Label name to remove",
+ )
+
+ class Output(BlockSchema):
+ result: dict = SchemaField(
+ description="Label removal result",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="0afc0526-aba1-4b2b-888e-a22b7c3f359d",
+ description="This block removes a label from a Gmail message.",
+ categories={BlockCategory.COMMUNICATION},
+ input_schema=GmailRemoveLabelBlock.Input,
+ output_schema=GmailRemoveLabelBlock.Output,
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ test_input={
+ "message_id": "12345",
+ "label_name": "Important",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ {"status": "Label removed successfully", "label_id": "Label_1"},
+ ),
+ ],
+ test_mock={
+ "_remove_label": lambda *args, **kwargs: {
+ "status": "Label removed successfully",
+ "label_id": "Label_1",
+ },
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = GmailReadBlock._build_service(credentials, **kwargs)
+ result = self._remove_label(
+ service, input_data.message_id, input_data.label_name
+ )
+ yield "result", result
+
+ def _remove_label(self, service, message_id: str, label_name: str) -> dict:
+ label_id = self._get_label_id(service, label_name)
+ if label_id:
+ service.users().messages().modify(
+ userId="me", id=message_id, body={"removeLabelIds": [label_id]}
+ ).execute()
+ return {"status": "Label removed successfully", "label_id": label_id}
+ else:
+ return {"status": "Label not found", "label_name": label_name}
+
+ def _get_label_id(self, service, label_name: str) -> str | None:
+ results = service.users().labels().list(userId="me").execute()
+ labels = results.get("labels", [])
+ for label in labels:
+ if label["name"] == label_name:
+ return label["id"]
+ return None
diff --git a/autogpt_platform/backend/backend/blocks/google/sheets.py b/autogpt_platform/backend/backend/blocks/google/sheets.py
new file mode 100644
index 0000000000..e7878ff4b6
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/google/sheets.py
@@ -0,0 +1,184 @@
+from google.oauth2.credentials import Credentials
+from googleapiclient.discovery import build
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+from ._auth import (
+ GOOGLE_OAUTH_IS_CONFIGURED,
+ TEST_CREDENTIALS,
+ TEST_CREDENTIALS_INPUT,
+ GoogleCredentials,
+ GoogleCredentialsField,
+ GoogleCredentialsInput,
+)
+
+
+class GoogleSheetsReadBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/spreadsheets.readonly"]
+ )
+ spreadsheet_id: str = SchemaField(
+ description="The ID of the spreadsheet to read from",
+ )
+ range: str = SchemaField(
+ description="The A1 notation of the range to read",
+ )
+
+ class Output(BlockSchema):
+ result: list[list[str]] = SchemaField(
+ description="The data read from the spreadsheet",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="5724e902-3635-47e9-a108-aaa0263a4988",
+ description="This block reads data from a Google Sheets spreadsheet.",
+ categories={BlockCategory.DATA},
+ input_schema=GoogleSheetsReadBlock.Input,
+ output_schema=GoogleSheetsReadBlock.Output,
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ test_input={
+ "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "range": "Sheet1!A1:B2",
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ [
+ ["Name", "Score"],
+ ["Alice", "85"],
+ ],
+ ),
+ ],
+ test_mock={
+ "_read_sheet": lambda *args, **kwargs: [
+ ["Name", "Score"],
+ ["Alice", "85"],
+ ],
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = self._build_service(credentials, **kwargs)
+ data = self._read_sheet(service, input_data.spreadsheet_id, input_data.range)
+ yield "result", data
+
+ @staticmethod
+ def _build_service(credentials: GoogleCredentials, **kwargs):
+ creds = Credentials(
+ token=(
+ credentials.access_token.get_secret_value()
+ if credentials.access_token
+ else None
+ ),
+ refresh_token=(
+ credentials.refresh_token.get_secret_value()
+ if credentials.refresh_token
+ else None
+ ),
+ token_uri="https://oauth2.googleapis.com/token",
+ client_id=kwargs.get("client_id"),
+ client_secret=kwargs.get("client_secret"),
+ scopes=credentials.scopes,
+ )
+ return build("sheets", "v4", credentials=creds)
+
+ def _read_sheet(self, service, spreadsheet_id: str, range: str) -> list[list[str]]:
+ sheet = service.spreadsheets()
+ result = sheet.values().get(spreadsheetId=spreadsheet_id, range=range).execute()
+ return result.get("values", [])
+
+
+class GoogleSheetsWriteBlock(Block):
+ class Input(BlockSchema):
+ credentials: GoogleCredentialsInput = GoogleCredentialsField(
+ ["https://www.googleapis.com/auth/spreadsheets"]
+ )
+ spreadsheet_id: str = SchemaField(
+ description="The ID of the spreadsheet to write to",
+ )
+ range: str = SchemaField(
+ description="The A1 notation of the range to write",
+ )
+ values: list[list[str]] = SchemaField(
+ description="The data to write to the spreadsheet",
+ )
+
+ class Output(BlockSchema):
+ result: dict = SchemaField(
+ description="The result of the write operation",
+ )
+ error: str = SchemaField(
+ description="Error message if any",
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="d9291e87-301d-47a8-91fe-907fb55460e5",
+ description="This block writes data to a Google Sheets spreadsheet.",
+ categories={BlockCategory.DATA},
+ input_schema=GoogleSheetsWriteBlock.Input,
+ output_schema=GoogleSheetsWriteBlock.Output,
+ disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
+ test_input={
+ "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
+ "range": "Sheet1!A1:B2",
+ "values": [
+ ["Name", "Score"],
+ ["Bob", "90"],
+ ],
+ "credentials": TEST_CREDENTIALS_INPUT,
+ },
+ test_credentials=TEST_CREDENTIALS,
+ test_output=[
+ (
+ "result",
+ {"updatedCells": 4, "updatedColumns": 2, "updatedRows": 2},
+ ),
+ ],
+ test_mock={
+ "_write_sheet": lambda *args, **kwargs: {
+ "updatedCells": 4,
+ "updatedColumns": 2,
+ "updatedRows": 2,
+ },
+ },
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
+ ) -> BlockOutput:
+ service = GoogleSheetsReadBlock._build_service(credentials, **kwargs)
+ result = self._write_sheet(
+ service,
+ input_data.spreadsheet_id,
+ input_data.range,
+ input_data.values,
+ )
+ yield "result", result
+
+ def _write_sheet(
+ self, service, spreadsheet_id: str, range: str, values: list[list[str]]
+ ) -> dict:
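+        # "USER_ENTERED" makes Sheets parse the values as if they were typed in the UI,
+        # so numbers and formulas are interpreted rather than stored as raw strings.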
+ body = {"values": values}
+ result = (
+ service.spreadsheets()
+ .values()
+ .update(
+ spreadsheetId=spreadsheet_id,
+ range=range,
+ valueInputOption="USER_ENTERED",
+ body=body,
+ )
+ .execute()
+ )
+ return result
diff --git a/autogpt_platform/backend/backend/blocks/google_maps.py b/autogpt_platform/backend/backend/blocks/google_maps.py
new file mode 100644
index 0000000000..3be57b93e8
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/google_maps.py
@@ -0,0 +1,124 @@
+import googlemaps
+from pydantic import BaseModel
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField
+
+
+class Place(BaseModel):
+ name: str
+ address: str
+ phone: str
+ rating: float
+ reviews: int
+ website: str
+
+
+class GoogleMapsSearchBlock(Block):
+ class Input(BlockSchema):
+ api_key: BlockSecret = SecretField(
+ key="google_maps_api_key",
+ description="Google Maps API Key",
+ )
+ query: str = SchemaField(
+ description="Search query for local businesses",
+ placeholder="e.g., 'restaurants in New York'",
+ )
+ radius: int = SchemaField(
+ description="Search radius in meters (max 50000)",
+ default=5000,
+ ge=1,
+ le=50000,
+ )
+ max_results: int = SchemaField(
+ description="Maximum number of results to return (max 60)",
+ default=20,
+ ge=1,
+ le=60,
+ )
+
+ class Output(BlockSchema):
+ place: Place = SchemaField(description="Place found")
+ error: str = SchemaField(description="Error message if the search failed")
+
+ def __init__(self):
+ super().__init__(
+ id="f47ac10b-58cc-4372-a567-0e02b2c3d479",
+ description="This block searches for local businesses using Google Maps API.",
+ categories={BlockCategory.SEARCH},
+ input_schema=GoogleMapsSearchBlock.Input,
+ output_schema=GoogleMapsSearchBlock.Output,
+ test_input={
+ "api_key": "your_test_api_key",
+ "query": "restaurants in new york",
+ "radius": 5000,
+ "max_results": 5,
+ },
+ test_output=[
+ (
+ "place",
+ {
+ "name": "Test Restaurant",
+ "address": "123 Test St, New York, NY 10001",
+ "phone": "+1 (555) 123-4567",
+ "rating": 4.5,
+ "reviews": 100,
+ "website": "https://testrestaurant.com",
+ },
+ ),
+ ],
+ test_mock={
+ "search_places": lambda *args, **kwargs: [
+ {
+ "name": "Test Restaurant",
+ "address": "123 Test St, New York, NY 10001",
+ "phone": "+1 (555) 123-4567",
+ "rating": 4.5,
+ "reviews": 100,
+ "website": "https://testrestaurant.com",
+ }
+ ]
+ },
+ )
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ places = self.search_places(
+ input_data.api_key.get_secret_value(),
+ input_data.query,
+ input_data.radius,
+ input_data.max_results,
+ )
+ for place in places:
+ yield "place", place
+
+ def search_places(self, api_key, query, radius, max_results):
+ client = googlemaps.Client(key=api_key)
+ return self._search_places(client, query, radius, max_results)
+
+ def _search_places(self, client, query, radius, max_results):
+ results = []
+ next_page_token = None
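+        # The Places API pages its results; keep following next_page_token until
+        # max_results places have been collected or no further pages remain.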
+ while len(results) < max_results:
+ response = client.places(
+ query=query,
+ radius=radius,
+ page_token=next_page_token,
+ )
+ for place in response["results"]:
+ if len(results) >= max_results:
+ break
+ place_details = client.place(place["place_id"])["result"]
+ results.append(
+ Place(
+ name=place_details.get("name", ""),
+ address=place_details.get("formatted_address", ""),
+ phone=place_details.get("formatted_phone_number", ""),
+ rating=place_details.get("rating", 0),
+ reviews=place_details.get("user_ratings_total", 0),
+ website=place_details.get("website", ""),
+ )
+ )
+ next_page_token = response.get("next_page_token")
+ if not next_page_token:
+ break
+ return results
diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py
index 04d893f847..74a1d3d0bb 100644
--- a/autogpt_platform/backend/backend/blocks/http.py
+++ b/autogpt_platform/backend/backend/blocks/http.py
@@ -4,6 +4,7 @@ from enum import Enum
import requests
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
class HttpMethod(Enum):
@@ -18,15 +19,27 @@ class HttpMethod(Enum):
class SendWebRequestBlock(Block):
class Input(BlockSchema):
- url: str
- method: HttpMethod = HttpMethod.POST
- headers: dict[str, str] = {}
- body: object = {}
+ url: str = SchemaField(
+ description="The URL to send the request to",
+ placeholder="https://api.example.com",
+ )
+ method: HttpMethod = SchemaField(
+ description="The HTTP method to use for the request",
+ default=HttpMethod.POST,
+ )
+ headers: dict[str, str] = SchemaField(
+ description="The headers to include in the request",
+ default={},
+ )
+ body: object = SchemaField(
+ description="The body of the request",
+ default={},
+ )
class Output(BlockSchema):
- response: object
- client_error: object
- server_error: object
+ response: object = SchemaField(description="The response from the server")
+ client_error: object = SchemaField(description="The error on 4xx status codes")
+ server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py
new file mode 100644
index 0000000000..6818a25371
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/ideogram.py
@@ -0,0 +1,253 @@
+from enum import Enum
+from typing import Any, Dict, Optional
+
+import requests
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField
+
+
+class IdeogramModelName(str, Enum):
+ V2 = "V_2"
+ V1 = "V_1"
+ V1_TURBO = "V_1_TURBO"
+ V2_TURBO = "V_2_TURBO"
+
+
+class MagicPromptOption(str, Enum):
+ AUTO = "AUTO"
+ ON = "ON"
+ OFF = "OFF"
+
+
+class StyleType(str, Enum):
+ AUTO = "AUTO"
+ GENERAL = "GENERAL"
+ REALISTIC = "REALISTIC"
+ DESIGN = "DESIGN"
+ RENDER_3D = "RENDER_3D"
+ ANIME = "ANIME"
+
+
+class ColorPalettePreset(str, Enum):
+ NONE = "NONE"
+ EMBER = "EMBER"
+ FRESH = "FRESH"
+ JUNGLE = "JUNGLE"
+ MAGIC = "MAGIC"
+ MELON = "MELON"
+ MOSAIC = "MOSAIC"
+ PASTEL = "PASTEL"
+ ULTRAMARINE = "ULTRAMARINE"
+
+
+class AspectRatio(str, Enum):
+ ASPECT_10_16 = "ASPECT_10_16"
+ ASPECT_16_10 = "ASPECT_16_10"
+ ASPECT_9_16 = "ASPECT_9_16"
+ ASPECT_16_9 = "ASPECT_16_9"
+ ASPECT_3_2 = "ASPECT_3_2"
+ ASPECT_2_3 = "ASPECT_2_3"
+ ASPECT_4_3 = "ASPECT_4_3"
+ ASPECT_3_4 = "ASPECT_3_4"
+ ASPECT_1_1 = "ASPECT_1_1"
+ ASPECT_1_3 = "ASPECT_1_3"
+ ASPECT_3_1 = "ASPECT_3_1"
+
+
+class UpscaleOption(str, Enum):
+ AI_UPSCALE = "AI Upscale"
+ NO_UPSCALE = "No Upscale"
+
+
+class IdeogramModelBlock(Block):
+ class Input(BlockSchema):
+ api_key: BlockSecret = SecretField(
+ key="ideogram_api_key",
+ description="Ideogram API Key",
+ )
+ prompt: str = SchemaField(
+ description="Text prompt for image generation",
+ placeholder="e.g., 'A futuristic cityscape at sunset'",
+ title="Prompt",
+ )
+ ideogram_model_name: IdeogramModelName = SchemaField(
+ description="The name of the Image Generation Model, e.g., V_2",
+ default=IdeogramModelName.V2,
+ title="Image Generation Model",
+ advanced=False,
+ )
+ aspect_ratio: AspectRatio = SchemaField(
+ description="Aspect ratio for the generated image",
+ default=AspectRatio.ASPECT_1_1,
+ title="Aspect Ratio",
+ advanced=False,
+ )
+ upscale: UpscaleOption = SchemaField(
+ description="Upscale the generated image",
+ default=UpscaleOption.NO_UPSCALE,
+ title="Upscale Image",
+ advanced=False,
+ )
+ magic_prompt_option: MagicPromptOption = SchemaField(
+ description="Whether to use MagicPrompt for enhancing the request",
+ default=MagicPromptOption.AUTO,
+ title="Magic Prompt Option",
+ advanced=True,
+ )
+ seed: Optional[int] = SchemaField(
+ description="Random seed. Set for reproducible generation",
+ default=None,
+ title="Seed",
+ advanced=True,
+ )
+ style_type: StyleType = SchemaField(
+ description="Style type to apply, applicable for V_2 and above",
+ default=StyleType.AUTO,
+ title="Style Type",
+ advanced=True,
+ )
+ negative_prompt: Optional[str] = SchemaField(
+ description="Description of what to exclude from the image",
+ default=None,
+ title="Negative Prompt",
+ advanced=True,
+ )
+ color_palette_name: ColorPalettePreset = SchemaField(
+ description="Color palette preset name, choose 'None' to skip",
+ default=ColorPalettePreset.NONE,
+ title="Color Palette Preset",
+ advanced=True,
+ )
+
+ class Output(BlockSchema):
+ result: str = SchemaField(description="Generated image URL")
+ error: str = SchemaField(description="Error message if the model run failed")
+
+ def __init__(self):
+ super().__init__(
+ id="6ab085e2-20b3-4055-bc3e-08036e01eca6",
+ description="This block runs Ideogram models with both simple and advanced settings.",
+ categories={BlockCategory.AI},
+ input_schema=IdeogramModelBlock.Input,
+ output_schema=IdeogramModelBlock.Output,
+ test_input={
+ "api_key": "test_api_key",
+ "ideogram_model_name": IdeogramModelName.V2,
+ "prompt": "A futuristic cityscape at sunset",
+ "aspect_ratio": AspectRatio.ASPECT_1_1,
+ "upscale": UpscaleOption.NO_UPSCALE,
+ "magic_prompt_option": MagicPromptOption.AUTO,
+ "seed": None,
+ "style_type": StyleType.AUTO,
+ "negative_prompt": None,
+ "color_palette_name": ColorPalettePreset.NONE,
+ },
+ test_output=[
+ (
+ "result",
+ "https://ideogram.ai/api/images/test-generated-image-url.png",
+ ),
+ ],
+ test_mock={
+ "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name: "https://ideogram.ai/api/images/test-generated-image-url.png",
+ "upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png",
+ },
+ )
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ seed = input_data.seed
+
+ # Step 1: Generate the image
+ result = self.run_model(
+ api_key=input_data.api_key.get_secret_value(),
+ model_name=input_data.ideogram_model_name.value,
+ prompt=input_data.prompt,
+ seed=seed,
+ aspect_ratio=input_data.aspect_ratio.value,
+ magic_prompt_option=input_data.magic_prompt_option.value,
+ style_type=input_data.style_type.value,
+ negative_prompt=input_data.negative_prompt,
+ color_palette_name=input_data.color_palette_name.value,
+ )
+
+ # Step 2: Upscale the image if requested
+ if input_data.upscale == UpscaleOption.AI_UPSCALE:
+ result = self.upscale_image(
+ api_key=input_data.api_key.get_secret_value(),
+ image_url=result,
+ )
+
+ yield "result", result
+
+ def run_model(
+ self,
+ api_key: str,
+ model_name: str,
+ prompt: str,
+ seed: Optional[int],
+ aspect_ratio: str,
+ magic_prompt_option: str,
+ style_type: str,
+ negative_prompt: Optional[str],
+ color_palette_name: str,
+ ):
+ url = "https://api.ideogram.ai/generate"
+ headers = {"Api-Key": api_key, "Content-Type": "application/json"}
+
+ data: Dict[str, Any] = {
+ "image_request": {
+ "prompt": prompt,
+ "model": model_name,
+ "aspect_ratio": aspect_ratio,
+ "magic_prompt_option": magic_prompt_option,
+ "style_type": style_type,
+ }
+ }
+
+ if seed is not None:
+ data["image_request"]["seed"] = seed
+
+ if negative_prompt:
+ data["image_request"]["negative_prompt"] = negative_prompt
+
+ if color_palette_name != "NONE":
+ data["image_request"]["color_palette"] = {"name": color_palette_name}
+
+ try:
+ response = requests.post(url, json=data, headers=headers)
+ response.raise_for_status()
+ return response.json()["data"][0]["url"]
+ except requests.exceptions.RequestException as e:
+ raise Exception(f"Failed to fetch image: {str(e)}")
+
+ def upscale_image(self, api_key: str, image_url: str):
+ url = "https://api.ideogram.ai/upscale"
+ headers = {
+ "Api-Key": api_key,
+ }
+
+ try:
+ # Step 1: Download the image from the provided URL
+ image_response = requests.get(image_url)
+ image_response.raise_for_status()
+
+ # Step 2: Send the downloaded image to the upscale API
+ files = {
+ "image_file": ("image.png", image_response.content, "image/png"),
+ }
+
+ response = requests.post(
+ url,
+ headers=headers,
+ data={
+ "image_request": "{}", # Empty JSON object
+ },
+ files=files,
+ )
+
+ response.raise_for_status()
+ return response.json()["data"][0]["url"]
+
+ except requests.exceptions.RequestException as e:
+ raise Exception(f"Failed to upscale image: {str(e)}")
diff --git a/autogpt_platform/backend/backend/blocks/iteration.py b/autogpt_platform/backend/backend/blocks/iteration.py
index f863521c83..247a92d3c7 100644
--- a/autogpt_platform/backend/backend/blocks/iteration.py
+++ b/autogpt_platform/backend/backend/blocks/iteration.py
@@ -1,37 +1,52 @@
-from typing import Any, List, Tuple
+from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
-class ListIteratorBlock(Block):
+class StepThroughItemsBlock(Block):
class Input(BlockSchema):
- items: List[Any] = SchemaField(
- description="The list of items to iterate over",
- placeholder="[1, 2, 3, 4, 5]",
+ items: list | dict = SchemaField(
+ description="The list or dictionary of items to iterate over",
+ placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
)
class Output(BlockSchema):
- item: Tuple[int, Any] = SchemaField(
- description="A tuple with the index and current item in the iteration"
+ item: Any = SchemaField(description="The current item in the iteration")
+ key: Any = SchemaField(
+ description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
- id="f8e7d6c5-b4a3-2c1d-0e9f-8g7h6i5j4k3l",
- input_schema=ListIteratorBlock.Input,
- output_schema=ListIteratorBlock.Output,
- description="Iterates over a list of items and outputs each item with its index.",
+ id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
+ input_schema=StepThroughItemsBlock.Input,
+ output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
- test_input={"items": [1, "two", {"three": 3}, [4, 5]]},
+ description="Iterates over a list or dictionary and outputs each item.",
+ test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
- ("item", (0, 1)),
- ("item", (1, "two")),
- ("item", (2, {"three": 3})),
- ("item", (3, [4, 5])),
+ ("item", 1),
+ ("key", 0),
+ ("item", 2),
+ ("key", 1),
+ ("item", 3),
+ ("key", 2),
+ ("item", {"key1": "value1", "key2": "value2"}),
+ ("key", 3),
],
+ test_mock={},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- for index, item in enumerate(input_data.items):
- yield "item", (index, item)
+ items = input_data.items
+        if isinstance(items, dict):
+            # If items is a dictionary, iterate over its key-value pairs
+            for key, item in items.items():
+                yield "item", item
+                yield "key", key
+ else:
+ # If items is a list, iterate over the list
+ for index, item in enumerate(items):
+ yield "item", item
+ yield "key", index
diff --git a/autogpt_platform/backend/backend/blocks/jina/_auth.py b/autogpt_platform/backend/backend/blocks/jina/_auth.py
new file mode 100644
index 0000000000..c39443da47
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/jina/_auth.py
@@ -0,0 +1,39 @@
+from typing import Literal
+
+from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
+from pydantic import SecretStr
+
+from backend.data.model import CredentialsField, CredentialsMetaInput
+
+JinaCredentials = APIKeyCredentials
+JinaCredentialsInput = CredentialsMetaInput[
+ Literal["jina"],
+ Literal["api_key"],
+]
+
+
+def JinaCredentialsField() -> JinaCredentialsInput:
+ """
+ Creates a Jina credentials input on a block.
+    """
+ return CredentialsField(
+ provider="jina",
+ supported_credential_types={"api_key"},
+ description="The Jina integration can be used with an API Key.",
+ )
+
+
+TEST_CREDENTIALS = APIKeyCredentials(
+ id="01234567-89ab-cdef-0123-456789abcdef",
+ provider="jina",
+ api_key=SecretStr("mock-jina-api-key"),
+ title="Mock Jina API key",
+ expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+ "provider": TEST_CREDENTIALS.provider,
+ "id": TEST_CREDENTIALS.id,
+ "type": TEST_CREDENTIALS.type,
+ "title": TEST_CREDENTIALS.type,
+}
diff --git a/autogpt_platform/backend/backend/blocks/jina/chunking.py b/autogpt_platform/backend/backend/blocks/jina/chunking.py
new file mode 100644
index 0000000000..f3b0c4a34b
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/jina/chunking.py
@@ -0,0 +1,69 @@
+import requests
+
+from backend.blocks.jina._auth import (
+ JinaCredentials,
+ JinaCredentialsField,
+ JinaCredentialsInput,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class JinaChunkingBlock(Block):
+ class Input(BlockSchema):
+ texts: list = SchemaField(description="List of texts to chunk")
+
+ credentials: JinaCredentialsInput = JinaCredentialsField()
+ max_chunk_length: int = SchemaField(
+ description="Maximum length of each chunk", default=1000
+ )
+ return_tokens: bool = SchemaField(
+ description="Whether to return token information", default=False
+ )
+
+ class Output(BlockSchema):
+ chunks: list = SchemaField(description="List of chunked texts")
+ tokens: list = SchemaField(
+ description="List of token information for each chunk", optional=True
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="806fb15e-830f-4796-8692-557d300ff43c",
+ description="Chunks texts using Jina AI's segmentation service",
+ categories={BlockCategory.AI, BlockCategory.TEXT},
+ input_schema=JinaChunkingBlock.Input,
+ output_schema=JinaChunkingBlock.Output,
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: JinaCredentials, **kwargs
+ ) -> BlockOutput:
+ url = "https://segment.jina.ai/"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
+ }
+
+ all_chunks = []
+ all_tokens = []
+
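+        # Send each text to the Jina segmenter and collect the returned chunks (and tokens, if requested)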
+ for text in input_data.texts:
+ data = {
+ "content": text,
+ "return_tokens": str(input_data.return_tokens).lower(),
+ "return_chunks": "true",
+ "max_chunk_length": str(input_data.max_chunk_length),
+ }
+
+ response = requests.post(url, headers=headers, json=data)
+ response.raise_for_status()
+ result = response.json()
+
+ all_chunks.extend(result.get("chunks", []))
+ if input_data.return_tokens:
+ all_tokens.extend(result.get("tokens", []))
+
+ yield "chunks", all_chunks
+ if input_data.return_tokens:
+ yield "tokens", all_tokens
diff --git a/autogpt_platform/backend/backend/blocks/jina/embeddings.py b/autogpt_platform/backend/backend/blocks/jina/embeddings.py
new file mode 100644
index 0000000000..a33acfec9e
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/jina/embeddings.py
@@ -0,0 +1,44 @@
+import requests
+
+from backend.blocks.jina._auth import (
+ JinaCredentials,
+ JinaCredentialsField,
+ JinaCredentialsInput,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+
+
+class JinaEmbeddingBlock(Block):
+ class Input(BlockSchema):
+ texts: list = SchemaField(description="List of texts to embed")
+ credentials: JinaCredentialsInput = JinaCredentialsField()
+ model: str = SchemaField(
+ description="Jina embedding model to use",
+ default="jina-embeddings-v2-base-en",
+ )
+
+ class Output(BlockSchema):
+ embeddings: list = SchemaField(description="List of embeddings")
+
+ def __init__(self):
+ super().__init__(
+ id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6",
+ description="Generates embeddings using Jina AI",
+ categories={BlockCategory.AI},
+ input_schema=JinaEmbeddingBlock.Input,
+ output_schema=JinaEmbeddingBlock.Output,
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: JinaCredentials, **kwargs
+ ) -> BlockOutput:
+ url = "https://api.jina.ai/v1/embeddings"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
+ }
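+        # The endpoint accepts a batch of texts, so all inputs are embedded in a single request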
+ data = {"input": input_data.texts, "model": input_data.model}
+ response = requests.post(url, headers=headers, json=data)
+ embeddings = [e["embedding"] for e in response.json()["data"]]
+ yield "embeddings", embeddings
diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py
index bafbaba0fb..1366429a54 100644
--- a/autogpt_platform/backend/backend/blocks/llm.py
+++ b/autogpt_platform/backend/backend/blocks/llm.py
@@ -1,7 +1,12 @@
+import ast
import logging
-from enum import Enum
+from enum import Enum, EnumMeta
from json import JSONDecodeError
-from typing import Any, List, NamedTuple
+from types import MappingProxyType
+from typing import TYPE_CHECKING, Any, List, NamedTuple
+
+if TYPE_CHECKING:
+ from enum import _EnumMemberT
import anthropic
import ollama
@@ -11,6 +16,7 @@ from groq import Groq
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
from backend.util import json
+from backend.util.settings import BehaveAs, Settings
logger = logging.getLogger(__name__)
@@ -28,7 +34,26 @@ class ModelMetadata(NamedTuple):
cost_factor: int
-class LlmModel(str, Enum):
+class LlmModelMeta(EnumMeta):
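+    # Hides Ollama-backed models from the enum unless the platform is running locally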
+ @property
+ def __members__(
+ self: type["_EnumMemberT"],
+ ) -> MappingProxyType[str, "_EnumMemberT"]:
+ if Settings().config.behave_as == BehaveAs.LOCAL:
+ members = super().__members__
+ return members
+ else:
+ removed_providers = ["ollama"]
+ existing_members = super().__members__
+ members = {
+ name: member
+ for name, member in existing_members.items()
+ if LlmModel[name].provider not in removed_providers
+ }
+ return MappingProxyType(members)
+
+
+class LlmModel(str, Enum, metaclass=LlmModelMeta):
# OpenAI models
O1_PREVIEW = "o1-preview"
O1_MINI = "o1-mini"
@@ -37,7 +62,7 @@ class LlmModel(str, Enum):
GPT4_TURBO = "gpt-4-turbo"
GPT3_5_TURBO = "gpt-3.5-turbo"
# Anthropic models
- CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
+ CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
# Groq models
LLAMA3_8B = "llama3-8b-8192"
@@ -57,27 +82,39 @@ class LlmModel(str, Enum):
def metadata(self) -> ModelMetadata:
return MODEL_METADATA[self]
+ @property
+ def provider(self) -> str:
+ return self.metadata.provider
+
+ @property
+ def context_window(self) -> int:
+ return self.metadata.context_window
+
+ @property
+ def cost_factor(self) -> int:
+ return self.metadata.cost_factor
+
MODEL_METADATA = {
- LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=60),
- LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=30),
- LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10),
- LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12),
- LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11),
- LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8),
- LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14),
- LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13),
- LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6),
- LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9),
- LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7),
- LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6),
- LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7),
- LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10),
+ LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=16),
+ LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=4),
+ LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=1),
+ LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=3),
+ LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=10),
+ LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=1),
+ LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=4),
+ LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=1),
+ LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=1),
+ LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=1),
+ LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=1),
+ LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=1),
+ LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=1),
+ LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=1),
# Limited to 16k during preview
- LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15),
- LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13),
- LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7),
- LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11),
+ LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=1),
+ LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=1),
+ LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=1),
+ LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=1),
}
for model in LlmModel:
@@ -85,9 +122,23 @@ for model in LlmModel:
raise ValueError(f"Missing MODEL_METADATA metadata for model: {model}")
+class MessageRole(str, Enum):
+ SYSTEM = "system"
+ USER = "user"
+ ASSISTANT = "assistant"
+
+
+class Message(BlockSchema):
+ role: MessageRole
+ content: str
+
+
class AIStructuredResponseGeneratorBlock(Block):
class Input(BlockSchema):
- prompt: str
+ prompt: str = SchemaField(
+ description="The prompt to send to the language model.",
+ placeholder="Enter your prompt here...",
+ )
expected_format: dict[str, str] = SchemaField(
description="Expected format of the response. If provided, the response will be validated against this format. "
"The keys should be the expected fields in the response, and the values should be the description of the field.",
@@ -99,15 +150,34 @@ class AIStructuredResponseGeneratorBlock(Block):
advanced=False,
)
api_key: BlockSecret = SecretField(value="")
- sys_prompt: str = ""
- retry: int = 3
+ sys_prompt: str = SchemaField(
+ title="System Prompt",
+ default="",
+ description="The system prompt to provide additional context to the model.",
+ )
+ conversation_history: list[Message] = SchemaField(
+ default=[],
+ description="The conversation history to provide context for the prompt.",
+ )
+ retry: int = SchemaField(
+ title="Retry Count",
+ default=3,
+ description="Number of times to retry the LLM call if the response does not match the expected format.",
+ )
prompt_values: dict[str, str] = SchemaField(
advanced=False, default={}, description="Values used to fill in the prompt."
)
+ max_tokens: int | None = SchemaField(
+ advanced=True,
+ default=None,
+ description="The maximum number of tokens to generate in the chat completion.",
+ )
class Output(BlockSchema):
- response: dict[str, Any]
- error: str
+ response: dict[str, Any] = SchemaField(
+ description="The response object generated by the language model."
+ )
+ error: str = SchemaField(description="Error message if the API call failed.")
def __init__(self):
super().__init__(
@@ -127,26 +197,47 @@ class AIStructuredResponseGeneratorBlock(Block):
},
test_output=("response", {"key1": "key1Value", "key2": "key2Value"}),
test_mock={
- "llm_call": lambda *args, **kwargs: json.dumps(
- {
- "key1": "key1Value",
- "key2": "key2Value",
- }
+ "llm_call": lambda *args, **kwargs: (
+ json.dumps(
+ {
+ "key1": "key1Value",
+ "key2": "key2Value",
+ }
+ ),
+ 0,
+ 0,
)
},
)
@staticmethod
def llm_call(
- api_key: str, model: LlmModel, prompt: list[dict], json_format: bool
- ) -> str:
- provider = model.metadata.provider
+ api_key: str,
+ llm_model: LlmModel,
+ prompt: list[dict],
+ json_format: bool,
+ max_tokens: int | None = None,
+ ) -> tuple[str, int, int]:
+ """
+ Args:
+ api_key: API key for the LLM provider.
+ llm_model: The LLM model to use.
+ prompt: The prompt to send to the LLM.
+ json_format: Whether the response should be in JSON format.
+ max_tokens: The maximum number of tokens to generate in the chat completion.
+
+ Returns:
+ The response from the LLM.
+ The number of tokens used in the prompt.
+ The number of tokens used in the completion.
+ """
+ provider = llm_model.metadata.provider
if provider == "openai":
openai.api_key = api_key
response_format = None
- if model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]:
+ if llm_model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]:
sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
prompt = [
@@ -157,11 +248,17 @@ class AIStructuredResponseGeneratorBlock(Block):
response_format = {"type": "json_object"}
response = openai.chat.completions.create(
- model=model.value,
+ model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
+ max_completion_tokens=max_tokens,
+ )
+
+ return (
+ response.choices[0].message.content or "",
+ response.usage.prompt_tokens if response.usage else 0,
+ response.usage.completion_tokens if response.usage else 0,
)
- return response.choices[0].message.content or ""
elif provider == "anthropic":
system_messages = [p["content"] for p in prompt if p["role"] == "system"]
sysprompt = " ".join(system_messages)
@@ -179,13 +276,18 @@ class AIStructuredResponseGeneratorBlock(Block):
client = anthropic.Anthropic(api_key=api_key)
try:
- response = client.messages.create(
- model=model.value,
- max_tokens=4096,
+ resp = client.messages.create(
+ model=llm_model.value,
system=sysprompt,
messages=messages,
+ max_tokens=max_tokens or 8192,
+ )
+
+ return (
+ resp.content[0].text if resp.content else "",
+ resp.usage.input_tokens,
+ resp.usage.output_tokens,
)
- return response.content[0].text if response.content else ""
except anthropic.APIError as e:
error_message = f"Anthropic API error: {str(e)}"
logger.error(error_message)
@@ -194,22 +296,35 @@ class AIStructuredResponseGeneratorBlock(Block):
client = Groq(api_key=api_key)
response_format = {"type": "json_object"} if json_format else None
response = client.chat.completions.create(
- model=model.value,
+ model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
+ max_tokens=max_tokens,
+ )
+ return (
+ response.choices[0].message.content or "",
+ response.usage.prompt_tokens if response.usage else 0,
+ response.usage.completion_tokens if response.usage else 0,
)
- return response.choices[0].message.content or ""
elif provider == "ollama":
+ sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
+ usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
response = ollama.generate(
- model=model.value,
- prompt=prompt[0]["content"],
+ model=llm_model.value,
+ prompt=f"{sys_messages}\n\n{usr_messages}",
+ stream=False,
+ )
+ return (
+ response.get("response") or "",
+ response.get("prompt_eval_count") or 0,
+ response.get("eval_count") or 0,
)
- return response["response"]
else:
raise ValueError(f"Unsupported LLM provider: {provider}")
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- prompt = []
+ logger.debug(f"Calling LLM with input data: {input_data}")
+ prompt = [p.model_dump() for p in input_data.conversation_history]
def trim_prompt(s: str) -> str:
lines = s.strip().split("\n")
@@ -238,7 +353,8 @@ class AIStructuredResponseGeneratorBlock(Block):
)
prompt.append({"role": "system", "content": sys_prompt})
- prompt.append({"role": "user", "content": input_data.prompt})
+ if input_data.prompt:
+ prompt.append({"role": "user", "content": input_data.prompt})
def parse_response(resp: str) -> tuple[dict[str, Any], str | None]:
try:
@@ -254,19 +370,26 @@ class AIStructuredResponseGeneratorBlock(Block):
logger.info(f"LLM request: {prompt}")
retry_prompt = ""
- model = input_data.model
+ llm_model = input_data.model
api_key = (
input_data.api_key.get_secret_value()
- or LlmApiKeys[model.metadata.provider].get_secret_value()
+ or LlmApiKeys[llm_model.metadata.provider].get_secret_value()
)
for retry_count in range(input_data.retry):
try:
- response_text = self.llm_call(
+ response_text, input_token, output_token = self.llm_call(
api_key=api_key,
- model=model,
+ llm_model=llm_model,
prompt=prompt,
json_format=bool(input_data.expected_format),
+ max_tokens=input_data.max_tokens,
+ )
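+                # Track token usage on the block's execution stats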
+ self.merge_stats(
+ {
+ "input_token_count": input_token,
+ "output_token_count": output_token,
+ }
)
logger.info(f"LLM attempt-{retry_count} response: {response_text}")
@@ -303,15 +426,25 @@ class AIStructuredResponseGeneratorBlock(Block):
)
prompt.append({"role": "user", "content": retry_prompt})
except Exception as e:
- logger.error(f"Error calling LLM: {e}")
+ logger.exception(f"Error calling LLM: {e}")
retry_prompt = f"Error calling LLM: {e}"
+ finally:
+ self.merge_stats(
+ {
+ "llm_call_count": retry_count + 1,
+ "llm_retry_count": retry_count,
+ }
+ )
- yield "error", retry_prompt
+ raise RuntimeError(retry_prompt)
class AITextGeneratorBlock(Block):
class Input(BlockSchema):
- prompt: str
+ prompt: str = SchemaField(
+ description="The prompt to send to the language model.",
+ placeholder="Enter your prompt here...",
+ )
model: LlmModel = SchemaField(
title="LLM Model",
default=LlmModel.GPT4_TURBO,
@@ -319,15 +452,30 @@ class AITextGeneratorBlock(Block):
advanced=False,
)
api_key: BlockSecret = SecretField(value="")
- sys_prompt: str = ""
- retry: int = 3
+ sys_prompt: str = SchemaField(
+ title="System Prompt",
+ default="",
+ description="The system prompt to provide additional context to the model.",
+ )
+ retry: int = SchemaField(
+ title="Retry Count",
+ default=3,
+ description="Number of times to retry the LLM call if the response does not match the expected format.",
+ )
prompt_values: dict[str, str] = SchemaField(
advanced=False, default={}, description="Values used to fill in the prompt."
)
+ max_tokens: int | None = SchemaField(
+ advanced=True,
+ default=None,
+ description="The maximum number of tokens to generate in the chat completion.",
+ )
class Output(BlockSchema):
- response: str
- error: str
+ response: str = SchemaField(
+ description="The response generated by the language model."
+ )
+ error: str = SchemaField(description="Error message if the API call failed.")
def __init__(self):
super().__init__(
@@ -341,47 +489,70 @@ class AITextGeneratorBlock(Block):
test_mock={"llm_call": lambda *args, **kwargs: "Response text"},
)
- @staticmethod
- def llm_call(input_data: AIStructuredResponseGeneratorBlock.Input) -> str:
- object_block = AIStructuredResponseGeneratorBlock()
- for output_name, output_data in object_block.run(input_data):
- if output_name == "response":
- return output_data["response"]
- else:
- raise RuntimeError(output_data)
- raise ValueError("Failed to get a response from the LLM.")
+ def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> str:
+ block = AIStructuredResponseGeneratorBlock()
+ response = block.run_once(input_data, "response")
+ self.merge_stats(block.execution_stats)
+ return response["response"]
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- object_input_data = AIStructuredResponseGeneratorBlock.Input(
- **{attr: getattr(input_data, attr) for attr in input_data.model_fields},
- expected_format={},
- )
- yield "response", self.llm_call(object_input_data)
- except Exception as e:
- yield "error", str(e)
+ object_input_data = AIStructuredResponseGeneratorBlock.Input(
+ **{attr: getattr(input_data, attr) for attr in input_data.model_fields},
+ expected_format={},
+ )
+ yield "response", self.llm_call(object_input_data)
+
+
+class SummaryStyle(Enum):
+ CONCISE = "concise"
+ DETAILED = "detailed"
+ BULLET_POINTS = "bullet points"
+ NUMBERED_LIST = "numbered list"
class AITextSummarizerBlock(Block):
class Input(BlockSchema):
- text: str
+ text: str = SchemaField(
+ description="The text to summarize.",
+ placeholder="Enter the text to summarize here...",
+ )
model: LlmModel = SchemaField(
title="LLM Model",
default=LlmModel.GPT4_TURBO,
description="The language model to use for summarizing the text.",
)
+ focus: str = SchemaField(
+ title="Focus",
+ default="general information",
+ description="The topic to focus on in the summary",
+ )
+ style: SummaryStyle = SchemaField(
+ title="Summary Style",
+ default=SummaryStyle.CONCISE,
+ description="The style of the summary to generate.",
+ )
api_key: BlockSecret = SecretField(value="")
# TODO: Make this dynamic
- max_tokens: int = 4000 # Adjust based on the model's context window
- chunk_overlap: int = 100 # Overlap between chunks to maintain context
+ max_tokens: int = SchemaField(
+ title="Max Tokens",
+ default=4096,
+ description="The maximum number of tokens to generate in the chat completion.",
+ ge=1,
+ )
+ chunk_overlap: int = SchemaField(
+ title="Chunk Overlap",
+ default=100,
+ description="The number of overlapping tokens between chunks to maintain context.",
+ ge=0,
+ )
class Output(BlockSchema):
- summary: str
- error: str
+ summary: str = SchemaField(description="The final summary of the text.")
+ error: str = SchemaField(description="Error message if the API call failed.")
def __init__(self):
super().__init__(
- id="c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8",
+ id="a0a69be1-4528-491c-a85a-a4ab6873e3f0",
description="Utilize a Large Language Model (LLM) to summarize a long text.",
categories={BlockCategory.AI, BlockCategory.TEXT},
input_schema=AITextSummarizerBlock.Input,
@@ -398,11 +569,8 @@ class AITextSummarizerBlock(Block):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- for output in self._run(input_data):
- yield output
- except Exception as e:
- yield "error", str(e)
+ for output in self._run(input_data):
+ yield output
def _run(self, input_data: Input) -> BlockOutput:
chunks = self._split_text(
@@ -429,18 +597,14 @@ class AITextSummarizerBlock(Block):
return chunks
- @staticmethod
- def llm_call(
- input_data: AIStructuredResponseGeneratorBlock.Input,
- ) -> dict[str, str]:
- llm_block = AIStructuredResponseGeneratorBlock()
- for output_name, output_data in llm_block.run(input_data):
- if output_name == "response":
- return output_data
- raise ValueError("Failed to get a response from the LLM.")
+ def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> dict:
+ block = AIStructuredResponseGeneratorBlock()
+ response = block.run_once(input_data, "response")
+ self.merge_stats(block.execution_stats)
+ return response
def _summarize_chunk(self, chunk: str, input_data: Input) -> str:
- prompt = f"Summarize the following text concisely:\n\n{chunk}"
+ prompt = f"Summarize the following text in a {input_data.style} form. Focus your summary on the topic of `{input_data.focus}` if present, otherwise just provide a general summary:\n\n```{chunk}```"
llm_response = self.llm_call(
AIStructuredResponseGeneratorBlock.Input(
@@ -454,13 +618,10 @@ class AITextSummarizerBlock(Block):
return llm_response["summary"]
def _combine_summaries(self, summaries: list[str], input_data: Input) -> str:
- combined_text = " ".join(summaries)
+ combined_text = "\n\n".join(summaries)
if len(combined_text.split()) <= input_data.max_tokens:
- prompt = (
- "Provide a final, concise summary of the following summaries:\n\n"
- + combined_text
- )
+ prompt = f"Provide a final summary of the following section summaries in a {input_data.style} form, focus your summary on the topic of `{input_data.focus}` if present:\n\n ```{combined_text}```\n\n Just respond with the final_summary in the format specified."
llm_response = self.llm_call(
AIStructuredResponseGeneratorBlock.Input(
@@ -489,17 +650,6 @@ class AITextSummarizerBlock(Block):
] # Get the first yielded value
-class MessageRole(str, Enum):
- SYSTEM = "system"
- USER = "user"
- ASSISTANT = "assistant"
-
-
-class Message(BlockSchema):
- role: MessageRole
- content: str
-
-
class AIConversationBlock(Block):
class Input(BlockSchema):
messages: List[Message] = SchemaField(
@@ -514,9 +664,9 @@ class AIConversationBlock(Block):
value="", description="API key for the chosen language model provider."
)
max_tokens: int | None = SchemaField(
+ advanced=True,
default=None,
description="The maximum number of tokens to generate in the chat completion.",
- ge=1,
)
class Output(BlockSchema):
@@ -527,7 +677,7 @@ class AIConversationBlock(Block):
def __init__(self):
super().__init__(
- id="c3d4e5f6-g7h8-i9j0-k1l2-m3n4o5p6q7r8",
+ id="32a87eab-381e-4dd4-bdb8-4c47151be35a",
description="Advanced LLM call that takes a list of messages and sends them to the language model.",
categories={BlockCategory.AI},
input_schema=AIConversationBlock.Input,
@@ -554,65 +704,253 @@ class AIConversationBlock(Block):
},
)
- @staticmethod
- def llm_call(
- api_key: str,
- model: LlmModel,
- messages: List[dict[str, str]],
- max_tokens: int | None = None,
- ) -> str:
- provider = model.metadata.provider
-
- if provider == "openai":
- openai.api_key = api_key
- response = openai.chat.completions.create(
- model=model.value,
- messages=messages, # type: ignore
- max_tokens=max_tokens,
- )
- return response.choices[0].message.content or ""
- elif provider == "anthropic":
- client = anthropic.Anthropic(api_key=api_key)
- response = client.messages.create(
- model=model.value,
- max_tokens=max_tokens or 4096,
- messages=messages, # type: ignore
- )
- return response.content[0].text if response.content else ""
- elif provider == "groq":
- client = Groq(api_key=api_key)
- response = client.chat.completions.create(
- model=model.value,
- messages=messages, # type: ignore
- max_tokens=max_tokens,
- )
- return response.choices[0].message.content or ""
- elif provider == "ollama":
- response = ollama.chat(
- model=model.value,
- messages=messages, # type: ignore
- stream=False, # type: ignore
- )
- return response["message"]["content"]
- else:
- raise ValueError(f"Unsupported LLM provider: {provider}")
+ def llm_call(self, input_data: AIStructuredResponseGeneratorBlock.Input) -> str:
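+        # Delegate the call to the structured-response block and roll its token stats into this block's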
+ block = AIStructuredResponseGeneratorBlock()
+ response = block.run_once(input_data, "response")
+ self.merge_stats(block.execution_stats)
+ return response["response"]
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- api_key = (
- input_data.api_key.get_secret_value()
- or LlmApiKeys[input_data.model.metadata.provider].get_secret_value()
- )
-
- messages = [message.model_dump() for message in input_data.messages]
-
- response = self.llm_call(
- api_key=api_key,
+ response = self.llm_call(
+ AIStructuredResponseGeneratorBlock.Input(
+ prompt="",
+ api_key=input_data.api_key,
model=input_data.model,
- messages=messages,
+ conversation_history=input_data.messages,
max_tokens=input_data.max_tokens,
+ expected_format={},
)
+ )
- yield "response", response
- except Exception as e:
- yield "error", f"Error calling LLM: {str(e)}"
+ yield "response", response
+
+
+class AIListGeneratorBlock(Block):
+ class Input(BlockSchema):
+ focus: str | None = SchemaField(
+ description="The focus of the list to generate.",
+ placeholder="The top 5 most interesting news stories in the data.",
+ default=None,
+ advanced=False,
+ )
+ source_data: str | None = SchemaField(
+ description="The data to generate the list from.",
+ placeholder="News Today: Humans land on Mars: Today humans landed on mars. -- AI wins Nobel Prize: AI wins Nobel Prize for solving world hunger. -- New AI Model: A new AI model has been released.",
+ default=None,
+ advanced=False,
+ )
+ model: LlmModel = SchemaField(
+ title="LLM Model",
+ default=LlmModel.GPT4_TURBO,
+ description="The language model to use for generating the list.",
+ advanced=True,
+ )
+ api_key: BlockSecret = SecretField(value="")
+ max_retries: int = SchemaField(
+ default=3,
+ description="Maximum number of retries for generating a valid list.",
+ ge=1,
+ le=5,
+ )
+ max_tokens: int | None = SchemaField(
+ advanced=True,
+ default=None,
+ description="The maximum number of tokens to generate in the chat completion.",
+ )
+
+ class Output(BlockSchema):
+ generated_list: List[str] = SchemaField(description="The generated list.")
+ list_item: str = SchemaField(
+ description="Each individual item in the list.",
+ )
+ error: str = SchemaField(
+ description="Error message if the list generation failed."
+ )
+
+ def __init__(self):
+ super().__init__(
+ id="9c0b0450-d199-458b-a731-072189dd6593",
+ description="Generate a Python list based on the given prompt using a Large Language Model (LLM).",
+ categories={BlockCategory.AI, BlockCategory.TEXT},
+ input_schema=AIListGeneratorBlock.Input,
+ output_schema=AIListGeneratorBlock.Output,
+ test_input={
+ "focus": "planets",
+ "source_data": (
+ "Zylora Prime is a glowing jungle world with bioluminescent plants, "
+ "while Kharon-9 is a harsh desert planet with underground cities. "
+ "Vortexia's constant storms power floating cities, and Oceara is a water-covered world home to "
+ "intelligent marine life. On icy Draknos, ancient ruins lie buried beneath its frozen landscape, "
+ "drawing explorers to uncover its mysteries. Each planet showcases the limitless possibilities of "
+ "fictional worlds."
+ ),
+ "model": LlmModel.GPT4_TURBO,
+ "api_key": "test_api_key",
+ "max_retries": 3,
+ },
+ test_output=[
+ (
+ "generated_list",
+ ["Zylora Prime", "Kharon-9", "Vortexia", "Oceara", "Draknos"],
+ ),
+ ("list_item", "Zylora Prime"),
+ ("list_item", "Kharon-9"),
+ ("list_item", "Vortexia"),
+ ("list_item", "Oceara"),
+ ("list_item", "Draknos"),
+ ],
+ test_mock={
+ "llm_call": lambda input_data: {
+ "response": "['Zylora Prime', 'Kharon-9', 'Vortexia', 'Oceara', 'Draknos']"
+ },
+ },
+ )
+
+ @staticmethod
+ def llm_call(
+ input_data: AIStructuredResponseGeneratorBlock.Input,
+ ) -> dict[str, str]:
+ llm_block = AIStructuredResponseGeneratorBlock()
+ response = llm_block.run_once(input_data, "response")
+ return response
+
+ @staticmethod
+ def string_to_list(string):
+ """
+ Converts a string representation of a list into an actual Python list object.
+ """
+ logger.debug(f"Converting string to list. Input string: {string}")
+ try:
+ # Use ast.literal_eval to safely evaluate the string
+ python_list = ast.literal_eval(string)
+ if isinstance(python_list, list):
+ logger.debug(f"Successfully converted string to list: {python_list}")
+ return python_list
+ else:
+ logger.error(f"The provided string '{string}' is not a valid list")
+ raise ValueError(f"The provided string '{string}' is not a valid list.")
+ except (SyntaxError, ValueError) as e:
+ logger.error(f"Failed to convert string to list: {e}")
+ raise ValueError("Invalid list format. Could not convert to list.")
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ logger.debug(f"Starting AIListGeneratorBlock.run with input data: {input_data}")
+
+ # Check for API key
+ api_key_check = (
+ input_data.api_key.get_secret_value()
+ or LlmApiKeys[input_data.model.metadata.provider].get_secret_value()
+ )
+ if not api_key_check:
+ raise ValueError("No LLM API key provided.")
+
+ # Prepare the system prompt
+ sys_prompt = """You are a Python list generator. Your task is to generate a Python list based on the user's prompt.
+ |Respond ONLY with a valid python list.
+ |The list can contain strings, numbers, or nested lists as appropriate.
+ |Do not include any explanations or additional text.
+
+ |Valid Example string formats:
+
+ |Example 1:
+ |```
+ |['1', '2', '3', '4']
+ |```
+
+ |Example 2:
+ |```
+ |[['1', '2'], ['3', '4'], ['5', '6']]
+ |```
+
+ |Example 3:
+ |```
+ |['1', ['2', '3'], ['4', ['5', '6']]]
+ |```
+
+ |Example 4:
+ |```
+ |['a', 'b', 'c']
+ |```
+
+ |Example 5:
+ |```
+ |['1', '2.5', 'string', 'True', ['False', 'None']]
+ |```
+
+ |Do not include any explanations or additional text, just respond with the list in the format specified above.
+ """
+ # If a focus is provided, add it to the prompt
+ if input_data.focus:
+ prompt = f"Generate a list with the following focus:\n\n\n{input_data.focus}"
+ else:
+ # If there's source data
+ if input_data.source_data:
+ prompt = "Extract the main focus of the source data to a list.\ni.e if the source data is a news website, the focus would be the news stories rather than the social links in the footer."
+ else:
+                # No focus or source data provided, generate a random list
+ prompt = "Generate a random list."
+
+ # If the source data is provided, add it to the prompt
+ if input_data.source_data:
+ prompt += f"\n\nUse the following source data to generate the list from:\n\n\n\n{input_data.source_data}\n\nDo not invent fictional data that is not present in the source data."
+ # Else, tell the LLM to synthesize the data
+ else:
+ prompt += "\n\nInvent the data to generate the list from."
+
+ for attempt in range(input_data.max_retries):
+ try:
+ logger.debug("Calling LLM")
+ llm_response = self.llm_call(
+ AIStructuredResponseGeneratorBlock.Input(
+ sys_prompt=sys_prompt,
+ prompt=prompt,
+ api_key=input_data.api_key,
+ model=input_data.model,
+ expected_format={}, # Do not use structured response
+ )
+ )
+
+ logger.debug(f"LLM response: {llm_response}")
+
+ # Extract Response string
+ response_string = llm_response["response"]
+ logger.debug(f"Response string: {response_string}")
+
+ # Convert the string to a Python list
+ logger.debug("Converting string to Python list")
+ parsed_list = self.string_to_list(response_string)
+ logger.debug(f"Parsed list: {parsed_list}")
+
+ # If we reach here, we have a valid Python list
+ logger.debug("Successfully generated a valid Python list")
+ yield "generated_list", parsed_list
+
+ # Yield each item in the list
+ for item in parsed_list:
+ yield "list_item", item
+ return
+
+ except Exception as e:
+ logger.error(f"Error in attempt {attempt + 1}: {str(e)}")
+ if attempt == input_data.max_retries - 1:
+ logger.error(
+ f"Failed to generate a valid Python list after {input_data.max_retries} attempts"
+ )
+ raise RuntimeError(
+ f"Failed to generate a valid Python list after {input_data.max_retries} attempts. Last error: {str(e)}"
+ )
+ else:
+ # Add a retry prompt
+ logger.debug("Preparing retry prompt")
+ prompt = f"""
+ The previous attempt failed due to `{e}`
+ Generate a valid Python list based on the original prompt.
+ Remember to respond ONLY with a valid Python list as per the format specified earlier.
+ Original prompt:
+ ```{prompt}```
+
+ Respond only with the list in the format specified with no commentary or apologies.
+ """
+ logger.debug(f"Retry prompt: {prompt}")
+
+ logger.debug("AIListGeneratorBlock.run completed")
diff --git a/autogpt_platform/backend/backend/blocks/medium.py b/autogpt_platform/backend/backend/blocks/medium.py
index 9ca9b41bf4..1d85e09780 100644
--- a/autogpt_platform/backend/backend/blocks/medium.py
+++ b/autogpt_platform/backend/backend/blocks/medium.py
@@ -1,3 +1,4 @@
+from enum import Enum
from typing import List
import requests
@@ -6,6 +7,12 @@ from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
+class PublishToMediumStatus(str, Enum):
+ PUBLIC = "public"
+ DRAFT = "draft"
+ UNLISTED = "unlisted"
+
+
class PublishToMediumBlock(Block):
class Input(BlockSchema):
author_id: BlockSecret = SecretField(
@@ -34,9 +41,9 @@ class PublishToMediumBlock(Block):
description="The original home of this content, if it was originally published elsewhere",
placeholder="https://yourblog.com/original-post",
)
- publish_status: str = SchemaField(
- description="The publish status: 'public', 'draft', or 'unlisted'",
- placeholder="public",
+ publish_status: PublishToMediumStatus = SchemaField(
+ description="The publish status",
+ placeholder=PublishToMediumStatus.DRAFT,
)
license: str = SchemaField(
default="all-rights-reserved",
@@ -79,7 +86,7 @@ class PublishToMediumBlock(Block):
"tags": ["test", "automation"],
"license": "all-rights-reserved",
"notify_followers": False,
- "publish_status": "draft",
+ "publish_status": PublishToMediumStatus.DRAFT.value,
"api_key": "your_test_api_key",
},
test_output=[
@@ -138,31 +145,25 @@ class PublishToMediumBlock(Block):
return response.json()
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- response = self.create_post(
- input_data.api_key.get_secret_value(),
- input_data.author_id.get_secret_value(),
- input_data.title,
- input_data.content,
- input_data.content_format,
- input_data.tags,
- input_data.canonical_url,
- input_data.publish_status,
- input_data.license,
- input_data.notify_followers,
+ response = self.create_post(
+ input_data.api_key.get_secret_value(),
+ input_data.author_id.get_secret_value(),
+ input_data.title,
+ input_data.content,
+ input_data.content_format,
+ input_data.tags,
+ input_data.canonical_url,
+ input_data.publish_status,
+ input_data.license,
+ input_data.notify_followers,
+ )
+
+ if "data" in response:
+ yield "post_id", response["data"]["id"]
+ yield "post_url", response["data"]["url"]
+ yield "published_at", response["data"]["publishedAt"]
+ else:
+ error_message = response.get("errors", [{}])[0].get(
+ "message", "Unknown error occurred"
)
-
- if "data" in response:
- yield "post_id", response["data"]["id"]
- yield "post_url", response["data"]["url"]
- yield "published_at", response["data"]["publishedAt"]
- else:
- error_message = response.get("errors", [{}])[0].get(
- "message", "Unknown error occurred"
- )
- yield "error", f"Failed to create Medium post: {error_message}"
-
- except requests.RequestException as e:
- yield "error", f"Network error occurred while creating Medium post: {str(e)}"
- except Exception as e:
- yield "error", f"Error occurred while creating Medium post: {str(e)}"
+ raise RuntimeError(f"Failed to create Medium post: {error_message}")
diff --git a/autogpt_platform/backend/backend/blocks/pinecone.py b/autogpt_platform/backend/backend/blocks/pinecone.py
new file mode 100644
index 0000000000..91364fce92
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/pinecone.py
@@ -0,0 +1,131 @@
+from typing import Literal
+
+from autogpt_libs.supabase_integration_credentials_store import APIKeyCredentials
+from pinecone import Pinecone, ServerlessSpec
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
+
+PineconeCredentials = APIKeyCredentials
+PineconeCredentialsInput = CredentialsMetaInput[
+ Literal["pinecone"],
+ Literal["api_key"],
+]
+
+
+def PineconeCredentialsField() -> PineconeCredentialsInput:
+ """
+ Creates a Pinecone credentials input on a block.
+    """
+ return CredentialsField(
+ provider="pinecone",
+ supported_credential_types={"api_key"},
+ description="The Pinecone integration can be used with an API Key.",
+ )
+
+
+class PineconeInitBlock(Block):
+ class Input(BlockSchema):
+ credentials: PineconeCredentialsInput = PineconeCredentialsField()
+ index_name: str = SchemaField(description="Name of the Pinecone index")
+ dimension: int = SchemaField(
+ description="Dimension of the vectors", default=768
+ )
+ metric: str = SchemaField(
+ description="Distance metric for the index", default="cosine"
+ )
+ cloud: str = SchemaField(
+ description="Cloud provider for serverless", default="aws"
+ )
+ region: str = SchemaField(
+ description="Region for serverless", default="us-east-1"
+ )
+
+ class Output(BlockSchema):
+ index: str = SchemaField(description="Name of the initialized Pinecone index")
+ message: str = SchemaField(description="Status message")
+
+ def __init__(self):
+ super().__init__(
+ id="48d8fdab-8f03-41f3-8407-8107ba11ec9b",
+ description="Initializes a Pinecone index",
+ categories={BlockCategory.LOGIC},
+ input_schema=PineconeInitBlock.Input,
+ output_schema=PineconeInitBlock.Output,
+ )
+
+ def run(
+ self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+ ) -> BlockOutput:
+ pc = Pinecone(api_key=credentials.api_key.get_secret_value())
+
+ try:
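+            # Create the index only if one with this name does not already exist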
+ existing_indexes = pc.list_indexes()
+ if input_data.index_name not in [index.name for index in existing_indexes]:
+ pc.create_index(
+ name=input_data.index_name,
+ dimension=input_data.dimension,
+ metric=input_data.metric,
+ spec=ServerlessSpec(
+ cloud=input_data.cloud, region=input_data.region
+ ),
+ )
+ message = f"Created new index: {input_data.index_name}"
+ else:
+ message = f"Using existing index: {input_data.index_name}"
+
+ yield "index", input_data.index_name
+ yield "message", message
+ except Exception as e:
+ yield "message", f"Error initializing Pinecone index: {str(e)}"
+
+
+class PineconeQueryBlock(Block):
+ class Input(BlockSchema):
+ credentials: PineconeCredentialsInput = PineconeCredentialsField()
+ query_vector: list = SchemaField(description="Query vector")
+ namespace: str = SchemaField(
+ description="Namespace to query in Pinecone", default=""
+ )
+ top_k: int = SchemaField(
+ description="Number of top results to return", default=3
+ )
+ include_values: bool = SchemaField(
+ description="Whether to include vector values in the response",
+ default=False,
+ )
+ include_metadata: bool = SchemaField(
+ description="Whether to include metadata in the response", default=True
+ )
+ host: str = SchemaField(description="Host for pinecone")
+
+ class Output(BlockSchema):
+ results: dict = SchemaField(description="Query results from Pinecone")
+
+ def __init__(self):
+ super().__init__(
+ id="9ad93d0f-91b4-4c9c-8eb1-82e26b4a01c5",
+ description="Queries a Pinecone index",
+ categories={BlockCategory.LOGIC},
+ input_schema=PineconeQueryBlock.Input,
+ output_schema=PineconeQueryBlock.Output,
+ )
+
+ def run(
+ self,
+ input_data: Input,
+ *,
+ credentials: APIKeyCredentials,
+ **kwargs,
+ ) -> BlockOutput:
+ pc = Pinecone(api_key=credentials.api_key.get_secret_value())
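+        # Connect to the index by its host URL and run a similarity search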
+ idx = pc.Index(host=input_data.host)
+ results = idx.query(
+ namespace=input_data.namespace,
+ vector=input_data.query_vector,
+ top_k=input_data.top_k,
+ include_values=input_data.include_values,
+ include_metadata=input_data.include_metadata,
+ )
+ yield "results", results
diff --git a/autogpt_platform/backend/backend/blocks/reddit.py b/autogpt_platform/backend/backend/blocks/reddit.py
index 065436ae73..9e4f3f3aca 100644
--- a/autogpt_platform/backend/backend/blocks/reddit.py
+++ b/autogpt_platform/backend/backend/blocks/reddit.py
@@ -2,10 +2,10 @@ from datetime import datetime, timezone
from typing import Iterator
import praw
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SecretField
+from backend.data.model import BlockSecret, SchemaField, SecretField
from backend.util.mock import MockObject
@@ -48,25 +48,25 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit:
class GetRedditPostsBlock(Block):
class Input(BlockSchema):
- subreddit: str = Field(description="Subreddit name")
- creds: RedditCredentials = Field(
+ subreddit: str = SchemaField(description="Subreddit name")
+ creds: RedditCredentials = SchemaField(
description="Reddit credentials",
default=RedditCredentials(),
)
- last_minutes: int | None = Field(
+ last_minutes: int | None = SchemaField(
description="Post time to stop minutes ago while fetching posts",
default=None,
)
- last_post: str | None = Field(
+ last_post: str | None = SchemaField(
description="Post ID to stop when reached while fetching posts",
default=None,
)
- post_limit: int | None = Field(
+ post_limit: int | None = SchemaField(
description="Number of posts to fetch", default=10
)
class Output(BlockSchema):
- post: RedditPost = Field(description="Reddit post")
+ post: RedditPost = SchemaField(description="Reddit post")
def __init__(self):
super().__init__(
@@ -140,13 +140,13 @@ class GetRedditPostsBlock(Block):
class PostRedditCommentBlock(Block):
class Input(BlockSchema):
- creds: RedditCredentials = Field(
+ creds: RedditCredentials = SchemaField(
description="Reddit credentials", default=RedditCredentials()
)
- data: RedditComment = Field(description="Reddit comment")
+ data: RedditComment = SchemaField(description="Reddit comment")
class Output(BlockSchema):
- comment_id: str
+ comment_id: str = SchemaField(description="Posted comment ID")
def __init__(self):
super().__init__(
diff --git a/autogpt_platform/backend/backend/blocks/replicate_flux_advanced.py b/autogpt_platform/backend/backend/blocks/replicate_flux_advanced.py
new file mode 100644
index 0000000000..38abc8da20
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/replicate_flux_advanced.py
@@ -0,0 +1,201 @@
+import os
+from enum import Enum
+
+import replicate
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField
+
+
+# Model name enum
+class ReplicateFluxModelName(str, Enum):
+ FLUX_SCHNELL = ("Flux Schnell",)
+ FLUX_PRO = ("Flux Pro",)
+ FLUX_PRO1_1 = ("Flux Pro 1.1",)
+
+ @property
+ def api_name(self):
+ api_names = {
+ ReplicateFluxModelName.FLUX_SCHNELL: "black-forest-labs/flux-schnell",
+ ReplicateFluxModelName.FLUX_PRO: "black-forest-labs/flux-pro",
+ ReplicateFluxModelName.FLUX_PRO1_1: "black-forest-labs/flux-1.1-pro",
+ }
+ return api_names[self]
+
+
+# Image type Enum
+class ImageType(str, Enum):
+ WEBP = "webp"
+ JPG = "jpg"
+ PNG = "png"
+
+
+class ReplicateFluxAdvancedModelBlock(Block):
+ class Input(BlockSchema):
+ api_key: BlockSecret = SecretField(
+ key="replicate_api_key",
+ description="Replicate API Key",
+ )
+ prompt: str = SchemaField(
+ description="Text prompt for image generation",
+ placeholder="e.g., 'A futuristic cityscape at sunset'",
+ title="Prompt",
+ )
+ replicate_model_name: ReplicateFluxModelName = SchemaField(
+ description="The name of the Image Generation Model, i.e Flux Schnell",
+ default=ReplicateFluxModelName.FLUX_SCHNELL,
+ title="Image Generation Model",
+ advanced=False,
+ )
+ seed: int | None = SchemaField(
+ description="Random seed. Set for reproducible generation",
+ default=None,
+ title="Seed",
+ )
+ steps: int = SchemaField(
+ description="Number of diffusion steps",
+ default=25,
+ title="Steps",
+ )
+ guidance: float = SchemaField(
+ description=(
+ "Controls the balance between adherence to the text prompt and image quality/diversity. "
+ "Higher values make the output more closely match the prompt but may reduce overall image quality."
+ ),
+ default=3,
+ title="Guidance",
+ )
+ interval: float = SchemaField(
+ description=(
+ "Interval is a setting that increases the variance in possible outputs. "
+ "Setting this value low will ensure strong prompt following with more consistent outputs."
+ ),
+ default=2,
+ title="Interval",
+ )
+ aspect_ratio: str = SchemaField(
+ description="Aspect ratio for the generated image",
+ default="1:1",
+ title="Aspect Ratio",
+ placeholder="Choose from: 1:1, 16:9, 2:3, 3:2, 4:5, 5:4, 9:16",
+ )
+ output_format: ImageType = SchemaField(
+ description="File format of the output image",
+ default=ImageType.WEBP,
+ title="Output Format",
+ )
+ output_quality: int = SchemaField(
+ description=(
+ "Quality when saving the output images, from 0 to 100. "
+ "Not relevant for .png outputs"
+ ),
+ default=80,
+ title="Output Quality",
+ )
+ safety_tolerance: int = SchemaField(
+ description="Safety tolerance, 1 is most strict and 5 is most permissive",
+ default=2,
+ title="Safety Tolerance",
+ )
+
+ class Output(BlockSchema):
+ result: str = SchemaField(description="Generated output")
+ error: str = SchemaField(description="Error message if the model run failed")
+
+ def __init__(self):
+ super().__init__(
+ id="90f8c45e-e983-4644-aa0b-b4ebe2f531bc",
+ description="This block runs Flux models on Replicate with advanced settings.",
+ categories={BlockCategory.AI},
+ input_schema=ReplicateFluxAdvancedModelBlock.Input,
+ output_schema=ReplicateFluxAdvancedModelBlock.Output,
+ test_input={
+ "api_key": "test_api_key",
+ "replicate_model_name": ReplicateFluxModelName.FLUX_SCHNELL,
+ "prompt": "A beautiful landscape painting of a serene lake at sunrise",
+ "seed": None,
+ "steps": 25,
+ "guidance": 3.0,
+ "interval": 2.0,
+ "aspect_ratio": "1:1",
+ "output_format": ImageType.PNG,
+ "output_quality": 80,
+ "safety_tolerance": 2,
+ },
+ test_output=[
+ (
+ "result",
+ "https://replicate.com/output/generated-image-url.jpg",
+ ),
+ ],
+ test_mock={
+ "run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: "https://replicate.com/output/generated-image-url.jpg",
+ },
+ )
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ # If the seed is not provided, generate a random seed
+ seed = input_data.seed
+ if seed is None:
+ seed = int.from_bytes(os.urandom(4), "big")
+
+ # Run the model using the provided inputs
+ result = self.run_model(
+ api_key=input_data.api_key.get_secret_value(),
+ model_name=input_data.replicate_model_name.api_name,
+ prompt=input_data.prompt,
+ seed=seed,
+ steps=input_data.steps,
+ guidance=input_data.guidance,
+ interval=input_data.interval,
+ aspect_ratio=input_data.aspect_ratio,
+ output_format=input_data.output_format,
+ output_quality=input_data.output_quality,
+ safety_tolerance=input_data.safety_tolerance,
+ )
+ yield "result", result
+
+ def run_model(
+ self,
+ api_key,
+ model_name,
+ prompt,
+ seed,
+ steps,
+ guidance,
+ interval,
+ aspect_ratio,
+ output_format,
+ output_quality,
+ safety_tolerance,
+ ):
+ # Initialize Replicate client with the API key
+ client = replicate.Client(api_token=api_key)
+
+ # Run the model with additional parameters
+ output = client.run(
+ f"{model_name}",
+ input={
+ "prompt": prompt,
+ "seed": seed,
+ "steps": steps,
+ "guidance": guidance,
+ "interval": interval,
+ "aspect_ratio": aspect_ratio,
+ "output_format": output_format,
+ "output_quality": output_quality,
+ "safety_tolerance": safety_tolerance,
+ },
+ )
+
+ # Check if output is a list or a string and extract accordingly; otherwise, assign a default message
+ if isinstance(output, list) and len(output) > 0:
+ result_url = output[0] # If output is a list, get the first element
+ elif isinstance(output, str):
+ result_url = output # If output is a string, use it directly
+ else:
+ result_url = (
+ "No output received" # Fallback message if output is not as expected
+ )
+
+ return result_url
diff --git a/autogpt_platform/backend/backend/blocks/rss.py b/autogpt_platform/backend/backend/blocks/rss.py
index 3dd570385b..9a5a17ebee 100644
--- a/autogpt_platform/backend/backend/blocks/rss.py
+++ b/autogpt_platform/backend/backend/blocks/rss.py
@@ -43,7 +43,7 @@ class ReadRSSFeedBlock(Block):
def __init__(self):
super().__init__(
- id="c6731acb-4105-4zp1-bc9b-03d0036h370g",
+ id="5ebe6768-8e5d-41e3-9134-1c7bd89a8d52",
input_schema=ReadRSSFeedBlock.Input,
output_schema=ReadRSSFeedBlock.Output,
description="Reads RSS feed entries from a given URL.",
diff --git a/autogpt_platform/backend/backend/blocks/search.py b/autogpt_platform/backend/backend/blocks/search.py
index 7414ca2f8a..27a4322ce6 100644
--- a/autogpt_platform/backend/backend/blocks/search.py
+++ b/autogpt_platform/backend/backend/blocks/search.py
@@ -4,7 +4,7 @@ from urllib.parse import quote
import requests
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SecretField
+from backend.data.model import BlockSecret, SchemaField, SecretField
class GetRequest:
@@ -17,15 +17,17 @@ class GetRequest:
class GetWikipediaSummaryBlock(Block, GetRequest):
class Input(BlockSchema):
- topic: str
+ topic: str = SchemaField(description="The topic to fetch the summary for")
class Output(BlockSchema):
- summary: str
- error: str
+ summary: str = SchemaField(description="The summary of the given topic")
+ error: str = SchemaField(
+ description="Error message if the summary cannot be retrieved"
+ )
def __init__(self):
super().__init__(
- id="h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m",
+ id="f5b0f5d0-1862-4d61-94be-3ad0fa772760",
description="This block fetches the summary of a given topic from Wikipedia.",
categories={BlockCategory.SEARCH},
input_schema=GetWikipediaSummaryBlock.Input,
@@ -36,33 +38,27 @@ class GetWikipediaSummaryBlock(Block, GetRequest):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- topic = input_data.topic
- url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
- response = self.get_request(url, json=True)
- yield "summary", response["extract"]
-
- except requests.exceptions.HTTPError as http_err:
- yield "error", f"HTTP error occurred: {http_err}"
-
- except requests.RequestException as e:
- yield "error", f"Request to Wikipedia failed: {e}"
-
- except KeyError as e:
- yield "error", f"Error parsing Wikipedia response: {e}"
+ topic = input_data.topic
+ url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
+ response = self.get_request(url, json=True)
+ if "extract" not in response:
+ raise RuntimeError(f"Unable to parse Wikipedia response: {response}")
+ yield "summary", response["extract"]
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
- query: str # The search query
+ query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
- results: str # The search results including content from top 5 URLs
- error: str # Error message if the search fails
+ results: str = SchemaField(
+ description="The search results including content from top 5 URLs"
+ )
+ error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
- id="b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7",
+ id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
@@ -73,37 +69,38 @@ class SearchTheWebBlock(Block, GetRequest):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- # Encode the search query
- encoded_query = quote(input_data.query)
+ # Encode the search query
+ encoded_query = quote(input_data.query)
- # Prepend the Jina Search URL to the encoded query
- jina_search_url = f"https://s.jina.ai/{encoded_query}"
+ # Prepend the Jina Search URL to the encoded query
+ jina_search_url = f"https://s.jina.ai/{encoded_query}"
- # Make the request to Jina Search
- response = self.get_request(jina_search_url, json=False)
+ # Make the request to Jina Search
+ response = self.get_request(jina_search_url, json=False)
- # Output the search results
- yield "results", response
-
- except requests.exceptions.HTTPError as http_err:
- yield "error", f"HTTP error occurred: {http_err}"
-
- except requests.RequestException as e:
- yield "error", f"Request to Jina Search failed: {e}"
+ # Output the search results
+ yield "results", response
class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
- url: str # The URL to scrape
+ url: str = SchemaField(description="The URL to scrape the content from")
+ raw_content: bool = SchemaField(
+ default=False,
+ title="Raw Content",
+ description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
+ advanced=True,
+ )
class Output(BlockSchema):
- content: str # The scraped content from the URL
- error: str
+ content: str = SchemaField(description="The scraped content from the given URL")
+ error: str = SchemaField(
+ description="Error message if the content cannot be retrieved"
+ )
def __init__(self):
super().__init__(
- id="a1b2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6", # Unique ID for the block
+ id="436c3984-57fd-4b85-8e9a-459b356883bd",
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=ExtractWebsiteContentBlock.Input,
@@ -114,34 +111,37 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- # Prepend the Jina-ai Reader URL to the input URL
- jina_url = f"https://r.jina.ai/{input_data.url}"
+ if input_data.raw_content:
+ url = input_data.url
+ else:
+ url = f"https://r.jina.ai/{input_data.url}"
- # Make the request to Jina-ai Reader
- response = self.get_request(jina_url, json=False)
-
- # Output the scraped content
- yield "content", response
-
- except requests.exceptions.HTTPError as http_err:
- yield "error", f"HTTP error occurred: {http_err}"
-
- except requests.RequestException as e:
- yield "error", f"Request to Jina-ai Reader failed: {e}"
+ content = self.get_request(url, json=False)
+ yield "content", content
class GetWeatherInformationBlock(Block, GetRequest):
class Input(BlockSchema):
- location: str
+ location: str = SchemaField(
+ description="Location to get weather information for"
+ )
api_key: BlockSecret = SecretField(key="openweathermap_api_key")
- use_celsius: bool = True
+ use_celsius: bool = SchemaField(
+ default=True,
+ description="Whether to use Celsius or Fahrenheit for temperature",
+ )
class Output(BlockSchema):
- temperature: str
- humidity: str
- condition: str
- error: str
+ temperature: str = SchemaField(
+ description="Temperature in the specified location"
+ )
+ humidity: str = SchemaField(description="Humidity in the specified location")
+ condition: str = SchemaField(
+ description="Weather condition in the specified location"
+ )
+ error: str = SchemaField(
+ description="Error message if the weather information cannot be retrieved"
+ )
def __init__(self):
super().__init__(
@@ -168,26 +168,15 @@ class GetWeatherInformationBlock(Block, GetRequest):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- units = "metric" if input_data.use_celsius else "imperial"
- api_key = input_data.api_key.get_secret_value()
- location = input_data.location
- url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
- weather_data = self.get_request(url, json=True)
+ units = "metric" if input_data.use_celsius else "imperial"
+ api_key = input_data.api_key.get_secret_value()
+ location = input_data.location
+ url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
+ weather_data = self.get_request(url, json=True)
- if "main" in weather_data and "weather" in weather_data:
- yield "temperature", str(weather_data["main"]["temp"])
- yield "humidity", str(weather_data["main"]["humidity"])
- yield "condition", weather_data["weather"][0]["description"]
- else:
- yield "error", f"Expected keys not found in response: {weather_data}"
-
- except requests.exceptions.HTTPError as http_err:
- if http_err.response.status_code == 403:
- yield "error", "Request to weather API failed: 403 Forbidden. Check your API key and permissions."
- else:
- yield "error", f"HTTP error occurred: {http_err}"
- except requests.RequestException as e:
- yield "error", f"Request to weather API failed: {e}"
- except KeyError as e:
- yield "error", f"Error processing weather data: {e}"
+ if "main" in weather_data and "weather" in weather_data:
+ yield "temperature", str(weather_data["main"]["temp"])
+ yield "humidity", str(weather_data["main"]["humidity"])
+ yield "condition", weather_data["weather"][0]["description"]
+ else:
+ raise RuntimeError(f"Expected keys not found in response: {weather_data}")
diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py
index e1851ae030..f4497d85ff 100644
--- a/autogpt_platform/backend/backend/blocks/talking_head.py
+++ b/autogpt_platform/backend/backend/blocks/talking_head.py
@@ -13,7 +13,8 @@ class CreateTalkingAvatarVideoBlock(Block):
key="did_api_key", description="D-ID API Key"
)
script_input: str = SchemaField(
- description="The text input for the script", default="Welcome to AutoGPT"
+ description="The text input for the script",
+ placeholder="Welcome to AutoGPT",
)
provider: Literal["microsoft", "elevenlabs", "amazon"] = SchemaField(
description="The voice provider to use", default="microsoft"
@@ -106,41 +107,40 @@ class CreateTalkingAvatarVideoBlock(Block):
return response.json()
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- # Create the clip
- payload = {
- "script": {
- "type": "text",
- "subtitles": str(input_data.subtitles).lower(),
- "provider": {
- "type": input_data.provider,
- "voice_id": input_data.voice_id,
- },
- "ssml": str(input_data.ssml).lower(),
- "input": input_data.script_input,
+ # Create the clip
+ payload = {
+ "script": {
+ "type": "text",
+ "subtitles": str(input_data.subtitles).lower(),
+ "provider": {
+ "type": input_data.provider,
+ "voice_id": input_data.voice_id,
},
- "config": {"result_format": input_data.result_format},
- "presenter_config": {"crop": {"type": input_data.crop_type}},
- "presenter_id": input_data.presenter_id,
- "driver_id": input_data.driver_id,
- }
+ "ssml": str(input_data.ssml).lower(),
+ "input": input_data.script_input,
+ },
+ "config": {"result_format": input_data.result_format},
+ "presenter_config": {"crop": {"type": input_data.crop_type}},
+ "presenter_id": input_data.presenter_id,
+ "driver_id": input_data.driver_id,
+ }
- response = self.create_clip(input_data.api_key.get_secret_value(), payload)
- clip_id = response["id"]
+ response = self.create_clip(input_data.api_key.get_secret_value(), payload)
+ clip_id = response["id"]
- # Poll for clip status
- for _ in range(input_data.max_polling_attempts):
- status_response = self.get_clip_status(
- input_data.api_key.get_secret_value(), clip_id
+ # Poll for clip status
+ for _ in range(input_data.max_polling_attempts):
+ status_response = self.get_clip_status(
+ input_data.api_key.get_secret_value(), clip_id
+ )
+ if status_response["status"] == "done":
+ yield "video_url", status_response["result_url"]
+ return
+ elif status_response["status"] == "error":
+ raise RuntimeError(
+ f"Clip creation failed: {status_response.get('error', 'Unknown error')}"
)
- if status_response["status"] == "done":
- yield "video_url", status_response["result_url"]
- return
- elif status_response["status"] == "error":
- yield "error", f"Clip creation failed: {status_response.get('error', 'Unknown error')}"
- return
- time.sleep(input_data.polling_interval)
- yield "error", "Clip creation timed out"
- except Exception as e:
- yield "error", str(e)
+ time.sleep(input_data.polling_interval)
+
+ raise TimeoutError("Clip creation timed out")
diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py
index da287b94fa..1d8c050fa1 100644
--- a/autogpt_platform/backend/backend/blocks/text.py
+++ b/autogpt_platform/backend/backend/blocks/text.py
@@ -2,9 +2,9 @@ import re
from typing import Any
from jinja2 import BaseLoader, Environment
-from pydantic import Field
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
from backend.util import json
jinja = Environment(loader=BaseLoader())
@@ -12,15 +12,17 @@ jinja = Environment(loader=BaseLoader())
class MatchTextPatternBlock(Block):
class Input(BlockSchema):
- text: Any = Field(description="Text to match")
- match: str = Field(description="Pattern (Regex) to match")
- data: Any = Field(description="Data to be forwarded to output")
- case_sensitive: bool = Field(description="Case sensitive match", default=True)
- dot_all: bool = Field(description="Dot matches all", default=True)
+ text: Any = SchemaField(description="Text to match")
+ match: str = SchemaField(description="Pattern (Regex) to match")
+ data: Any = SchemaField(description="Data to be forwarded to output")
+ case_sensitive: bool = SchemaField(
+ description="Case sensitive match", default=True
+ )
+ dot_all: bool = SchemaField(description="Dot matches all", default=True)
class Output(BlockSchema):
- positive: Any = Field(description="Output data if match is found")
- negative: Any = Field(description="Output data if match is not found")
+ positive: Any = SchemaField(description="Output data if match is found")
+ negative: Any = SchemaField(description="Output data if match is not found")
def __init__(self):
super().__init__(
@@ -64,15 +66,17 @@ class MatchTextPatternBlock(Block):
class ExtractTextInformationBlock(Block):
class Input(BlockSchema):
- text: Any = Field(description="Text to parse")
- pattern: str = Field(description="Pattern (Regex) to parse")
- group: int = Field(description="Group number to extract", default=0)
- case_sensitive: bool = Field(description="Case sensitive match", default=True)
- dot_all: bool = Field(description="Dot matches all", default=True)
+ text: Any = SchemaField(description="Text to parse")
+ pattern: str = SchemaField(description="Pattern (Regex) to parse")
+ group: int = SchemaField(description="Group number to extract", default=0)
+ case_sensitive: bool = SchemaField(
+ description="Case sensitive match", default=True
+ )
+ dot_all: bool = SchemaField(description="Dot matches all", default=True)
class Output(BlockSchema):
- positive: str = Field(description="Extracted text")
- negative: str = Field(description="Original text")
+ positive: str = SchemaField(description="Extracted text")
+ negative: str = SchemaField(description="Original text")
def __init__(self):
super().__init__(
@@ -116,11 +120,15 @@ class ExtractTextInformationBlock(Block):
class FillTextTemplateBlock(Block):
class Input(BlockSchema):
- values: dict[str, Any] = Field(description="Values (dict) to be used in format")
- format: str = Field(description="Template to format the text using `values`")
+ values: dict[str, Any] = SchemaField(
+ description="Values (dict) to be used in format"
+ )
+ format: str = SchemaField(
+ description="Template to format the text using `values`"
+ )
class Output(BlockSchema):
- output: str
+ output: str = SchemaField(description="Formatted text")
def __init__(self):
super().__init__(
@@ -155,11 +163,13 @@ class FillTextTemplateBlock(Block):
class CombineTextsBlock(Block):
class Input(BlockSchema):
- input: list[str] = Field(description="text input to combine")
- delimiter: str = Field(description="Delimiter to combine texts", default="")
+ input: list[str] = SchemaField(description="text input to combine")
+ delimiter: str = SchemaField(
+ description="Delimiter to combine texts", default=""
+ )
class Output(BlockSchema):
- output: str = Field(description="Combined text")
+ output: str = SchemaField(description="Combined text")
def __init__(self):
super().__init__(
diff --git a/autogpt_platform/backend/backend/blocks/text_to_speech_block.py b/autogpt_platform/backend/backend/blocks/text_to_speech_block.py
new file mode 100644
index 0000000000..4141276340
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/text_to_speech_block.py
@@ -0,0 +1,76 @@
+from typing import Any
+
+import requests
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import BlockSecret, SchemaField, SecretField
+
+
+class UnrealTextToSpeechBlock(Block):
+ class Input(BlockSchema):
+ text: str = SchemaField(
+ description="The text to be converted to speech",
+ placeholder="Enter the text you want to convert to speech",
+ )
+ voice_id: str = SchemaField(
+ description="The voice ID to use for text-to-speech conversion",
+ placeholder="Scarlett",
+ default="Scarlett",
+ )
+ api_key: BlockSecret = SecretField(
+ key="unreal_speech_api_key", description="Your Unreal Speech API key"
+ )
+
+ class Output(BlockSchema):
+ mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
+ error: str = SchemaField(description="Error message if the API call failed")
+
+ def __init__(self):
+ super().__init__(
+ id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
+ description="Converts text to speech using the Unreal Speech API",
+ categories={BlockCategory.AI, BlockCategory.TEXT},
+ input_schema=UnrealTextToSpeechBlock.Input,
+ output_schema=UnrealTextToSpeechBlock.Output,
+ test_input={
+ "text": "This is a test of the text to speech API.",
+ "voice_id": "Scarlett",
+ "api_key": "test_api_key",
+ },
+ test_output=[("mp3_url", "https://example.com/test.mp3")],
+ test_mock={
+ "call_unreal_speech_api": lambda *args, **kwargs: {
+ "OutputUri": "https://example.com/test.mp3"
+ }
+ },
+ )
+
+ @staticmethod
+ def call_unreal_speech_api(
+ api_key: str, text: str, voice_id: str
+ ) -> dict[str, Any]:
+ url = "https://api.v7.unrealspeech.com/speech"
+ headers = {
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ }
+ data = {
+ "Text": text,
+ "VoiceId": voice_id,
+ "Bitrate": "192k",
+ "Speed": "0",
+ "Pitch": "1",
+ "TimestampType": "sentence",
+ }
+
+ response = requests.post(url, headers=headers, json=data)
+ response.raise_for_status()
+ return response.json()
+
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ api_response = self.call_unreal_speech_api(
+ input_data.api_key.get_secret_value(),
+ input_data.text,
+ input_data.voice_id,
+ )
+ yield "mp3_url", api_response["OutputUri"]
diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py
index 338ee88a42..eb886b5352 100644
--- a/autogpt_platform/backend/backend/blocks/time_blocks.py
+++ b/autogpt_platform/backend/backend/blocks/time_blocks.py
@@ -3,14 +3,22 @@ from datetime import datetime, timedelta
from typing import Any, Union
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
class GetCurrentTimeBlock(Block):
class Input(BlockSchema):
- trigger: str
+ trigger: str = SchemaField(
+ description="Trigger any data to output the current time"
+ )
+ format: str = SchemaField(
+ description="Format of the time to output", default="%H:%M:%S"
+ )
class Output(BlockSchema):
- time: str
+ time: str = SchemaField(
+ description="Current time in the specified format (default: %H:%M:%S)"
+ )
def __init__(self):
super().__init__(
@@ -20,25 +28,38 @@ class GetCurrentTimeBlock(Block):
input_schema=GetCurrentTimeBlock.Input,
output_schema=GetCurrentTimeBlock.Output,
test_input=[
- {"trigger": "Hello", "format": "{time}"},
+ {"trigger": "Hello"},
+ {"trigger": "Hello", "format": "%H:%M"},
],
test_output=[
("time", lambda _: time.strftime("%H:%M:%S")),
+ ("time", lambda _: time.strftime("%H:%M")),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- current_time = time.strftime("%H:%M:%S")
+ current_time = time.strftime(input_data.format)
yield "time", current_time
class GetCurrentDateBlock(Block):
class Input(BlockSchema):
- trigger: str
- offset: Union[int, str]
+ trigger: str = SchemaField(
+ description="Trigger any data to output the current date"
+ )
+ offset: Union[int, str] = SchemaField(
+ title="Days Offset",
+ description="Offset in days from the current date",
+ default=0,
+ )
+ format: str = SchemaField(
+ description="Format of the date to output", default="%Y-%m-%d"
+ )
class Output(BlockSchema):
- date: str
+ date: str = SchemaField(
+ description="Current date in the specified format (default: YYYY-MM-DD)"
+ )
def __init__(self):
super().__init__(
@@ -48,7 +69,8 @@ class GetCurrentDateBlock(Block):
input_schema=GetCurrentDateBlock.Input,
output_schema=GetCurrentDateBlock.Output,
test_input=[
- {"trigger": "Hello", "format": "{date}", "offset": "7"},
+ {"trigger": "Hello", "offset": "7"},
+ {"trigger": "Hello", "offset": "7", "format": "%m/%d/%Y"},
],
test_output=[
(
@@ -56,6 +78,12 @@ class GetCurrentDateBlock(Block):
lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d"))
< timedelta(days=8), # 7 days difference + 1 day error margin.
),
+ (
+ "date",
+ lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y"))
+ < timedelta(days=8),
+ # 7 days difference + 1 day error margin.
+ ),
],
)
@@ -65,25 +93,33 @@ class GetCurrentDateBlock(Block):
except ValueError:
offset = 0
current_date = datetime.now() - timedelta(days=offset)
- yield "date", current_date.strftime("%Y-%m-%d")
+ yield "date", current_date.strftime(input_data.format)
class GetCurrentDateAndTimeBlock(Block):
class Input(BlockSchema):
- trigger: str
+ trigger: str = SchemaField(
+ description="Trigger any data to output the current date and time"
+ )
+ format: str = SchemaField(
+ description="Format of the date and time to output",
+ default="%Y-%m-%d %H:%M:%S",
+ )
class Output(BlockSchema):
- date_time: str
+ date_time: str = SchemaField(
+ description="Current date and time in the specified format (default: YYYY-MM-DD HH:MM:SS)"
+ )
def __init__(self):
super().__init__(
- id="b29c1b50-5d0e-4d9f-8f9d-1b0e6fcbf0h2",
+ id="716a67b3-6760-42e7-86dc-18645c6e00fc",
description="This block outputs the current date and time.",
categories={BlockCategory.TEXT},
input_schema=GetCurrentDateAndTimeBlock.Input,
output_schema=GetCurrentDateAndTimeBlock.Output,
test_input=[
- {"trigger": "Hello", "format": "{date_time}"},
+ {"trigger": "Hello"},
],
test_output=[
(
@@ -97,20 +133,29 @@ class GetCurrentDateAndTimeBlock(Block):
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- current_date_time = time.strftime("%Y-%m-%d %H:%M:%S")
+ current_date_time = time.strftime(input_data.format)
yield "date_time", current_date_time
class CountdownTimerBlock(Block):
class Input(BlockSchema):
- input_message: Any = "timer finished"
- seconds: Union[int, str] = 0
- minutes: Union[int, str] = 0
- hours: Union[int, str] = 0
- days: Union[int, str] = 0
+ input_message: Any = SchemaField(
+ description="Message to output after the timer finishes",
+ default="timer finished",
+ )
+ seconds: Union[int, str] = SchemaField(
+ description="Duration in seconds", default=0
+ )
+ minutes: Union[int, str] = SchemaField(
+ description="Duration in minutes", default=0
+ )
+ hours: Union[int, str] = SchemaField(description="Duration in hours", default=0)
+ days: Union[int, str] = SchemaField(description="Duration in days", default=0)
class Output(BlockSchema):
- output_message: str
+ output_message: str = SchemaField(
+ description="Message after the timer finishes"
+ )
def __init__(self):
super().__init__(
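
The new format inputs on these time/date blocks are passed straight through to strftime, so any strftime directive works. For example (standard library only):

    import time
    from datetime import datetime, timedelta

    print(time.strftime("%H:%M"))                                     # GetCurrentTimeBlock, format="%H:%M"
    print((datetime.now() - timedelta(days=7)).strftime("%m/%d/%Y"))  # GetCurrentDateBlock, offset=7, format="%m/%d/%Y"
    print(time.strftime("%Y-%m-%d %H:%M:%S"))                         # GetCurrentDateAndTimeBlock, default format
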
diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py
index e299b121aa..b4f0259d98 100644
--- a/autogpt_platform/backend/backend/blocks/youtube.py
+++ b/autogpt_platform/backend/backend/blocks/youtube.py
@@ -7,9 +7,10 @@ from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
-class TranscribeYouTubeVideoBlock(Block):
+class TranscribeYoutubeVideoBlock(Block):
class Input(BlockSchema):
youtube_url: str = SchemaField(
+ title="YouTube URL",
description="The URL of the YouTube video to transcribe",
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
)
@@ -24,8 +25,8 @@ class TranscribeYouTubeVideoBlock(Block):
def __init__(self):
super().__init__(
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
- input_schema=TranscribeYouTubeVideoBlock.Input,
- output_schema=TranscribeYouTubeVideoBlock.Output,
+ input_schema=TranscribeYoutubeVideoBlock.Input,
+ output_schema=TranscribeYoutubeVideoBlock.Output,
description="Transcribes a YouTube video.",
categories={BlockCategory.SOCIAL},
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
@@ -64,14 +65,11 @@ class TranscribeYouTubeVideoBlock(Block):
return YouTubeTranscriptApi.get_transcript(video_id)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
- try:
- video_id = self.extract_video_id(input_data.youtube_url)
- yield "video_id", video_id
+ video_id = self.extract_video_id(input_data.youtube_url)
+ yield "video_id", video_id
- transcript = self.get_transcript(video_id)
- formatter = TextFormatter()
- transcript_text = formatter.format_transcript(transcript)
+ transcript = self.get_transcript(video_id)
+ formatter = TextFormatter()
+ transcript_text = formatter.format_transcript(transcript)
- yield "transcript", transcript_text
- except Exception as e:
- yield "error", str(e)
+ yield "transcript", transcript_text
diff --git a/autogpt_platform/backend/backend/cli.py b/autogpt_platform/backend/backend/cli.py
index 86fead6c90..154c22207b 100755
--- a/autogpt_platform/backend/backend/cli.py
+++ b/autogpt_platform/backend/backend/cli.py
@@ -217,13 +217,13 @@ def websocket(server_address: str, graph_id: str):
"""
import asyncio
- import websockets
+ import websockets.asyncio.client
from backend.server.ws_api import ExecutionSubscription, Methods, WsMessage
async def send_message(server_address: str):
uri = f"ws://{server_address}"
- async with websockets.connect(uri) as websocket:
+ async with websockets.asyncio.client.connect(uri) as websocket:
try:
msg = WsMessage(
method=Methods.SUBSCRIBE,
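
The CLI change above tracks the websockets library's newer asyncio client API (websockets.asyncio.client.connect) in place of the legacy top-level websockets.connect. A minimal round-trip with that API, with the server address and payload as placeholders:

    import asyncio
    import websockets.asyncio.client  # new asyncio client API in recent websockets releases

    async def main():
        async with websockets.asyncio.client.connect("ws://localhost:8001/ws") as ws:
            await ws.send('{"method": "subscribe", "data": {"graph_id": "..."}}')
            print(await ws.recv())

    asyncio.run(main())
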
diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py
index a3b89cc6f9..e89013b3b3 100644
--- a/autogpt_platform/backend/backend/data/block.py
+++ b/autogpt_platform/backend/backend/data/block.py
@@ -45,7 +45,9 @@ class BlockCategory(Enum):
INPUT = "Block that interacts with input of the graph."
OUTPUT = "Block that interacts with output of the graph."
LOGIC = "Programming logic to control the flow of your agent"
+ COMMUNICATION = "Block that interacts with communication platforms."
DEVELOPER_TOOLS = "Developer tools such as GitHub blocks."
+ DATA = "Block that interacts with structured data."
def dict(self) -> dict[str, str]:
return {"category": self.name, "description": self.value}
@@ -228,6 +230,11 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
self.disabled = disabled
self.static_output = static_output
self.block_type = block_type
+ self.execution_stats = {}
+
+ @classmethod
+ def create(cls: Type["Block"]) -> "Block":
+ return cls()
@abstractmethod
def run(self, input_data: BlockSchemaInputType, **kwargs) -> BlockOutput:
@@ -242,6 +249,26 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
"""
pass
+ def run_once(self, input_data: BlockSchemaInputType, output: str, **kwargs) -> Any:
+ for name, data in self.run(input_data, **kwargs):
+ if name == output:
+ return data
+ raise ValueError(f"{self.name} did not produce any output for {output}")
+
+ def merge_stats(self, stats: dict[str, Any]) -> dict[str, Any]:
+ for key, value in stats.items():
+ if isinstance(value, dict):
+ self.execution_stats.setdefault(key, {}).update(value)
+ elif isinstance(value, (int, float)):
+ self.execution_stats.setdefault(key, 0)
+ self.execution_stats[key] += value
+ elif isinstance(value, list):
+ self.execution_stats.setdefault(key, [])
+ self.execution_stats[key].extend(value)
+ else:
+ self.execution_stats[key] = value
+ return self.execution_stats
+
@property
def name(self):
return self.__class__.__name__
@@ -270,6 +297,8 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
for output_name, output_data in self.run(
self.input_schema(**input_data), **kwargs
):
+ if output_name == "error":
+ raise RuntimeError(output_data)
if error := self.output_schema.validate_field(output_name, output_data):
raise ValueError(f"Block produced an invalid output data: {error}")
yield output_name, output_data
@@ -278,15 +307,18 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
# ======================= Block Helper Functions ======================= #
-def get_blocks() -> dict[str, Block]:
+def get_blocks() -> dict[str, Type[Block]]:
from backend.blocks import AVAILABLE_BLOCKS # noqa: E402
return AVAILABLE_BLOCKS
async def initialize_blocks() -> None:
- for block in get_blocks().values():
- existing_block = await AgentBlock.prisma().find_unique(where={"id": block.id})
+ for cls in get_blocks().values():
+ block = cls()
+ existing_block = await AgentBlock.prisma().find_first(
+ where={"OR": [{"id": block.id}, {"name": block.name}]}
+ )
if not existing_block:
await AgentBlock.prisma().create(
data={
@@ -301,13 +333,15 @@ async def initialize_blocks() -> None:
input_schema = json.dumps(block.input_schema.jsonschema())
output_schema = json.dumps(block.output_schema.jsonschema())
if (
- block.name != existing_block.name
+ block.id != existing_block.id
+ or block.name != existing_block.name
or input_schema != existing_block.inputSchema
or output_schema != existing_block.outputSchema
):
await AgentBlock.prisma().update(
- where={"id": block.id},
+ where={"id": existing_block.id},
data={
+ "id": block.id,
"name": block.name,
"inputSchema": input_schema,
"outputSchema": output_schema,
@@ -316,4 +350,5 @@ async def initialize_blocks() -> None:
def get_block(block_id: str) -> Block | None:
- return get_blocks().get(block_id)
+ cls = get_blocks().get(block_id)
+ return cls() if cls else None
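
Block.merge_stats, added above, accumulates per-execution statistics: numeric values are summed, nested dicts are merged, lists are extended, and anything else is overwritten. A quick stand-alone check of that behaviour (the stat keys here are made up for illustration):

    execution_stats: dict = {}

    def merge_stats(stats: dict) -> dict:
        # Mirrors the logic of Block.merge_stats above.
        for key, value in stats.items():
            if isinstance(value, dict):
                execution_stats.setdefault(key, {}).update(value)
            elif isinstance(value, (int, float)):
                execution_stats[key] = execution_stats.get(key, 0) + value
            elif isinstance(value, list):
                execution_stats.setdefault(key, []).extend(value)
            else:
                execution_stats[key] = value
        return execution_stats

    merge_stats({"llm_call_count": 1, "extra": {"model": "gpt-4"}})
    merge_stats({"llm_call_count": 2, "errors": ["timeout"]})
    print(execution_stats)
    # {'llm_call_count': 3, 'extra': {'model': 'gpt-4'}, 'errors': ['timeout']}
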
diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py
index 8147f702af..5581a78542 100644
--- a/autogpt_platform/backend/backend/data/credit.py
+++ b/autogpt_platform/backend/backend/data/credit.py
@@ -17,8 +17,9 @@ from backend.blocks.llm import (
AITextSummarizerBlock,
LlmModel,
)
+from backend.blocks.search import ExtractWebsiteContentBlock, SearchTheWebBlock
from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock
-from backend.data.block import Block, BlockInput
+from backend.data.block import Block, BlockInput, get_block
from backend.util.settings import Config
@@ -74,6 +75,10 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
CreateTalkingAvatarVideoBlock: [
BlockCost(cost_amount=15, cost_filter={"api_key": None})
],
+ SearchTheWebBlock: [BlockCost(cost_amount=1)],
+ ExtractWebsiteContentBlock: [
+ BlockCost(cost_amount=1, cost_filter={"raw_content": False})
+ ],
}
@@ -96,7 +101,7 @@ class UserCreditBase(ABC):
self,
user_id: str,
user_credit: int,
- block: Block,
+ block_id: str,
input_data: BlockInput,
data_size: float,
run_time: float,
@@ -107,7 +112,7 @@ class UserCreditBase(ABC):
Args:
user_id (str): The user ID.
user_credit (int): The current credit for the user.
- block (Block): The block that is being used.
+ block_id (str): The block ID.
input_data (BlockInput): The input data for the block.
data_size (float): The size of the data being processed.
run_time (float): The time taken to run the block.
@@ -208,12 +213,16 @@ class UserCredit(UserCreditBase):
self,
user_id: str,
user_credit: int,
- block: Block,
+ block_id: str,
input_data: BlockInput,
data_size: float,
run_time: float,
validate_balance: bool = True,
) -> int:
+ block = get_block(block_id)
+ if not block:
+ raise ValueError(f"Block not found: {block_id}")
+
cost, matching_filter = self._block_usage_cost(
block=block, input_data=input_data, data_size=data_size, run_time=run_time
)
diff --git a/autogpt_platform/backend/backend/data/db.py b/autogpt_platform/backend/backend/data/db.py
index e5f7b1965e..1bf5d930f6 100644
--- a/autogpt_platform/backend/backend/data/db.py
+++ b/autogpt_platform/backend/backend/data/db.py
@@ -1,4 +1,3 @@
-import asyncio
import logging
import os
from contextlib import asynccontextmanager
@@ -8,40 +7,30 @@ from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
+from backend.util.retry import conn_retry
+
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
-prisma, conn_id = Prisma(auto_register=True), ""
+prisma = Prisma(auto_register=True)
logger = logging.getLogger(__name__)
-async def connect(call_count=0):
- global conn_id
- if not conn_id:
- conn_id = str(uuid4())
-
- try:
- logger.info(f"[Prisma-{conn_id}] Acquiring connection..")
- if not prisma.is_connected():
- await prisma.connect()
- logger.info(f"[Prisma-{conn_id}] Connection acquired!")
- except Exception as e:
- if call_count <= 5:
- logger.info(f"[Prisma-{conn_id}] Connection failed: {e}. Retrying now..")
- await asyncio.sleep(2**call_count)
- await connect(call_count + 1)
- else:
- raise e
-
-
-async def disconnect():
+@conn_retry("Prisma", "Acquiring connection")
+async def connect():
if prisma.is_connected():
- logger.info(f"[Prisma-{conn_id}] Releasing connection.")
- await prisma.disconnect()
- logger.info(f"[Prisma-{conn_id}] Connection released.")
+ return
+ await prisma.connect()
+
+
+@conn_retry("Prisma", "Releasing connection")
+async def disconnect():
+ if not prisma.is_connected():
+ return
+ await prisma.disconnect()
@asynccontextmanager
diff --git a/autogpt_platform/backend/backend/data/execution.py b/autogpt_platform/backend/backend/data/execution.py
index 962bbfa293..aaf06ffef7 100644
--- a/autogpt_platform/backend/backend/data/execution.py
+++ b/autogpt_platform/backend/backend/data/execution.py
@@ -3,7 +3,6 @@ from datetime import datetime, timezone
from multiprocessing import Manager
from typing import Any, Generic, TypeVar
-from autogpt_libs.supabase_integration_credentials_store.types import Credentials
from prisma.enums import AgentExecutionStatus
from prisma.models import (
AgentGraphExecution,
@@ -26,7 +25,6 @@ class GraphExecution(BaseModel):
graph_exec_id: str
graph_id: str
start_node_execs: list["NodeExecution"]
- node_input_credentials: dict[str, Credentials] # dict[node_id, Credentials]
class NodeExecution(BaseModel):
@@ -268,10 +266,29 @@ async def update_graph_execution_start_time(graph_exec_id: str):
)
-async def update_graph_execution_stats(graph_exec_id: str, stats: dict[str, Any]):
+async def update_graph_execution_stats(
+ graph_exec_id: str,
+ error: Exception | None,
+ wall_time: float,
+ cpu_time: float,
+ node_count: int,
+):
+ status = ExecutionStatus.FAILED if error else ExecutionStatus.COMPLETED
+ stats = (
+ {
+ "walltime": wall_time,
+ "cputime": cpu_time,
+ "nodecount": node_count,
+ "error": str(error) if error else None,
+        }
+ )
+
await AgentGraphExecution.prisma().update(
where={"id": graph_exec_id},
- data={"executionStatus": ExecutionStatus.COMPLETED, "stats": json.dumps(stats)},
+ data={
+ "executionStatus": status,
+ "stats": json.dumps(stats),
+ },
)
diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py
index bbe9afc237..b4f8f8aeb7 100644
--- a/autogpt_platform/backend/backend/data/graph.py
+++ b/autogpt_platform/backend/backend/data/graph.py
@@ -2,20 +2,18 @@ import asyncio
import logging
import uuid
from datetime import datetime, timezone
-from pathlib import Path
from typing import Any, Literal
import prisma.types
from prisma.models import AgentGraph, AgentGraphExecution, AgentNode, AgentNodeLink
from prisma.types import AgentGraphInclude
-from pydantic import BaseModel, PrivateAttr
+from pydantic import BaseModel
from pydantic_core import PydanticUndefinedType
from backend.blocks.basic import AgentInputBlock, AgentOutputBlock
from backend.data.block import BlockInput, get_block, get_blocks
from backend.data.db import BaseDbModel, transaction
from backend.data.execution import ExecutionStatus
-from backend.data.user import DEFAULT_USER_ID
from backend.util import json
logger = logging.getLogger(__name__)
@@ -53,17 +51,8 @@ class Node(BaseDbModel):
block_id: str
input_default: BlockInput = {} # dict[input_name, default_value]
metadata: dict[str, Any] = {}
-
- _input_links: list[Link] = PrivateAttr(default=[])
- _output_links: list[Link] = PrivateAttr(default=[])
-
- @property
- def input_links(self) -> list[Link]:
- return self._input_links
-
- @property
- def output_links(self) -> list[Link]:
- return self._output_links
+ input_links: list[Link] = []
+ output_links: list[Link] = []
@staticmethod
def from_db(node: AgentNode):
@@ -75,8 +64,8 @@ class Node(BaseDbModel):
input_default=json.loads(node.constantInput),
metadata=json.loads(node.metadata),
)
- obj._input_links = [Link.from_db(link) for link in node.Input or []]
- obj._output_links = [Link.from_db(link) for link in node.Output or []]
+ obj.input_links = [Link.from_db(link) for link in node.Input or []]
+ obj.output_links = [Link.from_db(link) for link in node.Output or []]
return obj
@@ -268,7 +257,7 @@ class Graph(GraphMeta):
block = get_block(node.block_id)
if not block:
- blocks = {v.id: v.name for v in get_blocks().values()}
+ blocks = {v().id: v().name for v in get_blocks().values()}
raise ValueError(
f"{suffix}, {node.block_id} is invalid block id, available blocks: {blocks}"
)
@@ -330,7 +319,7 @@ class Graph(GraphMeta):
return input_schema
@staticmethod
- def from_db(graph: AgentGraph):
+ def from_db(graph: AgentGraph, hide_credentials: bool = False):
nodes = [
*(graph.AgentNodes or []),
*(
@@ -341,7 +330,7 @@ class Graph(GraphMeta):
]
return Graph(
**GraphMeta.from_db(graph).model_dump(),
- nodes=[Node.from_db(node) for node in nodes],
+ nodes=[Graph._process_node(node, hide_credentials) for node in nodes],
links=list(
{
Link.from_db(link)
@@ -355,6 +344,31 @@ class Graph(GraphMeta):
},
)
+ @staticmethod
+ def _process_node(node: AgentNode, hide_credentials: bool) -> Node:
+ node_dict = node.model_dump()
+ if hide_credentials and "constantInput" in node_dict:
+ constant_input = json.loads(node_dict["constantInput"])
+ constant_input = Graph._hide_credentials_in_input(constant_input)
+ node_dict["constantInput"] = json.dumps(constant_input)
+ return Node.from_db(AgentNode(**node_dict))
+
+ @staticmethod
+ def _hide_credentials_in_input(input_data: dict[str, Any]) -> dict[str, Any]:
+ sensitive_keys = ["credentials", "api_key", "password", "token", "secret"]
+ result = {}
+ for key, value in input_data.items():
+ if isinstance(value, dict):
+ result[key] = Graph._hide_credentials_in_input(value)
+ elif isinstance(value, str) and any(
+ sensitive_key in key.lower() for sensitive_key in sensitive_keys
+ ):
+ # Skip this key-value pair in the result
+ continue
+ else:
+ result[key] = value
+ return result
+
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
@@ -382,9 +396,9 @@ async def get_node(node_id: str) -> Node:
async def get_graphs_meta(
+ user_id: str,
include_executions: bool = False,
filter_by: Literal["active", "template"] | None = "active",
- user_id: str | None = None,
) -> list[GraphMeta]:
"""
Retrieves graph metadata objects.
@@ -393,6 +407,7 @@ async def get_graphs_meta(
Args:
include_executions: Whether to include executions in the graph metadata.
filter_by: An optional filter to either select templates or active graphs.
+ user_id: The ID of the user that owns the graph.
Returns:
list[GraphMeta]: A list of objects representing the retrieved graph metadata.
@@ -404,8 +419,7 @@ async def get_graphs_meta(
elif filter_by == "template":
where_clause["isTemplate"] = True
- if user_id and filter_by != "template":
- where_clause["userId"] = user_id
+ where_clause["userId"] = user_id
graphs = await AgentGraph.prisma().find_many(
where=where_clause,
@@ -431,6 +445,7 @@ async def get_graph(
version: int | None = None,
template: bool = False,
user_id: str | None = None,
+ hide_credentials: bool = False,
) -> Graph | None:
"""
Retrieves a graph from the DB.
@@ -456,7 +471,7 @@ async def get_graph(
include=AGENT_GRAPH_INCLUDE,
order={"version": "desc"},
)
- return Graph.from_db(graph) if graph else None
+ return Graph.from_db(graph, hide_credentials) if graph else None
async def set_graph_active_version(graph_id: str, version: int, user_id: str) -> None:
@@ -500,6 +515,15 @@ async def get_graph_all_versions(graph_id: str, user_id: str) -> list[Graph]:
return [Graph.from_db(graph) for graph in graph_versions]
+async def delete_graph(graph_id: str, user_id: str) -> int:
+ entries_count = await AgentGraph.prisma().delete_many(
+ where={"id": graph_id, "userId": user_id}
+ )
+ if entries_count:
+ logger.info(f"Deleted {entries_count} graph entries for Graph #{graph_id}")
+ return entries_count
+
+
async def create_graph(graph: Graph, user_id: str) -> Graph:
async with transaction() as tx:
await __create_graph(tx, graph, user_id)
@@ -576,30 +600,3 @@ async def __create_graph(tx, graph: Graph, user_id: str):
for link in graph.links
]
)
-
-
-# --------------------- Helper functions --------------------- #
-
-
-TEMPLATES_DIR = Path(__file__).parent.parent.parent / "graph_templates"
-
-
-async def import_packaged_templates() -> None:
- templates_in_db = await get_graphs_meta(filter_by="template")
-
- logging.info("Loading templates...")
- for template_file in TEMPLATES_DIR.glob("*.json"):
- template_data = json.loads(template_file.read_bytes())
-
- template = Graph.model_validate(template_data)
- if not template.is_template:
- logging.warning(
- f"pre-packaged graph file {template_file} is not a template"
- )
- continue
- if (
- exists := next((t for t in templates_in_db if t.id == template.id), None)
- ) and exists.version >= template.version:
- continue
- await create_graph(template, DEFAULT_USER_ID)
- logging.info(f"Loaded template '{template.name}' ({template.id})")
diff --git a/autogpt_platform/backend/backend/data/queue.py b/autogpt_platform/backend/backend/data/queue.py
index 977e8b084c..3b3db57ecd 100644
--- a/autogpt_platform/backend/backend/data/queue.py
+++ b/autogpt_platform/backend/backend/data/queue.py
@@ -1,14 +1,19 @@
import json
import logging
-import os
from abc import ABC, abstractmethod
from datetime import datetime
+from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
-from redis.asyncio import Redis
+from pydantic import BaseModel
+from redis.asyncio.client import PubSub as AsyncPubSub
+from redis.client import PubSub
+from backend.data import redis
from backend.data.execution import ExecutionResult
+from backend.util.settings import Config
logger = logging.getLogger(__name__)
+config = Config()
class DateTimeEncoder(json.JSONEncoder):
@@ -18,60 +23,122 @@ class DateTimeEncoder(json.JSONEncoder):
return super().default(o)
-class AsyncEventQueue(ABC):
+M = TypeVar("M", bound=BaseModel)
+
+
+class BaseRedisEventBus(Generic[M], ABC):
+ Model: type[M]
+
+ @property
@abstractmethod
- async def connect(self):
+ def event_bus_name(self) -> str:
pass
- @abstractmethod
- async def close(self):
- pass
+ def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
+ message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
+ channel_name = f"{self.event_bus_name}-{channel_key}"
+ logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
+ return message, channel_name
- @abstractmethod
- async def put(self, execution_result: ExecutionResult):
- pass
+ def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
+ message_type = "pmessage" if "*" in channel_key else "message"
+ if msg["type"] != message_type:
+ return None
+ try:
+ data = json.loads(msg["data"])
+ logger.info(f"Consuming an event from Redis {data}")
+ return self.Model(**data)
+ except Exception as e:
+ logger.error(f"Failed to parse event result from Redis {msg} {e}")
- @abstractmethod
- async def get(self) -> ExecutionResult | None:
- pass
+ def _subscribe(
+ self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
+ ) -> tuple[PubSub | AsyncPubSub, str]:
+ channel_name = f"{self.event_bus_name}-{channel_key}"
+ pubsub = connection.pubsub()
+ return pubsub, channel_name
-class AsyncRedisEventQueue(AsyncEventQueue):
- def __init__(self):
- self.host = os.getenv("REDIS_HOST", "localhost")
- self.port = int(os.getenv("REDIS_PORT", "6379"))
- self.password = os.getenv("REDIS_PASSWORD", "password")
- self.queue_name = os.getenv("REDIS_QUEUE", "execution_events")
- self.connection = None
+class RedisEventBus(BaseRedisEventBus[M], ABC):
+ Model: type[M]
- async def connect(self):
- if not self.connection:
- self.connection = Redis(
- host=self.host,
- port=self.port,
- password=self.password,
- decode_responses=True,
- )
- await self.connection.ping()
- logger.info(f"Connected to Redis on {self.host}:{self.port}")
+ @property
+ def connection(self) -> redis.Redis:
+ return redis.get_redis()
- async def put(self, execution_result: ExecutionResult):
- if self.connection:
- message = json.dumps(execution_result.model_dump(), cls=DateTimeEncoder)
- logger.info(f"Putting execution result to Redis {message}")
- await self.connection.lpush(self.queue_name, message) # type: ignore
+ def publish_event(self, event: M, channel_key: str):
+ message, channel_name = self._serialize_message(event, channel_key)
+ self.connection.publish(channel_name, message)
- async def get(self) -> ExecutionResult | None:
- if self.connection:
- message = await self.connection.rpop(self.queue_name) # type: ignore
- if message is not None and isinstance(message, (str, bytes, bytearray)):
- data = json.loads(message)
- logger.info(f"Getting execution result from Redis {data}")
- return ExecutionResult(**data)
- return None
+ def listen_events(self, channel_key: str) -> Generator[M, None, None]:
+ pubsub, channel_name = self._subscribe(self.connection, channel_key)
+ assert isinstance(pubsub, PubSub)
- async def close(self):
- if self.connection:
- await self.connection.close()
- self.connection = None
- logger.info("Closed connection to Redis")
+ if "*" in channel_key:
+ pubsub.psubscribe(channel_name)
+ else:
+ pubsub.subscribe(channel_name)
+
+ for message in pubsub.listen():
+ if event := self._deserialize_message(message, channel_key):
+ yield event
+
+
+class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
+ Model: type[M]
+
+ @property
+ async def connection(self) -> redis.AsyncRedis:
+ return await redis.get_redis_async()
+
+ async def publish_event(self, event: M, channel_key: str):
+ message, channel_name = self._serialize_message(event, channel_key)
+ connection = await self.connection
+ await connection.publish(channel_name, message)
+
+ async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
+ pubsub, channel_name = self._subscribe(await self.connection, channel_key)
+ assert isinstance(pubsub, AsyncPubSub)
+
+ if "*" in channel_key:
+ await pubsub.psubscribe(channel_name)
+ else:
+ await pubsub.subscribe(channel_name)
+
+ async for message in pubsub.listen():
+ if event := self._deserialize_message(message, channel_key):
+ yield event
+
+
+class RedisExecutionEventBus(RedisEventBus[ExecutionResult]):
+ Model = ExecutionResult
+
+ @property
+ def event_bus_name(self) -> str:
+ return config.execution_event_bus_name
+
+ def publish(self, res: ExecutionResult):
+ self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
+
+ def listen(
+ self, graph_id: str = "*", graph_exec_id: str = "*"
+ ) -> Generator[ExecutionResult, None, None]:
+ for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
+ yield execution_result
+
+
+class AsyncRedisExecutionEventBus(AsyncRedisEventBus[ExecutionResult]):
+ Model = ExecutionResult
+
+ @property
+ def event_bus_name(self) -> str:
+ return config.execution_event_bus_name
+
+ async def publish(self, res: ExecutionResult):
+ await self.publish_event(res, f"{res.graph_id}-{res.graph_exec_id}")
+
+ async def listen(
+ self, graph_id: str = "*", graph_exec_id: str = "*"
+ ) -> AsyncGenerator[ExecutionResult, None]:
+ async for execution_result in self.listen_events(f"{graph_id}-{graph_exec_id}"):
+ yield execution_result
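
The queue module now uses Redis pub/sub channels (one channel per graph execution, with pattern subscription for wildcards) instead of a single Redis list. Stripped of the backend classes, the underlying mechanism is plain redis-py pub/sub. In the sketch below the channel prefix is a placeholder for config.execution_event_bus_name, the connection settings match the repo's defaults, a local Redis server is assumed, and publisher and listener would normally run in separate processes:

    import json
    import redis  # redis-py; assumes a Redis server reachable with these settings

    r = redis.Redis(host="localhost", port=6379, password="password", decode_responses=True)

    # Listener side (what listen_events does for a wildcard channel key): pattern-subscribe.
    pubsub = r.pubsub()
    pubsub.psubscribe("execution_event_bus-graph1-*")

    # Publisher side (what publish_event does): one channel per graph execution.
    r.publish("execution_event_bus-graph1-exec1", json.dumps({"status": "COMPLETED"}))

    for msg in pubsub.listen():
        if msg["type"] == "pmessage":
            print(json.loads(msg["data"]))
            break
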
diff --git a/autogpt_platform/backend/backend/data/redis.py b/autogpt_platform/backend/backend/data/redis.py
new file mode 100644
index 0000000000..36410fe29c
--- /dev/null
+++ b/autogpt_platform/backend/backend/data/redis.py
@@ -0,0 +1,84 @@
+import logging
+import os
+
+from dotenv import load_dotenv
+from redis import Redis
+from redis.asyncio import Redis as AsyncRedis
+
+from backend.util.retry import conn_retry
+
+load_dotenv()
+
+HOST = os.getenv("REDIS_HOST", "localhost")
+PORT = int(os.getenv("REDIS_PORT", "6379"))
+PASSWORD = os.getenv("REDIS_PASSWORD", "password")
+
+logger = logging.getLogger(__name__)
+connection: Redis | None = None
+connection_async: AsyncRedis | None = None
+
+
+@conn_retry("Redis", "Acquiring connection")
+def connect() -> Redis:
+ global connection
+ if connection:
+ return connection
+
+ c = Redis(
+ host=HOST,
+ port=PORT,
+ password=PASSWORD,
+ decode_responses=True,
+ )
+ c.ping()
+ connection = c
+ return connection
+
+
+@conn_retry("Redis", "Releasing connection")
+def disconnect():
+ global connection
+ if connection:
+ connection.close()
+ connection = None
+
+
+def get_redis(auto_connect: bool = True) -> Redis:
+ if connection:
+ return connection
+ if auto_connect:
+ return connect()
+ raise RuntimeError("Redis connection is not established")
+
+
+@conn_retry("AsyncRedis", "Acquiring connection")
+async def connect_async() -> AsyncRedis:
+ global connection_async
+ if connection_async:
+ return connection_async
+
+ c = AsyncRedis(
+ host=HOST,
+ port=PORT,
+ password=PASSWORD,
+ decode_responses=True,
+ )
+ await c.ping()
+ connection_async = c
+ return connection_async
+
+
+@conn_retry("AsyncRedis", "Releasing connection")
+async def disconnect_async():
+ global connection_async
+ if connection_async:
+ await connection_async.close()
+ connection_async = None
+
+
+async def get_redis_async(auto_connect: bool = True) -> AsyncRedis:
+ if connection_async:
+ return connection_async
+ if auto_connect:
+ return await connect_async()
+ raise RuntimeError("AsyncRedis connection is not established")
diff --git a/autogpt_platform/backend/backend/data/user.py b/autogpt_platform/backend/backend/data/user.py
index db60eea235..477b3bae65 100644
--- a/autogpt_platform/backend/backend/data/user.py
+++ b/autogpt_platform/backend/backend/data/user.py
@@ -1,6 +1,8 @@
from typing import Optional
+from autogpt_libs.supabase_integration_credentials_store.types import UserMetadataRaw
from fastapi import HTTPException
+from prisma import Json
from prisma.models import User
from backend.data.db import prisma
@@ -35,16 +37,32 @@ async def get_user_by_id(user_id: str) -> Optional[User]:
return User.model_validate(user) if user else None
-async def create_default_user(enable_auth: str) -> Optional[User]:
- if not enable_auth.lower() == "true":
- user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
- if not user:
- user = await prisma.user.create(
- data={
- "id": DEFAULT_USER_ID,
- "email": "default@example.com",
- "name": "Default User",
- }
- )
- return User.model_validate(user)
- return None
+async def create_default_user() -> Optional[User]:
+ user = await prisma.user.find_unique(where={"id": DEFAULT_USER_ID})
+ if not user:
+ user = await prisma.user.create(
+ data={
+ "id": DEFAULT_USER_ID,
+ "email": "default@example.com",
+ "name": "Default User",
+ }
+ )
+ return User.model_validate(user)
+
+
+async def get_user_metadata(user_id: str) -> UserMetadataRaw:
+ user = await User.prisma().find_unique_or_raise(
+ where={"id": user_id},
+ )
+ return (
+ UserMetadataRaw.model_validate(user.metadata)
+ if user.metadata
+ else UserMetadataRaw()
+ )
+
+
+async def update_user_metadata(user_id: str, metadata: UserMetadataRaw):
+ await User.prisma().update(
+ where={"id": user_id},
+ data={"metadata": Json(metadata.model_dump())},
+ )
diff --git a/autogpt_platform/backend/backend/exec.py b/autogpt_platform/backend/backend/exec.py
index aaea21e64a..6e902c64df 100644
--- a/autogpt_platform/backend/backend/exec.py
+++ b/autogpt_platform/backend/backend/exec.py
@@ -1,5 +1,5 @@
from backend.app import run_processes
-from backend.executor import ExecutionManager
+from backend.executor import DatabaseManager, ExecutionManager
def main():
@@ -7,6 +7,7 @@ def main():
Run all the processes required for the AutoGPT-server REST API.
"""
run_processes(
+ DatabaseManager(),
ExecutionManager(),
)
diff --git a/autogpt_platform/backend/backend/executor/__init__.py b/autogpt_platform/backend/backend/executor/__init__.py
index 953d09aa07..59a3595eea 100644
--- a/autogpt_platform/backend/backend/executor/__init__.py
+++ b/autogpt_platform/backend/backend/executor/__init__.py
@@ -1,7 +1,9 @@
+from .database import DatabaseManager
from .manager import ExecutionManager
from .scheduler import ExecutionScheduler
__all__ = [
+ "DatabaseManager",
"ExecutionManager",
"ExecutionScheduler",
]
diff --git a/autogpt_platform/backend/backend/executor/database.py b/autogpt_platform/backend/backend/executor/database.py
new file mode 100644
index 0000000000..0d33c28460
--- /dev/null
+++ b/autogpt_platform/backend/backend/executor/database.py
@@ -0,0 +1,84 @@
+from functools import wraps
+from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
+
+from backend.data.credit import get_user_credit_model
+from backend.data.execution import (
+ ExecutionResult,
+ create_graph_execution,
+ get_execution_results,
+ get_incomplete_executions,
+ get_latest_execution,
+ update_execution_status,
+ update_graph_execution_stats,
+ update_node_execution_stats,
+ upsert_execution_input,
+ upsert_execution_output,
+)
+from backend.data.graph import get_graph, get_node
+from backend.data.queue import RedisExecutionEventBus
+from backend.data.user import get_user_metadata, update_user_metadata
+from backend.util.service import AppService, expose
+from backend.util.settings import Config
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+class DatabaseManager(AppService):
+
+ def __init__(self):
+ super().__init__()
+ self.use_db = True
+ self.use_redis = True
+ self.event_queue = RedisExecutionEventBus()
+
+ @classmethod
+ def get_port(cls) -> int:
+ return Config().database_api_port
+
+ @expose
+ def send_execution_update(self, execution_result_dict: dict[Any, Any]):
+ self.event_queue.publish(ExecutionResult(**execution_result_dict))
+
+ @staticmethod
+ def exposed_run_and_wait(
+ f: Callable[P, Coroutine[None, None, R]]
+ ) -> Callable[Concatenate[object, P], R]:
+ @expose
+ @wraps(f)
+ def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
+ coroutine = f(*args, **kwargs)
+ res = self.run_and_wait(coroutine)
+ return res
+
+ return wrapper
+
+ # Executions
+ create_graph_execution = exposed_run_and_wait(create_graph_execution)
+ get_execution_results = exposed_run_and_wait(get_execution_results)
+ get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
+ get_latest_execution = exposed_run_and_wait(get_latest_execution)
+ update_execution_status = exposed_run_and_wait(update_execution_status)
+ update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
+ update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
+ upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
+ upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
+
+ # Graphs
+ get_node = exposed_run_and_wait(get_node)
+ get_graph = exposed_run_and_wait(get_graph)
+
+ # Credits
+ user_credit_model = get_user_credit_model()
+ get_or_refill_credit = cast(
+ Callable[[Any, str], int],
+ exposed_run_and_wait(user_credit_model.get_or_refill_credit),
+ )
+ spend_credits = cast(
+ Callable[[Any, str, int, str, dict[str, str], float, float], int],
+ exposed_run_and_wait(user_credit_model.spend_credits),
+ )
+
+ # User + User Metadata
+ get_user_metadata = exposed_run_and_wait(get_user_metadata)
+ update_user_metadata = exposed_run_and_wait(update_user_metadata)
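
DatabaseManager turns the async data-layer helpers into synchronous, RPC-exposed methods via exposed_run_and_wait. A simplified, self-contained illustration of that wrapping, where asyncio.run stands in for the service's own run_and_wait and the class/function names are stand-ins:

    import asyncio
    from functools import wraps
    from typing import Any, Callable, Coroutine

    async def get_node(node_id: str) -> dict:
        # Stand-in for an async data-layer helper.
        return {"id": node_id}

    def run_and_wait(f: Callable[..., Coroutine[Any, Any, Any]]):
        @wraps(f)
        def wrapper(self, *args, **kwargs):
            return asyncio.run(f(*args, **kwargs))  # real code uses self.run_and_wait(...)
        return wrapper

    class FakeDatabaseManager:
        get_node = run_and_wait(get_node)

    print(FakeDatabaseManager().get_node("node-1"))  # {'id': 'node-1'}
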
diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index 544c59f8b4..102de62c16 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -1,4 +1,3 @@
-import asyncio
import atexit
import logging
import multiprocessing
@@ -9,45 +8,40 @@ import threading
from concurrent.futures import Future, ProcessPoolExecutor
from contextlib import contextmanager
from multiprocessing.pool import AsyncResult, Pool
-from typing import TYPE_CHECKING, Any, Coroutine, Generator, TypeVar, cast
+from typing import TYPE_CHECKING, Any, Generator, TypeVar, cast
-from autogpt_libs.supabase_integration_credentials_store.types import Credentials
from pydantic import BaseModel
+from redis.lock import Lock as RedisLock
if TYPE_CHECKING:
- from backend.server.rest_api import AgentServer
+ from backend.executor import DatabaseManager
-from backend.data import db
+from autogpt_libs.utils.cache import thread_cached
+
+from backend.data import redis
from backend.data.block import Block, BlockData, BlockInput, BlockType, get_block
-from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionQueue,
ExecutionResult,
ExecutionStatus,
GraphExecution,
NodeExecution,
- create_graph_execution,
- get_execution_results,
- get_incomplete_executions,
- get_latest_execution,
merge_execution_input,
parse_execution_output,
- update_execution_status,
- update_graph_execution_stats,
- update_node_execution_stats,
- upsert_execution_input,
- upsert_execution_output,
)
-from backend.data.graph import Graph, Link, Node, get_graph, get_node
+from backend.data.graph import Graph, Link, Node
from backend.data.model import CREDENTIALS_FIELD_NAME, CredentialsMetaInput
+from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.util import json
from backend.util.decorator import error_logged, time_measured
from backend.util.logging import configure_logging
+from backend.util.process import set_service_name
from backend.util.service import AppService, expose, get_service_client
-from backend.util.settings import Config
+from backend.util.settings import Settings
from backend.util.type import convert
logger = logging.getLogger(__name__)
+settings = Settings()
class LogMetadata:
@@ -100,10 +94,9 @@ ExecutionStream = Generator[NodeExecution, None, None]
def execute_node(
- loop: asyncio.AbstractEventLoop,
- api_client: "AgentServer",
+ db_client: "DatabaseManager",
+ creds_manager: IntegrationCredentialsManager,
data: NodeExecution,
- input_credentials: Credentials | None = None,
execution_stats: dict[str, Any] | None = None,
) -> ExecutionStream:
"""
@@ -111,8 +104,8 @@ def execute_node(
persist the execution result, and return the subsequent node to be executed.
Args:
- loop: The event loop to run the async functions.
- api_client: The client to send execution updates to the server.
+ db_client: The client to send execution updates to the server.
+ creds_manager: The manager to acquire and release credentials.
data: The execution data for executing the current node.
execution_stats: The execution statistics to be updated.
@@ -125,17 +118,12 @@ def execute_node(
node_exec_id = data.node_exec_id
node_id = data.node_id
- asyncio.set_event_loop(loop)
-
- def wait(f: Coroutine[Any, Any, T]) -> T:
- return loop.run_until_complete(f)
-
def update_execution(status: ExecutionStatus) -> ExecutionResult:
- exec_update = wait(update_execution_status(node_exec_id, status))
- api_client.send_execution_update(exec_update.model_dump())
+ exec_update = db_client.update_execution_status(node_exec_id, status)
+ db_client.send_execution_update(exec_update.model_dump())
return exec_update
- node = wait(get_node(node_id))
+ node = db_client.get_node(node_id)
node_block = get_block(node.block_id)
if not node_block:
@@ -161,28 +149,34 @@ def execute_node(
input_size = len(input_data_str)
log_metadata.info("Executed node with input", input=input_data_str)
update_execution(ExecutionStatus.RUNNING)
- user_credit = get_user_credit_model()
extra_exec_kwargs = {}
- if input_credentials:
- extra_exec_kwargs["credentials"] = input_credentials
+ # Fetch credentials at the last minute and acquire a system-wide read-write lock to
+ # prevent changes during execution. ⚠️ This means a set of credentials can only be
+ # used by one (running) block at a time; simultaneous execution of blocks using the
+ # same credentials is not supported.
+ creds_lock = None
+ if CREDENTIALS_FIELD_NAME in input_data:
+ credentials_meta = CredentialsMetaInput(**input_data[CREDENTIALS_FIELD_NAME])
+ credentials, creds_lock = creds_manager.acquire(user_id, credentials_meta.id)
+ extra_exec_kwargs["credentials"] = credentials
output_size = 0
- try:
- credit = wait(user_credit.get_or_refill_credit(user_id))
- if credit < 0:
- raise ValueError(f"Insufficient credit: {credit}")
+ end_status = ExecutionStatus.COMPLETED
+ credit = db_client.get_or_refill_credit(user_id)
+ if credit < 0:
+ raise ValueError(f"Insufficient credit: {credit}")
+ try:
for output_name, output_data in node_block.execute(
input_data, **extra_exec_kwargs
):
output_size += len(json.dumps(output_data))
log_metadata.info("Node produced output", output_name=output_data)
- wait(upsert_execution_output(node_exec_id, output_name, output_data))
+ db_client.upsert_execution_output(node_exec_id, output_name, output_data)
for execution in _enqueue_next_nodes(
- api_client=api_client,
- loop=loop,
+ db_client=db_client,
node=node,
output=(output_name, output_data),
user_id=user_id,
@@ -192,41 +186,52 @@ def execute_node(
):
yield execution
- r = update_execution(ExecutionStatus.COMPLETED)
- s = input_size + output_size
- t = (
- (r.end_time - r.start_time).total_seconds()
- if r.end_time and r.start_time
- else 0
- )
- wait(user_credit.spend_credits(user_id, credit, node_block, input_data, s, t))
-
except Exception as e:
+ end_status = ExecutionStatus.FAILED
error_msg = str(e)
log_metadata.exception(f"Node execution failed with error {error_msg}")
- wait(upsert_execution_output(node_exec_id, "error", error_msg))
- update_execution(ExecutionStatus.FAILED)
+ db_client.upsert_execution_output(node_exec_id, "error", error_msg)
+
+ for execution in _enqueue_next_nodes(
+ db_client=db_client,
+ node=node,
+ output=("error", error_msg),
+ user_id=user_id,
+ graph_exec_id=graph_exec_id,
+ graph_id=graph_id,
+ log_metadata=log_metadata,
+ ):
+ yield execution
raise e
-
finally:
+ # Ensure credentials are released even if execution fails
+ if creds_lock:
+ try:
+ creds_lock.release()
+ except Exception as e:
+ log_metadata.error(f"Failed to release credentials lock: {e}")
+
+ # Update execution status and spend credits
+ res = update_execution(end_status)
+ if end_status == ExecutionStatus.COMPLETED:
+ s = input_size + output_size
+ t = (
+ (res.end_time - res.start_time).total_seconds()
+ if res.end_time and res.start_time
+ else 0
+ )
+ db_client.spend_credits(user_id, credit, node_block.id, input_data, s, t)
+
+ # Update execution stats
if execution_stats is not None:
+ execution_stats.update(node_block.execution_stats)
execution_stats["input_size"] = input_size
execution_stats["output_size"] = output_size
-@contextmanager
-def synchronized(api_client: "AgentServer", key: Any):
- api_client.acquire_lock(key)
- try:
- yield
- finally:
- api_client.release_lock(key)
-
-
def _enqueue_next_nodes(
- api_client: "AgentServer",
- loop: asyncio.AbstractEventLoop,
+ db_client: "DatabaseManager",
node: Node,
output: BlockData,
user_id: str,
@@ -234,16 +239,14 @@ def _enqueue_next_nodes(
graph_id: str,
log_metadata: LogMetadata,
) -> list[NodeExecution]:
- def wait(f: Coroutine[Any, Any, T]) -> T:
- return loop.run_until_complete(f)
def add_enqueued_execution(
node_exec_id: str, node_id: str, data: BlockInput
) -> NodeExecution:
- exec_update = wait(
- update_execution_status(node_exec_id, ExecutionStatus.QUEUED, data)
+ exec_update = db_client.update_execution_status(
+ node_exec_id, ExecutionStatus.QUEUED, data
)
- api_client.send_execution_update(exec_update.model_dump())
+ db_client.send_execution_update(exec_update.model_dump())
return NodeExecution(
user_id=user_id,
graph_exec_id=graph_exec_id,
@@ -263,20 +266,18 @@ def _enqueue_next_nodes(
if next_data is None:
return enqueued_executions
- next_node = wait(get_node(next_node_id))
+ next_node = db_client.get_node(next_node_id)
# Multiple nodes can register the same next node; this needs to be atomic
# to avoid the same execution being enqueued multiple times,
# or the same input being consumed multiple times.
- with synchronized(api_client, ("upsert_input", next_node_id, graph_exec_id)):
+ with synchronized(f"upsert_input-{next_node_id}-{graph_exec_id}"):
# Add output data to the earliest incomplete execution, or create a new one.
- next_node_exec_id, next_node_input = wait(
- upsert_execution_input(
- node_id=next_node_id,
- graph_exec_id=graph_exec_id,
- input_name=next_input_name,
- input_data=next_data,
- )
+ next_node_exec_id, next_node_input = db_client.upsert_execution_input(
+ node_id=next_node_id,
+ graph_exec_id=graph_exec_id,
+ input_name=next_input_name,
+ input_data=next_data,
)
# Complete data for missing static input pins using the last execution input.
@@ -286,8 +287,8 @@ def _enqueue_next_nodes(
if link.is_static and link.sink_name not in next_node_input
}
if static_link_names and (
- latest_execution := wait(
- get_latest_execution(next_node_id, graph_exec_id)
+ latest_execution := db_client.get_latest_execution(
+ next_node_id, graph_exec_id
)
):
for name in static_link_names:
@@ -314,7 +315,9 @@ def _enqueue_next_nodes(
# If the link is static, there could be some incomplete executions waiting for it.
# Load and complete their missing input data, and try to re-enqueue them.
- for iexec in wait(get_incomplete_executions(next_node_id, graph_exec_id)):
+ for iexec in db_client.get_incomplete_executions(
+ next_node_id, graph_exec_id
+ ):
idata = iexec.input_data
ineid = iexec.node_exec_id
@@ -399,12 +402,6 @@ def validate_exec(
return data, node_block.name
-def get_agent_server_client() -> "AgentServer":
- from backend.server.rest_api import AgentServer
-
- return get_service_client(AgentServer, Config().agent_server_port)
-
-
class Executor:
"""
This class contains event handlers for the process pool executor events.
@@ -433,12 +430,11 @@ class Executor:
@classmethod
def on_node_executor_start(cls):
configure_logging()
-
- cls.loop = asyncio.new_event_loop()
+ set_service_name("NodeExecutor")
+ redis.connect()
cls.pid = os.getpid()
-
- cls.loop.run_until_complete(db.connect())
- cls.agent_server_client = get_agent_server_client()
+ cls.db_client = get_db_client()
+ cls.creds_manager = IntegrationCredentialsManager()
# Set up shutdown handlers
cls.shutdown_lock = threading.Lock()
@@ -452,19 +448,23 @@ class Executor:
if not cls.shutdown_lock.acquire(blocking=False):
return # already shutting down
- logger.info(f"[on_node_executor_stop {cls.pid}] ⏳ Disconnecting DB...")
- cls.loop.run_until_complete(db.disconnect())
+ logger.info(f"[on_node_executor_stop {cls.pid}] ⏳ Releasing locks...")
+ cls.creds_manager.release_all_locks()
+ logger.info(f"[on_node_executor_stop {cls.pid}] ⏳ Disconnecting Redis...")
+ redis.disconnect()
logger.info(f"[on_node_executor_stop {cls.pid}] ✅ Finished cleanup")
@classmethod
def on_node_executor_sigterm(cls):
llprint(f"[on_node_executor_sigterm {cls.pid}] ⚠️ SIGTERM received")
if not cls.shutdown_lock.acquire(blocking=False):
- return # already shutting down, no need to self-terminate
+ return # already shutting down
- llprint(f"[on_node_executor_sigterm {cls.pid}] ⏳ Disconnecting DB...")
- cls.loop.run_until_complete(db.disconnect())
- llprint(f"[on_node_executor_sigterm {cls.pid}] ✅ Finished cleanup")
+ llprint(f"[on_node_executor_stop {cls.pid}] ⏳ Releasing locks...")
+ cls.creds_manager.release_all_locks()
+ llprint(f"[on_node_executor_stop {cls.pid}] ⏳ Disconnecting Redis...")
+ redis.disconnect()
+ llprint(f"[on_node_executor_stop {cls.pid}] ✅ Finished cleanup")
sys.exit(0)
@classmethod
@@ -473,7 +473,6 @@ class Executor:
cls,
q: ExecutionQueue[NodeExecution],
node_exec: NodeExecution,
- input_credentials: Credentials | None,
):
log_metadata = LogMetadata(
user_id=node_exec.user_id,
@@ -486,13 +485,13 @@ class Executor:
execution_stats = {}
timing_info, _ = cls._on_node_execution(
- q, node_exec, input_credentials, log_metadata, execution_stats
+ q, node_exec, log_metadata, execution_stats
)
execution_stats["walltime"] = timing_info.wall_time
execution_stats["cputime"] = timing_info.cpu_time
- cls.loop.run_until_complete(
- update_node_execution_stats(node_exec.node_exec_id, execution_stats)
+ cls.db_client.update_node_execution_stats(
+ node_exec.node_exec_id, execution_stats
)
@classmethod
@@ -501,14 +500,13 @@ class Executor:
cls,
q: ExecutionQueue[NodeExecution],
node_exec: NodeExecution,
- input_credentials: Credentials | None,
log_metadata: LogMetadata,
stats: dict[str, Any] | None = None,
):
try:
log_metadata.info(f"Start node execution {node_exec.node_exec_id}")
for execution in execute_node(
- cls.loop, cls.agent_server_client, node_exec, input_credentials, stats
+ cls.db_client, cls.creds_manager, node_exec, stats
):
q.add(execution)
log_metadata.info(f"Finished node execution {node_exec.node_exec_id}")
@@ -520,12 +518,11 @@ class Executor:
@classmethod
def on_graph_executor_start(cls):
configure_logging()
+ set_service_name("GraphExecutor")
- cls.pool_size = Config().num_node_workers
- cls.loop = asyncio.new_event_loop()
+ cls.db_client = get_db_client()
+ cls.pool_size = settings.config.num_node_workers
cls.pid = os.getpid()
-
- cls.loop.run_until_complete(db.connect())
cls._init_node_executor_pool()
logger.info(
f"Graph executor {cls.pid} started with {cls.pool_size} node workers"
@@ -537,8 +534,6 @@ class Executor:
@classmethod
def on_graph_executor_stop(cls):
prefix = f"[on_graph_executor_stop {cls.pid}]"
- logger.info(f"{prefix} ⏳ Disconnecting DB...")
- cls.loop.run_until_complete(db.disconnect())
logger.info(f"{prefix} ⏳ Terminating node executor pool...")
cls.executor.terminate()
logger.info(f"{prefix} ✅ Finished cleanup")
@@ -561,19 +556,16 @@ class Executor:
node_eid="*",
block_name="-",
)
- timing_info, node_count = cls._on_graph_execution(
+ timing_info, (node_count, error) = cls._on_graph_execution(
graph_exec, cancel, log_metadata
)
- cls.loop.run_until_complete(
- update_graph_execution_stats(
- graph_exec.graph_exec_id,
- {
- "walltime": timing_info.wall_time,
- "cputime": timing_info.cpu_time,
- "nodecount": node_count,
- },
- )
+ cls.db_client.update_graph_execution_stats(
+ graph_exec_id=graph_exec.graph_exec_id,
+ error=error,
+ wall_time=timing_info.wall_time,
+ cpu_time=timing_info.cpu_time,
+ node_count=node_count,
)
@classmethod
@@ -583,9 +575,15 @@ class Executor:
graph_exec: GraphExecution,
cancel: threading.Event,
log_metadata: LogMetadata,
- ) -> int:
+ ) -> tuple[int, Exception | None]:
+ """
+ Returns:
+ The number of node executions completed.
+ The error that occurred during the execution, if any.
+ """
log_metadata.info(f"Start graph execution {graph_exec.graph_exec_id}")
n_node_executions = 0
+ error = None
finished = False
def cancel_handler():
@@ -619,7 +617,8 @@ class Executor:
while not queue.empty():
if cancel.is_set():
- return n_node_executions
+ error = RuntimeError("Execution is cancelled")
+ return n_node_executions, error
exec_data = queue.get()
@@ -638,11 +637,7 @@ class Executor:
)
running_executions[exec_data.node_id] = cls.executor.apply_async(
cls.on_node_execution,
- (
- queue,
- exec_data,
- graph_exec.node_input_credentials.get(exec_data.node_id),
- ),
+ (queue, exec_data),
callback=make_exec_callback(exec_data),
)
@@ -653,7 +648,8 @@ class Executor:
)
for node_id, execution in list(running_executions.items()):
if cancel.is_set():
- return n_node_executions
+ error = RuntimeError("Execution is cancelled")
+ return n_node_executions, error
if not queue.empty():
break # yield to parent loop to execute new queue items
@@ -666,29 +662,37 @@ class Executor:
log_metadata.exception(
f"Failed graph execution {graph_exec.graph_exec_id}: {e}"
)
+ error = e
finally:
if not cancel.is_set():
finished = True
cancel.set()
cancel_thread.join()
- return n_node_executions
+ return n_node_executions, error
class ExecutionManager(AppService):
+
def __init__(self):
- super().__init__(port=Config().execution_manager_port)
- self.use_db = True
+ super().__init__()
+ self.use_redis = True
self.use_supabase = True
- self.pool_size = Config().num_graph_workers
+ self.pool_size = settings.config.num_graph_workers
self.queue = ExecutionQueue[GraphExecution]()
self.active_graph_runs: dict[str, tuple[Future, threading.Event]] = {}
+ @classmethod
+ def get_port(cls) -> int:
+ return settings.config.execution_manager_port
+
def run_service(self):
from autogpt_libs.supabase_integration_credentials_store import (
SupabaseIntegrationCredentialsStore,
)
- self.credentials_store = SupabaseIntegrationCredentialsStore(self.supabase)
+ self.credentials_store = SupabaseIntegrationCredentialsStore(
+ redis=redis.get_redis()
+ )
self.executor = ProcessPoolExecutor(
max_workers=self.pool_size,
initializer=Executor.on_graph_executor_start,
@@ -719,19 +723,19 @@ class ExecutionManager(AppService):
super().cleanup()
@property
- def agent_server_client(self) -> "AgentServer":
- return get_agent_server_client()
+ def db_client(self) -> "DatabaseManager":
+ return get_db_client()
@expose
def add_execution(
self, graph_id: str, data: BlockInput, user_id: str
) -> dict[str, Any]:
- graph: Graph | None = self.run_and_wait(get_graph(graph_id, user_id=user_id))
+ graph: Graph | None = self.db_client.get_graph(graph_id, user_id=user_id)
if not graph:
raise Exception(f"Graph #{graph_id} not found.")
graph.validate_graph(for_run=True)
- node_input_credentials = self._get_node_input_credentials(graph, user_id)
+ self._validate_node_input_credentials(graph, user_id)
nodes_input = []
for node in graph.starting_nodes:
@@ -754,13 +758,11 @@ class ExecutionManager(AppService):
else:
nodes_input.append((node.id, input_data))
- graph_exec_id, node_execs = self.run_and_wait(
- create_graph_execution(
- graph_id=graph_id,
- graph_version=graph.version,
- nodes_input=nodes_input,
- user_id=user_id,
- )
+ graph_exec_id, node_execs = self.db_client.create_graph_execution(
+ graph_id=graph_id,
+ graph_version=graph.version,
+ nodes_input=nodes_input,
+ user_id=user_id,
)
starting_node_execs = []
@@ -775,19 +777,16 @@ class ExecutionManager(AppService):
data=node_exec.input_data,
)
)
- exec_update = self.run_and_wait(
- update_execution_status(
- node_exec.node_exec_id, ExecutionStatus.QUEUED, node_exec.input_data
- )
+ exec_update = self.db_client.update_execution_status(
+ node_exec.node_exec_id, ExecutionStatus.QUEUED, node_exec.input_data
)
- self.agent_server_client.send_execution_update(exec_update.model_dump())
+ self.db_client.send_execution_update(exec_update.model_dump())
graph_exec = GraphExecution(
user_id=user_id,
graph_id=graph_id,
graph_exec_id=graph_exec_id,
start_node_execs=starting_node_execs,
- node_input_credentials=node_input_credentials,
)
self.queue.add(graph_exec)
@@ -816,30 +815,22 @@ class ExecutionManager(AppService):
future.result()
# Update the status of the unfinished node executions
- node_execs = self.run_and_wait(get_execution_results(graph_exec_id))
+ node_execs = self.db_client.get_execution_results(graph_exec_id)
for node_exec in node_execs:
if node_exec.status not in (
ExecutionStatus.COMPLETED,
ExecutionStatus.FAILED,
):
- self.run_and_wait(
- upsert_execution_output(
- node_exec.node_exec_id, "error", "TERMINATED"
- )
+ self.db_client.upsert_execution_output(
+ node_exec.node_exec_id, "error", "TERMINATED"
)
- exec_update = self.run_and_wait(
- update_execution_status(
- node_exec.node_exec_id, ExecutionStatus.FAILED
- )
+ exec_update = self.db_client.update_execution_status(
+ node_exec.node_exec_id, ExecutionStatus.FAILED
)
- self.agent_server_client.send_execution_update(exec_update.model_dump())
+ self.db_client.send_execution_update(exec_update.model_dump())
- def _get_node_input_credentials(
- self, graph: Graph, user_id: str
- ) -> dict[str, Credentials]:
- """Gets all credentials for all nodes of the graph"""
-
- node_credentials: dict[str, Credentials] = {}
+ def _validate_node_input_credentials(self, graph: Graph, user_id: str):
+ """Checks all credentials for all nodes of the graph"""
for node in graph.nodes:
block = get_block(node.block_id)
@@ -882,9 +873,26 @@ class ExecutionManager(AppService):
f"Invalid credentials #{credentials.id} for node #{node.id}: "
"type/provider mismatch"
)
- node_credentials[node.id] = credentials
- return node_credentials
+
+# ------- UTILITIES ------- #
+
+
+@thread_cached
+def get_db_client() -> "DatabaseManager":
+ from backend.executor import DatabaseManager
+
+ return get_service_client(DatabaseManager)
+
+
+@contextmanager
+def synchronized(key: str, timeout: int = 60):
+ lock: RedisLock = redis.get_redis().lock(f"lock:{key}", timeout=timeout)
+ try:
+ lock.acquire()
+ yield
+ finally:
+ lock.release()
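+
+
+ # Usage sketch (illustrative only): serialize a critical section across processes on
+ # a shared key, e.g.:
+ #     with synchronized(f"upsert_input-{node_id}-{graph_exec_id}"):
+ #         ...  # the Redis lock auto-expires after `timeout` seconds if not released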
def llprint(message: str):
diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py
index a1c7ca687f..5080e16031 100644
--- a/autogpt_platform/backend/backend/executor/scheduler.py
+++ b/autogpt_platform/backend/backend/executor/scheduler.py
@@ -4,9 +4,16 @@ from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
+from autogpt_libs.utils.cache import thread_cached
-from backend.data import schedule as model
from backend.data.block import BlockInput
+from backend.data.schedule import (
+ ExecutionSchedule,
+ add_schedule,
+ get_active_schedules,
+ get_schedules,
+ update_schedule,
+)
from backend.executor.manager import ExecutionManager
from backend.util.service import AppService, expose, get_service_client
from backend.util.settings import Config
@@ -19,16 +26,21 @@ def log(msg, **kwargs):
class ExecutionScheduler(AppService):
+
def __init__(self, refresh_interval=10):
- super().__init__(port=Config().execution_scheduler_port)
+ super().__init__()
self.use_db = True
self.last_check = datetime.min
self.refresh_interval = refresh_interval
- self.use_redis = False
+
+ @classmethod
+ def get_port(cls) -> int:
+ return Config().execution_scheduler_port
@property
- def execution_manager_client(self) -> ExecutionManager:
- return get_service_client(ExecutionManager, Config().execution_manager_port)
+ @thread_cached
+ def execution_client(self) -> ExecutionManager:
+ return get_service_client(ExecutionManager)
def run_service(self):
scheduler = BackgroundScheduler()
@@ -38,7 +50,7 @@ class ExecutionScheduler(AppService):
time.sleep(self.refresh_interval)
def __refresh_jobs_from_db(self, scheduler: BackgroundScheduler):
- schedules = self.run_and_wait(model.get_active_schedules(self.last_check))
+ schedules = self.run_and_wait(get_active_schedules(self.last_check))
for schedule in schedules:
if schedule.last_updated:
self.last_check = max(self.last_check, schedule.last_updated)
@@ -60,14 +72,13 @@ class ExecutionScheduler(AppService):
def __execute_graph(self, graph_id: str, input_data: dict, user_id: str):
try:
log(f"Executing recurring job for graph #{graph_id}")
- execution_manager = self.execution_manager_client
- execution_manager.add_execution(graph_id, input_data, user_id)
+ self.execution_client.add_execution(graph_id, input_data, user_id)
except Exception as e:
logger.exception(f"Error executing graph {graph_id}: {e}")
@expose
def update_schedule(self, schedule_id: str, is_enabled: bool, user_id: str) -> str:
- self.run_and_wait(model.update_schedule(schedule_id, is_enabled, user_id))
+ self.run_and_wait(update_schedule(schedule_id, is_enabled, user_id))
return schedule_id
@expose
@@ -79,17 +90,16 @@ class ExecutionScheduler(AppService):
input_data: BlockInput,
user_id: str,
) -> str:
- schedule = model.ExecutionSchedule(
+ schedule = ExecutionSchedule(
graph_id=graph_id,
user_id=user_id,
graph_version=graph_version,
schedule=cron,
input_data=input_data,
)
- return self.run_and_wait(model.add_schedule(schedule)).id
+ return self.run_and_wait(add_schedule(schedule)).id
@expose
def get_execution_schedules(self, graph_id: str, user_id: str) -> dict[str, str]:
- query = model.get_schedules(graph_id, user_id=user_id)
- schedules: list[model.ExecutionSchedule] = self.run_and_wait(query)
+ schedules = self.run_and_wait(get_schedules(graph_id, user_id=user_id))
return {v.id: v.schedule for v in schedules}
diff --git a/autogpt_platform/backend/backend/integrations/creds_manager.py b/autogpt_platform/backend/backend/integrations/creds_manager.py
new file mode 100644
index 0000000000..96f9d1a3c5
--- /dev/null
+++ b/autogpt_platform/backend/backend/integrations/creds_manager.py
@@ -0,0 +1,170 @@
+import logging
+from contextlib import contextmanager
+from datetime import datetime
+
+from autogpt_libs.supabase_integration_credentials_store import (
+ Credentials,
+ SupabaseIntegrationCredentialsStore,
+)
+from autogpt_libs.utils.synchronize import RedisKeyedMutex
+from redis.lock import Lock as RedisLock
+
+from backend.data import redis
+from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler
+from backend.util.settings import Settings
+
+logger = logging.getLogger(__name__)
+settings = Settings()
+
+
+class IntegrationCredentialsManager:
+ """
+ Handles the lifecycle of integration credentials.
+ - Automatically refreshes requested credentials if needed.
+ - Uses locking mechanisms to ensure system-wide consistency and
+ prevent invalidation of in-use tokens.
+
+ ### ⚠️ Gotcha
+ With `acquire(..)`, credentials can only be in use in one place at a time (e.g. one
+ block execution).
+
+ ### Locking mechanism
+ - Because *getting* credentials can result in a refresh (= *invalidation* +
+ *replacement*) of the stored credentials, *getting* is an operation that
+ potentially requires read/write access.
+ - Checking whether a token has to be refreshed is subject to an additional `refresh`
+ scoped lock to prevent unnecessary sequential refreshes when multiple executions
+ try to access the same credentials simultaneously.
+ - We MUST lock credentials while in use to prevent them from being invalidated while
+ they are in use, e.g. because they are being refreshed by a different part
+ of the system.
+ - The `!time_sensitive` lock in `acquire(..)` is part of a two-tier locking
+ mechanism in which *updating* gets priority over *getting* credentials.
+ This is to prevent a long queue of waiting *get* requests from blocking essential
+ credential refreshes or user-initiated updates.
+
+ It is possible to implement a reader/writer locking system where either multiple
+ readers or a single writer can have simultaneous access, but this would add a lot of
+ complexity to the mechanism. I don't expect the current ("simple") mechanism to
+ cause so much latency that it's worth implementing.
+ """
+
+ def __init__(self):
+ redis_conn = redis.get_redis()
+ self._locks = RedisKeyedMutex(redis_conn)
+ self.store = SupabaseIntegrationCredentialsStore(redis=redis_conn)
+
+ def create(self, user_id: str, credentials: Credentials) -> None:
+ return self.store.add_creds(user_id, credentials)
+
+ def exists(self, user_id: str, credentials_id: str) -> bool:
+ return self.store.get_creds_by_id(user_id, credentials_id) is not None
+
+ def get(
+ self, user_id: str, credentials_id: str, lock: bool = True
+ ) -> Credentials | None:
+ credentials = self.store.get_creds_by_id(user_id, credentials_id)
+ if not credentials:
+ return None
+
+ # Refresh OAuth credentials if needed
+ if credentials.type == "oauth2" and credentials.access_token_expires_at:
+ logger.debug(
+ f"Credentials #{credentials.id} expire at "
+ f"{datetime.fromtimestamp(credentials.access_token_expires_at)}; "
+ f"current time is {datetime.now()}"
+ )
+
+ with self._locked(user_id, credentials_id, "refresh"):
+ oauth_handler = _get_provider_oauth_handler(credentials.provider)
+ if oauth_handler.needs_refresh(credentials):
+ logger.debug(
+ f"Refreshing '{credentials.provider}' "
+ f"credentials #{credentials.id}"
+ )
+ _lock = None
+ if lock:
+ # Wait until the credentials are no longer in use anywhere
+ _lock = self._acquire_lock(user_id, credentials_id)
+
+ fresh_credentials = oauth_handler.refresh_tokens(credentials)
+ self.store.update_creds(user_id, fresh_credentials)
+ if _lock:
+ _lock.release()
+
+ credentials = fresh_credentials
+ else:
+ logger.debug(f"Credentials #{credentials.id} never expire")
+
+ return credentials
+
+ def acquire(
+ self, user_id: str, credentials_id: str
+ ) -> tuple[Credentials, RedisLock]:
+ """
+ ⚠️ WARNING: this locks credentials system-wide and blocks both acquiring
+ and updating them elsewhere until the lock is released.
+ See the class docstring for more info.
+ """
+ # Use a low-priority (!time_sensitive) locking queue on top of the general lock
+ # to allow priority access for refreshing/updating the tokens.
+ with self._locked(user_id, credentials_id, "!time_sensitive"):
+ lock = self._acquire_lock(user_id, credentials_id)
+ credentials = self.get(user_id, credentials_id, lock=False)
+ if not credentials:
+ raise ValueError(
+ f"Credentials #{credentials_id} for user #{user_id} not found"
+ )
+ return credentials, lock
+
+ def update(self, user_id: str, updated: Credentials) -> None:
+ with self._locked(user_id, updated.id):
+ self.store.update_creds(user_id, updated)
+
+ def delete(self, user_id: str, credentials_id: str) -> None:
+ with self._locked(user_id, credentials_id):
+ self.store.delete_creds_by_id(user_id, credentials_id)
+
+ # -- Locking utilities -- #
+
+ def _acquire_lock(self, user_id: str, credentials_id: str, *args: str) -> RedisLock:
+ key = (
+ self.store.db_manager,
+ f"user:{user_id}",
+ f"credentials:{credentials_id}",
+ *args,
+ )
+ return self._locks.acquire(key)
+
+ @contextmanager
+ def _locked(self, user_id: str, credentials_id: str, *args: str):
+ lock = self._acquire_lock(user_id, credentials_id, *args)
+ try:
+ yield
+ finally:
+ lock.release()
+
+ def release_all_locks(self):
+ """Call this on process termination to ensure all locks are released"""
+ self._locks.release_all_locks()
+ self.store.locks.release_all_locks()
+
+
+def _get_provider_oauth_handler(provider_name: str) -> BaseOAuthHandler:
+ if provider_name not in HANDLERS_BY_NAME:
+ raise KeyError(f"Unknown provider '{provider_name}'")
+
+ client_id = getattr(settings.secrets, f"{provider_name}_client_id")
+ client_secret = getattr(settings.secrets, f"{provider_name}_client_secret")
+ if not (client_id and client_secret):
+ raise Exception( # TODO: ConfigError
+ f"Integration with provider '{provider_name}' is not configured",
+ )
+
+ handler_class = HANDLERS_BY_NAME[provider_name]
+ frontend_base_url = settings.config.frontend_base_url
+ return handler_class(
+ client_id=client_id,
+ client_secret=client_secret,
+ redirect_uri=f"{frontend_base_url}/auth/integrations/oauth_callback",
+ )
diff --git a/autogpt_platform/backend/backend/integrations/oauth/__init__.py b/autogpt_platform/backend/backend/integrations/oauth/__init__.py
index 3ce18050a0..834293da92 100644
--- a/autogpt_platform/backend/backend/integrations/oauth/__init__.py
+++ b/autogpt_platform/backend/backend/integrations/oauth/__init__.py
@@ -3,6 +3,7 @@ from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
+# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict[str, type[BaseOAuthHandler]] = {
handler.PROVIDER_NAME: handler
for handler in [
@@ -11,5 +12,6 @@ HANDLERS_BY_NAME: dict[str, type[BaseOAuthHandler]] = {
NotionOAuthHandler,
]
}
+# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
diff --git a/autogpt_platform/backend/backend/integrations/oauth/base.py b/autogpt_platform/backend/backend/integrations/oauth/base.py
index 5fefe5b54d..a12200af65 100644
--- a/autogpt_platform/backend/backend/integrations/oauth/base.py
+++ b/autogpt_platform/backend/backend/integrations/oauth/base.py
@@ -1,31 +1,56 @@
+import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar
from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials
+logger = logging.getLogger(__name__)
+
class BaseOAuthHandler(ABC):
+ # --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[str]
+ DEFAULT_SCOPES: ClassVar[list[str]] = []
+ # --8<-- [end:BaseOAuthHandler1]
@abstractmethod
+ # --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
+ # --8<-- [end:BaseOAuthHandler2]
+
@abstractmethod
+ # --8<-- [start:BaseOAuthHandler3]
def get_login_url(self, scopes: list[str], state: str) -> str:
+ # --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
- def exchange_code_for_tokens(self, code: str) -> OAuth2Credentials:
+ # --8<-- [start:BaseOAuthHandler4]
+ def exchange_code_for_tokens(
+ self, code: str, scopes: list[str]
+ ) -> OAuth2Credentials:
+ # --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
+ # --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
+ # --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
+ @abstractmethod
+ # --8<-- [start:BaseOAuthHandler6]
+ def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
+ # --8<-- [end:BaseOAuthHandler6]
+ """Revokes the given token at provider,
+ returns False provider does not support it"""
+ ...
+
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
@@ -46,3 +71,11 @@ class BaseOAuthHandler(ABC):
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
+
+ def handle_default_scopes(self, scopes: list[str]) -> list[str]:
+ """Handles the default scopes for the provider"""
+ # If scopes are empty, use the default scopes for the provider
+ if not scopes:
+ logger.debug(f"Using default scopes for provider {self.PROVIDER_NAME}")
+ scopes = self.DEFAULT_SCOPES
+ return scopes
diff --git a/autogpt_platform/backend/backend/integrations/oauth/github.py b/autogpt_platform/backend/backend/integrations/oauth/github.py
index ea22128eab..ebd5ff9e32 100644
--- a/autogpt_platform/backend/backend/integrations/oauth/github.py
+++ b/autogpt_platform/backend/backend/integrations/oauth/github.py
@@ -8,6 +8,7 @@ from autogpt_libs.supabase_integration_credentials_store import OAuth2Credential
from .base import BaseOAuthHandler
+# --8<-- [start:GithubOAuthHandlerExample]
class GitHubOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at:
@@ -23,7 +24,6 @@ class GitHubOAuthHandler(BaseOAuthHandler):
""" # noqa
PROVIDER_NAME = "github"
- EMAIL_ENDPOINT = "https://api.github.com/user/emails"
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
@@ -31,6 +31,7 @@ class GitHubOAuthHandler(BaseOAuthHandler):
self.redirect_uri = redirect_uri
self.auth_base_url = "https://github.com/login/oauth/authorize"
self.token_url = "https://github.com/login/oauth/access_token"
+ self.revoke_url = "https://api.github.com/applications/{client_id}/token"
def get_login_url(self, scopes: list[str], state: str) -> str:
params = {
@@ -41,9 +42,29 @@ class GitHubOAuthHandler(BaseOAuthHandler):
}
return f"{self.auth_base_url}?{urlencode(params)}"
- def exchange_code_for_tokens(self, code: str) -> OAuth2Credentials:
+ def exchange_code_for_tokens(
+ self, code: str, scopes: list[str]
+ ) -> OAuth2Credentials:
return self._request_tokens({"code": code, "redirect_uri": self.redirect_uri})
+ def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
+ if not credentials.access_token:
+ raise ValueError("No access token to revoke")
+
+ headers = {
+ "Accept": "application/vnd.github+json",
+ "X-GitHub-Api-Version": "2022-11-28",
+ }
+
+ response = requests.delete(
+ url=self.revoke_url.format(client_id=self.client_id),
+ auth=(self.client_id, self.client_secret),
+ headers=headers,
+ json={"access_token": credentials.access_token.get_secret_value()},
+ )
+ response.raise_for_status()
+ return True
+
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if not credentials.refresh_token:
return credentials
@@ -117,3 +138,6 @@ class GitHubOAuthHandler(BaseOAuthHandler):
# Get the login (username)
return response.json().get("login")
+
+
+# --8<-- [end:GithubOAuthHandlerExample]
diff --git a/autogpt_platform/backend/backend/integrations/oauth/google.py b/autogpt_platform/backend/backend/integrations/oauth/google.py
index 4bb6741813..810892188d 100644
--- a/autogpt_platform/backend/backend/integrations/oauth/google.py
+++ b/autogpt_platform/backend/backend/integrations/oauth/google.py
@@ -1,3 +1,5 @@
+import logging
+
from autogpt_libs.supabase_integration_credentials_store import OAuth2Credentials
from google.auth.external_account_authorized_user import (
Credentials as ExternalAccountCredentials,
@@ -9,7 +11,10 @@ from pydantic import SecretStr
from .base import BaseOAuthHandler
+logger = logging.getLogger(__name__)
+
+# --8<-- [start:GoogleOAuthHandlerExample]
class GoogleOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.google.com/identity/protocols/oauth2/web-server
@@ -17,15 +22,24 @@ class GoogleOAuthHandler(BaseOAuthHandler):
PROVIDER_NAME = "google"
EMAIL_ENDPOINT = "https://www.googleapis.com/oauth2/v2/userinfo"
+ DEFAULT_SCOPES = [
+ "https://www.googleapis.com/auth/userinfo.email",
+ "https://www.googleapis.com/auth/userinfo.profile",
+ "openid",
+ ]
+ # --8<-- [end:GoogleOAuthHandlerExample]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.token_uri = "https://oauth2.googleapis.com/token"
+ self.revoke_uri = "https://oauth2.googleapis.com/revoke"
def get_login_url(self, scopes: list[str], state: str) -> str:
- flow = self._setup_oauth_flow(scopes)
+ all_scopes = list(set(scopes + self.DEFAULT_SCOPES))
+ logger.debug(f"Setting up OAuth flow with scopes: {all_scopes}")
+ flow = self._setup_oauth_flow(all_scopes)
flow.redirect_uri = self.redirect_uri
authorization_url, _ = flow.authorization_url(
access_type="offline",
@@ -35,29 +49,67 @@ class GoogleOAuthHandler(BaseOAuthHandler):
)
return authorization_url
- def exchange_code_for_tokens(self, code: str) -> OAuth2Credentials:
- flow = self._setup_oauth_flow(None)
+ def exchange_code_for_tokens(
+ self, code: str, scopes: list[str]
+ ) -> OAuth2Credentials:
+ logger.debug(f"Exchanging code for tokens with scopes: {scopes}")
+
+ # Use the scopes from the initial request
+ flow = self._setup_oauth_flow(scopes)
flow.redirect_uri = self.redirect_uri
- flow.fetch_token(code=code)
+
+ logger.debug("Fetching token from Google")
+
+ # Disable scope check in fetch_token
+ flow.oauth2session.scope = None
+ token = flow.fetch_token(code=code)
+ logger.debug("Token fetched successfully")
+
+ # Get the actual scopes granted by Google
+ granted_scopes: list[str] = token.get("scope", [])
+
+ logger.debug(f"Scopes granted by Google: {granted_scopes}")
google_creds = flow.credentials
- username = self._request_email(google_creds)
+ logger.debug(f"Received credentials: {google_creds}")
+
+ logger.debug("Requesting user email")
+ username = self._request_email(google_creds)
+ logger.debug(f"User email retrieved: {username}")
- # Google's OAuth library is poorly typed so we need some of these:
assert google_creds.token
assert google_creds.refresh_token
assert google_creds.expiry
- assert google_creds.scopes
- return OAuth2Credentials(
+ assert granted_scopes
+
+ # Create OAuth2Credentials with the granted scopes
+ credentials = OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=None,
username=username,
access_token=SecretStr(google_creds.token),
- refresh_token=SecretStr(google_creds.refresh_token),
- access_token_expires_at=int(google_creds.expiry.timestamp()),
+ refresh_token=(SecretStr(google_creds.refresh_token)),
+ access_token_expires_at=(
+ int(google_creds.expiry.timestamp()) if google_creds.expiry else None
+ ),
refresh_token_expires_at=None,
- scopes=google_creds.scopes,
+ scopes=granted_scopes,
)
+ logger.debug(
+ f"OAuth2Credentials object created successfully with scopes: {credentials.scopes}"
+ )
+
+ return credentials
+
+ def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
+ session = AuthorizedSession(credentials)
+ response = session.post(
+ self.revoke_uri,
+ params={"token": credentials.access_token.get_secret_value()},
+ headers={"content-type": "application/x-www-form-urlencoded"},
+ )
+ response.raise_for_status()
+ return True
def _request_email(
self, creds: Credentials | ExternalAccountCredentials
@@ -65,6 +117,9 @@ class GoogleOAuthHandler(BaseOAuthHandler):
session = AuthorizedSession(creds)
response = session.get(self.EMAIL_ENDPOINT)
if not response.ok:
+ logger.error(
+ f"Failed to get user email. Status code: {response.status_code}"
+ )
return None
return response.json()["email"]
@@ -99,7 +154,7 @@ class GoogleOAuthHandler(BaseOAuthHandler):
scopes=google_creds.scopes,
)
- def _setup_oauth_flow(self, scopes: list[str] | None) -> Flow:
+ def _setup_oauth_flow(self, scopes: list[str]) -> Flow:
return Flow.from_client_config(
{
"web": {
diff --git a/autogpt_platform/backend/backend/integrations/oauth/notion.py b/autogpt_platform/backend/backend/integrations/oauth/notion.py
index b00a907f1e..c485d3bec3 100644
--- a/autogpt_platform/backend/backend/integrations/oauth/notion.py
+++ b/autogpt_platform/backend/backend/integrations/oauth/notion.py
@@ -35,7 +35,9 @@ class NotionOAuthHandler(BaseOAuthHandler):
}
return f"{self.auth_base_url}?{urlencode(params)}"
- def exchange_code_for_tokens(self, code: str) -> OAuth2Credentials:
+ def exchange_code_for_tokens(
+ self, code: str, scopes: list[str]
+ ) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
@@ -75,6 +77,10 @@ class NotionOAuthHandler(BaseOAuthHandler):
},
)
+ def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
+ # Notion doesn't support token revocation
+ return False
+
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
diff --git a/autogpt_platform/backend/backend/rest.py b/autogpt_platform/backend/backend/rest.py
index 8e4ba138f6..f56deaa1d2 100644
--- a/autogpt_platform/backend/backend/rest.py
+++ b/autogpt_platform/backend/backend/rest.py
@@ -1,6 +1,6 @@
from backend.app import run_processes
from backend.executor import ExecutionScheduler
-from backend.server import AgentServer
+from backend.server.rest_api import AgentServer
def main():
diff --git a/autogpt_platform/backend/backend/server/__init__.py b/autogpt_platform/backend/backend/server/__init__.py
index f17f46d8e7..e69de29bb2 100644
--- a/autogpt_platform/backend/backend/server/__init__.py
+++ b/autogpt_platform/backend/backend/server/__init__.py
@@ -1,4 +0,0 @@
-from .rest_api import AgentServer
-from .ws_api import WebsocketServer
-
-__all__ = ["AgentServer", "WebsocketServer"]
diff --git a/autogpt_platform/backend/backend/server/routers/integrations.py b/autogpt_platform/backend/backend/server/integrations/router.py
similarity index 64%
rename from autogpt_platform/backend/backend/server/routers/integrations.py
rename to autogpt_platform/backend/backend/server/integrations/router.py
index 5f0fa411f0..1e3d01e0bf 100644
--- a/autogpt_platform/backend/backend/server/routers/integrations.py
+++ b/autogpt_platform/backend/backend/server/integrations/router.py
@@ -1,40 +1,26 @@
import logging
-from typing import Annotated
+from typing import Annotated, Literal
-from autogpt_libs.supabase_integration_credentials_store import (
- SupabaseIntegrationCredentialsStore,
-)
from autogpt_libs.supabase_integration_credentials_store.types import (
APIKeyCredentials,
Credentials,
CredentialsType,
OAuth2Credentials,
)
-from fastapi import (
- APIRouter,
- Body,
- Depends,
- HTTPException,
- Path,
- Query,
- Request,
- Response,
-)
-from pydantic import BaseModel, SecretStr
-from supabase import Client
+from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request
+from pydantic import BaseModel, Field, SecretStr
+from backend.integrations.creds_manager import IntegrationCredentialsManager
from backend.integrations.oauth import HANDLERS_BY_NAME, BaseOAuthHandler
from backend.util.settings import Settings
-from ..utils import get_supabase, get_user_id
+from ..utils import get_user_id
logger = logging.getLogger(__name__)
settings = Settings()
router = APIRouter()
-
-def get_store(supabase: Client = Depends(get_supabase)):
- return SupabaseIntegrationCredentialsStore(supabase)
+creds_manager = IntegrationCredentialsManager()
class LoginResponse(BaseModel):
@@ -43,21 +29,23 @@ class LoginResponse(BaseModel):
@router.get("/{provider}/login")
-async def login(
+def login(
provider: Annotated[str, Path(title="The provider to initiate an OAuth flow for")],
user_id: Annotated[str, Depends(get_user_id)],
request: Request,
- store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
scopes: Annotated[
str, Query(title="Comma-separated list of authorization scopes")
] = "",
) -> LoginResponse:
handler = _get_provider_oauth_handler(request, provider)
- # Generate and store a secure random state token
- state_token = await store.store_state_token(user_id, provider)
-
requested_scopes = scopes.split(",") if scopes else []
+
+ # Generate and store a secure random state token along with the scopes
+ state_token = creds_manager.store.store_state_token(
+ user_id, provider, requested_scopes
+ )
+
login_url = handler.get_login_url(requested_scopes, state_token)
return LoginResponse(login_url=login_url, state_token=state_token)
@@ -72,28 +60,51 @@ class CredentialsMetaResponse(BaseModel):
@router.post("/{provider}/callback")
-async def callback(
+def callback(
provider: Annotated[str, Path(title="The target provider for this OAuth exchange")],
code: Annotated[str, Body(title="Authorization code acquired by user login")],
state_token: Annotated[str, Body(title="Anti-CSRF nonce")],
- store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
user_id: Annotated[str, Depends(get_user_id)],
request: Request,
) -> CredentialsMetaResponse:
+ logger.debug(f"Received OAuth callback for provider: {provider}")
handler = _get_provider_oauth_handler(request, provider)
# Verify the state token
- if not await store.verify_state_token(user_id, state_token, provider):
+ if not creds_manager.store.verify_state_token(user_id, state_token, provider):
+ logger.warning(f"Invalid or expired state token for user {user_id}")
raise HTTPException(status_code=400, detail="Invalid or expired state token")
try:
- credentials = handler.exchange_code_for_tokens(code)
+ scopes = creds_manager.store.get_any_valid_scopes_from_state_token(
+ user_id, state_token, provider
+ )
+ logger.debug(f"Retrieved scopes from state token: {scopes}")
+
+ scopes = handler.handle_default_scopes(scopes)
+
+ credentials = handler.exchange_code_for_tokens(code, scopes)
+ logger.debug(f"Received credentials with final scopes: {credentials.scopes}")
+
+ # Check if the granted scopes are sufficient for the requested scopes
+ if not set(scopes).issubset(set(credentials.scopes)):
+ # For now, we'll just log the warning and continue
+ logger.warning(
+ f"Granted scopes {credentials.scopes} for {provider}do not include all requested scopes {scopes}"
+ )
+
except Exception as e:
- logger.warning(f"Code->Token exchange failed for provider {provider}: {e}")
- raise HTTPException(status_code=400, detail=str(e))
+ logger.error(f"Code->Token exchange failed for provider {provider}: {e}")
+ raise HTTPException(
+ status_code=400, detail=f"Failed to exchange code for tokens: {str(e)}"
+ )
# TODO: Allow specifying `title` to set on `credentials`
- store.add_creds(user_id, credentials)
+ creds_manager.create(user_id, credentials)
+
+ logger.debug(
+ f"Successfully processed OAuth callback for user {user_id} and provider {provider}"
+ )
return CredentialsMetaResponse(
id=credentials.id,
type=credentials.type,
@@ -104,12 +115,11 @@ async def callback(
@router.get("/{provider}/credentials")
-async def list_credentials(
+def list_credentials(
provider: Annotated[str, Path(title="The provider to list credentials for")],
user_id: Annotated[str, Depends(get_user_id)],
- store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
) -> list[CredentialsMetaResponse]:
- credentials = store.get_creds_by_provider(user_id, provider)
+ credentials = creds_manager.store.get_creds_by_provider(user_id, provider)
return [
CredentialsMetaResponse(
id=cred.id,
@@ -123,13 +133,12 @@ async def list_credentials(
@router.get("/{provider}/credentials/{cred_id}")
-async def get_credential(
+def get_credential(
provider: Annotated[str, Path(title="The provider to retrieve credentials for")],
cred_id: Annotated[str, Path(title="The ID of the credentials to retrieve")],
user_id: Annotated[str, Depends(get_user_id)],
- store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
) -> Credentials:
- credential = store.get_creds_by_id(user_id, cred_id)
+ credential = creds_manager.get(user_id, cred_id)
if not credential:
raise HTTPException(status_code=404, detail="Credentials not found")
if credential.provider != provider:
@@ -140,8 +149,7 @@ async def get_credential(
@router.post("/{provider}/credentials", status_code=201)
-async def create_api_key_credentials(
- store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
+def create_api_key_credentials(
user_id: Annotated[str, Depends(get_user_id)],
provider: Annotated[str, Path(title="The provider to create credentials for")],
api_key: Annotated[str, Body(title="The API key to store")],
@@ -158,7 +166,7 @@ async def create_api_key_credentials(
)
try:
- store.add_creds(user_id, new_credentials)
+ creds_manager.create(user_id, new_credentials)
except Exception as e:
raise HTTPException(
status_code=500, detail=f"Failed to store credentials: {str(e)}"
@@ -166,14 +174,23 @@ async def create_api_key_credentials(
return new_credentials
-@router.delete("/{provider}/credentials/{cred_id}", status_code=204)
-async def delete_credential(
+class CredentialsDeletionResponse(BaseModel):
+ deleted: Literal[True] = True
+ revoked: bool | None = Field(
+ description="Indicates whether the credentials were also revoked by their "
+ "provider. `None`/`null` if not applicable, e.g. when deleting "
+ "non-revocable credentials such as API keys."
+ )
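+
+ # Example response body (illustrative): deleting OAuth2 credentials whose provider
+ # supports revocation would yield {"deleted": true, "revoked": true}; for API-key
+ # credentials, `revoked` is null.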
+
+
+@router.delete("/{provider}/credentials/{cred_id}")
+def delete_credentials(
+ request: Request,
provider: Annotated[str, Path(title="The provider to delete credentials for")],
cred_id: Annotated[str, Path(title="The ID of the credentials to delete")],
user_id: Annotated[str, Depends(get_user_id)],
- store: Annotated[SupabaseIntegrationCredentialsStore, Depends(get_store)],
-):
- creds = store.get_creds_by_id(user_id, cred_id)
+) -> CredentialsDeletionResponse:
+ creds = creds_manager.store.get_creds_by_id(user_id, cred_id)
if not creds:
raise HTTPException(status_code=404, detail="Credentials not found")
if creds.provider != provider:
@@ -181,8 +198,14 @@ async def delete_credential(
status_code=404, detail="Credentials do not match the specified provider"
)
- store.delete_creds_by_id(user_id, cred_id)
- return Response(status_code=204)
+ creds_manager.delete(user_id, cred_id)
+
+ tokens_revoked = None
+ if isinstance(creds, OAuth2Credentials):
+ handler = _get_provider_oauth_handler(request, provider)
+ tokens_revoked = handler.revoke_tokens(creds)
+
+ return CredentialsDeletionResponse(revoked=tokens_revoked)
# -------- UTILITIES --------- #
diff --git a/autogpt_platform/backend/backend/server/integrations/utils.py b/autogpt_platform/backend/backend/server/integrations/utils.py
new file mode 100644
index 0000000000..0fa1052e5b
--- /dev/null
+++ b/autogpt_platform/backend/backend/server/integrations/utils.py
@@ -0,0 +1,11 @@
+from supabase import Client, create_client
+
+from backend.util.settings import Settings
+
+settings = Settings()
+
+
+def get_supabase() -> Client:
+ return create_client(
+ settings.secrets.supabase_url, settings.secrets.supabase_service_role_key
+ )
diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py
index 59dbd286dd..8c3ed3dcba 100644
--- a/autogpt_platform/backend/backend/server/rest_api.py
+++ b/autogpt_platform/backend/backend/server/rest_api.py
@@ -1,3 +1,4 @@
+import asyncio
import inspect
import logging
from collections import defaultdict
@@ -7,23 +8,22 @@ from typing import Annotated, Any, Dict
import uvicorn
from autogpt_libs.auth.middleware import auth_middleware
+from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
+from typing_extensions import TypedDict
from backend.data import block, db
from backend.data import execution as execution_db
from backend.data import graph as graph_db
-from backend.data import user as user_db
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.credit import get_block_costs, get_user_credit_model
-from backend.data.queue import AsyncEventQueue, AsyncRedisEventQueue
from backend.data.user import get_or_create_user
from backend.executor import ExecutionManager, ExecutionScheduler
from backend.server.model import CreateGraph, SetGraphActiveVersion
-from backend.util.lock import KeyedMutex
-from backend.util.service import AppService, expose, get_service_client
-from backend.util.settings import Config, Settings
+from backend.util.service import AppService, get_service_client
+from backend.util.settings import AppEnvironment, Config, Settings
from .utils import get_user_id
@@ -32,27 +32,26 @@ logger = logging.getLogger(__name__)
class AgentServer(AppService):
- mutex = KeyedMutex()
- use_redis = True
_test_dependency_overrides = {}
_user_credit_model = get_user_credit_model()
- def __init__(self, event_queue: AsyncEventQueue | None = None):
- super().__init__(port=Config().agent_server_port)
- self.event_queue = event_queue or AsyncRedisEventQueue()
+ def __init__(self):
+ super().__init__()
+ self.use_redis = True
+
+ @classmethod
+ def get_port(cls) -> int:
+ return Config().agent_server_port
@asynccontextmanager
async def lifespan(self, _: FastAPI):
await db.connect()
- self.run_and_wait(self.event_queue.connect())
await block.initialize_blocks()
- if await user_db.create_default_user(settings.config.enable_auth):
- await graph_db.import_packaged_templates()
yield
- await self.event_queue.close()
await db.disconnect()
def run_service(self):
+ docs_url = "/docs" if settings.config.app_env == AppEnvironment.LOCAL else None
app = FastAPI(
title="AutoGPT Agent Server",
description=(
@@ -62,6 +61,7 @@ class AgentServer(AppService):
summary="AutoGPT Agent Server",
version="0.1",
lifespan=self.lifespan,
+ docs_url=docs_url,
)
if self._test_dependency_overrides:
@@ -79,16 +79,24 @@ class AgentServer(AppService):
allow_headers=["*"], # Allows all headers
)
+ health_router = APIRouter()
+ health_router.add_api_route(
+ path="/health",
+ endpoint=self.health,
+ methods=["GET"],
+ tags=["health"],
+ )
+
# Define the API routes
api_router = APIRouter(prefix="/api")
api_router.dependencies.append(Depends(auth_middleware))
# Import & Attach sub-routers
+ import backend.server.integrations.router
import backend.server.routers.analytics
- import backend.server.routers.integrations
api_router.include_router(
- backend.server.routers.integrations.router,
+ backend.server.integrations.router.router,
prefix="/integrations",
tags=["integrations"],
dependencies=[Depends(auth_middleware)],
@@ -168,6 +176,12 @@ class AgentServer(AppService):
methods=["PUT"],
tags=["templates", "graphs"],
)
+ api_router.add_api_route(
+ path="/graphs/{graph_id}",
+ endpoint=self.delete_graph,
+ methods=["DELETE"],
+ tags=["graphs"],
+ )
api_router.add_api_route(
path="/graphs/{graph_id}/versions",
endpoint=self.get_graph_all_versions,
@@ -256,6 +270,7 @@ class AgentServer(AppService):
app.add_exception_handler(500, self.handle_internal_http_error)
app.include_router(api_router)
+ app.include_router(health_router)
uvicorn.run(
app,
@@ -294,12 +309,14 @@ class AgentServer(AppService):
return wrapper
@property
+ @thread_cached
def execution_manager_client(self) -> ExecutionManager:
- return get_service_client(ExecutionManager, Config().execution_manager_port)
+ return get_service_client(ExecutionManager)
@property
+ @thread_cached
def execution_scheduler_client(self) -> ExecutionScheduler:
- return get_service_client(ExecutionScheduler, Config().execution_scheduler_port)
+ return get_service_client(ExecutionScheduler)
@classmethod
def handle_internal_http_error(cls, request: Request, exc: Exception):
@@ -318,9 +335,9 @@ class AgentServer(AppService):
@classmethod
def get_graph_blocks(cls) -> list[dict[Any, Any]]:
- blocks = block.get_blocks()
+ blocks = [cls() for cls in block.get_blocks().values()]
costs = get_block_costs()
- return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks.values()]
+ return [{**b.to_dict(), "costs": costs.get(b.id, [])} for b in blocks]
@classmethod
def execute_graph_block(
@@ -346,8 +363,10 @@ class AgentServer(AppService):
)
@classmethod
- async def get_templates(cls) -> list[graph_db.GraphMeta]:
- return await graph_db.get_graphs_meta(filter_by="template")
+ async def get_templates(
+ cls, user_id: Annotated[str, Depends(get_user_id)]
+ ) -> list[graph_db.GraphMeta]:
+ return await graph_db.get_graphs_meta(filter_by="template", user_id=user_id)
@classmethod
async def get_graph(
@@ -355,8 +374,11 @@ class AgentServer(AppService):
graph_id: str,
user_id: Annotated[str, Depends(get_user_id)],
version: int | None = None,
+ hide_credentials: bool = False,
) -> graph_db.Graph:
- graph = await graph_db.get_graph(graph_id, version, user_id=user_id)
+ graph = await graph_db.get_graph(
+ graph_id, version, user_id=user_id, hide_credentials=hide_credentials
+ )
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
return graph
@@ -393,6 +415,17 @@ class AgentServer(AppService):
) -> graph_db.Graph:
return await cls.create_graph(create_graph, is_template=True, user_id=user_id)
+ class DeleteGraphResponse(TypedDict):
+ version_counts: int
+
+ @classmethod
+ async def delete_graph(
+ cls, graph_id: str, user_id: Annotated[str, Depends(get_user_id)]
+ ) -> DeleteGraphResponse:
+ return {
+ "version_counts": await graph_db.delete_graph(graph_id, user_id=user_id)
+ }
+
@classmethod
async def create_graph(
cls,
@@ -486,7 +519,7 @@ class AgentServer(AppService):
user_id=user_id,
)
- async def execute_graph(
+ def execute_graph(
self,
graph_id: str,
node_input: dict[Any, Any],
@@ -509,7 +542,9 @@ class AgentServer(AppService):
404, detail=f"Agent execution #{graph_exec_id} not found"
)
- self.execution_manager_client.cancel_execution(graph_exec_id)
+ await asyncio.to_thread(
+ lambda: self.execution_manager_client.cancel_execution(graph_exec_id)
+ )
# Retrieve & return canceled graph execution in its final state
return await execution_db.get_execution_results(graph_exec_id)
@@ -584,10 +619,16 @@ class AgentServer(AppService):
graph = await graph_db.get_graph(graph_id, user_id=user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
- execution_scheduler = self.execution_scheduler_client
+
return {
- "id": execution_scheduler.add_execution_schedule(
- graph_id, graph.version, cron, input_data, user_id=user_id
+ "id": await asyncio.to_thread(
+ lambda: self.execution_scheduler_client.add_execution_schedule(
+ graph_id=graph_id,
+ graph_version=graph.version,
+ cron=cron,
+ input_data=input_data,
+ user_id=user_id,
+ )
)
}
@@ -613,18 +654,8 @@ class AgentServer(AppService):
execution_scheduler = self.execution_scheduler_client
return execution_scheduler.get_execution_schedules(graph_id, user_id)
- @expose
- def send_execution_update(self, execution_result_dict: dict[Any, Any]):
- execution_result = execution_db.ExecutionResult(**execution_result_dict)
- self.run_and_wait(self.event_queue.put(execution_result))
-
- @expose
- def acquire_lock(self, key: Any):
- self.mutex.lock(key)
-
- @expose
- def release_lock(self, key: Any):
- self.mutex.unlock(key)
+ async def health(self):
+ return {"status": "healthy"}
@classmethod
def update_configuration(
diff --git a/autogpt_platform/backend/backend/server/utils.py b/autogpt_platform/backend/backend/server/utils.py
index 5e01bb0518..56f756cb45 100644
--- a/autogpt_platform/backend/backend/server/utils.py
+++ b/autogpt_platform/backend/backend/server/utils.py
@@ -1,6 +1,5 @@
from autogpt_libs.auth.middleware import auth_middleware
from fastapi import Depends, HTTPException
-from supabase import Client, create_client
from backend.data.user import DEFAULT_USER_ID
from backend.util.settings import Settings
@@ -17,9 +16,3 @@ def get_user_id(payload: dict = Depends(auth_middleware)) -> str:
if not user_id:
raise HTTPException(status_code=401, detail="User ID not found in token")
return user_id
-
-
-def get_supabase() -> Client:
- return create_client(
- settings.secrets.supabase_url, settings.secrets.supabase_service_role_key
- )
diff --git a/autogpt_platform/backend/backend/server/ws_api.py b/autogpt_platform/backend/backend/server/ws_api.py
index da941233d0..2800182c2c 100644
--- a/autogpt_platform/backend/backend/server/ws_api.py
+++ b/autogpt_platform/backend/backend/server/ws_api.py
@@ -1,23 +1,34 @@
import asyncio
import logging
+from contextlib import asynccontextmanager
import uvicorn
from autogpt_libs.auth import parse_jwt_token
from fastapi import Depends, FastAPI, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
-from backend.data.queue import AsyncRedisEventQueue
+from backend.data import redis
+from backend.data.queue import AsyncRedisExecutionEventBus
from backend.data.user import DEFAULT_USER_ID
from backend.server.conn_manager import ConnectionManager
from backend.server.model import ExecutionSubscription, Methods, WsMessage
from backend.util.service import AppProcess
-from backend.util.settings import Config, Settings
+from backend.util.settings import AppEnvironment, Config, Settings
logger = logging.getLogger(__name__)
settings = Settings()
-app = FastAPI()
-event_queue = AsyncRedisEventQueue()
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ manager = get_connection_manager()
+ fut = asyncio.create_task(event_broadcaster(manager))
+ fut.add_done_callback(lambda _: logger.info("Event broadcaster stopped"))
+ yield
+
+
+docs_url = "/docs" if settings.config.app_env == AppEnvironment.LOCAL else None
+app = FastAPI(lifespan=lifespan, docs_url=docs_url)
_connection_manager = None
logger.info(f"CORS allow origins: {settings.config.backend_cors_allow_origins}")
@@ -37,27 +48,21 @@ def get_connection_manager():
return _connection_manager
-@app.on_event("startup")
-async def startup_event():
- await event_queue.connect()
- manager = get_connection_manager()
- asyncio.create_task(event_broadcaster(manager))
-
-
-@app.on_event("shutdown")
-async def shutdown_event():
- await event_queue.close()
-
-
async def event_broadcaster(manager: ConnectionManager):
- while True:
- event = await event_queue.get()
- if event is not None:
+ try:
+ redis.connect()
+ event_queue = AsyncRedisExecutionEventBus()
+ async for event in event_queue.listen():
await manager.send_execution_result(event)
+ except Exception as e:
+ logger.exception(f"Event broadcaster error: {e}")
+ raise
+ finally:
+ redis.disconnect()
async def authenticate_websocket(websocket: WebSocket) -> str:
- if settings.config.enable_auth.lower() == "true":
+ if settings.config.enable_auth:
token = websocket.query_params.get("token")
if not token:
await websocket.close(code=4001, reason="Missing authentication token")
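The ws_api changes above replace the deprecated @app.on_event("startup"/"shutdown") hooks with a lifespan context manager that owns the broadcaster task. A standalone sketch of that FastAPI pattern, where the broadcaster body is a placeholder rather than the platform's Redis-backed loop:

import asyncio
from contextlib import asynccontextmanager
from fastapi import FastAPI

async def broadcaster() -> None:
    # Placeholder loop standing in for the Redis-backed event broadcaster.
    while True:
        await asyncio.sleep(1)

@asynccontextmanager
async def lifespan(app: FastAPI):
    task = asyncio.create_task(broadcaster())  # startup work
    try:
        yield                                  # application serves requests here
    finally:
        task.cancel()                          # shutdown work

app = FastAPI(lifespan=lifespan)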
diff --git a/autogpt_platform/backend/backend/usecases/block_autogen.py b/autogpt_platform/backend/backend/usecases/block_autogen.py
index 55fdece6b4..9f5ae43528 100644
--- a/autogpt_platform/backend/backend/usecases/block_autogen.py
+++ b/autogpt_platform/backend/backend/usecases/block_autogen.py
@@ -252,7 +252,7 @@ async def block_autogen_agent():
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
input_data = {"input": "Write me a block that writes a string into a file."}
- response = await server.agent_server.execute_graph(
+ response = server.agent_server.execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
diff --git a/autogpt_platform/backend/backend/usecases/reddit_marketing.py b/autogpt_platform/backend/backend/usecases/reddit_marketing.py
index 54413b1eef..1d297d5bc9 100644
--- a/autogpt_platform/backend/backend/usecases/reddit_marketing.py
+++ b/autogpt_platform/backend/backend/usecases/reddit_marketing.py
@@ -156,7 +156,7 @@ async def reddit_marketing_agent():
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
input_data = {"subreddit": "AutoGPT"}
- response = await server.agent_server.execute_graph(
+ response = server.agent_server.execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
diff --git a/autogpt_platform/backend/backend/usecases/sample.py b/autogpt_platform/backend/backend/usecases/sample.py
index 3b4087a5bd..37fa7407de 100644
--- a/autogpt_platform/backend/backend/usecases/sample.py
+++ b/autogpt_platform/backend/backend/usecases/sample.py
@@ -78,7 +78,7 @@ async def sample_agent():
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
- response = await server.agent_server.execute_graph(
+ response = server.agent_server.execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
diff --git a/autogpt_platform/backend/backend/util/lock.py b/autogpt_platform/backend/backend/util/lock.py
deleted file mode 100644
index 90773c4022..0000000000
--- a/autogpt_platform/backend/backend/util/lock.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from threading import Lock
-from typing import Any
-
-from expiringdict import ExpiringDict
-
-
-class KeyedMutex:
- """
- This class provides a mutex that can be locked and unlocked by a specific key.
- It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
- in case the key is not unlocked for a specified duration, to prevent memory leaks.
- """
-
- def __init__(self):
- self.locks: dict[Any, tuple[Lock, int]] = ExpiringDict(
- max_len=6000, max_age_seconds=60
- )
- self.locks_lock = Lock()
-
- def lock(self, key: Any):
- with self.locks_lock:
- lock, request_count = self.locks.get(key, (Lock(), 0))
- self.locks[key] = (lock, request_count + 1)
- lock.acquire()
-
- def unlock(self, key: Any):
- with self.locks_lock:
- lock, request_count = self.locks.pop(key)
- if request_count > 1:
- self.locks[key] = (lock, request_count - 1)
- lock.release()
diff --git a/autogpt_platform/backend/backend/util/logging.py b/autogpt_platform/backend/backend/util/logging.py
index 68e6ff78c9..63166a84c2 100644
--- a/autogpt_platform/backend/backend/util/logging.py
+++ b/autogpt_platform/backend/backend/util/logging.py
@@ -1,4 +1,6 @@
-import os
+from backend.util.settings import AppEnvironment, BehaveAs, Settings
+
+settings = Settings()
def configure_logging():
@@ -6,7 +8,10 @@ def configure_logging():
import autogpt_libs.logging.config
- if os.getenv("APP_ENV") != "cloud":
+ if (
+ settings.config.behave_as == BehaveAs.LOCAL
+ or settings.config.app_env == AppEnvironment.LOCAL
+ ):
autogpt_libs.logging.config.configure_logging(force_cloud_logging=False)
else:
autogpt_libs.logging.config.configure_logging(force_cloud_logging=True)
diff --git a/autogpt_platform/backend/backend/util/process.py b/autogpt_platform/backend/backend/util/process.py
index 3d63822786..218ac749d0 100644
--- a/autogpt_platform/backend/backend/util/process.py
+++ b/autogpt_platform/backend/backend/util/process.py
@@ -10,6 +10,16 @@ from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
+_SERVICE_NAME = "MainProcess"
+
+
+def get_service_name():
+ return _SERVICE_NAME
+
+
+def set_service_name(name: str):
+ global _SERVICE_NAME
+ _SERVICE_NAME = name
class AppProcess(ABC):
@@ -32,6 +42,11 @@ class AppProcess(ABC):
"""
pass
+ @classmethod
+ @property
+ def service_name(cls) -> str:
+ return cls.__name__
+
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
@@ -52,10 +67,12 @@ class AppProcess(ABC):
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
- logger.info(f"[{self.__class__.__name__}] Starting...")
+
+ set_service_name(self.service_name)
+ logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
- logger.warning(f"[{self.__class__.__name__}] Terminated: {e}; quitting...")
+ logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
def _self_terminate(self, signum: int, frame):
self.cleanup()
diff --git a/autogpt_platform/backend/backend/util/retry.py b/autogpt_platform/backend/backend/util/retry.py
index 60b63132c5..5a451726c0 100644
--- a/autogpt_platform/backend/backend/util/retry.py
+++ b/autogpt_platform/backend/backend/util/retry.py
@@ -1,7 +1,54 @@
+import logging
+import os
+from functools import wraps
+from uuid import uuid4
+
from tenacity import retry, stop_after_attempt, wait_exponential
-conn_retry = retry(
- stop=stop_after_attempt(30),
- wait=wait_exponential(multiplier=1, min=1, max=30),
- reraise=True,
-)
+from backend.util.process import get_service_name
+
+logger = logging.getLogger(__name__)
+
+
+def _log_prefix(resource_name: str, conn_id: str):
+ """
+ Returns a prefix string for logging purposes.
+ This needs to be called on the fly to get the current process ID & service name,
+ not the parent process ID & service name.
+ """
+ return f"[PID-{os.getpid()}|{get_service_name()}|{resource_name}-{conn_id}]"
+
+
+def conn_retry(resource_name: str, action_name: str, max_retry: int = 5):
+ conn_id = str(uuid4())
+
+ def on_retry(retry_state):
+ prefix = _log_prefix(resource_name, conn_id)
+ exception = retry_state.outcome.exception()
+ logger.info(f"{prefix} {action_name} failed: {exception}. Retrying now...")
+
+ def decorator(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ prefix = _log_prefix(resource_name, conn_id)
+ logger.info(f"{prefix} {action_name} started...")
+
+ # Define the retrying strategy
+ retrying_func = retry(
+ stop=stop_after_attempt(max_retry + 1),
+ wait=wait_exponential(multiplier=1, min=1, max=30),
+ before_sleep=on_retry,
+ reraise=True,
+ )(func)
+
+ try:
+ result = retrying_func(*args, **kwargs)
+ logger.info(f"{prefix} {action_name} completed successfully.")
+ return result
+ except Exception as e:
+ logger.error(f"{prefix} {action_name} failed after retries: {e}")
+ raise
+
+ return wrapper
+
+ return decorator
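Elsewhere in this diff the parameterized decorator is applied as @conn_retry("Pyro", "Starting Pyro Service"). A hedged usage sketch, assuming backend.util.retry is importable; the resource and action names here are illustrative:

from backend.util.retry import conn_retry

@conn_retry("Redis", "Acquiring connection", max_retry=3)
def connect_to_redis() -> None:
    # Raising here would trigger up to 3 retries with exponential backoff
    # and the per-attempt logging shown above.
    pass

connect_to_redis()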
diff --git a/autogpt_platform/backend/backend/util/service.py b/autogpt_platform/backend/backend/util/service.py
index 238121663f..5c01530ec1 100644
--- a/autogpt_platform/backend/backend/util/service.py
+++ b/autogpt_platform/backend/backend/util/service.py
@@ -1,16 +1,37 @@
import asyncio
+import builtins
import logging
import os
import threading
import time
-from abc import abstractmethod
-from typing import Any, Callable, Coroutine, Type, TypeVar, cast
+import typing
+from abc import ABC, abstractmethod
+from enum import Enum
+from types import NoneType, UnionType
+from typing import (
+ Annotated,
+ Any,
+ Callable,
+ Coroutine,
+ Dict,
+ FrozenSet,
+ Iterator,
+ List,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+ get_args,
+ get_origin,
+)
import Pyro5.api
+from pydantic import BaseModel
from Pyro5 import api as pyro
-from backend.data import db
-from backend.data.queue import AsyncEventQueue, AsyncRedisEventQueue
+from backend.data import db, redis
from backend.util.process import AppProcess
from backend.util.retry import conn_retry
from backend.util.settings import Config, Secrets
@@ -27,9 +48,8 @@ def expose(func: C) -> C:
Decorator to mark a method or class to be exposed for remote calls.
## ⚠️ Gotcha
- The types on the exposed function signature are respected **as long as they are
- fully picklable**. This is not the case for Pydantic models, so if you really need
- to pass a model, try dumping the model and passing the resulting dict instead.
+ Aside from "simple" types, only Pydantic models are passed unscathed *if annotated*.
+ Any other passed or returned class objects are converted to dictionaries by Pyro.
"""
def wrapper(*args, **kwargs):
@@ -38,29 +58,67 @@ def expose(func: C) -> C:
except Exception as e:
msg = f"Error in {func.__name__}: {e.__str__()}"
logger.exception(msg)
- raise Exception(msg, e)
+ raise
+
+ # Register custom serializers and deserializers for annotated Pydantic models
+ for name, annotation in func.__annotations__.items():
+ try:
+ pydantic_types = _pydantic_models_from_type_annotation(annotation)
+ except Exception as e:
+ raise TypeError(f"Error while exposing {func.__name__}: {e.__str__()}")
+
+ for model in pydantic_types:
+ logger.debug(
+ f"Registering Pyro (de)serializers for {func.__name__} annotation "
+ f"'{name}': {model.__qualname__}"
+ )
+ pyro.register_class_to_dict(model, _make_custom_serializer(model))
+ pyro.register_dict_to_class(
+ model.__qualname__, _make_custom_deserializer(model)
+ )
return pyro.expose(wrapper) # type: ignore
-class AppService(AppProcess):
+def _make_custom_serializer(model: Type[BaseModel]):
+ def custom_class_to_dict(obj):
+ data = {
+ "__class__": obj.__class__.__qualname__,
+ **obj.model_dump(),
+ }
+ logger.debug(f"Serializing {obj.__class__.__qualname__} with data: {data}")
+ return data
+
+ return custom_class_to_dict
+
+
+def _make_custom_deserializer(model: Type[BaseModel]):
+ def custom_dict_to_class(qualname, data: dict):
+ logger.debug(f"Deserializing {model.__qualname__} from data: {data}")
+ return model(**data)
+
+ return custom_dict_to_class
+
+
+class AppService(AppProcess, ABC):
shared_event_loop: asyncio.AbstractEventLoop
- event_queue: AsyncEventQueue = AsyncRedisEventQueue()
use_db: bool = False
use_redis: bool = False
use_supabase: bool = False
- def __init__(self, port):
- self.port = port
+ def __init__(self):
self.uri = None
@classmethod
- @property
- def service_name(cls) -> str:
- return cls.__name__
-
@abstractmethod
- def run_service(self):
+ def get_port(cls) -> int:
+ pass
+
+ @classmethod
+ def get_host(cls) -> str:
+ return os.environ.get(f"{cls.service_name.upper()}_HOST", Config().pyro_host)
+
+ def run_service(self) -> None:
while True:
time.sleep(10)
@@ -76,7 +134,7 @@ class AppService(AppProcess):
if self.use_db:
self.shared_event_loop.run_until_complete(db.connect())
if self.use_redis:
- self.shared_event_loop.run_until_complete(self.event_queue.connect())
+ redis.connect()
if self.use_supabase:
from supabase import create_client
@@ -104,12 +162,12 @@ class AppService(AppProcess):
self.run_and_wait(db.disconnect())
if self.use_redis:
logger.info(f"[{self.__class__.__name__}] ⏳ Disconnecting Redis...")
- self.run_and_wait(self.event_queue.close())
+ redis.disconnect()
- @conn_retry
+ @conn_retry("Pyro", "Starting Pyro Service")
def __start_pyro(self):
host = Config().pyro_host
- daemon = Pyro5.api.Daemon(host=host, port=self.port)
+ daemon = Pyro5.api.Daemon(host=host, port=self.get_port())
self.uri = daemon.register(self, objectId=self.service_name)
logger.info(f"[{self.service_name}] Connected to Pyro; URI = {self.uri}")
daemon.requestLoop()
@@ -118,17 +176,20 @@ class AppService(AppProcess):
self.shared_event_loop.run_forever()
+# --------- UTILITIES --------- #
+
+
AS = TypeVar("AS", bound=AppService)
-def get_service_client(service_type: Type[AS], port: int) -> AS:
+def get_service_client(service_type: Type[AS]) -> AS:
service_name = service_type.service_name
class DynamicClient:
- @conn_retry
+ @conn_retry("Pyro", f"Connecting to [{service_name}]")
def __init__(self):
host = os.environ.get(f"{service_name.upper()}_HOST", "localhost")
- uri = f"PYRO:{service_type.service_name}@{host}:{port}"
+ uri = f"PYRO:{service_type.service_name}@{host}:{service_type.get_port()}"
logger.debug(f"Connecting to service [{service_name}]. URI = {uri}")
self.proxy = Pyro5.api.Proxy(uri)
# Attempt to bind to ensure the connection is established
@@ -136,6 +197,51 @@ def get_service_client(service_type: Type[AS], port: int) -> AS:
logger.debug(f"Successfully connected to service [{service_name}]")
def __getattr__(self, name: str) -> Callable[..., Any]:
- return getattr(self.proxy, name)
+ res = getattr(self.proxy, name)
+ return res
return cast(AS, DynamicClient())
+
+
+builtin_types = [*vars(builtins).values(), NoneType, Enum]
+
+
+def _pydantic_models_from_type_annotation(annotation) -> Iterator[type[BaseModel]]:
+ # Peel Annotated parameters
+ if (origin := get_origin(annotation)) and origin is Annotated:
+ annotation = get_args(annotation)[0]
+
+ origin = get_origin(annotation)
+ args = get_args(annotation)
+
+ if origin in (
+ Union,
+ UnionType,
+ list,
+ List,
+ tuple,
+ Tuple,
+ set,
+ Set,
+ frozenset,
+ FrozenSet,
+ ):
+ for arg in args:
+ yield from _pydantic_models_from_type_annotation(arg)
+ elif origin in (dict, Dict):
+ key_type, value_type = args
+ yield from _pydantic_models_from_type_annotation(key_type)
+ yield from _pydantic_models_from_type_annotation(value_type)
+ else:
+ annotype = annotation if origin is None else origin
+
+ # Exclude generic types and aliases
+ if (
+ annotype is not None
+ and not hasattr(typing, getattr(annotype, "__name__", ""))
+ and isinstance(annotype, type)
+ ):
+ if issubclass(annotype, BaseModel):
+ yield annotype
+ elif annotype not in builtin_types and not issubclass(annotype, Enum):
+ raise TypeError(f"Unsupported type encountered: {annotype}")
diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py
index 83f82ee10f..6cca5e5469 100644
--- a/autogpt_platform/backend/backend/util/settings.py
+++ b/autogpt_platform/backend/backend/util/settings.py
@@ -1,5 +1,6 @@
import json
import os
+from enum import Enum
from typing import Any, Dict, Generic, List, Set, Tuple, Type, TypeVar
from pydantic import BaseModel, Field, PrivateAttr, field_validator
@@ -15,6 +16,17 @@ from backend.util.data import get_config_path, get_data_path, get_secrets_path
T = TypeVar("T", bound=BaseSettings)
+class AppEnvironment(str, Enum):
+ LOCAL = "local"
+ DEVELOPMENT = "dev"
+ PRODUCTION = "prod"
+
+
+class BehaveAs(str, Enum):
+ LOCAL = "local"
+ CLOUD = "cloud"
+
+
class UpdateTrackingModel(BaseModel, Generic[T]):
_updated_fields: Set[str] = PrivateAttr(default_factory=set)
@@ -57,8 +69,8 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
default="localhost",
description="The default hostname of the Pyro server.",
)
- enable_auth: str = Field(
- default="false",
+ enable_auth: bool = Field(
+ default=True,
description="If authentication is enabled or not",
)
enable_credit: str = Field(
@@ -105,6 +117,11 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
description="The port for agent server daemon to run on",
)
+ database_api_port: int = Field(
+ default=8005,
+ description="The port for database server API to run on",
+ )
+
agent_api_host: str = Field(
default="0.0.0.0",
description="The host for agent server API to run on",
@@ -116,11 +133,26 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
)
frontend_base_url: str = Field(
- default="",
+ default="http://localhost:3000",
description="Can be used to explicitly set the base URL for the frontend. "
"This value is then used to generate redirect URLs for OAuth flows.",
)
+ app_env: AppEnvironment = Field(
+ default=AppEnvironment.LOCAL,
+ description="The name of the app environment: local or dev or prod",
+ )
+
+ behave_as: BehaveAs = Field(
+ default=BehaveAs.LOCAL,
+ description="What environment to behave as: local or cloud",
+ )
+
+ execution_event_bus_name: str = Field(
+ default="execution_event",
+ description="Name of the event bus",
+ )
+
backend_cors_allow_origins: List[str] = Field(default_factory=list)
@field_validator("backend_cors_allow_origins")
@@ -177,10 +209,12 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
)
# OAuth server credentials for integrations
+ # --8<-- [start:OAuthServerCredentialsExample]
github_client_id: str = Field(default="", description="GitHub OAuth client ID")
github_client_secret: str = Field(
default="", description="GitHub OAuth client secret"
)
+ # --8<-- [end:OAuthServerCredentialsExample]
google_client_id: str = Field(default="", description="Google OAuth client ID")
google_client_secret: str = Field(
default="", description="Google OAuth client secret"
@@ -206,7 +240,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
medium_api_key: str = Field(default="", description="Medium API key")
medium_author_id: str = Field(default="", description="Medium author ID")
did_api_key: str = Field(default="", description="D-ID API Key")
-
+ revid_api_key: str = Field(default="", description="revid.ai API key")
discord_bot_token: str = Field(default="", description="Discord bot token")
smtp_server: str = Field(default="", description="SMTP server IP")
@@ -216,6 +250,12 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
sentry_dsn: str = Field(default="", description="Sentry DSN")
+ google_maps_api_key: str = Field(default="", description="Google Maps API Key")
+
+ replicate_api_key: str = Field(default="", description="Replicate API Key")
+ unreal_speech_api_key: str = Field(default="", description="Unreal Speech API Key")
+ ideogram_api_key: str = Field(default="", description="Ideogram API Key")
+
# Add more secret fields as needed
model_config = SettingsConfigDict(
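The new AppEnvironment and BehaveAs fields are str-backed enums read through pydantic-settings, so values such as APP_ENV=prod coerce straight to enum members. A simplified, illustrative sketch of that coercion, assuming pydantic-settings is installed; this is not the platform's Config class:

import os
from enum import Enum
from pydantic_settings import BaseSettings

class AppEnvironment(str, Enum):
    LOCAL = "local"
    DEVELOPMENT = "dev"
    PRODUCTION = "prod"

class DemoConfig(BaseSettings):
    app_env: AppEnvironment = AppEnvironment.LOCAL

os.environ["APP_ENV"] = "prod"  # field names match env vars case-insensitively by default
print(DemoConfig().app_env)     # AppEnvironment.PRODUCTION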
diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py
index b95e035c9b..d1e2d83f7f 100644
--- a/autogpt_platform/backend/backend/util/test.py
+++ b/autogpt_platform/backend/backend/util/test.py
@@ -1,57 +1,21 @@
-import asyncio
import time
from backend.data import db
from backend.data.block import Block, initialize_blocks
-from backend.data.execution import ExecutionResult, ExecutionStatus
+from backend.data.execution import ExecutionStatus
from backend.data.model import CREDENTIALS_FIELD_NAME
-from backend.data.queue import AsyncEventQueue
from backend.data.user import create_default_user
-from backend.executor import ExecutionManager, ExecutionScheduler
-from backend.server import AgentServer
-from backend.server.rest_api import get_user_id
+from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
+from backend.server.rest_api import AgentServer, get_user_id
log = print
-class InMemoryAsyncEventQueue(AsyncEventQueue):
- def __init__(self):
- self.queue = asyncio.Queue()
- self.connected = False
- self.closed = False
-
- async def connect(self):
- if not self.connected:
- self.connected = True
- return
-
- async def close(self):
- self.closed = True
- self.connected = False
- return
-
- async def put(self, execution_result: ExecutionResult):
- if not self.connected:
- raise RuntimeError("Queue is not connected")
- await self.queue.put(execution_result)
-
- async def get(self):
- if self.closed:
- return None
- if not self.connected:
- raise RuntimeError("Queue is not connected")
- try:
- item = await asyncio.wait_for(self.queue.get(), timeout=0.1)
- return item
- except asyncio.TimeoutError:
- return None
-
-
class SpinTestServer:
def __init__(self):
+ self.db_api = DatabaseManager()
self.exec_manager = ExecutionManager()
- self.in_memory_queue = InMemoryAsyncEventQueue()
- self.agent_server = AgentServer(event_queue=self.in_memory_queue)
+ self.agent_server = AgentServer()
self.scheduler = ExecutionScheduler()
@staticmethod
@@ -60,13 +24,14 @@ class SpinTestServer:
async def __aenter__(self):
self.setup_dependency_overrides()
+ self.db_api.__enter__()
self.agent_server.__enter__()
self.exec_manager.__enter__()
self.scheduler.__enter__()
await db.connect()
await initialize_blocks()
- await create_default_user("false")
+ await create_default_user()
return self
@@ -76,6 +41,7 @@ class SpinTestServer:
self.scheduler.__exit__(exc_type, exc_val, exc_tb)
self.exec_manager.__exit__(exc_type, exc_val, exc_tb)
self.agent_server.__exit__(exc_type, exc_val, exc_tb)
+ self.db_api.__exit__(exc_type, exc_val, exc_tb)
def setup_dependency_overrides(self):
# Override get_user_id for testing
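The simplified SpinTestServer above is an async context manager that boots the DatabaseManager, executor, scheduler, and REST API together; the usecase scripts in this diff drive it roughly as sketched below. The graph and user IDs are placeholders, and a locally reachable database is assumed:

import asyncio
from backend.util.test import SpinTestServer

async def main() -> None:
    async with SpinTestServer() as server:
        # execute_graph is now a plain synchronous call (see the usecase hunks above).
        result = server.agent_server.execute_graph(
            "some-graph-id", {"input_1": "Hello", "input_2": "World"}, "some-user-id"
        )
        print(result)

asyncio.run(main())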
diff --git a/autogpt_platform/backend/backend/util/type.py b/autogpt_platform/backend/backend/util/type.py
index 379526f87f..9c267aba2a 100644
--- a/autogpt_platform/backend/backend/util/type.py
+++ b/autogpt_platform/backend/backend/util/type.py
@@ -1,5 +1,5 @@
import json
-from typing import Any, Type, TypeVar, get_origin
+from typing import Any, Type, TypeVar, get_args, get_origin
class ConversionError(Exception):
@@ -103,26 +103,75 @@ def __convert_bool(value: Any) -> bool:
def convert(value: Any, target_type: Type):
- target_type = get_origin(target_type) or target_type
- if target_type not in [list, dict, tuple, str, set, int, float, bool]:
+ origin = get_origin(target_type)
+ args = get_args(target_type)
+ if origin is None:
+ origin = target_type
+ if origin not in [list, dict, tuple, str, set, int, float, bool]:
return value
- if isinstance(value, target_type):
- return value
- if target_type is list:
- return __convert_list(value)
- elif target_type is dict:
- return __convert_dict(value)
- elif target_type is tuple:
- return __convert_tuple(value)
- elif target_type is str:
- return __convert_str(value)
- elif target_type is set:
- return __convert_set(value)
- elif target_type is int:
- return __convert_num(value, int)
- elif target_type is float:
- return __convert_num(value, float)
- elif target_type is bool:
- return __convert_bool(value)
+
+ # Handle the case when value is already of the target type
+ if isinstance(value, origin):
+ if not args:
+ return value
+ else:
+ # Need to convert elements
+ if origin is list:
+ return [convert(v, args[0]) for v in value]
+ elif origin is tuple:
+ # Tuples can have multiple types
+ if len(args) == 1:
+ return tuple(convert(v, args[0]) for v in value)
+ else:
+ return tuple(convert(v, t) for v, t in zip(value, args))
+ elif origin is dict:
+ key_type, val_type = args
+ return {
+ convert(k, key_type): convert(v, val_type) for k, v in value.items()
+ }
+ elif origin is set:
+ return {convert(v, args[0]) for v in value}
+ else:
+ return value
else:
- return value
+ # Need to convert value to the origin type
+ if origin is list:
+ value = __convert_list(value)
+ if args:
+ return [convert(v, args[0]) for v in value]
+ else:
+ return value
+ elif origin is dict:
+ value = __convert_dict(value)
+ if args:
+ key_type, val_type = args
+ return {
+ convert(k, key_type): convert(v, val_type) for k, v in value.items()
+ }
+ else:
+ return value
+ elif origin is tuple:
+ value = __convert_tuple(value)
+ if args:
+ if len(args) == 1:
+ return tuple(convert(v, args[0]) for v in value)
+ else:
+ return tuple(convert(v, t) for v, t in zip(value, args))
+ else:
+ return value
+ elif origin is str:
+ return __convert_str(value)
+ elif origin is set:
+ value = __convert_set(value)
+ if args:
+ return {convert(v, args[0]) for v in value}
+ else:
+ return value
+ elif origin is int:
+ return __convert_num(value, int)
+ elif origin is float:
+ return __convert_num(value, float)
+ elif origin is bool:
+ return __convert_bool(value)
+ else:
+ return value
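With the recursion into generic parameters above, convert() now coerces both the container and its elements. A few illustrative calls, assuming backend.util.type is importable and the private scalar helpers coerce values as their names suggest:

from backend.util.type import convert

print(convert(["1", "2", "3"], list[int]))   # -> [1, 2, 3]; each element coerced to int
print(convert({"a": "1"}, dict[str, int]))   # -> {"a": 1}; values coerced per dict[str, int]
print(convert(("1", "2"), tuple[int, str]))  # -> (1, "2"); per-position coercion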
diff --git a/autogpt_platform/backend/graph_templates/Discord Bot Chat To LLM_v5.json b/autogpt_platform/backend/graph_templates/Discord Bot Chat To LLM_v5.json
index b6ea9e6286..34cdfa7ae0 100644
--- a/autogpt_platform/backend/graph_templates/Discord Bot Chat To LLM_v5.json
+++ b/autogpt_platform/backend/graph_templates/Discord Bot Chat To LLM_v5.json
@@ -8,7 +8,7 @@
"nodes": [
{
"id": "b8138bca-7892-42c2-9594-a845d3483413",
- "block_id": "d3f4g5h6-1i2j-3k4l-5m6n-7o8p9q0r1s2t",
+ "block_id": "df06086a-d5ac-4abb-9996-2ad0acb2eff7",
"input_default": {},
"metadata": {
"position": {
@@ -59,7 +59,7 @@
},
{
"id": "dda2d061-2ef9-4dc5-9433-918c8395a4ac",
- "block_id": "h1i2j3k4-5l6m-7n8o-9p0q-r1s2t3u4v5w6",
+ "block_id": "d0822ab5-9f8a-44a3-8971-531dd0178b6b",
"input_default": {},
"metadata": {
"position": {
diff --git a/autogpt_platform/backend/graph_templates/Discord Chatbot with History_v145.json b/autogpt_platform/backend/graph_templates/Discord Chatbot with History_v145.json
index da36ec00de..0da2a83684 100644
--- a/autogpt_platform/backend/graph_templates/Discord Chatbot with History_v145.json
+++ b/autogpt_platform/backend/graph_templates/Discord Chatbot with History_v145.json
@@ -110,7 +110,7 @@
},
{
"id": "b45cfa51-5ead-4621-9f1c-f847dfea3e4c",
- "block_id": "d3f4g5h6-1i2j-3k4l-5m6n-7o8p9q0r1s2t",
+ "block_id": "df06086a-d5ac-4abb-9996-2ad0acb2eff7",
"input_default": {},
"metadata": {
"position": {
@@ -146,7 +146,7 @@
},
{
"id": "8eedcf71-1146-4f54-b522-bf9b6e2d26b2",
- "block_id": "h1i2j3k4-5l6m-7n8o-9p0q-r1s2t3u4v5w6",
+ "block_id": "d0822ab5-9f8a-44a3-8971-531dd0178b6b",
"input_default": {},
"metadata": {
"position": {
@@ -197,7 +197,7 @@
},
{
"id": "a568daee-45d2-4429-bf33-cbe9e1261f7b",
- "block_id": "c3d4e5f6-g7h8-i9j0-k1l2-m3n4o5p6q7r8",
+ "block_id": "32a87eab-381e-4dd4-bdb8-4c47151be35a",
"input_default": {
"model": "llama-3.1-70b-versatile",
"max_tokens": 2000
diff --git a/autogpt_platform/backend/graph_templates/Discord Search Bot_v17.json b/autogpt_platform/backend/graph_templates/Discord Search Bot_v17.json
index 7de268a970..366fcf7e62 100644
--- a/autogpt_platform/backend/graph_templates/Discord Search Bot_v17.json
+++ b/autogpt_platform/backend/graph_templates/Discord Search Bot_v17.json
@@ -8,7 +8,7 @@
"nodes": [
{
"id": "60ba4aac-1751-4be7-8745-1bd32191d4a2",
- "block_id": "d3f4g5h6-1i2j-3k4l-5m6n-7o8p9q0r1s2t",
+ "block_id": "df06086a-d5ac-4abb-9996-2ad0acb2eff7",
"input_default": {},
"metadata": {
"position": {
@@ -45,7 +45,7 @@
},
{
"id": "5658c4f7-8e67-4d30-93f2-157bdbd3ef87",
- "block_id": "b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7",
+ "block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
"input_default": {},
"metadata": {
"position": {
@@ -118,7 +118,7 @@
},
{
"id": "f3d62f22-d193-4f04-85d2-164200fca4c0",
- "block_id": "h1i2j3k4-5l6m-7n8o-9p0q-r1s2t3u4v5w6",
+ "block_id": "d0822ab5-9f8a-44a3-8971-531dd0178b6b",
"input_default": {},
"metadata": {
"position": {
diff --git a/autogpt_platform/backend/graph_templates/Medium Blogger_v28.json b/autogpt_platform/backend/graph_templates/Medium Blogger_v28.json
index 69040bcb1e..2335f1684c 100644
--- a/autogpt_platform/backend/graph_templates/Medium Blogger_v28.json
+++ b/autogpt_platform/backend/graph_templates/Medium Blogger_v28.json
@@ -8,7 +8,7 @@
"nodes": [
{
"id": "382efac9-3def-4baf-b16a-d6d2512a5c8b",
- "block_id": "b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7",
+ "block_id": "87840993-2053-44b7-8da4-187ad4ee518c",
"input_default": {
"query": "19th July 2024 Microsoft Blackout"
},
@@ -44,7 +44,7 @@
},
{
"id": "0cd8f670-8956-4942-ba28-aee732ec783f",
- "block_id": "b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6",
+ "block_id": "0e50422c-6dee-4145-83d6-3a5a392f65de",
"input_default": {
"key": "TITLE"
},
@@ -57,7 +57,7 @@
},
{
"id": "4a15b6b9-036d-43d3-915a-7e931fbc6522",
- "block_id": "b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6",
+ "block_id": "0e50422c-6dee-4145-83d6-3a5a392f65de",
"input_default": {
"key": "CONTENT"
},
diff --git a/autogpt_platform/backend/migrations/20240930151406_reassign_block_ids/migration.sql b/autogpt_platform/backend/migrations/20240930151406_reassign_block_ids/migration.sql
new file mode 100644
index 0000000000..c6f7ae6f33
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20240930151406_reassign_block_ids/migration.sql
@@ -0,0 +1,18 @@
+-- Update AgentBlock IDs: this should cascade to the AgentNode and UserBlockCredit tables
+UPDATE "AgentBlock"
+SET "id" = CASE
+ WHEN "id" = 'a1b2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6' THEN '436c3984-57fd-4b85-8e9a-459b356883bd'
+ WHEN "id" = 'b2g2c3d4-5e6f-7g8h-9i0j-k1l2m3n4o5p6' THEN '0e50422c-6dee-4145-83d6-3a5a392f65de'
+ WHEN "id" = 'c3d4e5f6-7g8h-9i0j-1k2l-m3n4o5p6q7r8' THEN 'a0a69be1-4528-491c-a85a-a4ab6873e3f0'
+ WHEN "id" = 'c3d4e5f6-g7h8-i9j0-k1l2-m3n4o5p6q7r8' THEN '32a87eab-381e-4dd4-bdb8-4c47151be35a'
+ WHEN "id" = 'b2c3d4e5-6f7g-8h9i-0j1k-l2m3n4o5p6q7' THEN '87840993-2053-44b7-8da4-187ad4ee518c'
+ WHEN "id" = 'h1i2j3k4-5l6m-7n8o-9p0q-r1s2t3u4v5w6' THEN 'd0822ab5-9f8a-44a3-8971-531dd0178b6b'
+ WHEN "id" = 'd3f4g5h6-1i2j-3k4l-5m6n-7o8p9q0r1s2t' THEN 'df06086a-d5ac-4abb-9996-2ad0acb2eff7'
+ WHEN "id" = 'h5e7f8g9-1b2c-3d4e-5f6g-7h8i9j0k1l2m' THEN 'f5b0f5d0-1862-4d61-94be-3ad0fa772760'
+ WHEN "id" = 'a1234567-89ab-cdef-0123-456789abcdef' THEN '4335878a-394e-4e67-adf2-919877ff49ae'
+ WHEN "id" = 'f8e7d6c5-b4a3-2c1d-0e9f-8g7h6i5j4k3l' THEN 'f66a3543-28d3-4ab5-8945-9b336371e2ce'
+ WHEN "id" = 'b29c1b50-5d0e-4d9f-8f9d-1b0e6fcbf0h2' THEN '716a67b3-6760-42e7-86dc-18645c6e00fc'
+ WHEN "id" = '31d1064e-7446-4693-o7d4-65e5ca9110d1' THEN 'cc10ff7b-7753-4ff2-9af6-9399b1a7eddc'
+ WHEN "id" = 'c6731acb-4105-4zp1-bc9b-03d0036h370g' THEN '5ebe6768-8e5d-41e3-9134-1c7bd89a8d52'
+ ELSE "id"
+END;
diff --git a/autogpt_platform/backend/migrations/20241007090536_add_on_delete_platform/migration.sql b/autogpt_platform/backend/migrations/20241007090536_add_on_delete_platform/migration.sql
new file mode 100644
index 0000000000..821450272e
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20241007090536_add_on_delete_platform/migration.sql
@@ -0,0 +1,89 @@
+-- DropForeignKey
+ALTER TABLE "AgentGraph" DROP CONSTRAINT "AgentGraph_userId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentGraphExecution" DROP CONSTRAINT "AgentGraphExecution_agentGraphId_agentGraphVersion_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentGraphExecution" DROP CONSTRAINT "AgentGraphExecution_userId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentGraphExecutionSchedule" DROP CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNode" DROP CONSTRAINT "AgentNode_agentGraphId_agentGraphVersion_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNodeExecution" DROP CONSTRAINT "AgentNodeExecution_agentGraphExecutionId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNodeExecution" DROP CONSTRAINT "AgentNodeExecution_agentNodeId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNodeExecutionInputOutput" DROP CONSTRAINT "AgentNodeExecutionInputOutput_referencedByInputExecId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNodeExecutionInputOutput" DROP CONSTRAINT "AgentNodeExecutionInputOutput_referencedByOutputExecId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNodeLink" DROP CONSTRAINT "AgentNodeLink_agentNodeSinkId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AgentNodeLink" DROP CONSTRAINT "AgentNodeLink_agentNodeSourceId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AnalyticsDetails" DROP CONSTRAINT "AnalyticsDetails_userId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "AnalyticsMetrics" DROP CONSTRAINT "AnalyticsMetrics_userId_fkey";
+
+-- DropForeignKey
+ALTER TABLE "UserBlockCredit" DROP CONSTRAINT "UserBlockCredit_userId_fkey";
+
+-- AddForeignKey
+ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNode" ADD CONSTRAINT "AgentNode_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNodeLink" ADD CONSTRAINT "AgentNodeLink_agentNodeSourceId_fkey" FOREIGN KEY ("agentNodeSourceId") REFERENCES "AgentNode"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNodeLink" ADD CONSTRAINT "AgentNodeLink_agentNodeSinkId_fkey" FOREIGN KEY ("agentNodeSinkId") REFERENCES "AgentNode"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentGraphExecution" ADD CONSTRAINT "AgentGraphExecution_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNodeExecution" ADD CONSTRAINT "AgentNodeExecution_agentGraphExecutionId_fkey" FOREIGN KEY ("agentGraphExecutionId") REFERENCES "AgentGraphExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNodeExecution" ADD CONSTRAINT "AgentNodeExecution_agentNodeId_fkey" FOREIGN KEY ("agentNodeId") REFERENCES "AgentNode"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_referencedByInputExecId_fkey" FOREIGN KEY ("referencedByInputExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentNodeExecutionInputOutput" ADD CONSTRAINT "AgentNodeExecutionInputOutput_referencedByOutputExecId_fkey" FOREIGN KEY ("referencedByOutputExecId") REFERENCES "AgentNodeExecution"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_agentGraphId_agentGraphVersion_fkey" FOREIGN KEY ("agentGraphId", "agentGraphVersion") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AgentGraphExecutionSchedule" ADD CONSTRAINT "AgentGraphExecutionSchedule_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AnalyticsDetails" ADD CONSTRAINT "AnalyticsDetails_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "AnalyticsMetrics" ADD CONSTRAINT "AnalyticsMetrics_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
+
+-- AddForeignKey
+ALTER TABLE "UserBlockCredit" ADD CONSTRAINT "UserBlockCredit_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
diff --git a/autogpt_platform/backend/migrations/20241007115713_cascade_graph_deletion/migration.sql b/autogpt_platform/backend/migrations/20241007115713_cascade_graph_deletion/migration.sql
new file mode 100644
index 0000000000..3b783a6d92
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20241007115713_cascade_graph_deletion/migration.sql
@@ -0,0 +1,5 @@
+-- DropForeignKey
+ALTER TABLE "AgentGraph" DROP CONSTRAINT "AgentGraph_agentGraphParentId_version_fkey";
+
+-- AddForeignKey
+ALTER TABLE "AgentGraph" ADD CONSTRAINT "AgentGraph_agentGraphParentId_version_fkey" FOREIGN KEY ("agentGraphParentId", "version") REFERENCES "AgentGraph"("id", "version") ON DELETE CASCADE ON UPDATE CASCADE;
diff --git a/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql
new file mode 100644
index 0000000000..b3886efa03
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20241007175111_move_oauth_creds_to_user_obj/migration.sql
@@ -0,0 +1,2 @@
+-- AlterTable
+ALTER TABLE "User" ADD COLUMN "metadata" JSONB;
diff --git a/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql
new file mode 100644
index 0000000000..aa577c90e9
--- /dev/null
+++ b/autogpt_platform/backend/migrations/20241007175112_add_oauth_creds_user_trigger/migration.sql
@@ -0,0 +1,27 @@
+--CreateFunction
+CREATE OR REPLACE FUNCTION add_user_to_platform() RETURNS TRIGGER AS $$
+BEGIN
+ INSERT INTO platform."User" (id, email, "updatedAt")
+ VALUES (NEW.id, NEW.email, now());
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql SECURITY DEFINER;
+
+DO $$
+BEGIN
+ -- Check if the auth schema and users table exist
+ IF EXISTS (
+ SELECT 1
+ FROM information_schema.tables
+ WHERE table_schema = 'auth'
+ AND table_name = 'users'
+ ) THEN
+ -- Drop the trigger if it exists
+ DROP TRIGGER IF EXISTS user_added_to_platform ON auth.users;
+
+ -- Create the trigger
+ CREATE TRIGGER user_added_to_platform
+ AFTER INSERT ON auth.users
+ FOR EACH ROW EXECUTE FUNCTION add_user_to_platform();
+ END IF;
+END $$;
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index e76165a144..d4541d2187 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -17,113 +17,113 @@ yarl = "*"
[[package]]
name = "aiohappyeyeballs"
-version = "2.4.0"
+version = "2.4.3"
description = "Happy Eyeballs for asyncio"
optional = false
python-versions = ">=3.8"
files = [
- {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"},
- {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"},
+ {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"},
+ {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"},
]
[[package]]
name = "aiohttp"
-version = "3.10.5"
+version = "3.10.8"
description = "Async http client/server framework (asyncio)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:18a01eba2574fb9edd5f6e5fb25f66e6ce061da5dab5db75e13fe1558142e0a3"},
- {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94fac7c6e77ccb1ca91e9eb4cb0ac0270b9fb9b289738654120ba8cebb1189c6"},
- {file = "aiohttp-3.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f1f1c75c395991ce9c94d3e4aa96e5c59c8356a15b1c9231e783865e2772699"},
- {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7acae3cf1a2a2361ec4c8e787eaaa86a94171d2417aae53c0cca6ca3118ff6"},
- {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94c4381ffba9cc508b37d2e536b418d5ea9cfdc2848b9a7fea6aebad4ec6aac1"},
- {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c31ad0c0c507894e3eaa843415841995bf8de4d6b2d24c6e33099f4bc9fc0d4f"},
- {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0912b8a8fadeb32ff67a3ed44249448c20148397c1ed905d5dac185b4ca547bb"},
- {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d93400c18596b7dc4794d48a63fb361b01a0d8eb39f28800dc900c8fbdaca91"},
- {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d00f3c5e0d764a5c9aa5a62d99728c56d455310bcc288a79cab10157b3af426f"},
- {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d742c36ed44f2798c8d3f4bc511f479b9ceef2b93f348671184139e7d708042c"},
- {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:814375093edae5f1cb31e3407997cf3eacefb9010f96df10d64829362ae2df69"},
- {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8224f98be68a84b19f48e0bdc14224b5a71339aff3a27df69989fa47d01296f3"},
- {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d9a487ef090aea982d748b1b0d74fe7c3950b109df967630a20584f9a99c0683"},
- {file = "aiohttp-3.10.5-cp310-cp310-win32.whl", hash = "sha256:d9ef084e3dc690ad50137cc05831c52b6ca428096e6deb3c43e95827f531d5ef"},
- {file = "aiohttp-3.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:66bf9234e08fe561dccd62083bf67400bdbf1c67ba9efdc3dac03650e97c6088"},
- {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c6a4e5e40156d72a40241a25cc226051c0a8d816610097a8e8f517aeacd59a2"},
- {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c634a3207a5445be65536d38c13791904fda0748b9eabf908d3fe86a52941cf"},
- {file = "aiohttp-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4aff049b5e629ef9b3e9e617fa6e2dfeda1bf87e01bcfecaf3949af9e210105e"},
- {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1942244f00baaacaa8155eca94dbd9e8cc7017deb69b75ef67c78e89fdad3c77"},
- {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04a1f2a65ad2f93aa20f9ff9f1b672bf912413e5547f60749fa2ef8a644e061"},
- {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f2bfc0032a00405d4af2ba27f3c429e851d04fad1e5ceee4080a1c570476697"},
- {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:424ae21498790e12eb759040bbb504e5e280cab64693d14775c54269fd1d2bb7"},
- {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:975218eee0e6d24eb336d0328c768ebc5d617609affaca5dbbd6dd1984f16ed0"},
- {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4120d7fefa1e2d8fb6f650b11489710091788de554e2b6f8347c7a20ceb003f5"},
- {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b90078989ef3fc45cf9221d3859acd1108af7560c52397ff4ace8ad7052a132e"},
- {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba5a8b74c2a8af7d862399cdedce1533642fa727def0b8c3e3e02fcb52dca1b1"},
- {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:02594361128f780eecc2a29939d9dfc870e17b45178a867bf61a11b2a4367277"},
- {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8fb4fc029e135859f533025bc82047334e24b0d489e75513144f25408ecaf058"},
- {file = "aiohttp-3.10.5-cp311-cp311-win32.whl", hash = "sha256:e1ca1ef5ba129718a8fc827b0867f6aa4e893c56eb00003b7367f8a733a9b072"},
- {file = "aiohttp-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:349ef8a73a7c5665cca65c88ab24abe75447e28aa3bc4c93ea5093474dfdf0ff"},
- {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:305be5ff2081fa1d283a76113b8df7a14c10d75602a38d9f012935df20731487"},
- {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a1c32a19ee6bbde02f1cb189e13a71b321256cc1d431196a9f824050b160d5a"},
- {file = "aiohttp-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61645818edd40cc6f455b851277a21bf420ce347baa0b86eaa41d51ef58ba23d"},
- {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c225286f2b13bab5987425558baa5cbdb2bc925b2998038fa028245ef421e75"},
- {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ba01ebc6175e1e6b7275c907a3a36be48a2d487549b656aa90c8a910d9f3178"},
- {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8eaf44ccbc4e35762683078b72bf293f476561d8b68ec8a64f98cf32811c323e"},
- {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c43eb1ab7cbf411b8e387dc169acb31f0ca0d8c09ba63f9eac67829585b44f"},
- {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de7a5299827253023c55ea549444e058c0eb496931fa05d693b95140a947cb73"},
- {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4790f0e15f00058f7599dab2b206d3049d7ac464dc2e5eae0e93fa18aee9e7bf"},
- {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44b324a6b8376a23e6ba25d368726ee3bc281e6ab306db80b5819999c737d820"},
- {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0d277cfb304118079e7044aad0b76685d30ecb86f83a0711fc5fb257ffe832ca"},
- {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:54d9ddea424cd19d3ff6128601a4a4d23d54a421f9b4c0fff740505813739a91"},
- {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4f1c9866ccf48a6df2b06823e6ae80573529f2af3a0992ec4fe75b1a510df8a6"},
- {file = "aiohttp-3.10.5-cp312-cp312-win32.whl", hash = "sha256:dc4826823121783dccc0871e3f405417ac116055bf184ac04c36f98b75aacd12"},
- {file = "aiohttp-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:22c0a23a3b3138a6bf76fc553789cb1a703836da86b0f306b6f0dc1617398abc"},
- {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7f6b639c36734eaa80a6c152a238242bedcee9b953f23bb887e9102976343092"},
- {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f29930bc2921cef955ba39a3ff87d2c4398a0394ae217f41cb02d5c26c8b1b77"},
- {file = "aiohttp-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f489a2c9e6455d87eabf907ac0b7d230a9786be43fbe884ad184ddf9e9c1e385"},
- {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:123dd5b16b75b2962d0fff566effb7a065e33cd4538c1692fb31c3bda2bfb972"},
- {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b98e698dc34966e5976e10bbca6d26d6724e6bdea853c7c10162a3235aba6e16"},
- {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3b9162bab7e42f21243effc822652dc5bb5e8ff42a4eb62fe7782bcbcdfacf6"},
- {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1923a5c44061bffd5eebeef58cecf68096e35003907d8201a4d0d6f6e387ccaa"},
- {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55f011da0a843c3d3df2c2cf4e537b8070a419f891c930245f05d329c4b0689"},
- {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:afe16a84498441d05e9189a15900640a2d2b5e76cf4efe8cbb088ab4f112ee57"},
- {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8112fb501b1e0567a1251a2fd0747baae60a4ab325a871e975b7bb67e59221f"},
- {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e72589da4c90337837fdfe2026ae1952c0f4a6e793adbbfbdd40efed7c63599"},
- {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4d46c7b4173415d8e583045fbc4daa48b40e31b19ce595b8d92cf639396c15d5"},
- {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33e6bc4bab477c772a541f76cd91e11ccb6d2efa2b8d7d7883591dfb523e5987"},
- {file = "aiohttp-3.10.5-cp313-cp313-win32.whl", hash = "sha256:c58c6837a2c2a7cf3133983e64173aec11f9c2cd8e87ec2fdc16ce727bcf1a04"},
- {file = "aiohttp-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:38172a70005252b6893088c0f5e8a47d173df7cc2b2bd88650957eb84fcf5022"},
- {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f6f18898ace4bcd2d41a122916475344a87f1dfdec626ecde9ee802a711bc569"},
- {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5ede29d91a40ba22ac1b922ef510aab871652f6c88ef60b9dcdf773c6d32ad7a"},
- {file = "aiohttp-3.10.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:673f988370f5954df96cc31fd99c7312a3af0a97f09e407399f61583f30da9bc"},
- {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58718e181c56a3c02d25b09d4115eb02aafe1a732ce5714ab70326d9776457c3"},
- {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b38b1570242fbab8d86a84128fb5b5234a2f70c2e32f3070143a6d94bc854cf"},
- {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:074d1bff0163e107e97bd48cad9f928fa5a3eb4b9d33366137ffce08a63e37fe"},
- {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd31f176429cecbc1ba499d4aba31aaccfea488f418d60376b911269d3b883c5"},
- {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7384d0b87d4635ec38db9263e6a3f1eb609e2e06087f0aa7f63b76833737b471"},
- {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8989f46f3d7ef79585e98fa991e6ded55d2f48ae56d2c9fa5e491a6e4effb589"},
- {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c83f7a107abb89a227d6c454c613e7606c12a42b9a4ca9c5d7dad25d47c776ae"},
- {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cde98f323d6bf161041e7627a5fd763f9fd829bcfcd089804a5fdce7bb6e1b7d"},
- {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:676f94c5480d8eefd97c0c7e3953315e4d8c2b71f3b49539beb2aa676c58272f"},
- {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2d21ac12dc943c68135ff858c3a989f2194a709e6e10b4c8977d7fcd67dfd511"},
- {file = "aiohttp-3.10.5-cp38-cp38-win32.whl", hash = "sha256:17e997105bd1a260850272bfb50e2a328e029c941c2708170d9d978d5a30ad9a"},
- {file = "aiohttp-3.10.5-cp38-cp38-win_amd64.whl", hash = "sha256:1c19de68896747a2aa6257ae4cf6ef59d73917a36a35ee9d0a6f48cff0f94db8"},
- {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7e2fe37ac654032db1f3499fe56e77190282534810e2a8e833141a021faaab0e"},
- {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5bf3ead3cb66ab990ee2561373b009db5bc0e857549b6c9ba84b20bc462e172"},
- {file = "aiohttp-3.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b2c16a919d936ca87a3c5f0e43af12a89a3ce7ccbce59a2d6784caba945b68b"},
- {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad146dae5977c4dd435eb31373b3fe9b0b1bf26858c6fc452bf6af394067e10b"},
- {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c5c6fa16412b35999320f5c9690c0f554392dc222c04e559217e0f9ae244b92"},
- {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95c4dc6f61d610bc0ee1edc6f29d993f10febfe5b76bb470b486d90bbece6b22"},
- {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da452c2c322e9ce0cfef392e469a26d63d42860f829026a63374fde6b5c5876f"},
- {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:898715cf566ec2869d5cb4d5fb4be408964704c46c96b4be267442d265390f32"},
- {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:391cc3a9c1527e424c6865e087897e766a917f15dddb360174a70467572ac6ce"},
- {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:380f926b51b92d02a34119d072f178d80bbda334d1a7e10fa22d467a66e494db"},
- {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce91db90dbf37bb6fa0997f26574107e1b9d5ff939315247b7e615baa8ec313b"},
- {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9093a81e18c45227eebe4c16124ebf3e0d893830c6aca7cc310bfca8fe59d857"},
- {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ee40b40aa753d844162dcc80d0fe256b87cba48ca0054f64e68000453caead11"},
- {file = "aiohttp-3.10.5-cp39-cp39-win32.whl", hash = "sha256:03f2645adbe17f274444953bdea69f8327e9d278d961d85657cb0d06864814c1"},
- {file = "aiohttp-3.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:d17920f18e6ee090bdd3d0bfffd769d9f2cb4c8ffde3eb203777a3895c128862"},
- {file = "aiohttp-3.10.5.tar.gz", hash = "sha256:f071854b47d39591ce9a17981c46790acb30518e2f83dfca8db2dfa091178691"},
+ {file = "aiohttp-3.10.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a1ba7bc139592339ddeb62c06486d0fa0f4ca61216e14137a40d626c81faf10c"},
+ {file = "aiohttp-3.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85e4d7bd05d18e4b348441e7584c681eff646e3bf38f68b2626807f3add21aa2"},
+ {file = "aiohttp-3.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:69de056022e7abf69cb9fec795515973cc3eeaff51e3ea8d72a77aa933a91c52"},
+ {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee3587506898d4a404b33bd19689286ccf226c3d44d7a73670c8498cd688e42c"},
+ {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe285a697c851734285369614443451462ce78aac2b77db23567507484b1dc6f"},
+ {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10c7932337285a6bfa3a5fe1fd4da90b66ebfd9d0cbd1544402e1202eb9a8c3e"},
+ {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd9716ef0224fe0d0336997eb242f40619f9f8c5c57e66b525a1ebf9f1d8cebe"},
+ {file = "aiohttp-3.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceacea31f8a55cdba02bc72c93eb2e1b77160e91f8abd605969c168502fd71eb"},
+ {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9721554bfa9e15f6e462da304374c2f1baede3cb06008c36c47fa37ea32f1dc4"},
+ {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22cdeb684d8552490dd2697a5138c4ecb46f844892df437aaf94f7eea99af879"},
+ {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e56bb7e31c4bc79956b866163170bc89fd619e0581ce813330d4ea46921a4881"},
+ {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3a95d2686bc4794d66bd8de654e41b5339fab542b2bca9238aa63ed5f4f2ce82"},
+ {file = "aiohttp-3.10.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d82404a0e7b10e0d7f022cf44031b78af8a4f99bd01561ac68f7c24772fed021"},
+ {file = "aiohttp-3.10.8-cp310-cp310-win32.whl", hash = "sha256:4e10b04542d27e21538e670156e88766543692a0a883f243ba8fad9ddea82e53"},
+ {file = "aiohttp-3.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:680dbcff5adc7f696ccf8bf671d38366a1f620b5616a1d333d0cb33956065395"},
+ {file = "aiohttp-3.10.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:33a68011a38020ed4ff41ae0dbf4a96a202562ecf2024bdd8f65385f1d07f6ef"},
+ {file = "aiohttp-3.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c7efa6616a95e3bd73b8a69691012d2ef1f95f9ea0189e42f338fae080c2fc6"},
+ {file = "aiohttp-3.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddb9b9764cfb4459acf01c02d2a59d3e5066b06a846a364fd1749aa168efa2be"},
+ {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7f270f4ca92760f98a42c45a58674fff488e23b144ec80b1cc6fa2effed377"},
+ {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6984dda9d79064361ab58d03f6c1e793ea845c6cfa89ffe1a7b9bb400dfd56bd"},
+ {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f6d47e392c27206701565c8df4cac6ebed28fdf6dcaea5b1eea7a4631d8e6db"},
+ {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a72f89aea712c619b2ca32c6f4335c77125ede27530ad9705f4f349357833695"},
+ {file = "aiohttp-3.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36074b26f3263879ba8e4dbd33db2b79874a3392f403a70b772701363148b9f"},
+ {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e32148b4a745e70a255a1d44b5664de1f2e24fcefb98a75b60c83b9e260ddb5b"},
+ {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5aa1a073514cf59c81ad49a4ed9b5d72b2433638cd53160fd2f3a9cfa94718db"},
+ {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d3a79200a9d5e621c4623081ddb25380b713c8cf5233cd11c1aabad990bb9381"},
+ {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e45fdfcb2d5bcad83373e4808825b7512953146d147488114575780640665027"},
+ {file = "aiohttp-3.10.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f78e2a78432c537ae876a93013b7bc0027ba5b93ad7b3463624c4b6906489332"},
+ {file = "aiohttp-3.10.8-cp311-cp311-win32.whl", hash = "sha256:f8179855a4e4f3b931cb1764ec87673d3fbdcca2af496c8d30567d7b034a13db"},
+ {file = "aiohttp-3.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:ef9b484604af05ca745b6108ca1aaa22ae1919037ae4f93aaf9a37ba42e0b835"},
+ {file = "aiohttp-3.10.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ab2d6523575fc98896c80f49ac99e849c0b0e69cc80bf864eed6af2ae728a52b"},
+ {file = "aiohttp-3.10.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f5d5d5401744dda50b943d8764508d0e60cc2d3305ac1e6420935861a9d544bc"},
+ {file = "aiohttp-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de23085cf90911600ace512e909114385026b16324fa203cc74c81f21fd3276a"},
+ {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4618f0d2bf523043866a9ff8458900d8eb0a6d4018f251dae98e5f1fb699f3a8"},
+ {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21c1925541ca84f7b5e0df361c0a813a7d6a56d3b0030ebd4b220b8d232015f9"},
+ {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:497a7d20caea8855c5429db3cdb829385467217d7feb86952a6107e033e031b9"},
+ {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c887019dbcb4af58a091a45ccf376fffe800b5531b45c1efccda4bedf87747ea"},
+ {file = "aiohttp-3.10.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40d2d719c3c36a7a65ed26400e2b45b2d9ed7edf498f4df38b2ae130f25a0d01"},
+ {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:57359785f27394a8bcab0da6dcd46706d087dfebf59a8d0ad2e64a4bc2f6f94f"},
+ {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a961ee6f2cdd1a2be4735333ab284691180d40bad48f97bb598841bfcbfb94ec"},
+ {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:fe3d79d6af839ffa46fdc5d2cf34295390894471e9875050eafa584cb781508d"},
+ {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9a281cba03bdaa341c70b7551b2256a88d45eead149f48b75a96d41128c240b3"},
+ {file = "aiohttp-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c6769d71bfb1ed60321363a9bc05e94dcf05e38295ef41d46ac08919e5b00d19"},
+ {file = "aiohttp-3.10.8-cp312-cp312-win32.whl", hash = "sha256:a3081246bab4d419697ee45e555cef5cd1def7ac193dff6f50be761d2e44f194"},
+ {file = "aiohttp-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:ab1546fc8e00676febc81c548a876c7bde32f881b8334b77f84719ab2c7d28dc"},
+ {file = "aiohttp-3.10.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:b1a012677b8e0a39e181e218de47d6741c5922202e3b0b65e412e2ce47c39337"},
+ {file = "aiohttp-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2df786c96c57cd6b87156ba4c5f166af7b88f3fc05f9d592252fdc83d8615a3c"},
+ {file = "aiohttp-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8885ca09d3a9317219c0831276bfe26984b17b2c37b7bf70dd478d17092a4772"},
+ {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4dbf252ac19860e0ab56cd480d2805498f47c5a2d04f5995d8d8a6effd04b48c"},
+ {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b2036479b6b94afaaca7d07b8a68dc0e67b0caf5f6293bb6a5a1825f5923000"},
+ {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:365783e1b7c40b59ed4ce2b5a7491bae48f41cd2c30d52647a5b1ee8604c68ad"},
+ {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:270e653b5a4b557476a1ed40e6b6ce82f331aab669620d7c95c658ef976c9c5e"},
+ {file = "aiohttp-3.10.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8960fabc20bfe4fafb941067cda8e23c8c17c98c121aa31c7bf0cdab11b07842"},
+ {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f21e8f2abed9a44afc3d15bba22e0dfc71e5fa859bea916e42354c16102b036f"},
+ {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fecd55e7418fabd297fd836e65cbd6371aa4035a264998a091bbf13f94d9c44d"},
+ {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:badb51d851358cd7535b647bb67af4854b64f3c85f0d089c737f75504d5910ec"},
+ {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e860985f30f3a015979e63e7ba1a391526cdac1b22b7b332579df7867848e255"},
+ {file = "aiohttp-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:71462f8eeca477cbc0c9700a9464e3f75f59068aed5e9d4a521a103692da72dc"},
+ {file = "aiohttp-3.10.8-cp313-cp313-win32.whl", hash = "sha256:177126e971782769b34933e94fddd1089cef0fe6b82fee8a885e539f5b0f0c6a"},
+ {file = "aiohttp-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:98a4eb60e27033dee9593814ca320ee8c199489fbc6b2699d0f710584db7feb7"},
+ {file = "aiohttp-3.10.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ffef3d763e4c8fc97e740da5b4d0f080b78630a3914f4e772a122bbfa608c1db"},
+ {file = "aiohttp-3.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:597128cb7bc5f068181b49a732961f46cb89f85686206289d6ccb5e27cb5fbe2"},
+ {file = "aiohttp-3.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f23a6c1d09de5de89a33c9e9b229106cb70dcfdd55e81a3a3580eaadaa32bc92"},
+ {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da57af0c54a302b7c655fa1ccd5b1817a53739afa39924ef1816e7b7c8a07ccb"},
+ {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e7a6af57091056a79a35104d6ec29d98ec7f1fb7270ad9c6fff871b678d1ff8"},
+ {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32710d6b3b6c09c60c794d84ca887a3a2890131c0b02b3cefdcc6709a2260a7c"},
+ {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b91f4f62ad39a8a42d511d66269b46cb2fb7dea9564c21ab6c56a642d28bff5"},
+ {file = "aiohttp-3.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:471a8c47344b9cc309558b3fcc469bd2c12b49322b4b31eb386c4a2b2d44e44a"},
+ {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc0e7f91705445d79beafba9bb3057dd50830e40fe5417017a76a214af54e122"},
+ {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:85431c9131a9a0f65260dc7a65c800ca5eae78c4c9931618f18c8e0933a0e0c1"},
+ {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:b91557ee0893da52794b25660d4f57bb519bcad8b7df301acd3898f7197c5d81"},
+ {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:4954e6b06dd0be97e1a5751fc606be1f9edbdc553c5d9b57d72406a8fbd17f9d"},
+ {file = "aiohttp-3.10.8-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a087c84b4992160ffef7afd98ef24177c8bd4ad61c53607145a8377457385100"},
+ {file = "aiohttp-3.10.8-cp38-cp38-win32.whl", hash = "sha256:e1f0f7b27171b2956a27bd8f899751d0866ddabdd05cbddf3520f945130a908c"},
+ {file = "aiohttp-3.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:c4916070e12ae140110aa598031876c1bf8676a36a750716ea0aa5bd694aa2e7"},
+ {file = "aiohttp-3.10.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5284997e3d88d0dfb874c43e51ae8f4a6f4ca5b90dcf22995035187253d430db"},
+ {file = "aiohttp-3.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9443d9ebc5167ce1fbb552faf2d666fb22ef5716a8750be67efd140a7733738c"},
+ {file = "aiohttp-3.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b667e2a03407d79a76c618dc30cedebd48f082d85880d0c9c4ec2faa3e10f43e"},
+ {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98fae99d5c2146f254b7806001498e6f9ffb0e330de55a35e72feb7cb2fa399b"},
+ {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8296edd99d0dd9d0eb8b9e25b3b3506eef55c1854e9cc230f0b3f885f680410b"},
+ {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ce46dfb49cfbf9e92818be4b761d4042230b1f0e05ffec0aad15b3eb162b905"},
+ {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c38cfd355fd86c39b2d54651bd6ed7d63d4fe3b5553f364bae3306e2445f847"},
+ {file = "aiohttp-3.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:713dff3f87ceec3bde4f3f484861464e722cf7533f9fa6b824ec82bb5a9010a7"},
+ {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:21a72f4a9c69a8567a0aca12042f12bba25d3139fd5dd8eeb9931f4d9e8599cd"},
+ {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6d1ad868624f6cea77341ef2877ad4e71f7116834a6cd7ec36ec5c32f94ee6ae"},
+ {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a78ba86d5a08207d1d1ad10b97aed6ea48b374b3f6831d02d0b06545ac0f181e"},
+ {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:aff048793d05e1ce05b62e49dccf81fe52719a13f4861530706619506224992b"},
+ {file = "aiohttp-3.10.8-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d088ca05381fd409793571d8e34eca06daf41c8c50a05aeed358d2d340c7af81"},
+ {file = "aiohttp-3.10.8-cp39-cp39-win32.whl", hash = "sha256:ee97c4e54f457c366e1f76fbbf3e8effee9de57dae671084a161c00f481106ce"},
+ {file = "aiohttp-3.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:d95ae4420669c871667aad92ba8cce6251d61d79c1a38504621094143f94a8b4"},
+ {file = "aiohttp-3.10.8.tar.gz", hash = "sha256:21f8225f7dc187018e8433c9326be01477fb2810721e048b33ac49091b19fb4a"},
]
[package.dependencies]
@@ -133,7 +133,7 @@ async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""}
attrs = ">=17.3.0"
frozenlist = ">=1.1.1"
multidict = ">=4.5,<7.0"
-yarl = ">=1.0,<2.0"
+yarl = ">=1.12.0,<2.0"
[package.extras]
speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"]
@@ -204,13 +204,13 @@ vertex = ["google-auth (>=2,<3)"]
[[package]]
name = "anyio"
-version = "4.4.0"
+version = "4.6.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
- {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
+ {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"},
+ {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"},
]
[package.dependencies]
@@ -220,9 +220,9 @@ sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
-doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
-test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
-trio = ["trio (>=0.23)"]
+doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"]
+trio = ["trio (>=0.26.1)"]
[[package]]
name = "apscheduler"
@@ -293,6 +293,7 @@ develop = true
[package.dependencies]
colorama = "^0.4.6"
+expiringdict = "^1.2.2"
google-cloud-logging = "^3.8.0"
pydantic = "^2.8.2"
pydantic-settings = "^2.5.2"
@@ -306,33 +307,33 @@ url = "../autogpt_libs"
[[package]]
name = "black"
-version = "24.8.0"
+version = "24.10.0"
description = "The uncompromising code formatter."
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"},
- {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"},
- {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"},
- {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"},
- {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"},
- {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"},
- {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"},
- {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"},
- {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"},
- {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"},
- {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"},
- {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"},
- {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"},
- {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"},
- {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"},
- {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"},
- {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"},
- {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"},
- {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"},
- {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"},
- {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"},
- {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"},
+ {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"},
+ {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"},
+ {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"},
+ {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"},
+ {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"},
+ {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"},
+ {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"},
+ {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"},
+ {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"},
+ {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"},
+ {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"},
+ {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"},
+ {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"},
+ {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"},
+ {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"},
+ {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"},
+ {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"},
+ {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"},
+ {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"},
+ {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"},
+ {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"},
+ {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"},
]
[package.dependencies]
@@ -346,7 +347,7 @@ typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
[package.extras]
colorama = ["colorama (>=0.4.3)"]
-d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
+d = ["aiohttp (>=3.10)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
@@ -636,18 +637,18 @@ sgmllib3k = "*"
[[package]]
name = "filelock"
-version = "3.16.0"
+version = "3.16.1"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.8"
files = [
- {file = "filelock-3.16.0-py3-none-any.whl", hash = "sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609"},
- {file = "filelock-3.16.0.tar.gz", hash = "sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec"},
+ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"},
+ {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"},
]
[package.extras]
-docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.1.1)", "pytest (>=8.3.2)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.3)"]
+docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"]
typing = ["typing-extensions (>=4.12.2)"]
[[package]]
@@ -793,13 +794,13 @@ tqdm = ["tqdm"]
[[package]]
name = "google-api-core"
-version = "2.19.2"
+version = "2.20.0"
description = "Google API client core library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google_api_core-2.19.2-py3-none-any.whl", hash = "sha256:53ec0258f2837dd53bbd3d3df50f5359281b3cc13f800c941dd15a9b5a415af4"},
- {file = "google_api_core-2.19.2.tar.gz", hash = "sha256:ca07de7e8aa1c98a8bfca9321890ad2340ef7f2eb136e558cee68f24b94b0a8f"},
+ {file = "google_api_core-2.20.0-py3-none-any.whl", hash = "sha256:ef0591ef03c30bb83f79b3d0575c3f31219001fc9c5cf37024d08310aeffed8a"},
+ {file = "google_api_core-2.20.0.tar.gz", hash = "sha256:f74dff1889ba291a4b76c5079df0711810e2d9da81abfdc99957bc961c1eb28f"},
]
[package.dependencies]
@@ -824,13 +825,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
[[package]]
name = "google-api-python-client"
-version = "2.145.0"
+version = "2.147.0"
description = "Google API Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google_api_python_client-2.145.0-py2.py3-none-any.whl", hash = "sha256:d74da1358f3f2d63daf3c6f26bd96d89652051183bc87cf10a56ceb2a70beb50"},
- {file = "google_api_python_client-2.145.0.tar.gz", hash = "sha256:8b84dde11aaccadc127e4846f5cd932331d804ea324e353131595e3f25376e97"},
+ {file = "google_api_python_client-2.147.0-py2.py3-none-any.whl", hash = "sha256:c6ecfa193c695baa41e84562d8f8f244fcd164419eca3fc9fd7565646668f9b2"},
+ {file = "google_api_python_client-2.147.0.tar.gz", hash = "sha256:e864c2cf61d34c00f05278b8bdb72b93b6fa34f0de9ead51d20435f3b65f91be"},
]
[package.dependencies]
@@ -842,13 +843,13 @@ uritemplate = ">=3.0.1,<5"
[[package]]
name = "google-auth"
-version = "2.34.0"
+version = "2.35.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"},
- {file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"},
+ {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"},
+ {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"},
]
[package.dependencies]
@@ -989,15 +990,28 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[package.extras]
grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]
+[[package]]
+name = "googlemaps"
+version = "4.10.0"
+description = "Python client library for Google Maps Platform"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "googlemaps-4.10.0.tar.gz", hash = "sha256:3055fcbb1aa262a9159b589b5e6af762b10e80634ae11c59495bd44867e47d88"},
+]
+
+[package.dependencies]
+requests = ">=2.20.0,<3.0"
+
[[package]]
name = "gotrue"
-version = "2.8.1"
+version = "2.9.0"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = "<4.0,>=3.8"
files = [
- {file = "gotrue-2.8.1-py3-none-any.whl", hash = "sha256:97dff077d71cca629f046c35ba34fae132b69c55fe271651766ddcf6d8132468"},
- {file = "gotrue-2.8.1.tar.gz", hash = "sha256:644d0096c4c390f7e36d9cb05271a7091c01e7dc6d506eb117b8fe8fc48eb8d9"},
+ {file = "gotrue-2.9.0-py3-none-any.whl", hash = "sha256:9a6448479329771752cb93be65bc95f06f17d9262e814a95d03b218cf5dce87a"},
+ {file = "gotrue-2.9.0.tar.gz", hash = "sha256:c50e75bd01b82a388eed6a921a1c373a7157fd405df2221a8532193a39df4159"},
]
[package.dependencies]
@@ -1041,76 +1055,85 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[[package]]
name = "grpcio"
-version = "1.66.1"
+version = "1.66.2"
description = "HTTP/2-based RPC framework"
optional = false
python-versions = ">=3.8"
files = [
- {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"},
- {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"},
- {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"},
- {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"},
- {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"},
- {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"},
- {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"},
- {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"},
- {file = "grpcio-1.66.1-cp310-cp310-win_amd64.whl", hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"},
- {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"},
- {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"},
- {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"},
- {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"},
- {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"},
- {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"},
- {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"},
- {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"},
- {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = "sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"},
- {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"},
- {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"},
- {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"},
- {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"},
- {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"},
- {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"},
- {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"},
- {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"},
- {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"},
- {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"},
- {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"},
- {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"},
- {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"},
- {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"},
- {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"},
- {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"},
- {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = "sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"},
- {file = "grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = "sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"},
- {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"},
- {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"},
- {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"},
- {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"},
- {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"},
- {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"},
- {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"},
- {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"},
- {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"},
- {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"},
+ {file = "grpcio-1.66.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:fe96281713168a3270878255983d2cb1a97e034325c8c2c25169a69289d3ecfa"},
+ {file = "grpcio-1.66.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:73fc8f8b9b5c4a03e802b3cd0c18b2b06b410d3c1dcbef989fdeb943bd44aff7"},
+ {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:03b0b307ba26fae695e067b94cbb014e27390f8bc5ac7a3a39b7723fed085604"},
+ {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d69ce1f324dc2d71e40c9261d3fdbe7d4c9d60f332069ff9b2a4d8a257c7b2b"},
+ {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05bc2ceadc2529ab0b227b1310d249d95d9001cd106aa4d31e8871ad3c428d73"},
+ {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ac475e8da31484efa25abb774674d837b343afb78bb3bcdef10f81a93e3d6bf"},
+ {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0be4e0490c28da5377283861bed2941d1d20ec017ca397a5df4394d1c31a9b50"},
+ {file = "grpcio-1.66.2-cp310-cp310-win32.whl", hash = "sha256:4e504572433f4e72b12394977679161d495c4c9581ba34a88d843eaf0f2fbd39"},
+ {file = "grpcio-1.66.2-cp310-cp310-win_amd64.whl", hash = "sha256:2018b053aa15782db2541ca01a7edb56a0bf18c77efed975392583725974b249"},
+ {file = "grpcio-1.66.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:2335c58560a9e92ac58ff2bc5649952f9b37d0735608242973c7a8b94a6437d8"},
+ {file = "grpcio-1.66.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45a3d462826f4868b442a6b8fdbe8b87b45eb4f5b5308168c156b21eca43f61c"},
+ {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a9539f01cb04950fd4b5ab458e64a15f84c2acc273670072abe49a3f29bbad54"},
+ {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce89f5876662f146d4c1f695dda29d4433a5d01c8681fbd2539afff535da14d4"},
+ {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25a14af966438cddf498b2e338f88d1c9706f3493b1d73b93f695c99c5f0e2a"},
+ {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6001e575b8bbd89eee11960bb640b6da6ae110cf08113a075f1e2051cc596cae"},
+ {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4ea1d062c9230278793820146c95d038dc0f468cbdd172eec3363e42ff1c7d01"},
+ {file = "grpcio-1.66.2-cp311-cp311-win32.whl", hash = "sha256:38b68498ff579a3b1ee8f93a05eb48dc2595795f2f62716e797dc24774c1aaa8"},
+ {file = "grpcio-1.66.2-cp311-cp311-win_amd64.whl", hash = "sha256:6851de821249340bdb100df5eacfecfc4e6075fa85c6df7ee0eb213170ec8e5d"},
+ {file = "grpcio-1.66.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:802d84fd3d50614170649853d121baaaa305de7b65b3e01759247e768d691ddf"},
+ {file = "grpcio-1.66.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80fd702ba7e432994df208f27514280b4b5c6843e12a48759c9255679ad38db8"},
+ {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:12fda97ffae55e6526825daf25ad0fa37483685952b5d0f910d6405c87e3adb6"},
+ {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:950da58d7d80abd0ea68757769c9db0a95b31163e53e5bb60438d263f4bed7b7"},
+ {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e636ce23273683b00410f1971d209bf3689238cf5538d960adc3cdfe80dd0dbd"},
+ {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a917d26e0fe980b0ac7bfcc1a3c4ad6a9a4612c911d33efb55ed7833c749b0ee"},
+ {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49f0ca7ae850f59f828a723a9064cadbed90f1ece179d375966546499b8a2c9c"},
+ {file = "grpcio-1.66.2-cp312-cp312-win32.whl", hash = "sha256:31fd163105464797a72d901a06472860845ac157389e10f12631025b3e4d0453"},
+ {file = "grpcio-1.66.2-cp312-cp312-win_amd64.whl", hash = "sha256:ff1f7882e56c40b0d33c4922c15dfa30612f05fb785074a012f7cda74d1c3679"},
+ {file = "grpcio-1.66.2-cp313-cp313-linux_armv7l.whl", hash = "sha256:3b00efc473b20d8bf83e0e1ae661b98951ca56111feb9b9611df8efc4fe5d55d"},
+ {file = "grpcio-1.66.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1caa38fb22a8578ab8393da99d4b8641e3a80abc8fd52646f1ecc92bcb8dee34"},
+ {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c408f5ef75cfffa113cacd8b0c0e3611cbfd47701ca3cdc090594109b9fcbaed"},
+ {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c806852deaedee9ce8280fe98955c9103f62912a5b2d5ee7e3eaa284a6d8d8e7"},
+ {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f145cc21836c332c67baa6fc81099d1d27e266401565bf481948010d6ea32d46"},
+ {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:73e3b425c1e155730273f73e419de3074aa5c5e936771ee0e4af0814631fb30a"},
+ {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9c509a4f78114cbc5f0740eb3d7a74985fd2eff022971bc9bc31f8bc93e66a3b"},
+ {file = "grpcio-1.66.2-cp313-cp313-win32.whl", hash = "sha256:20657d6b8cfed7db5e11b62ff7dfe2e12064ea78e93f1434d61888834bc86d75"},
+ {file = "grpcio-1.66.2-cp313-cp313-win_amd64.whl", hash = "sha256:fb70487c95786e345af5e854ffec8cb8cc781bcc5df7930c4fbb7feaa72e1cdf"},
+ {file = "grpcio-1.66.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:a18e20d8321c6400185b4263e27982488cb5cdd62da69147087a76a24ef4e7e3"},
+ {file = "grpcio-1.66.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:02697eb4a5cbe5a9639f57323b4c37bcb3ab2d48cec5da3dc2f13334d72790dd"},
+ {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:99a641995a6bc4287a6315989ee591ff58507aa1cbe4c2e70d88411c4dcc0839"},
+ {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ed71e81782966ffead60268bbda31ea3f725ebf8aa73634d5dda44f2cf3fb9c"},
+ {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbd27c24a4cc5e195a7f56cfd9312e366d5d61b86e36d46bbe538457ea6eb8dd"},
+ {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a9724a156c8ec6a379869b23ba3323b7ea3600851c91489b871e375f710bc8"},
+ {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d8d4732cc5052e92cea2f78b233c2e2a52998ac40cd651f40e398893ad0d06ec"},
+ {file = "grpcio-1.66.2-cp38-cp38-win32.whl", hash = "sha256:7b2c86457145ce14c38e5bf6bdc19ef88e66c5fee2c3d83285c5aef026ba93b3"},
+ {file = "grpcio-1.66.2-cp38-cp38-win_amd64.whl", hash = "sha256:e88264caad6d8d00e7913996030bac8ad5f26b7411495848cc218bd3a9040b6c"},
+ {file = "grpcio-1.66.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:c400ba5675b67025c8a9f48aa846f12a39cf0c44df5cd060e23fda5b30e9359d"},
+ {file = "grpcio-1.66.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:66a0cd8ba6512b401d7ed46bb03f4ee455839957f28b8d61e7708056a806ba6a"},
+ {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:06de8ec0bd71be123eec15b0e0d457474931c2c407869b6c349bd9bed4adbac3"},
+ {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb57870449dfcfac428afbb5a877829fcb0d6db9d9baa1148705739e9083880e"},
+ {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b672abf90a964bfde2d0ecbce30f2329a47498ba75ce6f4da35a2f4532b7acbc"},
+ {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ad2efdbe90c73b0434cbe64ed372e12414ad03c06262279b104a029d1889d13e"},
+ {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c3a99c519f4638e700e9e3f83952e27e2ea10873eecd7935823dab0c1c9250e"},
+ {file = "grpcio-1.66.2-cp39-cp39-win32.whl", hash = "sha256:78fa51ebc2d9242c0fc5db0feecc57a9943303b46664ad89921f5079e2e4ada7"},
+ {file = "grpcio-1.66.2-cp39-cp39-win_amd64.whl", hash = "sha256:728bdf36a186e7f51da73be7f8d09457a03061be848718d0edf000e709418987"},
+ {file = "grpcio-1.66.2.tar.gz", hash = "sha256:563588c587b75c34b928bc428548e5b00ea38c46972181a4d8b75ba7e3f24231"},
]
[package.extras]
-protobuf = ["grpcio-tools (>=1.66.1)"]
+protobuf = ["grpcio-tools (>=1.66.2)"]
[[package]]
name = "grpcio-status"
-version = "1.66.1"
+version = "1.66.2"
description = "Status proto mapping for gRPC"
optional = false
python-versions = ">=3.8"
files = [
- {file = "grpcio_status-1.66.1-py3-none-any.whl", hash = "sha256:cf9ed0b4a83adbe9297211c95cb5488b0cd065707e812145b842c85c4782ff02"},
- {file = "grpcio_status-1.66.1.tar.gz", hash = "sha256:b3f7d34ccc46d83fea5261eea3786174459f763c31f6e34f1d24eba6d515d024"},
+ {file = "grpcio_status-1.66.2-py3-none-any.whl", hash = "sha256:e5fe189f6897d12aa9cd74408a17ca41e44fad30871cf84f5cbd17bd713d2455"},
+ {file = "grpcio_status-1.66.2.tar.gz", hash = "sha256:fb55cbb5c2e67062f7a4d5c99e489d074fb57e98678d5c3c6692a2d74d89e9ae"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
-grpcio = ">=1.66.1"
+grpcio = ">=1.66.2"
protobuf = ">=5.26.1,<6.0dev"
[[package]]
@@ -1261,13 +1284,13 @@ zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "huggingface-hub"
-version = "0.24.6"
+version = "0.25.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "huggingface_hub-0.24.6-py3-none-any.whl", hash = "sha256:a990f3232aa985fe749bc9474060cbad75e8b2f115f6665a9fda5b9c97818970"},
- {file = "huggingface_hub-0.24.6.tar.gz", hash = "sha256:cc2579e761d070713eaa9c323e3debe39d5b464ae3a7261c39a9195b27bb8000"},
+ {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"},
+ {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"},
]
[package.dependencies]
@@ -1306,15 +1329,18 @@ files = [
[[package]]
name = "idna"
-version = "3.8"
+version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
- {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"},
- {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"},
+ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+ {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
+[package.extras]
+all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
+
[[package]]
name = "importlib-metadata"
version = "8.4.0"
@@ -1730,13 +1756,13 @@ httpx = ">=0.27.0,<0.28.0"
[[package]]
name = "openai"
-version = "1.44.1"
+version = "1.50.2"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.7.1"
files = [
- {file = "openai-1.44.1-py3-none-any.whl", hash = "sha256:07e2c2758d1c94151c740b14dab638ba0d04bcb41a2e397045c90e7661cdf741"},
- {file = "openai-1.44.1.tar.gz", hash = "sha256:e0ffdab601118329ea7529e684b606a72c6c9d4f05be9ee1116255fcf5593874"},
+ {file = "openai-1.50.2-py3-none-any.whl", hash = "sha256:822dd2051baa3393d0d5406990611975dd6f533020dc9375a34d4fe67e8b75f7"},
+ {file = "openai-1.50.2.tar.gz", hash = "sha256:3987ae027152fc8bea745d60b02c8f4c4a76e1b5c70e73565fa556db6f78c9e6"},
]
[package.dependencies]
@@ -1815,15 +1841,66 @@ files = [
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
]
+[[package]]
+name = "pinecone"
+version = "5.3.1"
+description = "Pinecone client and SDK"
+optional = false
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "pinecone-5.3.1-py3-none-any.whl", hash = "sha256:dd180963d29cd648f2d58becf18b21f150362aef80446dd3a7ed15cbe85bb4c7"},
+ {file = "pinecone-5.3.1.tar.gz", hash = "sha256:a216630331753958f4ebcdc6e6d473402d17152f2194af3e19b3416c73b0dcc4"},
+]
+
+[package.dependencies]
+certifi = ">=2019.11.17"
+pinecone-plugin-inference = ">=1.1.0,<2.0.0"
+pinecone-plugin-interface = ">=0.0.7,<0.0.8"
+python-dateutil = ">=2.5.3"
+tqdm = ">=4.64.1"
+typing-extensions = ">=3.7.4"
+urllib3 = [
+ {version = ">=1.26.5", markers = "python_version >= \"3.12\" and python_version < \"4.0\""},
+ {version = ">=1.26.0", markers = "python_version >= \"3.8\" and python_version < \"3.12\""},
+]
+
+[package.extras]
+grpc = ["googleapis-common-protos (>=1.53.0)", "grpcio (>=1.44.0)", "grpcio (>=1.59.0)", "lz4 (>=3.1.3)", "protobuf (>=4.25,<5.0)", "protoc-gen-openapiv2 (>=0.0.1,<0.0.2)"]
+
+[[package]]
+name = "pinecone-plugin-inference"
+version = "1.1.0"
+description = "Embeddings plugin for Pinecone SDK"
+optional = false
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "pinecone_plugin_inference-1.1.0-py3-none-any.whl", hash = "sha256:32c61aba21c9a28fdcd0e782204c1ca641aeb3fd6e42764fbf0de8186eb657ec"},
+ {file = "pinecone_plugin_inference-1.1.0.tar.gz", hash = "sha256:283e5ae4590b901bf2179beb56fc3d1b715e63582f37ec7abb0708cf70912d1f"},
+]
+
+[package.dependencies]
+pinecone-plugin-interface = ">=0.0.7,<0.0.8"
+
+[[package]]
+name = "pinecone-plugin-interface"
+version = "0.0.7"
+description = "Plugin interface for the Pinecone python client"
+optional = false
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "pinecone_plugin_interface-0.0.7-py3-none-any.whl", hash = "sha256:875857ad9c9fc8bbc074dbe780d187a2afd21f5bfe0f3b08601924a61ef1bba8"},
+ {file = "pinecone_plugin_interface-0.0.7.tar.gz", hash = "sha256:b8e6675e41847333aa13923cc44daa3f85676d7157324682dc1640588a982846"},
+]
+
[[package]]
name = "platformdirs"
-version = "4.3.2"
+version = "4.3.6"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
- {file = "platformdirs-4.3.2-py3-none-any.whl", hash = "sha256:eb1c8582560b34ed4ba105009a4badf7f6f85768b30126f351328507b2beb617"},
- {file = "platformdirs-4.3.2.tar.gz", hash = "sha256:9e5e27a08aa095dd127b9f2e764d74254f482fef22b0970773bfba79d091ab8c"},
+ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
+ {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
]
[package.extras]
@@ -1848,18 +1925,19 @@ testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "poethepoet"
-version = "0.26.1"
+version = "0.29.0"
description = "A task runner that works well with poetry."
optional = false
python-versions = ">=3.8"
files = [
- {file = "poethepoet-0.26.1-py3-none-any.whl", hash = "sha256:aa43b443fec5d17d7e76771cccd484e5285805301721a74f059c483ad3276edd"},
- {file = "poethepoet-0.26.1.tar.gz", hash = "sha256:aaad8541f6072617a60bcff2562d00779b58b353bd0f1847b06d8d0f2b6dc192"},
+ {file = "poethepoet-0.29.0-py3-none-any.whl", hash = "sha256:f8dfe55006dcfb5cf31bcb1904e1262e1c642a4502fee3688cbf1bddfe5c7601"},
+ {file = "poethepoet-0.29.0.tar.gz", hash = "sha256:676842302f2304a86b31ac56398dd672fae8471128d2086896393384dbafc095"},
]
[package.dependencies]
pastel = ">=0.2.1,<0.3.0"
-tomli = ">=1.2.2"
+pyyaml = ">=6.0.2,<7.0.0"
+tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""}
[package.extras]
poetry-plugin = ["poetry (>=1.0,<2.0)"]
@@ -1969,51 +2047,53 @@ testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
-version = "5.28.1"
+version = "5.28.2"
description = ""
optional = false
python-versions = ">=3.8"
files = [
- {file = "protobuf-5.28.1-cp310-abi3-win32.whl", hash = "sha256:fc063acaf7a3d9ca13146fefb5b42ac94ab943ec6e978f543cd5637da2d57957"},
- {file = "protobuf-5.28.1-cp310-abi3-win_amd64.whl", hash = "sha256:4c7f5cb38c640919791c9f74ea80c5b82314c69a8409ea36f2599617d03989af"},
- {file = "protobuf-5.28.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4304e4fceb823d91699e924a1fdf95cde0e066f3b1c28edb665bda762ecde10f"},
- {file = "protobuf-5.28.1-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:0dfd86d2b5edf03d91ec2a7c15b4e950258150f14f9af5f51c17fa224ee1931f"},
- {file = "protobuf-5.28.1-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:51f09caab818707ab91cf09cc5c156026599cf05a4520779ccbf53c1b352fb25"},
- {file = "protobuf-5.28.1-cp38-cp38-win32.whl", hash = "sha256:1b04bde117a10ff9d906841a89ec326686c48ececeb65690f15b8cabe7149495"},
- {file = "protobuf-5.28.1-cp38-cp38-win_amd64.whl", hash = "sha256:cabfe43044ee319ad6832b2fda332646f9ef1636b0130186a3ae0a52fc264bb4"},
- {file = "protobuf-5.28.1-cp39-cp39-win32.whl", hash = "sha256:4b4b9a0562a35773ff47a3df823177ab71a1f5eb1ff56d8f842b7432ecfd7fd2"},
- {file = "protobuf-5.28.1-cp39-cp39-win_amd64.whl", hash = "sha256:f24e5d70e6af8ee9672ff605d5503491635f63d5db2fffb6472be78ba62efd8f"},
- {file = "protobuf-5.28.1-py3-none-any.whl", hash = "sha256:c529535e5c0effcf417682563719e5d8ac8d2b93de07a56108b4c2d436d7a29a"},
- {file = "protobuf-5.28.1.tar.gz", hash = "sha256:42597e938f83bb7f3e4b35f03aa45208d49ae8d5bcb4bc10b9fc825e0ab5e423"},
+ {file = "protobuf-5.28.2-cp310-abi3-win32.whl", hash = "sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d"},
+ {file = "protobuf-5.28.2-cp310-abi3-win_amd64.whl", hash = "sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132"},
+ {file = "protobuf-5.28.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7"},
+ {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f"},
+ {file = "protobuf-5.28.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f"},
+ {file = "protobuf-5.28.2-cp38-cp38-win32.whl", hash = "sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0"},
+ {file = "protobuf-5.28.2-cp38-cp38-win_amd64.whl", hash = "sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3"},
+ {file = "protobuf-5.28.2-cp39-cp39-win32.whl", hash = "sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36"},
+ {file = "protobuf-5.28.2-cp39-cp39-win_amd64.whl", hash = "sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276"},
+ {file = "protobuf-5.28.2-py3-none-any.whl", hash = "sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece"},
+ {file = "protobuf-5.28.2.tar.gz", hash = "sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0"},
]
[[package]]
name = "psutil"
-version = "5.9.8"
+version = "6.1.0"
description = "Cross-platform lib for process and system monitoring in Python."
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
- {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"},
- {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"},
- {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"},
- {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"},
- {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"},
- {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"},
- {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"},
- {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"},
- {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"},
- {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"},
- {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"},
- {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"},
- {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"},
- {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"},
- {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"},
- {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"},
+ {file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"},
+ {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"},
+ {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047"},
+ {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76"},
+ {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc"},
+ {file = "psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e"},
+ {file = "psutil-6.1.0-cp27-none-win_amd64.whl", hash = "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85"},
+ {file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"},
+ {file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"},
+ {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"},
+ {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"},
+ {file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"},
+ {file = "psutil-6.1.0-cp36-cp36m-win32.whl", hash = "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca"},
+ {file = "psutil-6.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747"},
+ {file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"},
+ {file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"},
+ {file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"},
]
[package.extras]
-test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
+dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"]
+test = ["pytest", "pytest-xdist", "setuptools"]
[[package]]
name = "pyasn1"
@@ -2053,18 +2133,18 @@ files = [
[[package]]
name = "pydantic"
-version = "2.9.1"
+version = "2.9.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
- {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
+ {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
+ {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.23.3"
+pydantic-core = "2.23.4"
typing-extensions = [
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
@@ -2076,100 +2156,100 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.23.3"
+version = "2.23.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
- {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
- {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
- {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
- {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
- {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
- {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
- {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
- {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
- {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
- {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
- {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
- {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
- {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
- {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
- {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
- {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
- {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
- {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
- {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
- {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
- {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
- {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
- {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
- {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
- {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
- {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
- {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
- {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
- {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
- {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
- {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
- {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
- {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
- {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
- {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
- {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
- {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
- {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
- {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
- {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
- {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
- {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
- {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
- {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
- {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
+ {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
+ {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
+ {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
+ {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
+ {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
+ {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
+ {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
+ {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
+ {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
+ {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
+ {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
+ {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
+ {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
+ {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
+ {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
+ {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
+ {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
+ {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
+ {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
+ {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
+ {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
]
[package.dependencies]
@@ -2239,21 +2319,23 @@ diagrams = ["jinja2", "railroad-diagrams"]
[[package]]
name = "pyright"
-version = "1.1.380"
+version = "1.1.386"
description = "Command line wrapper for pyright"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pyright-1.1.380-py3-none-any.whl", hash = "sha256:a6404392053d8848bacc7aebcbd9d318bb46baf1a1a000359305481920f43879"},
- {file = "pyright-1.1.380.tar.gz", hash = "sha256:e6ceb1a5f7e9f03106e0aa1d6fbb4d97735a5e7ffb59f3de6b2db590baf935b2"},
+ {file = "pyright-1.1.386-py3-none-any.whl", hash = "sha256:7071ac495593b2258ccdbbf495f1a5c0e5f27951f6b429bed4e8b296eb5cd21d"},
+ {file = "pyright-1.1.386.tar.gz", hash = "sha256:8e9975e34948ba5f8e07792a9c9d2bdceb2c6c0b61742b068d2229ca2bc4a9d9"},
]
[package.dependencies]
nodeenv = ">=1.6.0"
+typing-extensions = ">=4.1"
[package.extras]
-all = ["twine (>=3.4.1)"]
+all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"]
dev = ["twine (>=3.4.1)"]
+nodejs = ["nodejs-wheel-binaries"]
[[package]]
name = "pyro5"
@@ -2427,38 +2509,38 @@ files = [
[[package]]
name = "realtime"
-version = "2.0.2"
+version = "2.0.5"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
files = [
- {file = "realtime-2.0.2-py3-none-any.whl", hash = "sha256:2634c915bc38807f2013f21e8bcc4d2f79870dfd81460ddb9393883d0489928a"},
- {file = "realtime-2.0.2.tar.gz", hash = "sha256:519da9325b3b8102139d51785013d592f6b2403d81fa21d838a0b0234723ed7d"},
+ {file = "realtime-2.0.5-py3-none-any.whl", hash = "sha256:f9ec2d762794709e37a8e2745c8dfd86eac4870678808f09676c8f2b7bfa6bbc"},
+ {file = "realtime-2.0.5.tar.gz", hash = "sha256:133828fbc2cc2325fb015fe071c6da9fb488819cac96d85ed297045c715b35f5"},
]
[package.dependencies]
-aiohttp = ">=3.10.2,<4.0.0"
+aiohttp = ">=3.10.6,<4.0.0"
python-dateutil = ">=2.8.1,<3.0.0"
typing-extensions = ">=4.12.2,<5.0.0"
-websockets = ">=11,<13"
+websockets = ">=11,<14"
[[package]]
name = "redis"
-version = "5.0.8"
+version = "5.1.0"
description = "Python client for Redis database and key-value store"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "redis-5.0.8-py3-none-any.whl", hash = "sha256:56134ee08ea909106090934adc36f65c9bcbbaecea5b21ba704ba6fb561f8eb4"},
- {file = "redis-5.0.8.tar.gz", hash = "sha256:0c5b10d387568dfe0698c6fad6615750c24170e548ca2deac10c649d463e9870"},
+ {file = "redis-5.1.0-py3-none-any.whl", hash = "sha256:fd4fccba0d7f6aa48c58a78d76ddb4afc698f5da4a2c1d03d916e4fd7ab88cdd"},
+ {file = "redis-5.1.0.tar.gz", hash = "sha256:b756df1e4a3858fcc0ef861f3fc53623a96c41e2b1f5304e09e0fe758d333d40"},
]
[package.dependencies]
async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}
[package.extras]
-hiredis = ["hiredis (>1.0.0)"]
-ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"]
+hiredis = ["hiredis (>=3.0.0)"]
+ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"]
[[package]]
name = "referencing"
@@ -2475,6 +2557,23 @@ files = [
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"
+[[package]]
+name = "replicate"
+version = "0.34.1"
+description = "Python client for Replicate"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "replicate-0.34.1-py3-none-any.whl", hash = "sha256:beeebbdd83dca46eee960c383dfd8dcc48d7922d9fe9e613f242cc69ed522f2f"},
+ {file = "replicate-0.34.1.tar.gz", hash = "sha256:57cf80c7f4d7f6ae503b1bef400f57c26d494724002d7e9a8750d01394dcfc76"},
+]
+
+[package.dependencies]
+httpx = ">=0.21.0,<1"
+packaging = "*"
+pydantic = ">1.10.7"
+typing-extensions = ">=4.5.0"
+
[[package]]
name = "requests"
version = "2.32.3"
@@ -2642,48 +2741,49 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "ruff"
-version = "0.5.7"
+version = "0.7.1"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
- {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
- {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"},
- {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"},
- {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"},
- {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
- {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
+ {file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"},
+ {file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"},
+ {file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"},
+ {file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"},
+ {file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"},
+ {file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"},
+ {file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"},
+ {file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"},
+ {file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"},
+ {file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"},
+ {file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"},
+ {file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"},
]
[[package]]
name = "sentry-sdk"
-version = "1.45.0"
+version = "2.17.0"
description = "Python client for Sentry (https://sentry.io)"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
files = [
- {file = "sentry-sdk-1.45.0.tar.gz", hash = "sha256:509aa9678c0512344ca886281766c2e538682f8acfa50fd8d405f8c417ad0625"},
- {file = "sentry_sdk-1.45.0-py2.py3-none-any.whl", hash = "sha256:1ce29e30240cc289a027011103a8c83885b15ef2f316a60bcc7c5300afa144f1"},
+ {file = "sentry_sdk-2.17.0-py2.py3-none-any.whl", hash = "sha256:625955884b862cc58748920f9e21efdfb8e0d4f98cca4ab0d3918576d5b606ad"},
+ {file = "sentry_sdk-2.17.0.tar.gz", hash = "sha256:dd0a05352b78ffeacced73a94e86f38b32e2eae15fff5f30ca5abb568a72eacf"},
]
[package.dependencies]
certifi = "*"
-urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
+urllib3 = ">=1.26.11"
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
+anthropic = ["anthropic (>=0.16)"]
arq = ["arq (>=0.23)"]
asyncpg = ["asyncpg (>=0.23)"]
beam = ["apache-beam (>=2.12)"]
@@ -2696,13 +2796,17 @@ django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
-grpcio = ["grpcio (>=1.21.1)"]
+grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
+http2 = ["httpcore[http2] (==1.*)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
+huggingface-hub = ["huggingface-hub (>=0.22)"]
+langchain = ["langchain (>=0.0.210)"]
+litestar = ["litestar (>=2.0.0)"]
loguru = ["loguru (>=0.5)"]
openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
-opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
+opentelemetry-experimental = ["opentelemetry-distro"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
@@ -2712,7 +2816,7 @@ sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
starlette = ["starlette (>=0.19.1)"]
starlite = ["starlite (>=1.48)"]
-tornado = ["tornado (>=5)"]
+tornado = ["tornado (>=6)"]
[[package]]
name = "serpent"
@@ -2841,13 +2945,13 @@ httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
[[package]]
name = "tenacity"
-version = "8.5.0"
+version = "9.0.0"
description = "Retry code until it succeeds"
optional = false
python-versions = ">=3.8"
files = [
- {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"},
- {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"},
+ {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"},
+ {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"},
]
[package.extras]
@@ -3026,13 +3130,13 @@ files = [
[[package]]
name = "tzdata"
-version = "2024.1"
+version = "2024.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
files = [
- {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"},
- {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"},
+ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
+ {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
]
[[package]]
@@ -3084,13 +3188,13 @@ files = [
[[package]]
name = "urllib3"
-version = "2.2.2"
+version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
- {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
- {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
+ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
+ {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
@@ -3171,41 +3275,41 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)"
[[package]]
name = "watchdog"
-version = "5.0.2"
+version = "5.0.3"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.9"
files = [
- {file = "watchdog-5.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d961f4123bb3c447d9fcdcb67e1530c366f10ab3a0c7d1c0c9943050936d4877"},
- {file = "watchdog-5.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72990192cb63872c47d5e5fefe230a401b87fd59d257ee577d61c9e5564c62e5"},
- {file = "watchdog-5.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6bec703ad90b35a848e05e1b40bf0050da7ca28ead7ac4be724ae5ac2653a1a0"},
- {file = "watchdog-5.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:dae7a1879918f6544201d33666909b040a46421054a50e0f773e0d870ed7438d"},
- {file = "watchdog-5.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c4a440f725f3b99133de610bfec93d570b13826f89616377715b9cd60424db6e"},
- {file = "watchdog-5.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8b2918c19e0d48f5f20df458c84692e2a054f02d9df25e6c3c930063eca64c1"},
- {file = "watchdog-5.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:aa9cd6e24126d4afb3752a3e70fce39f92d0e1a58a236ddf6ee823ff7dba28ee"},
- {file = "watchdog-5.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f627c5bf5759fdd90195b0c0431f99cff4867d212a67b384442c51136a098ed7"},
- {file = "watchdog-5.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d7594a6d32cda2b49df3fd9abf9b37c8d2f3eab5df45c24056b4a671ac661619"},
- {file = "watchdog-5.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba32efcccfe2c58f4d01115440d1672b4eb26cdd6fc5b5818f1fb41f7c3e1889"},
- {file = "watchdog-5.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:963f7c4c91e3f51c998eeff1b3fb24a52a8a34da4f956e470f4b068bb47b78ee"},
- {file = "watchdog-5.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8c47150aa12f775e22efff1eee9f0f6beee542a7aa1a985c271b1997d340184f"},
- {file = "watchdog-5.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:14dd4ed023d79d1f670aa659f449bcd2733c33a35c8ffd88689d9d243885198b"},
- {file = "watchdog-5.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b84bff0391ad4abe25c2740c7aec0e3de316fdf7764007f41e248422a7760a7f"},
- {file = "watchdog-5.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e8d5ff39f0a9968952cce548e8e08f849141a4fcc1290b1c17c032ba697b9d7"},
- {file = "watchdog-5.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fb223456db6e5f7bd9bbd5cd969f05aae82ae21acc00643b60d81c770abd402b"},
- {file = "watchdog-5.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9814adb768c23727a27792c77812cf4e2fd9853cd280eafa2bcfa62a99e8bd6e"},
- {file = "watchdog-5.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:901ee48c23f70193d1a7bc2d9ee297df66081dd5f46f0ca011be4f70dec80dab"},
- {file = "watchdog-5.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:638bcca3d5b1885c6ec47be67bf712b00a9ab3d4b22ec0881f4889ad870bc7e8"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5597c051587f8757798216f2485e85eac583c3b343e9aa09127a3a6f82c65ee8"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:53ed1bf71fcb8475dd0ef4912ab139c294c87b903724b6f4a8bd98e026862e6d"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:29e4a2607bd407d9552c502d38b45a05ec26a8e40cc7e94db9bb48f861fa5abc"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:b6dc8f1d770a8280997e4beae7b9a75a33b268c59e033e72c8a10990097e5fde"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:d2ab34adc9bf1489452965cdb16a924e97d4452fcf88a50b21859068b50b5c3b"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:7d1aa7e4bb0f0c65a1a91ba37c10e19dabf7eaaa282c5787e51371f090748f4b"},
- {file = "watchdog-5.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:726eef8f8c634ac6584f86c9c53353a010d9f311f6c15a034f3800a7a891d941"},
- {file = "watchdog-5.0.2-py3-none-win32.whl", hash = "sha256:bda40c57115684d0216556671875e008279dea2dc00fcd3dde126ac8e0d7a2fb"},
- {file = "watchdog-5.0.2-py3-none-win_amd64.whl", hash = "sha256:d010be060c996db725fbce7e3ef14687cdcc76f4ca0e4339a68cc4532c382a73"},
- {file = "watchdog-5.0.2-py3-none-win_ia64.whl", hash = "sha256:3960136b2b619510569b90f0cd96408591d6c251a75c97690f4553ca88889769"},
- {file = "watchdog-5.0.2.tar.gz", hash = "sha256:dcebf7e475001d2cdeb020be630dc5b687e9acdd60d16fea6bb4508e7b94cf76"},
+ {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:85527b882f3facda0579bce9d743ff7f10c3e1e0db0a0d0e28170a7d0e5ce2ea"},
+ {file = "watchdog-5.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53adf73dcdc0ef04f7735066b4a57a4cd3e49ef135daae41d77395f0b5b692cb"},
+ {file = "watchdog-5.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e25adddab85f674acac303cf1f5835951345a56c5f7f582987d266679979c75b"},
+ {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f01f4a3565a387080dc49bdd1fefe4ecc77f894991b88ef927edbfa45eb10818"},
+ {file = "watchdog-5.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:91b522adc25614cdeaf91f7897800b82c13b4b8ac68a42ca959f992f6990c490"},
+ {file = "watchdog-5.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d52db5beb5e476e6853da2e2d24dbbbed6797b449c8bf7ea118a4ee0d2c9040e"},
+ {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:94d11b07c64f63f49876e0ab8042ae034674c8653bfcdaa8c4b32e71cfff87e8"},
+ {file = "watchdog-5.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:349c9488e1d85d0a58e8cb14222d2c51cbc801ce11ac3936ab4c3af986536926"},
+ {file = "watchdog-5.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:53a3f10b62c2d569e260f96e8d966463dec1a50fa4f1b22aec69e3f91025060e"},
+ {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:950f531ec6e03696a2414b6308f5c6ff9dab7821a768c9d5788b1314e9a46ca7"},
+ {file = "watchdog-5.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae6deb336cba5d71476caa029ceb6e88047fc1dc74b62b7c4012639c0b563906"},
+ {file = "watchdog-5.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1021223c08ba8d2d38d71ec1704496471ffd7be42cfb26b87cd5059323a389a1"},
+ {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:752fb40efc7cc8d88ebc332b8f4bcbe2b5cc7e881bccfeb8e25054c00c994ee3"},
+ {file = "watchdog-5.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a2e8f3f955d68471fa37b0e3add18500790d129cc7efe89971b8a4cc6fdeb0b2"},
+ {file = "watchdog-5.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b8ca4d854adcf480bdfd80f46fdd6fb49f91dd020ae11c89b3a79e19454ec627"},
+ {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:90a67d7857adb1d985aca232cc9905dd5bc4803ed85cfcdcfcf707e52049eda7"},
+ {file = "watchdog-5.0.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:720ef9d3a4f9ca575a780af283c8fd3a0674b307651c1976714745090da5a9e8"},
+ {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:223160bb359281bb8e31c8f1068bf71a6b16a8ad3d9524ca6f523ac666bb6a1e"},
+ {file = "watchdog-5.0.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:560135542c91eaa74247a2e8430cf83c4342b29e8ad4f520ae14f0c8a19cfb5b"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:dd021efa85970bd4824acacbb922066159d0f9e546389a4743d56919b6758b91"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_armv7l.whl", hash = "sha256:78864cc8f23dbee55be34cc1494632a7ba30263951b5b2e8fc8286b95845f82c"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_i686.whl", hash = "sha256:1e9679245e3ea6498494b3028b90c7b25dbb2abe65c7d07423ecfc2d6218ff7c"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64.whl", hash = "sha256:9413384f26b5d050b6978e6fcd0c1e7f0539be7a4f1a885061473c5deaa57221"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:294b7a598974b8e2c6123d19ef15de9abcd282b0fbbdbc4d23dfa812959a9e05"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_s390x.whl", hash = "sha256:26dd201857d702bdf9d78c273cafcab5871dd29343748524695cecffa44a8d97"},
+ {file = "watchdog-5.0.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:0f9332243355643d567697c3e3fa07330a1d1abf981611654a1f2bf2175612b7"},
+ {file = "watchdog-5.0.3-py3-none-win32.whl", hash = "sha256:c66f80ee5b602a9c7ab66e3c9f36026590a0902db3aea414d59a2f55188c1f49"},
+ {file = "watchdog-5.0.3-py3-none-win_amd64.whl", hash = "sha256:f00b4cf737f568be9665563347a910f8bdc76f88c2970121c86243c8cfdf90e9"},
+ {file = "watchdog-5.0.3-py3-none-win_ia64.whl", hash = "sha256:49f4d36cb315c25ea0d946e018c01bb028048023b9e103d3d3943f58e109dd45"},
+ {file = "watchdog-5.0.3.tar.gz", hash = "sha256:108f42a7f0345042a854d4d0ad0834b741d421330d5f575b81cb27b883500176"},
]
[package.extras]
@@ -3324,83 +3428,97 @@ test = ["websockets"]
[[package]]
name = "websockets"
-version = "12.0"
+version = "13.1"
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"},
- {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"},
- {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"},
- {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"},
- {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"},
- {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"},
- {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"},
- {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"},
- {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"},
- {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"},
- {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"},
- {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"},
- {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"},
- {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"},
- {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"},
- {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"},
- {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"},
- {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"},
- {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"},
- {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"},
- {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"},
- {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"},
- {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"},
- {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"},
- {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"},
- {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"},
- {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"},
- {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"},
- {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"},
- {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"},
- {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"},
- {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"},
- {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"},
- {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"},
- {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"},
- {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"},
- {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"},
- {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"},
- {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"},
- {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"},
- {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"},
- {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"},
- {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"},
- {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"},
- {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"},
- {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"},
- {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"},
- {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"},
- {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"},
- {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"},
- {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"},
- {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"},
- {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"},
- {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"},
- {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"},
- {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"},
- {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"},
- {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"},
- {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"},
- {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"},
- {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"},
- {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"},
- {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"},
- {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"},
- {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"},
- {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"},
- {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"},
- {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"},
- {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"},
- {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"},
- {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"},
- {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
+ {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"},
+ {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"},
+ {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"},
+ {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"},
+ {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"},
+ {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"},
+ {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"},
+ {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"},
+ {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"},
+ {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"},
+ {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"},
+ {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"},
+ {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"},
+ {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"},
+ {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"},
+ {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"},
+ {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"},
+ {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"},
+ {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"},
+ {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"},
+ {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"},
+ {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"},
+ {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"},
+ {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"},
+ {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"},
+ {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"},
+ {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"},
+ {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"},
+ {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"},
+ {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"},
+ {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"},
+ {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"},
+ {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"},
+ {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"},
+ {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"},
+ {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"},
+ {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"},
+ {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"},
+ {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"},
+ {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"},
+ {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"},
+ {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"},
+ {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"},
+ {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"},
+ {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"},
+ {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"},
+ {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"},
+ {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"},
+ {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"},
+ {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"},
+ {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"},
+ {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"},
+ {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"},
+ {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"},
+ {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"},
+ {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"},
+ {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"},
+ {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"},
+ {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"},
+ {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"},
+ {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"},
+ {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"},
+ {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"},
+ {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"},
+ {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"},
+ {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"},
+ {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"},
+ {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"},
+ {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"},
+ {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"},
+ {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"},
]
[[package]]
@@ -3484,103 +3602,103 @@ files = [
[[package]]
name = "yarl"
-version = "1.11.1"
+version = "1.13.1"
description = "Yet another URL library"
optional = false
python-versions = ">=3.8"
files = [
- {file = "yarl-1.11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:400cd42185f92de559d29eeb529e71d80dfbd2f45c36844914a4a34297ca6f00"},
- {file = "yarl-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8258c86f47e080a258993eed877d579c71da7bda26af86ce6c2d2d072c11320d"},
- {file = "yarl-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2164cd9725092761fed26f299e3f276bb4b537ca58e6ff6b252eae9631b5c96e"},
- {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08ea567c16f140af8ddc7cb58e27e9138a1386e3e6e53982abaa6f2377b38cc"},
- {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:768ecc550096b028754ea28bf90fde071c379c62c43afa574edc6f33ee5daaec"},
- {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2909fa3a7d249ef64eeb2faa04b7957e34fefb6ec9966506312349ed8a7e77bf"},
- {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01a8697ec24f17c349c4f655763c4db70eebc56a5f82995e5e26e837c6eb0e49"},
- {file = "yarl-1.11.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e286580b6511aac7c3268a78cdb861ec739d3e5a2a53b4809faef6b49778eaff"},
- {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4179522dc0305c3fc9782549175c8e8849252fefeb077c92a73889ccbcd508ad"},
- {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:27fcb271a41b746bd0e2a92182df507e1c204759f460ff784ca614e12dd85145"},
- {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f61db3b7e870914dbd9434b560075e0366771eecbe6d2b5561f5bc7485f39efd"},
- {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:c92261eb2ad367629dc437536463dc934030c9e7caca861cc51990fe6c565f26"},
- {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d95b52fbef190ca87d8c42f49e314eace4fc52070f3dfa5f87a6594b0c1c6e46"},
- {file = "yarl-1.11.1-cp310-cp310-win32.whl", hash = "sha256:489fa8bde4f1244ad6c5f6d11bb33e09cf0d1d0367edb197619c3e3fc06f3d91"},
- {file = "yarl-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:476e20c433b356e16e9a141449f25161e6b69984fb4cdbd7cd4bd54c17844998"},
- {file = "yarl-1.11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:946eedc12895873891aaceb39bceb484b4977f70373e0122da483f6c38faaa68"},
- {file = "yarl-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21a7c12321436b066c11ec19c7e3cb9aec18884fe0d5b25d03d756a9e654edfe"},
- {file = "yarl-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c35f493b867912f6fda721a59cc7c4766d382040bdf1ddaeeaa7fa4d072f4675"},
- {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25861303e0be76b60fddc1250ec5986c42f0a5c0c50ff57cc30b1be199c00e63"},
- {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4b53f73077e839b3f89c992223f15b1d2ab314bdbdf502afdc7bb18e95eae27"},
- {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:327c724b01b8641a1bf1ab3b232fb638706e50f76c0b5bf16051ab65c868fac5"},
- {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4307d9a3417eea87715c9736d050c83e8c1904e9b7aada6ce61b46361b733d92"},
- {file = "yarl-1.11.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a28bed68ab8fb7e380775f0029a079f08a17799cb3387a65d14ace16c12e2b"},
- {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:067b961853c8e62725ff2893226fef3d0da060656a9827f3f520fb1d19b2b68a"},
- {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8215f6f21394d1f46e222abeb06316e77ef328d628f593502d8fc2a9117bde83"},
- {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:498442e3af2a860a663baa14fbf23fb04b0dd758039c0e7c8f91cb9279799bff"},
- {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:69721b8effdb588cb055cc22f7c5105ca6fdaa5aeb3ea09021d517882c4a904c"},
- {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e969fa4c1e0b1a391f3fcbcb9ec31e84440253325b534519be0d28f4b6b533e"},
- {file = "yarl-1.11.1-cp311-cp311-win32.whl", hash = "sha256:7d51324a04fc4b0e097ff8a153e9276c2593106a811704025bbc1d6916f45ca6"},
- {file = "yarl-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:15061ce6584ece023457fb8b7a7a69ec40bf7114d781a8c4f5dcd68e28b5c53b"},
- {file = "yarl-1.11.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a4264515f9117be204935cd230fb2a052dd3792789cc94c101c535d349b3dab0"},
- {file = "yarl-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f41fa79114a1d2eddb5eea7b912d6160508f57440bd302ce96eaa384914cd265"},
- {file = "yarl-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:02da8759b47d964f9173c8675710720b468aa1c1693be0c9c64abb9d8d9a4867"},
- {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9361628f28f48dcf8b2f528420d4d68102f593f9c2e592bfc842f5fb337e44fd"},
- {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b91044952da03b6f95fdba398d7993dd983b64d3c31c358a4c89e3c19b6f7aef"},
- {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74db2ef03b442276d25951749a803ddb6e270d02dda1d1c556f6ae595a0d76a8"},
- {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e975a2211952a8a083d1b9d9ba26472981ae338e720b419eb50535de3c02870"},
- {file = "yarl-1.11.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aef97ba1dd2138112890ef848e17d8526fe80b21f743b4ee65947ea184f07a2"},
- {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7915ea49b0c113641dc4d9338efa9bd66b6a9a485ffe75b9907e8573ca94b84"},
- {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:504cf0d4c5e4579a51261d6091267f9fd997ef58558c4ffa7a3e1460bd2336fa"},
- {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3de5292f9f0ee285e6bd168b2a77b2a00d74cbcfa420ed078456d3023d2f6dff"},
- {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a34e1e30f1774fa35d37202bbeae62423e9a79d78d0874e5556a593479fdf239"},
- {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66b63c504d2ca43bf7221a1f72fbe981ff56ecb39004c70a94485d13e37ebf45"},
- {file = "yarl-1.11.1-cp312-cp312-win32.whl", hash = "sha256:a28b70c9e2213de425d9cba5ab2e7f7a1c8ca23a99c4b5159bf77b9c31251447"},
- {file = "yarl-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:17b5a386d0d36fb828e2fb3ef08c8829c1ebf977eef88e5367d1c8c94b454639"},
- {file = "yarl-1.11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1fa2e7a406fbd45b61b4433e3aa254a2c3e14c4b3186f6e952d08a730807fa0c"},
- {file = "yarl-1.11.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:750f656832d7d3cb0c76be137ee79405cc17e792f31e0a01eee390e383b2936e"},
- {file = "yarl-1.11.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b8486f322d8f6a38539136a22c55f94d269addb24db5cb6f61adc61eabc9d93"},
- {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fce4da3703ee6048ad4138fe74619c50874afe98b1ad87b2698ef95bf92c96d"},
- {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed653638ef669e0efc6fe2acb792275cb419bf9cb5c5049399f3556995f23c7"},
- {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18ac56c9dd70941ecad42b5a906820824ca72ff84ad6fa18db33c2537ae2e089"},
- {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:688654f8507464745ab563b041d1fb7dab5d9912ca6b06e61d1c4708366832f5"},
- {file = "yarl-1.11.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4973eac1e2ff63cf187073cd4e1f1148dcd119314ab79b88e1b3fad74a18c9d5"},
- {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:964a428132227edff96d6f3cf261573cb0f1a60c9a764ce28cda9525f18f7786"},
- {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6d23754b9939cbab02c63434776df1170e43b09c6a517585c7ce2b3d449b7318"},
- {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c2dc4250fe94d8cd864d66018f8344d4af50e3758e9d725e94fecfa27588ff82"},
- {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09696438cb43ea6f9492ef237761b043f9179f455f405279e609f2bc9100212a"},
- {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:999bfee0a5b7385a0af5ffb606393509cfde70ecca4f01c36985be6d33e336da"},
- {file = "yarl-1.11.1-cp313-cp313-win32.whl", hash = "sha256:ce928c9c6409c79e10f39604a7e214b3cb69552952fbda8d836c052832e6a979"},
- {file = "yarl-1.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:501c503eed2bb306638ccb60c174f856cc3246c861829ff40eaa80e2f0330367"},
- {file = "yarl-1.11.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dae7bd0daeb33aa3e79e72877d3d51052e8b19c9025ecf0374f542ea8ec120e4"},
- {file = "yarl-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3ff6b1617aa39279fe18a76c8d165469c48b159931d9b48239065767ee455b2b"},
- {file = "yarl-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3257978c870728a52dcce8c2902bf01f6c53b65094b457bf87b2644ee6238ddc"},
- {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f351fa31234699d6084ff98283cb1e852270fe9e250a3b3bf7804eb493bd937"},
- {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8aef1b64da41d18026632d99a06b3fefe1d08e85dd81d849fa7c96301ed22f1b"},
- {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7175a87ab8f7fbde37160a15e58e138ba3b2b0e05492d7351314a250d61b1591"},
- {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba444bdd4caa2a94456ef67a2f383710928820dd0117aae6650a4d17029fa25e"},
- {file = "yarl-1.11.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ea9682124fc062e3d931c6911934a678cb28453f957ddccf51f568c2f2b5e05"},
- {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8418c053aeb236b20b0ab8fa6bacfc2feaaf7d4683dd96528610989c99723d5f"},
- {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:61a5f2c14d0a1adfdd82258f756b23a550c13ba4c86c84106be4c111a3a4e413"},
- {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f3a6d90cab0bdf07df8f176eae3a07127daafcf7457b997b2bf46776da2c7eb7"},
- {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:077da604852be488c9a05a524068cdae1e972b7dc02438161c32420fb4ec5e14"},
- {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:15439f3c5c72686b6c3ff235279630d08936ace67d0fe5c8d5bbc3ef06f5a420"},
- {file = "yarl-1.11.1-cp38-cp38-win32.whl", hash = "sha256:238a21849dd7554cb4d25a14ffbfa0ef380bb7ba201f45b144a14454a72ffa5a"},
- {file = "yarl-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:67459cf8cf31da0e2cbdb4b040507e535d25cfbb1604ca76396a3a66b8ba37a6"},
- {file = "yarl-1.11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:884eab2ce97cbaf89f264372eae58388862c33c4f551c15680dd80f53c89a269"},
- {file = "yarl-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a336eaa7ee7e87cdece3cedb395c9657d227bfceb6781295cf56abcd3386a26"},
- {file = "yarl-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87f020d010ba80a247c4abc335fc13421037800ca20b42af5ae40e5fd75e7909"},
- {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:637c7ddb585a62d4469f843dac221f23eec3cbad31693b23abbc2c366ad41ff4"},
- {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48dfd117ab93f0129084577a07287376cc69c08138694396f305636e229caa1a"},
- {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e0ae31fb5ccab6eda09ba1494e87eb226dcbd2372dae96b87800e1dcc98804"},
- {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f46f81501160c28d0c0b7333b4f7be8983dbbc161983b6fb814024d1b4952f79"},
- {file = "yarl-1.11.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04293941646647b3bfb1719d1d11ff1028e9c30199509a844da3c0f5919dc520"},
- {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:250e888fa62d73e721f3041e3a9abf427788a1934b426b45e1b92f62c1f68366"},
- {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e8f63904df26d1a66aabc141bfd258bf738b9bc7bc6bdef22713b4f5ef789a4c"},
- {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:aac44097d838dda26526cffb63bdd8737a2dbdf5f2c68efb72ad83aec6673c7e"},
- {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:267b24f891e74eccbdff42241c5fb4f974de2d6271dcc7d7e0c9ae1079a560d9"},
- {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6907daa4b9d7a688063ed098c472f96e8181733c525e03e866fb5db480a424df"},
- {file = "yarl-1.11.1-cp39-cp39-win32.whl", hash = "sha256:14438dfc5015661f75f85bc5adad0743678eefee266ff0c9a8e32969d5d69f74"},
- {file = "yarl-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:94d0caaa912bfcdc702a4204cd5e2bb01eb917fc4f5ea2315aa23962549561b0"},
- {file = "yarl-1.11.1-py3-none-any.whl", hash = "sha256:72bf26f66456baa0584eff63e44545c9f0eaed9b73cb6601b647c91f14c11f38"},
- {file = "yarl-1.11.1.tar.gz", hash = "sha256:1bb2d9e212fb7449b8fb73bc461b51eaa17cc8430b4a87d87be7b25052d92f53"},
+ {file = "yarl-1.13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:82e692fb325013a18a5b73a4fed5a1edaa7c58144dc67ad9ef3d604eccd451ad"},
+ {file = "yarl-1.13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df4e82e68f43a07735ae70a2d84c0353e58e20add20ec0af611f32cd5ba43fb4"},
+ {file = "yarl-1.13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ec9dd328016d8d25702a24ee274932aebf6be9787ed1c28d021945d264235b3c"},
+ {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5820bd4178e6a639b3ef1db8b18500a82ceab6d8b89309e121a6859f56585b05"},
+ {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86c438ce920e089c8c2388c7dcc8ab30dfe13c09b8af3d306bcabb46a053d6f7"},
+ {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3de86547c820e4f4da4606d1c8ab5765dd633189791f15247706a2eeabc783ae"},
+ {file = "yarl-1.13.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca53632007c69ddcdefe1e8cbc3920dd88825e618153795b57e6ebcc92e752a"},
+ {file = "yarl-1.13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4ee1d240b84e2f213565f0ec08caef27a0e657d4c42859809155cf3a29d1735"},
+ {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c49f3e379177f4477f929097f7ed4b0622a586b0aa40c07ac8c0f8e40659a1ac"},
+ {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5c5e32fef09ce101fe14acd0f498232b5710effe13abac14cd95de9c274e689e"},
+ {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab9524e45ee809a083338a749af3b53cc7efec458c3ad084361c1dbf7aaf82a2"},
+ {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:b1481c048fe787f65e34cb06f7d6824376d5d99f1231eae4778bbe5c3831076d"},
+ {file = "yarl-1.13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:31497aefd68036d8e31bfbacef915826ca2e741dbb97a8d6c7eac66deda3b606"},
+ {file = "yarl-1.13.1-cp310-cp310-win32.whl", hash = "sha256:1fa56f34b2236f5192cb5fceba7bbb09620e5337e0b6dfe2ea0ddbd19dd5b154"},
+ {file = "yarl-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:1bbb418f46c7f7355084833051701b2301092e4611d9e392360c3ba2e3e69f88"},
+ {file = "yarl-1.13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:216a6785f296169ed52cd7dcdc2612f82c20f8c9634bf7446327f50398732a51"},
+ {file = "yarl-1.13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:40c6e73c03a6befb85b72da213638b8aaa80fe4136ec8691560cf98b11b8ae6e"},
+ {file = "yarl-1.13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2430cf996113abe5aee387d39ee19529327205cda975d2b82c0e7e96e5fdabdc"},
+ {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fb4134cc6e005b99fa29dbc86f1ea0a298440ab6b07c6b3ee09232a3b48f495"},
+ {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:309c104ecf67626c033845b860d31594a41343766a46fa58c3309c538a1e22b2"},
+ {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f90575e9fe3aae2c1e686393a9689c724cd00045275407f71771ae5d690ccf38"},
+ {file = "yarl-1.13.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d2e1626be8712333a9f71270366f4a132f476ffbe83b689dd6dc0d114796c74"},
+ {file = "yarl-1.13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b66c87da3c6da8f8e8b648878903ca54589038a0b1e08dde2c86d9cd92d4ac9"},
+ {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cf1ad338620249f8dd6d4b6a91a69d1f265387df3697ad5dc996305cf6c26fb2"},
+ {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9915300fe5a0aa663c01363db37e4ae8e7c15996ebe2c6cce995e7033ff6457f"},
+ {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:703b0f584fcf157ef87816a3c0ff868e8c9f3c370009a8b23b56255885528f10"},
+ {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1d8e3ca29f643dd121f264a7c89f329f0fcb2e4461833f02de6e39fef80f89da"},
+ {file = "yarl-1.13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7055bbade838d68af73aea13f8c86588e4bcc00c2235b4b6d6edb0dbd174e246"},
+ {file = "yarl-1.13.1-cp311-cp311-win32.whl", hash = "sha256:a3442c31c11088e462d44a644a454d48110f0588de830921fd201060ff19612a"},
+ {file = "yarl-1.13.1-cp311-cp311-win_amd64.whl", hash = "sha256:81bad32c8f8b5897c909bf3468bf601f1b855d12f53b6af0271963ee67fff0d2"},
+ {file = "yarl-1.13.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f452cc1436151387d3d50533523291d5f77c6bc7913c116eb985304abdbd9ec9"},
+ {file = "yarl-1.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9cec42a20eae8bebf81e9ce23fb0d0c729fc54cf00643eb251ce7c0215ad49fe"},
+ {file = "yarl-1.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d959fe96e5c2712c1876d69af0507d98f0b0e8d81bee14cfb3f6737470205419"},
+ {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8c837ab90c455f3ea8e68bee143472ee87828bff19ba19776e16ff961425b57"},
+ {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94a993f976cdcb2dc1b855d8b89b792893220db8862d1a619efa7451817c836b"},
+ {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b2442a415a5f4c55ced0fade7b72123210d579f7d950e0b5527fc598866e62c"},
+ {file = "yarl-1.13.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fdbf0418489525231723cdb6c79e7738b3cbacbaed2b750cb033e4ea208f220"},
+ {file = "yarl-1.13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b7f6e699304717fdc265a7e1922561b02a93ceffdaefdc877acaf9b9f3080b8"},
+ {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bcd5bf4132e6a8d3eb54b8d56885f3d3a38ecd7ecae8426ecf7d9673b270de43"},
+ {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2a93a4557f7fc74a38ca5a404abb443a242217b91cd0c4840b1ebedaad8919d4"},
+ {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:22b739f99c7e4787922903f27a892744189482125cc7b95b747f04dd5c83aa9f"},
+ {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2db874dd1d22d4c2c657807562411ffdfabec38ce4c5ce48b4c654be552759dc"},
+ {file = "yarl-1.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4feaaa4742517eaceafcbe74595ed335a494c84634d33961214b278126ec1485"},
+ {file = "yarl-1.13.1-cp312-cp312-win32.whl", hash = "sha256:bbf9c2a589be7414ac4a534d54e4517d03f1cbb142c0041191b729c2fa23f320"},
+ {file = "yarl-1.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:d07b52c8c450f9366c34aa205754355e933922c79135125541daae6cbf31c799"},
+ {file = "yarl-1.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:95c6737f28069153c399d875317f226bbdea939fd48a6349a3b03da6829fb550"},
+ {file = "yarl-1.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cd66152561632ed4b2a9192e7f8e5a1d41e28f58120b4761622e0355f0fe034c"},
+ {file = "yarl-1.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6a2acde25be0cf9be23a8f6cbd31734536a264723fca860af3ae5e89d771cd71"},
+ {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18595e6a2ee0826bf7dfdee823b6ab55c9b70e8f80f8b77c37e694288f5de1"},
+ {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a31d21089894942f7d9a8df166b495101b7258ff11ae0abec58e32daf8088813"},
+ {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45f209fb4bbfe8630e3d2e2052535ca5b53d4ce2d2026bed4d0637b0416830da"},
+ {file = "yarl-1.13.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f722f30366474a99745533cc4015b1781ee54b08de73260b2bbe13316079851"},
+ {file = "yarl-1.13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3bf60444269345d712838bb11cc4eadaf51ff1a364ae39ce87a5ca8ad3bb2c8"},
+ {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:942c80a832a79c3707cca46bd12ab8aa58fddb34b1626d42b05aa8f0bcefc206"},
+ {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:44b07e1690f010c3c01d353b5790ec73b2f59b4eae5b0000593199766b3f7a5c"},
+ {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:396e59b8de7e4d59ff5507fb4322d2329865b909f29a7ed7ca37e63ade7f835c"},
+ {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:3bb83a0f12701c0b91112a11148b5217617982e1e466069d0555be9b372f2734"},
+ {file = "yarl-1.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c92b89bffc660f1274779cb6fbb290ec1f90d6dfe14492523a0667f10170de26"},
+ {file = "yarl-1.13.1-cp313-cp313-win32.whl", hash = "sha256:269c201bbc01d2cbba5b86997a1e0f73ba5e2f471cfa6e226bcaa7fd664b598d"},
+ {file = "yarl-1.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:1d0828e17fa701b557c6eaed5edbd9098eb62d8838344486248489ff233998b8"},
+ {file = "yarl-1.13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8be8cdfe20787e6a5fcbd010f8066227e2bb9058331a4eccddec6c0db2bb85b2"},
+ {file = "yarl-1.13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08d7148ff11cb8e886d86dadbfd2e466a76d5dd38c7ea8ebd9b0e07946e76e4b"},
+ {file = "yarl-1.13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4afdf84610ca44dcffe8b6c22c68f309aff96be55f5ea2fa31c0c225d6b83e23"},
+ {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0d12fe78dcf60efa205e9a63f395b5d343e801cf31e5e1dda0d2c1fb618073d"},
+ {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:298c1eecfd3257aa16c0cb0bdffb54411e3e831351cd69e6b0739be16b1bdaa8"},
+ {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c14c16831b565707149c742d87a6203eb5597f4329278446d5c0ae7a1a43928e"},
+ {file = "yarl-1.13.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a9bacedbb99685a75ad033fd4de37129449e69808e50e08034034c0bf063f99"},
+ {file = "yarl-1.13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:658e8449b84b92a4373f99305de042b6bd0d19bf2080c093881e0516557474a5"},
+ {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:373f16f38721c680316a6a00ae21cc178e3a8ef43c0227f88356a24c5193abd6"},
+ {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:45d23c4668d4925688e2ea251b53f36a498e9ea860913ce43b52d9605d3d8177"},
+ {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f7917697bcaa3bc3e83db91aa3a0e448bf5cde43c84b7fc1ae2427d2417c0224"},
+ {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5989a38ba1281e43e4663931a53fbf356f78a0325251fd6af09dd03b1d676a09"},
+ {file = "yarl-1.13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:11b3ca8b42a024513adce810385fcabdd682772411d95bbbda3b9ed1a4257644"},
+ {file = "yarl-1.13.1-cp38-cp38-win32.whl", hash = "sha256:dcaef817e13eafa547cdfdc5284fe77970b891f731266545aae08d6cce52161e"},
+ {file = "yarl-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:7addd26594e588503bdef03908fc207206adac5bd90b6d4bc3e3cf33a829f57d"},
+ {file = "yarl-1.13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a0ae6637b173d0c40b9c1462e12a7a2000a71a3258fa88756a34c7d38926911c"},
+ {file = "yarl-1.13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:576365c9f7469e1f6124d67b001639b77113cfd05e85ce0310f5f318fd02fe85"},
+ {file = "yarl-1.13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78f271722423b2d4851cf1f4fa1a1c4833a128d020062721ba35e1a87154a049"},
+ {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d74f3c335cfe9c21ea78988e67f18eb9822f5d31f88b41aec3a1ec5ecd32da5"},
+ {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1891d69a6ba16e89473909665cd355d783a8a31bc84720902c5911dbb6373465"},
+ {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb382fd7b4377363cc9f13ba7c819c3c78ed97c36a82f16f3f92f108c787cbbf"},
+ {file = "yarl-1.13.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c8854b9f80693d20cec797d8e48a848c2fb273eb6f2587b57763ccba3f3bd4b"},
+ {file = "yarl-1.13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbf2c3f04ff50f16404ce70f822cdc59760e5e2d7965905f0e700270feb2bbfc"},
+ {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fb9f59f3848edf186a76446eb8bcf4c900fe147cb756fbbd730ef43b2e67c6a7"},
+ {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ef9b85fa1bc91c4db24407e7c4da93a5822a73dd4513d67b454ca7064e8dc6a3"},
+ {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:098b870c18f1341786f290b4d699504e18f1cd050ed179af8123fd8232513424"},
+ {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:8c723c91c94a3bc8033dd2696a0f53e5d5f8496186013167bddc3fb5d9df46a3"},
+ {file = "yarl-1.13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:44a4c40a6f84e4d5955b63462a0e2a988f8982fba245cf885ce3be7618f6aa7d"},
+ {file = "yarl-1.13.1-cp39-cp39-win32.whl", hash = "sha256:84bbcdcf393139f0abc9f642bf03f00cac31010f3034faa03224a9ef0bb74323"},
+ {file = "yarl-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:fc2931ac9ce9c61c9968989ec831d3a5e6fcaaff9474e7cfa8de80b7aff5a093"},
+ {file = "yarl-1.13.1-py3-none-any.whl", hash = "sha256:6a5185ad722ab4dd52d5fb1f30dcc73282eb1ed494906a92d1a228d3f89607b0"},
+ {file = "yarl-1.13.1.tar.gz", hash = "sha256:ec8cfe2295f3e5e44c51f57272afbd69414ae629ec7c6b27f5a410efc78b70a0"},
]
[package.dependencies]
@@ -3603,13 +3721,13 @@ requests = "*"
[[package]]
name = "zipp"
-version = "3.20.1"
+version = "3.20.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
- {file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"},
- {file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"},
+ {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
+ {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
]
[package.extras]
@@ -3623,4 +3741,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "311c527a1d1947af049dac27c7a2b2f49d7fa4cdede52ef436422a528b0ad866"
+content-hash = "ab3ae697e0be22e3ed20ae136db5b6805086279ebb57c99b60ed1c4d8d2dbbae"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index c7c3be2005..ed31667c52 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -16,7 +16,6 @@ autogpt-libs = { path = "../autogpt_libs", develop = true }
click = "^8.1.7"
croniter = "^2.0.5"
discord-py = "^2.4.0"
-expiringdict = "^1.2.2"
fastapi = "^0.109.0"
feedparser = "^6.0.11"
flake8 = "^7.0.0"
@@ -28,9 +27,9 @@ jsonref = "^1.1.0"
jsonschema = "^4.22.0"
ollama = "^0.3.0"
openai = "^1.35.7"
-praw = "^7.7.1"
+praw = "~7.7.1"
prisma = "^0.13.1"
-psutil = "^5.9.8"
+psutil = "^6.1.0"
pydantic = "^2.7.2"
pydantic-settings = "^2.3.4"
pyro5 = "^5.15"
@@ -38,22 +37,25 @@ pytest = "^8.2.1"
pytest-asyncio = "^0.23.7"
python-dotenv = "^1.0.1"
redis = "^5.0.8"
-sentry-sdk = "1.45.0"
+sentry-sdk = "2.17.0"
supabase = "^2.7.2"
-tenacity = "^8.3.0"
+tenacity = "^9.0.0"
uvicorn = { extras = ["standard"], version = "^0.30.1" }
-websockets = "^12.0"
+websockets = "^13.1"
youtube-transcript-api = "^0.6.2"
-
+googlemaps = "^4.10.0"
+replicate = "^0.34.1"
+pinecone = "^5.3.1"
[tool.poetry.group.dev.dependencies]
-poethepoet = "^0.26.1"
+poethepoet = "^0.29.0"
httpx = "^0.27.0"
pytest-watcher = "^0.4.2"
requests = "^2.32.3"
-ruff = "^0.5.2"
-pyright = "^1.1.371"
+ruff = "^0.7.1"
+pyright = "^1.1.386"
isort = "^5.13.2"
-black = "^24.4.2"
+black = "^24.10.0"
+aiohappyeyeballs = "^2.4.3"
[build-system]
requires = ["poetry-core"]
diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma
index 299f57029f..b316e226d2 100644
--- a/autogpt_platform/backend/schema.prisma
+++ b/autogpt_platform/backend/schema.prisma
@@ -17,6 +17,7 @@ model User {
name String?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
+ metadata Json?
// Relations
AgentGraphs AgentGraph[]
@@ -44,7 +45,7 @@ model AgentGraph {
// Link to User model
userId String
- user User @relation(fields: [userId], references: [id])
+ user User @relation(fields: [userId], references: [id], onDelete: Cascade)
AgentNodes AgentNode[]
AgentGraphExecution AgentGraphExecution[]
@@ -53,7 +54,7 @@ model AgentGraph {
// All sub-graphs are defined within this 1-level depth list (even if it's a nested graph).
AgentSubGraphs AgentGraph[] @relation("AgentSubGraph")
agentGraphParentId String?
- AgentGraphParent AgentGraph? @relation("AgentSubGraph", fields: [agentGraphParentId, version], references: [id, version])
+ AgentGraphParent AgentGraph? @relation("AgentSubGraph", fields: [agentGraphParentId, version], references: [id, version], onDelete: Cascade)
@@id(name: "graphVersionId", [id, version])
}
@@ -63,11 +64,11 @@ model AgentNode {
id String @id @default(uuid())
agentBlockId String
- AgentBlock AgentBlock @relation(fields: [agentBlockId], references: [id])
+ AgentBlock AgentBlock @relation(fields: [agentBlockId], references: [id], onUpdate: Cascade)
agentGraphId String
agentGraphVersion Int @default(1)
- AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version])
+ AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version], onDelete: Cascade)
// List of consumed input, that the parent node should provide.
Input AgentNodeLink[] @relation("AgentNodeSink")
@@ -90,12 +91,12 @@ model AgentNodeLink {
// Output of a node is connected to the source of the link.
agentNodeSourceId String
- AgentNodeSource AgentNode @relation("AgentNodeSource", fields: [agentNodeSourceId], references: [id])
+ AgentNodeSource AgentNode @relation("AgentNodeSource", fields: [agentNodeSourceId], references: [id], onDelete: Cascade)
sourceName String
// Input of a node is connected to the sink of the link.
agentNodeSinkId String
- AgentNodeSink AgentNode @relation("AgentNodeSink", fields: [agentNodeSinkId], references: [id])
+ AgentNodeSink AgentNode @relation("AgentNodeSink", fields: [agentNodeSinkId], references: [id], onDelete: Cascade)
sinkName String
// Default: the data coming from the source can only be consumed by the sink once, Static: input data will be reused.
@@ -137,13 +138,13 @@ model AgentGraphExecution {
agentGraphId String
agentGraphVersion Int @default(1)
- AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version])
+ AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version], onDelete: Cascade)
AgentNodeExecutions AgentNodeExecution[]
// Link to User model
userId String
- user User @relation(fields: [userId], references: [id])
+ user User @relation(fields: [userId], references: [id], onDelete: Cascade)
stats String? // JSON serialized object
}
@@ -153,10 +154,10 @@ model AgentNodeExecution {
id String @id @default(uuid())
agentGraphExecutionId String
- AgentGraphExecution AgentGraphExecution @relation(fields: [agentGraphExecutionId], references: [id])
+ AgentGraphExecution AgentGraphExecution @relation(fields: [agentGraphExecutionId], references: [id], onDelete: Cascade)
agentNodeId String
- AgentNode AgentNode @relation(fields: [agentNodeId], references: [id])
+ AgentNode AgentNode @relation(fields: [agentNodeId], references: [id], onDelete: Cascade)
Input AgentNodeExecutionInputOutput[] @relation("AgentNodeExecutionInput")
Output AgentNodeExecutionInputOutput[] @relation("AgentNodeExecutionOutput")
@@ -182,9 +183,9 @@ model AgentNodeExecutionInputOutput {
// Prisma requires explicit back-references.
referencedByInputExecId String?
- ReferencedByInputExec AgentNodeExecution? @relation("AgentNodeExecutionInput", fields: [referencedByInputExecId], references: [id])
+ ReferencedByInputExec AgentNodeExecution? @relation("AgentNodeExecutionInput", fields: [referencedByInputExecId], references: [id], onDelete: Cascade)
referencedByOutputExecId String?
- ReferencedByOutputExec AgentNodeExecution? @relation("AgentNodeExecutionOutput", fields: [referencedByOutputExecId], references: [id])
+ ReferencedByOutputExec AgentNodeExecution? @relation("AgentNodeExecutionOutput", fields: [referencedByOutputExecId], references: [id], onDelete: Cascade)
// Input and Output pin names are unique for each AgentNodeExecution.
@@unique([referencedByInputExecId, referencedByOutputExecId, name])
@@ -198,7 +199,7 @@ model AgentGraphExecutionSchedule {
agentGraphId String
agentGraphVersion Int @default(1)
- AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version])
+ AgentGraph AgentGraph @relation(fields: [agentGraphId, agentGraphVersion], references: [id, version], onDelete: Cascade)
schedule String // cron expression
isEnabled Boolean @default(true)
@@ -209,7 +210,7 @@ model AgentGraphExecutionSchedule {
// Link to User model
userId String
- user User @relation(fields: [userId], references: [id])
+ user User @relation(fields: [userId], references: [id], onDelete: Cascade)
@@index([isEnabled])
}
@@ -224,7 +225,7 @@ model AnalyticsDetails {
// Link to User model
userId String
- user User @relation(fields: [userId], references: [id])
+ user User @relation(fields: [userId], references: [id], onDelete: Cascade)
// Analytics Categorical data used for filtering (indexable w and w/o userId)
type String
@@ -254,7 +255,7 @@ model AnalyticsMetrics {
// Link to User model
userId String
- user User @relation(fields: [userId], references: [id])
+ user User @relation(fields: [userId], references: [id], onDelete: Cascade)
}
enum UserBlockCreditType {
@@ -267,7 +268,7 @@ model UserBlockCredit {
createdAt DateTime @default(now())
userId String
- user User @relation(fields: [userId], references: [id])
+ user User @relation(fields: [userId], references: [id], onDelete: Cascade)
blockId String?
block AgentBlock? @relation(fields: [blockId], references: [id])
diff --git a/autogpt_platform/backend/target.prisma b/autogpt_platform/backend/target.prisma
new file mode 100644
index 0000000000..7c378b5a6f
--- /dev/null
+++ b/autogpt_platform/backend/target.prisma
@@ -0,0 +1,628 @@
+// We need to migrate our database schema to support the domain as we understand it now.
+// Doing so requires adding a number of new tables, but also modifying existing ones and how
+// they relate to each other. This is a large change, so instead of doing it in one go,
+// we have created this target schema and will migrate to it incrementally.
+
+datasource db {
+ provider = "postgresql"
+ url = env("DATABASE_URL")
+}
+
+generator client {
+ provider = "prisma-client-py"
+ recursive_type_depth = 5
+ interface = "asyncio"
+}
+
+// User model to mirror Auth provider users
+model User {
+ id String @id @db.Uuid // This should match the Supabase user ID
+ email String @unique
+ name String?
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+ metadata Json? @default("{}")
+
+ // Relations
+ Agents Agent[]
+ AgentExecutions AgentExecution[]
+ AgentExecutionSchedules AgentExecutionSchedule[]
+ AnalyticsDetails AnalyticsDetails[]
+ AnalyticsMetrics AnalyticsMetrics[]
+ UserBlockCredit UserBlockCredit[]
+ AgentPresets AgentPreset[]
+ UserAgents UserAgent[]
+
+ // User Group relations
+ UserGroupMemberships UserGroupMembership[]
+ Profile Profile[]
+ StoreListing StoreListing[]
+ StoreListingSubmission StoreListingSubmission[]
+ StoreListingReview StoreListingReview[]
+}
+
+model UserGroup {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ name String
+ description String
+ groupIconUrl String?
+
+ UserGroupMemberships UserGroupMembership[]
+
+ Agents Agent[]
+ Profile Profile[]
+ StoreListing StoreListing[]
+
+ @@index([name])
+}
+
+enum UserGroupRole {
+ MEMBER
+ OWNER
+}
+
+model UserGroupMembership {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+ userGroupId String @db.Uuid
+ UserGroup UserGroup @relation(fields: [userGroupId], references: [id], onDelete: Cascade)
+ Role UserGroupRole @default(MEMBER)
+
+ @@unique([userId, userGroupId])
+ @@index([userId])
+ @@index([userGroupId])
+}
+
+// This model describes the Agent Graph/Flow (Multi Agent System).
+model Agent {
+ id String @default(uuid()) @db.Uuid
+ version Int @default(1)
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ name String?
+ description String?
+
+ // Link to User model
+ createdByUserId String? @db.Uuid
+ // Do not cascade delete the agent when the user is deleted
+ // This allows us to delete user data without deleting the agent, which may be in use by other users
+ CreatedByUser User? @relation(fields: [createdByUserId], references: [id], onDelete: SetNull)
+
+ groupId String? @db.Uuid
+ // Do not cascade delete the agent when the group is deleted
+ // This allows us to delete user group data without deleting the agent, which may be in use by other users
+ Group UserGroup? @relation(fields: [groupId], references: [id], onDelete: SetNull)
+
+ AgentNodes AgentNode[]
+ AgentExecution AgentExecution[]
+
+ // All sub-graphs are defined within this 1-level depth list (even if it's a nested graph).
+ SubAgents Agent[] @relation("SubAgents")
+ agentParentId String? @db.Uuid
+ agentParentVersion Int?
+ AgentParent Agent? @relation("SubAgents", fields: [agentParentId, agentParentVersion], references: [id, version])
+
+ AgentPresets AgentPreset[]
+ WebhookTrigger WebhookTrigger[]
+ AgentExecutionSchedule AgentExecutionSchedule[]
+ UserAgents UserAgent[]
+ UserBlockCredit UserBlockCredit[]
+ StoreListing StoreListing[]
+ StoreListingVersion StoreListingVersion[]
+
+ @@id(name: "agentVersionId", [id, version])
+}
+
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+//////////////// USER SPECIFIC DATA ////////////////////
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+
+// An AgentPreset is an Agent plus a user's configuration of that agent.
+// For example, if someone has created a weather agent and wants to be informed of
+// extreme weather warnings in Texas, then the agent configured to monitor Texas,
+// along with its cron setup or webhook triggers, is an AgentPreset.
+model AgentPreset {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ name String
+ description String
+
+ // For agents that can be triggered by webhooks or a cron job
+ // This bool allows us to disable a configured agent without deleting it
+ isActive Boolean @default(true)
+
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+
+ agentId String @db.Uuid
+ agentVersion Int
+ Agent Agent @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade)
+
+ InputPresets AgentNodeExecutionInputOutput[] @relation("AgentPresetsInputData")
+ UserAgents UserAgent[]
+ WebhookTrigger WebhookTrigger[]
+ AgentExecutionSchedule AgentExecutionSchedule[]
+ AgentExecution AgentExecution[]
+
+ @@index([userId])
+}
+
+// For the library page.
+// A user-controlled list of agents that the user will see in their library.
+model UserAgent {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+
+ agentId String @db.Uuid
+ agentVersion Int
+ Agent Agent @relation(fields: [agentId, agentVersion], references: [id, version])
+
+ agentPresetId String? @db.Uuid
+ AgentPreset AgentPreset? @relation(fields: [agentPresetId], references: [id])
+
+ isFavorite Boolean @default(false)
+ isCreatedByUser Boolean @default(false)
+ isArchived Boolean @default(false)
+ isDeleted Boolean @default(false)
+
+ @@index([userId])
+}
+
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+//////// AGENT DEFINITION AND EXECUTION TABLES ////////
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+// This model describes a single node in the Agent Graph/Flow (Multi Agent System).
+model AgentNode {
+ id String @id @default(uuid()) @db.Uuid
+
+ agentBlockId String @db.Uuid
+ AgentBlock AgentBlock @relation(fields: [agentBlockId], references: [id], onUpdate: Cascade)
+
+ agentId String @db.Uuid
+ agentVersion Int @default(1)
+ Agent Agent @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade)
+
+ // List of consumed input, that the parent node should provide.
+ Input AgentNodeLink[] @relation("AgentNodeSink")
+
+ // List of produced output that triggers the execution of child nodes.
+ Output AgentNodeLink[] @relation("AgentNodeSource")
+
+ // JSON serialized dict[str, str] containing predefined input values.
+ constantInput Json @default("{}")
+
+ // JSON serialized dict[str, str] containing the node metadata.
+ metadata Json @default("{}")
+
+ ExecutionHistory AgentNodeExecution[]
+}
+
+// This model describes the link between two AgentNodes.
+model AgentNodeLink {
+ id String @id @default(uuid()) @db.Uuid
+
+ // Output of a node is connected to the source of the link.
+ agentNodeSourceId String @db.Uuid
+ AgentNodeSource AgentNode @relation("AgentNodeSource", fields: [agentNodeSourceId], references: [id], onDelete: Cascade)
+ sourceName String
+
+ // Input of a node is connected to the sink of the link.
+ agentNodeSinkId String @db.Uuid
+ AgentNodeSink AgentNode @relation("AgentNodeSink", fields: [agentNodeSinkId], references: [id], onDelete: Cascade)
+ sinkName String
+
+ // Default: the data coming from the source can only be consumed by the sink once, Static: input data will be reused.
+ isStatic Boolean @default(false)
+}
+
+// This model describes a component that will be executed by the AgentNode.
+model AgentBlock {
+ id String @id @default(uuid()) @db.Uuid
+ name String @unique
+
+ // We allow a block to have multiple types of input & output.
+ // Serialized object-typed `jsonschema` with top-level properties as input/output name.
+ inputSchema Json @default("{}")
+ outputSchema Json @default("{}")
+
+ // Prisma requires explicit back-references.
+ ReferencedByAgentNode AgentNode[]
+ UserBlockCredit UserBlockCredit[]
+}
+
+// This model describes the status of an AgentExecution or AgentNodeExecution.
+enum AgentExecutionStatus {
+ INCOMPLETE
+ QUEUED
+ RUNNING
+ COMPLETED
+ FAILED
+}
+
+// Enum for execution trigger types
+enum ExecutionTriggerType {
+ MANUAL
+ SCHEDULE
+ WEBHOOK
+}
+
+// This model describes the execution of an AgentGraph.
+model AgentExecution {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+ startedAt DateTime?
+ executionTriggerType ExecutionTriggerType @default(MANUAL)
+
+ executionStatus AgentExecutionStatus @default(COMPLETED)
+
+ agentId String @db.Uuid
+ agentVersion Int @default(1)
+ Agent Agent @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade)
+
+ // We need to be able to associate an agent execution with an agent preset
+ agentPresetId String? @db.Uuid
+ AgentPreset AgentPreset? @relation(fields: [agentPresetId], references: [id])
+
+ AgentNodeExecutions AgentNodeExecution[]
+
+ // This is so we can track which user executed the agent.
+ executedByUserId String @db.Uuid
+ ExecutedByUser User @relation(fields: [executedByUserId], references: [id], onDelete: Cascade)
+
+ stats Json @default("{}") // JSON serialized object
+}
+
+// This model describes the execution of an AgentNode.
+model AgentNodeExecution {
+ id String @id @default(uuid()) @db.Uuid
+
+ agentExecutionId String @db.Uuid
+ AgentExecution AgentExecution @relation(fields: [agentExecutionId], references: [id], onDelete: Cascade)
+
+ agentNodeId String @db.Uuid
+ AgentNode AgentNode @relation(fields: [agentNodeId], references: [id], onDelete: Cascade)
+
+ Input AgentNodeExecutionInputOutput[] @relation("AgentNodeExecutionInput")
+ Output AgentNodeExecutionInputOutput[] @relation("AgentNodeExecutionOutput")
+
+ executionStatus AgentExecutionStatus @default(COMPLETED)
+ // Final JSON serialized input data for the node execution.
+ executionData String?
+ addedTime DateTime @default(now())
+ queuedTime DateTime?
+ startedTime DateTime?
+ endedTime DateTime?
+
+ stats Json @default("{}") // JSON serialized object
+ UserBlockCredit UserBlockCredit[]
+}
+
+// This model describes the output of an AgentNodeExecution.
+model AgentNodeExecutionInputOutput {
+ id String @id @default(uuid()) @db.Uuid
+
+ name String
+ data String
+ time DateTime @default(now())
+
+ // Prisma requires explicit back-references.
+ referencedByInputExecId String? @db.Uuid
+ ReferencedByInputExec AgentNodeExecution? @relation("AgentNodeExecutionInput", fields: [referencedByInputExecId], references: [id], onDelete: Cascade)
+ referencedByOutputExecId String? @db.Uuid
+ ReferencedByOutputExec AgentNodeExecution? @relation("AgentNodeExecutionOutput", fields: [referencedByOutputExecId], references: [id], onDelete: Cascade)
+
+ agentPresetId String? @db.Uuid
+ AgentPreset AgentPreset? @relation("AgentPresetsInputData", fields: [agentPresetId], references: [id])
+
+ // Input and Output pin names are unique for each AgentNodeExecution.
+ @@unique([referencedByInputExecId, referencedByOutputExecId, name])
+}
+
+// This model describes the recurring execution schedule of an Agent.
+model AgentExecutionSchedule {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ agentPresetId String @db.Uuid
+ AgentPreset AgentPreset @relation(fields: [agentPresetId], references: [id], onDelete: Cascade)
+
+ schedule String // cron expression
+ isEnabled Boolean @default(true)
+
+ // Allows triggers to be routed down different execution paths in an agent graph
+ triggerIdentifier String
+
+ // Defaults to now and is set on each update; the lastUpdated field has no time zone.
+ lastUpdated DateTime @default(now()) @updatedAt
+
+ // Link to User model
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+ Agent Agent? @relation(fields: [agentId, agentVersion], references: [id, version])
+ agentId String? @db.Uuid
+ agentVersion Int?
+
+ @@index([isEnabled])
+}
+
+enum HttpMethod {
+ GET
+ POST
+ PUT
+ DELETE
+ PATCH
+}
+
+model WebhookTrigger {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ agentPresetId String @db.Uuid
+ AgentPreset AgentPreset @relation(fields: [agentPresetId], references: [id])
+
+ method HttpMethod
+ urlSlug String
+
+ // Allows triggers to be routed down different execution paths in an agent graph
+ triggerIdentifier String
+
+ isActive Boolean @default(true)
+ lastReceivedDataAt DateTime?
+ isDeleted Boolean @default(false)
+ Agent Agent? @relation(fields: [agentId, agentVersion], references: [id, version])
+ agentId String? @db.Uuid
+ agentVersion Int?
+
+ @@index([agentPresetId])
+}
+
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+////////////// METRICS TRACKING TABLES ////////////////
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+model AnalyticsDetails {
+ // PK uses gen_random_uuid() to allow the db inserts to happen outside of prisma
+ // typical uuid() inserts are handled by prisma
+ id String @id @default(dbgenerated("gen_random_uuid()")) @db.Uuid
+
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ // Link to User model
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+
+ // Analytics Categorical data used for filtering (indexable w and w/o userId)
+ type String
+
+ // Analytic Specific Data. We should use a union type here, but prisma doesn't support it.
+ data Json @default("{}")
+
+ // Indexable field for any count based analytical measures like page order clicking, tutorial step completion, etc.
+ dataIndex String?
+
+ @@index([userId, type], name: "analyticsDetails")
+ @@index([type])
+}
+
+model AnalyticsMetrics {
+ id String @id @default(dbgenerated("gen_random_uuid()")) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ // Analytics Categorical data used for filtering (indexable w and w/o userId)
+ analyticMetric String
+ // Any numeric data that should be counted upon, summed, or otherwise aggregated.
+ value Float
+ // Any string data that should be used to identify the metric as distinct.
+ // ex: '/build' vs '/market'
+ dataString String?
+
+ // Link to User model
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+}
+
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+//////// ACCOUNTING AND CREDIT SYSTEM TABLES //////////
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+
+enum UserBlockCreditType {
+ TOP_UP
+ USAGE
+}
+
+model UserBlockCredit {
+ transactionKey String @default(uuid())
+ createdAt DateTime @default(now())
+
+ userId String @db.Uuid
+ User User @relation(fields: [userId], references: [id], onDelete: Cascade)
+
+ blockId String? @db.Uuid
+ Block AgentBlock? @relation(fields: [blockId], references: [id])
+
+ // We need to be able to associate a credit transaction with an agent
+ executedAgentId String? @db.Uuid
+ executedAgentVersion Int?
+ ExecutedAgent Agent? @relation(fields: [executedAgentId, executedAgentVersion], references: [id, version])
+
+ // We need to be able to associate a cost with a specific agent execution
+ agentNodeExecutionId String? @db.Uuid
+ AgentNodeExecution AgentNodeExecution? @relation(fields: [agentNodeExecutionId], references: [id])
+
+ amount Int
+ type UserBlockCreditType
+
+ isActive Boolean @default(true)
+ metadata Json @default("{}")
+
+ @@id(name: "creditTransactionIdentifier", [transactionKey, userId])
+}
+
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+////////////// Store TABLES ///////////////////////////
+////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////
+
+model Profile {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ // Only 1 of user or group can be set.
+ // The user this profile belongs to, if any.
+ userId String? @db.Uuid
+ User User? @relation(fields: [userId], references: [id], onDelete: Cascade)
+
+ // The group this profile belongs to, if any.
+ groupId String? @db.Uuid
+ Group UserGroup? @relation(fields: [groupId], references: [id])
+
+ username String @unique
+ description String
+
+ links String[]
+
+ avatarUrl String?
+
+ @@index([username])
+}
+
+model StoreListing {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ isDeleted Boolean @default(false)
+ // Not needed but makes lookups faster
+ isApproved Boolean @default(false)
+
+ // The agent link here is only so we can do lookups on agentId; for the listing itself the StoreListingVersion is used.
+ agentId String @db.Uuid
+ agentVersion Int
+ Agent Agent @relation(fields: [agentId, agentVersion], references: [id, version], onDelete: Cascade)
+
+ owningUserId String @db.Uuid
+ OwningUser User @relation(fields: [owningUserId], references: [id])
+
+ isGroupListing Boolean @default(false)
+ owningGroupId String? @db.Uuid
+ OwningGroup UserGroup? @relation(fields: [owningGroupId], references: [id])
+
+ StoreListingVersions StoreListingVersion[]
+ StoreListingSubmission StoreListingSubmission[]
+
+ @@index([isApproved])
+ @@index([agentId])
+ @@index([owningUserId])
+ @@index([owningGroupId])
+}
+
+model StoreListingVersion {
+ id String @id @default(uuid()) @db.Uuid
+ version Int @default(1)
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ // The agent and version to be listed on the store
+ agentId String @db.Uuid
+ agentVersion Int
+ Agent Agent @relation(fields: [agentId, agentVersion], references: [id, version])
+
+ // The details for this version of the agent. This allows the author to update the details of the agent,
+ // while still allowing old versions of the agent to be used with their original details.
+ // TODO: Create a database view that shows only the latest version of each store listing.
+ slug String
+ name String
+ videoUrl String?
+ imageUrls String[]
+ description String
+ categories String[]
+
+ isFeatured Boolean @default(false)
+
+ isDeleted Boolean @default(false)
+ // Old versions can be made unavailable by the author if desired
+ isAvailable Boolean @default(true)
+ // Not needed but makes lookups faster
+ isApproved Boolean @default(false)
+ StoreListing StoreListing? @relation(fields: [storeListingId], references: [id], onDelete: Cascade)
+ storeListingId String? @db.Uuid
+ StoreListingSubmission StoreListingSubmission[]
+
+ // Reviews are on a specific version, but then aggregated up to the listing.
+ // This allows us to provide a review filter for the current version of the agent.
+ StoreListingReview StoreListingReview[]
+
+ @@unique([agentId, agentVersion])
+ @@index([agentId, agentVersion, isApproved])
+}
+
+model StoreListingReview {
+ id String @id @default(dbgenerated("gen_random_uuid()")) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ storeListingVersionId String @db.Uuid
+ StoreListingVersion StoreListingVersion @relation(fields: [storeListingVersionId], references: [id], onDelete: Cascade)
+
+ reviewByUserId String @db.Uuid
+ ReviewByUser User @relation(fields: [reviewByUserId], references: [id])
+
+ score Int
+ comments String?
+}
+
+enum SubmissionStatus {
+ DRAFT
+ PENDING
+ APPROVED
+ REJECTED
+}
+
+model StoreListingSubmission {
+ id String @id @default(uuid()) @db.Uuid
+ createdAt DateTime @default(now())
+ updatedAt DateTime @default(now()) @updatedAt
+
+ storeListingId String @db.Uuid
+ StoreListing StoreListing @relation(fields: [storeListingId], references: [id], onDelete: Cascade)
+
+ storeListingVersionId String @db.Uuid
+ StoreListingVersion StoreListingVersion @relation(fields: [storeListingVersionId], references: [id], onDelete: Cascade)
+
+ reviewerId String @db.Uuid
+ Reviewer User @relation(fields: [reviewerId], references: [id])
+
+ Status SubmissionStatus @default(PENDING)
+ reviewComments String?
+
+ @@index([storeListingId])
+ @@index([Status])
+}
diff --git a/autogpt_platform/backend/test/__init__.py b/autogpt_platform/backend/test/__init__.py
index e69de29bb2..d10438719d 100644
--- a/autogpt_platform/backend/test/__init__.py
+++ b/autogpt_platform/backend/test/__init__.py
@@ -0,0 +1,3 @@
+import os
+
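+# Run the backend test suite with authentication disabled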
+os.environ["ENABLE_AUTH"] = "false"
diff --git a/autogpt_platform/backend/test/block/test_block.py b/autogpt_platform/backend/test/block/test_block.py
index be16a0b1a7..48d2616f61 100644
--- a/autogpt_platform/backend/test/block/test_block.py
+++ b/autogpt_platform/backend/test/block/test_block.py
@@ -1,3 +1,5 @@
+from typing import Type
+
import pytest
from backend.data.block import Block, get_blocks
@@ -5,5 +7,5 @@ from backend.util.test import execute_block_test
@pytest.mark.parametrize("block", get_blocks().values(), ids=lambda b: b.name)
-def test_available_blocks(block: Block):
- execute_block_test(type(block)())
+def test_available_blocks(block: Type[Block]):
+ execute_block_test(block())
diff --git a/autogpt_platform/backend/test/conftest.py b/autogpt_platform/backend/test/conftest.py
index b0b5c6cc68..59d6f70cf9 100644
--- a/autogpt_platform/backend/test/conftest.py
+++ b/autogpt_platform/backend/test/conftest.py
@@ -7,3 +7,28 @@ from backend.util.test import SpinTestServer
async def server():
async with SpinTestServer() as server:
yield server
+
+
+@pytest.fixture(scope="session", autouse=True)
+async def graph_cleanup(server):
+ created_graph_ids = []
+ original_create_graph = server.agent_server.create_graph
+
+ async def create_graph_wrapper(*args, **kwargs):
+ created_graph = await original_create_graph(*args, **kwargs)
+ # user_id may be passed positionally (third argument) or as a keyword
+ user_id = kwargs.get("user_id", args[2] if len(args) > 2 else None)
+ created_graph_ids.append((created_graph.id, user_id))
+ return created_graph
+
+ try:
+ server.agent_server.create_graph = create_graph_wrapper
+ yield # This runs the test function
+ finally:
+ server.agent_server.create_graph = original_create_graph
+
+ # Delete the created graphs and assert they were deleted
+ for graph_id, user_id in created_graph_ids:
+ resp = await server.agent_server.delete_graph(graph_id, user_id)
+ num_deleted = resp["version_counts"]
+ assert num_deleted > 0, f"Graph {graph_id} was not deleted."
diff --git a/autogpt_platform/backend/test/data/test_credit.py b/autogpt_platform/backend/test/data/test_credit.py
index ea92d1759d..fe8fbde81c 100644
--- a/autogpt_platform/backend/test/data/test_credit.py
+++ b/autogpt_platform/backend/test/data/test_credit.py
@@ -19,7 +19,7 @@ async def test_block_credit_usage(server: SpinTestServer):
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
- AITextGeneratorBlock(),
+ AITextGeneratorBlock().id,
{"model": "gpt-4-turbo"},
0.0,
0.0,
@@ -30,7 +30,7 @@ async def test_block_credit_usage(server: SpinTestServer):
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
- AITextGeneratorBlock(),
+ AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py
index 66fb202240..5f5c3c3583 100644
--- a/autogpt_platform/backend/test/executor/test_manager.py
+++ b/autogpt_platform/backend/test/executor/test_manager.py
@@ -4,11 +4,16 @@ from prisma.models import User
from backend.blocks.basic import FindInDictionaryBlock, StoreValueBlock
from backend.blocks.maths import CalculatorBlock, Operation
from backend.data import execution, graph
-from backend.server import AgentServer
+from backend.server.model import CreateGraph
+from backend.server.rest_api import AgentServer
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.test import SpinTestServer, wait_execution
+async def create_graph(s: SpinTestServer, g: graph.Graph, u: User) -> graph.Graph:
+ return await s.agent_server.create_graph(CreateGraph(graph=g), False, u.id)
+
+
async def execute_graph(
agent_server: AgentServer,
test_graph: graph.Graph,
@@ -17,7 +22,7 @@ async def execute_graph(
num_execs: int = 4,
) -> str:
# --- Test adding new executions --- #
- response = await agent_server.execute_graph(test_graph.id, input_data, test_user.id)
+ response = agent_server.execute_graph(test_graph.id, input_data, test_user.id)
graph_exec_id = response["id"]
# Execution queue should be empty
@@ -99,9 +104,8 @@ async def assert_sample_graph_executions(
@pytest.mark.asyncio(scope="session")
async def test_agent_execution(server: SpinTestServer):
- test_graph = create_test_graph()
test_user = await create_test_user()
- await graph.create_graph(test_graph, user_id=test_user.id)
+ test_graph = await create_graph(server, create_test_graph(), test_user)
data = {"input_1": "Hello", "input_2": "World"}
graph_exec_id = await execute_graph(
server.agent_server,
@@ -163,7 +167,7 @@ async def test_input_pin_always_waited(server: SpinTestServer):
links=links,
)
test_user = await create_test_user()
- test_graph = await graph.create_graph(test_graph, user_id=test_user.id)
+ test_graph = await create_graph(server, test_graph, test_user)
graph_exec_id = await execute_graph(
server.agent_server, test_graph, test_user, {}, 3
)
@@ -244,7 +248,7 @@ async def test_static_input_link_on_graph(server: SpinTestServer):
links=links,
)
test_user = await create_test_user()
- test_graph = await graph.create_graph(test_graph, user_id=test_user.id)
+ test_graph = await create_graph(server, test_graph, test_user)
graph_exec_id = await execute_graph(
server.agent_server, test_graph, test_user, {}, 8
)
diff --git a/autogpt_platform/backend/test/executor/test_scheduler.py b/autogpt_platform/backend/test/executor/test_scheduler.py
index 6c46110776..49e46510a1 100644
--- a/autogpt_platform/backend/test/executor/test_scheduler.py
+++ b/autogpt_platform/backend/test/executor/test_scheduler.py
@@ -1,10 +1,10 @@
import pytest
-from backend.data import db, graph
+from backend.data import db
from backend.executor import ExecutionScheduler
+from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
-from backend.util.settings import Config
from backend.util.test import SpinTestServer
@@ -12,12 +12,13 @@ from backend.util.test import SpinTestServer
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
- test_graph = await graph.create_graph(create_test_graph(), user_id=test_user.id)
-
- scheduler = get_service_client(
- ExecutionScheduler, Config().execution_scheduler_port
+ test_graph = await server.agent_server.create_graph(
+ create_graph=CreateGraph(graph=create_test_graph()),
+ is_template=False,
+ user_id=test_user.id,
)
+ scheduler = get_service_client(ExecutionScheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py
index f56cbed287..a20810dbb1 100644
--- a/autogpt_platform/backend/test/util/test_service.py
+++ b/autogpt_platform/backend/test/util/test_service.py
@@ -2,14 +2,16 @@ import pytest
from backend.util.service import AppService, expose, get_service_client
+TEST_SERVICE_PORT = 8765
-class TestService(AppService):
+
+class ServiceTest(AppService):
def __init__(self):
- super().__init__(port=8005)
- self.use_redis = False
+ super().__init__()
- def run_service(self):
- super().run_service()
+ @classmethod
+ def get_port(cls) -> int:
+ return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
@@ -29,8 +31,8 @@ class TestService(AppService):
@pytest.mark.asyncio(scope="session")
async def test_service_creation(server):
- with TestService():
- client = get_service_client(TestService, 8005)
+ with ServiceTest():
+ client = get_service_client(ServiceTest)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
diff --git a/autogpt_platform/backend/test/util/test_type.py b/autogpt_platform/backend/test/util/test_type.py
index a2a7b2bfab..f9a14d10a0 100644
--- a/autogpt_platform/backend/test/util/test_type.py
+++ b/autogpt_platform/backend/test/util/test_type.py
@@ -27,5 +27,6 @@ def test_type_conversion():
from typing import List
- # assert convert("5", List[int]) == [5]
+ assert convert("5", List[int]) == [5]
assert convert("[5,4,2]", List[int]) == [5, 4, 2]
+ assert convert([5, 4, 2], List[str]) == ["5", "4", "2"]
diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml
index 8f0fda2aff..e2a24fa51f 100644
--- a/autogpt_platform/docker-compose.platform.yml
+++ b/autogpt_platform/docker-compose.platform.yml
@@ -8,7 +8,7 @@ services:
develop:
watch:
- path: ./
- target: autogpt_platform/backend/migrate
+ target: autogpt_platform/backend/migrations
action: rebuild
depends_on:
db:
@@ -66,6 +66,7 @@ services:
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- EXECUTIONMANAGER_HOST=executor
+ - DATABASEMANAGER_HOST=executor
- FRONTEND_BASE_URL=http://localhost:3000
- BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]
ports:
@@ -103,6 +104,7 @@ services:
- ENABLE_AUTH=true
- PYRO_HOST=0.0.0.0
- AGENTSERVER_HOST=rest_server
+ - DATABASEMANAGER_HOST=0.0.0.0
ports:
- "8002:8000"
networks:
@@ -207,6 +209,7 @@ services:
# - NEXT_PUBLIC_AGPT_SERVER_URL=http://localhost:8006/api
# - NEXT_PUBLIC_AGPT_WS_SERVER_URL=ws://localhost:8001/ws
# - NEXT_PUBLIC_AGPT_MARKETPLACE_URL=http://localhost:8015/api/v1/market
+# - NEXT_PUBLIC_BEHAVE_AS=LOCAL
# ports:
# - "3000:3000"
# networks:
diff --git a/autogpt_platform/docker-compose.yml b/autogpt_platform/docker-compose.yml
index be6f1f49ed..a1ae16b3ea 100644
--- a/autogpt_platform/docker-compose.yml
+++ b/autogpt_platform/docker-compose.yml
@@ -142,3 +142,24 @@ services:
extends:
file: ./supabase/docker/docker-compose.yml
service: vector
+
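+ # Aggregator service for the "local" profile: a no-op container that pulls in all local dependencies via depends_on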
+ deps:
+ <<: *supabase-services
+ profiles:
+ - local
+ image: busybox
+ command: /bin/true
+ depends_on:
+ - studio
+ - kong
+ - auth
+ - rest
+ - realtime
+ - storage
+ - imgproxy
+ - meta
+ - functions
+ - analytics
+ - db
+ - vector
+ - redis
diff --git a/autogpt_platform/frontend/.env.example b/autogpt_platform/frontend/.env.example
index 34309245e3..0fe0b75402 100644
--- a/autogpt_platform/frontend/.env.example
+++ b/autogpt_platform/frontend/.env.example
@@ -13,3 +13,6 @@ NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAic
## Only used if you're using Supabase and OAuth
AUTH_CALLBACK_URL=http://localhost:3000/auth/callback
GA_MEASUREMENT_ID=G-FH2XK2W4GN
+
+# When running locally, set NEXT_PUBLIC_BEHAVE_AS=CLOUD to use a locally hosted marketplace (as is typical in development and in the cloud deployment); otherwise set it to LOCAL to have the marketplace open in a new tab
+NEXT_PUBLIC_BEHAVE_AS=LOCAL
\ No newline at end of file
diff --git a/autogpt_platform/frontend/.eslintrc.json b/autogpt_platform/frontend/.eslintrc.json
index bffb357a71..bb8b1c099d 100644
--- a/autogpt_platform/frontend/.eslintrc.json
+++ b/autogpt_platform/frontend/.eslintrc.json
@@ -1,3 +1,3 @@
{
- "extends": "next/core-web-vitals"
+ "extends": ["next/core-web-vitals", "plugin:storybook/recommended"]
}
diff --git a/autogpt_platform/frontend/.gitignore b/autogpt_platform/frontend/.gitignore
index cfe0cde0bb..2611438bd9 100644
--- a/autogpt_platform/frontend/.gitignore
+++ b/autogpt_platform/frontend/.gitignore
@@ -42,3 +42,6 @@ node_modules/
/playwright-report/
/blob-report/
/playwright/.cache/
+
+*storybook.log
+storybook-static
diff --git a/autogpt_platform/frontend/.storybook/main.ts b/autogpt_platform/frontend/.storybook/main.ts
new file mode 100644
index 0000000000..d6f94d7958
--- /dev/null
+++ b/autogpt_platform/frontend/.storybook/main.ts
@@ -0,0 +1,18 @@
+import type { StorybookConfig } from "@storybook/nextjs";
+
+const config: StorybookConfig = {
+ stories: ["../src/**/*.mdx", "../src/**/*.stories.@(js|jsx|mjs|ts|tsx)"],
+ addons: [
+ "@storybook/addon-onboarding",
+ "@storybook/addon-links",
+ "@storybook/addon-essentials",
+ "@chromatic-com/storybook",
+ "@storybook/addon-interactions",
+ ],
+ framework: {
+ name: "@storybook/nextjs",
+ options: {},
+ },
+ staticDirs: ["../public"],
+};
+export default config;
diff --git a/autogpt_platform/frontend/.storybook/preview.ts b/autogpt_platform/frontend/.storybook/preview.ts
new file mode 100644
index 0000000000..2c5c6ea845
--- /dev/null
+++ b/autogpt_platform/frontend/.storybook/preview.ts
@@ -0,0 +1,15 @@
+import type { Preview } from "@storybook/react";
+import "../src/app/globals.css";
+
+const preview: Preview = {
+ parameters: {
+ controls: {
+ matchers: {
+ color: /(background|color)$/i,
+ date: /Date$/i,
+ },
+ },
+ },
+};
+
+export default preview;
diff --git a/autogpt_platform/frontend/Dockerfile b/autogpt_platform/frontend/Dockerfile
index 70600723d2..7e55378367 100644
--- a/autogpt_platform/frontend/Dockerfile
+++ b/autogpt_platform/frontend/Dockerfile
@@ -14,7 +14,7 @@ CMD ["yarn", "run", "dev"]
# Build stage for prod
FROM base AS build
COPY autogpt_platform/frontend/ .
-RUN npm run build
+RUN yarn build
# Prod stage
FROM node:21-alpine AS prod
@@ -29,4 +29,4 @@ COPY --from=build /app/public ./public
COPY --from=build /app/next.config.mjs ./next.config.mjs
EXPOSE 3000
-CMD ["npm", "start"]
+CMD ["yarn", "start"]
diff --git a/autogpt_platform/frontend/README.md b/autogpt_platform/frontend/README.md
index c7fe91a26a..a89d28f406 100644
--- a/autogpt_platform/frontend/README.md
+++ b/autogpt_platform/frontend/README.md
@@ -39,3 +39,50 @@ This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-opti
## Deploy
TODO
+
+## Storybook
+
+Storybook is a development environment for UI components. It allows you to build components in isolation, making it easier to develop, test, and document them independently from the main application.
+
+### Purpose in the Development Process
+
+1. **Component Development**: Develop and test UI components in isolation.
+2. **Visual Testing**: Easily spot visual regressions.
+3. **Documentation**: Automatically document components and their props.
+4. **Collaboration**: Share components with your team or stakeholders for feedback.
+
+### How to Use Storybook
+
+1. **Start Storybook**:
+ Run the following command to start the Storybook development server:
+
+ ```bash
+ npm run storybook
+ ```
+
+ This will start Storybook on port 6006. Open [http://localhost:6006](http://localhost:6006) in your browser to view your component library.
+
+2. **Build Storybook**:
+ To build a static version of Storybook for deployment, use:
+
+ ```bash
+ npm run build-storybook
+ ```
+
+3. **Running Storybook Tests**:
+ Storybook tests can be run using:
+
+ ```bash
+ npm run test-storybook
+ ```
+
+ For CI environments, use:
+
+ ```bash
+ npm run test-storybook:ci
+ ```
+
+4. **Writing Stories**:
+ Create `.stories.tsx` files alongside your components to define different states and variations of your components.
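+
+ For example, a minimal story for a hypothetical `Button` component (names are illustrative) could look like:
+
+ ```tsx
+ // Button.stories.tsx (illustrative sketch; adjust the import to your component)
+ import type { Meta, StoryObj } from "@storybook/react";
+ import { Button } from "./Button";
+
+ const meta: Meta<typeof Button> = {
+   component: Button,
+ };
+ export default meta;
+
+ type Story = StoryObj<typeof Button>;
+
+ // One named export per state/variation of the component
+ export const Primary: Story = {
+   args: { children: "Click me" },
+ };
+ ```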
+
+By integrating Storybook into our development workflow, we can streamline UI development, improve component reusability, and maintain a consistent design system across the project.
diff --git a/autogpt_platform/frontend/next.config.mjs b/autogpt_platform/frontend/next.config.mjs
index 2204c43a80..7dd19b5267 100644
--- a/autogpt_platform/frontend/next.config.mjs
+++ b/autogpt_platform/frontend/next.config.mjs
@@ -1,16 +1,7 @@
import { withSentryConfig } from "@sentry/nextjs";
-import dotenv from "dotenv";
-
-// Load environment variables
-dotenv.config();
/** @type {import('next').NextConfig} */
const nextConfig = {
- env: {
- NEXT_PUBLIC_AGPT_SERVER_URL: process.env.NEXT_PUBLIC_AGPT_SERVER_URL,
- NEXT_PUBLIC_AGPT_MARKETPLACE_URL:
- process.env.NEXT_PUBLIC_AGPT_MARKETPLACE_URL,
- },
images: {
domains: ["images.unsplash.com"],
},
diff --git a/autogpt_platform/frontend/package.json b/autogpt_platform/frontend/package.json
index 0945f7fb8f..a6488f74f5 100644
--- a/autogpt_platform/frontend/package.json
+++ b/autogpt_platform/frontend/package.json
@@ -8,11 +8,15 @@
"dev:test": "export NODE_ENV=test && next dev",
"build": "next build",
"start": "next start",
- "lint": "next lint",
+ "lint": "next lint && prettier --check .",
"format": "prettier --write .",
"test": "playwright test",
"test-ui": "playwright test --ui",
- "gentests": "playwright codegen http://localhost:3000"
+ "gentests": "playwright codegen http://localhost:3000",
+ "storybook": "storybook dev -p 6006",
+ "build-storybook": "storybook build",
+ "test-storybook": "test-storybook",
+ "test-storybook:ci": "concurrently -k -s first -n \"SB,TEST\" -c \"magenta,blue\" \"npm run build-storybook -- --quiet && npx http-server storybook-static --port 6006 --silent\" \"wait-on tcp:6006 && npm run test-storybook\""
},
"browserslist": [
"defaults"
@@ -23,6 +27,7 @@
"@radix-ui/react-avatar": "^1.1.0",
"@radix-ui/react-checkbox": "^1.1.1",
"@radix-ui/react-collapsible": "^1.1.0",
+ "@radix-ui/react-context-menu": "^2.2.1",
"@radix-ui/react-dialog": "^1.1.1",
"@radix-ui/react-dropdown-menu": "^2.1.1",
"@radix-ui/react-icons": "^1.3.0",
@@ -39,7 +44,7 @@
"@supabase/ssr": "^0.4.0",
"@supabase/supabase-js": "^2.45.0",
"@tanstack/react-table": "^8.20.5",
- "@xyflow/react": "^12.1.0",
+ "@xyflow/react": "^12.3.1",
"ajv": "^8.17.1",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
@@ -48,10 +53,10 @@
"dotenv": "^16.4.5",
"lucide-react": "^0.407.0",
"moment": "^2.30.1",
- "next": "14.2.4",
+ "next": "^14.2.13",
"next-themes": "^0.3.0",
"react": "^18",
- "react-day-picker": "^8.10.1",
+ "react-day-picker": "^9.2.0",
"react-dom": "^18",
"react-hook-form": "^7.52.1",
"react-icons": "^5.3.0",
@@ -65,17 +70,31 @@
"zod": "^3.23.8"
},
"devDependencies": {
+ "@chromatic-com/storybook": "^1.9.0",
"@playwright/test": "^1.47.1",
- "@types/node": "^20",
+ "@storybook/addon-essentials": "^8.3.5",
+ "@storybook/addon-interactions": "^8.3.5",
+ "@storybook/addon-links": "^8.3.5",
+ "@storybook/addon-onboarding": "^8.3.5",
+ "@storybook/blocks": "^8.3.5",
+ "@storybook/nextjs": "^8.3.5",
+ "@storybook/react": "^8.3.5",
+ "@storybook/test": "^8.3.5",
+ "@storybook/test-runner": "^0.19.1",
+ "@types/node": "^22.7.3",
"@types/react": "^18",
"@types/react-dom": "^18",
"@types/react-modal": "^3.16.3",
+ "concurrently": "^9.0.1",
"eslint": "^8",
"eslint-config-next": "14.2.4",
+ "eslint-plugin-storybook": "^0.9.0",
"postcss": "^8",
"prettier": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.6",
+ "storybook": "^8.3.5",
"tailwindcss": "^3.4.1",
"typescript": "^5"
- }
+ },
+ "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e"
}
diff --git a/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx b/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx
index 7eb82d7d32..00fce10789 100644
--- a/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx
+++ b/autogpt_platform/frontend/src/app/admin/marketplace/page.tsx
@@ -10,7 +10,7 @@ async function AdminMarketplace() {
return (
<>
-
+
>
diff --git a/autogpt_platform/frontend/src/app/auth/callback/route.ts b/autogpt_platform/frontend/src/app/auth/callback/route.ts
index c813415239..5d3e1e9536 100644
--- a/autogpt_platform/frontend/src/app/auth/callback/route.ts
+++ b/autogpt_platform/frontend/src/app/auth/callback/route.ts
@@ -6,7 +6,7 @@ export async function GET(request: Request) {
const { searchParams, origin } = new URL(request.url);
const code = searchParams.get("code");
// if "next" is in param, use it as the redirect URL
- const next = searchParams.get("next") ?? "/profile";
+ const next = searchParams.get("next") ?? "/";
if (code) {
const supabase = createServerClient();
diff --git a/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts b/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts
index 6ec06a46ef..5d4100d48e 100644
--- a/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts
+++ b/autogpt_platform/frontend/src/app/auth/integrations/oauth_callback/route.ts
@@ -9,7 +9,8 @@ export async function GET(request: Request) {
const code = searchParams.get("code");
const state = searchParams.get("state");
- // Send message from popup window to host window
+ console.debug("OAuth callback received:", { code, state });
+
const message: OAuthPopupResultMessage =
code && state
? { message_type: "oauth_popup_result", success: true, code, state }
@@ -19,13 +20,15 @@ export async function GET(request: Request) {
message: `Incomplete query: ${searchParams.toString()}`,
};
+ console.debug("Sending message to opener:", message);
+
// Return a response with the message as JSON and a script to close the window
return new NextResponse(
`
diff --git a/autogpt_platform/frontend/src/app/build/page.tsx b/autogpt_platform/frontend/src/app/build/page.tsx
index 6d4c928c8d..6b5ea497e2 100644
--- a/autogpt_platform/frontend/src/app/build/page.tsx
+++ b/autogpt_platform/frontend/src/app/build/page.tsx
@@ -8,7 +8,7 @@ export default function Home() {
return (
diff --git a/autogpt_platform/frontend/src/app/globals.css b/autogpt_platform/frontend/src/app/globals.css
index 130a1e630b..7930a00b3c 100644
--- a/autogpt_platform/frontend/src/app/globals.css
+++ b/autogpt_platform/frontend/src/app/globals.css
@@ -27,7 +27,7 @@
--destructive: 0 84.2% 60.2%;
--destructive-foreground: 0 0% 98%;
--border: 240 5.9% 90%;
- --input: 240 5.9% 90%;
+ --input: 240 5.9% 85%;
--ring: 240 5.9% 10%;
--radius: 0.5rem;
--chart-1: 12 76% 61%;
@@ -72,4 +72,12 @@
body {
@apply bg-background text-foreground;
}
+
+ .agpt-border-input {
+ @apply border-input focus-visible:border-gray-400 focus-visible:outline-none;
+ }
+
+ .agpt-shadow-input {
+ @apply shadow-sm focus-visible:shadow-md;
+ }
}
diff --git a/autogpt_platform/frontend/src/app/layout.tsx b/autogpt_platform/frontend/src/app/layout.tsx
index 49ba9c7ac5..d175adc210 100644
--- a/autogpt_platform/frontend/src/app/layout.tsx
+++ b/autogpt_platform/frontend/src/app/layout.tsx
@@ -34,7 +34,7 @@ export default function RootLayout({
>
- {children}
+ {children}
diff --git a/autogpt_platform/frontend/src/app/login/actions.ts b/autogpt_platform/frontend/src/app/login/actions.ts
index b0f4a59bf1..131fb9de89 100644
--- a/autogpt_platform/frontend/src/app/login/actions.ts
+++ b/autogpt_platform/frontend/src/app/login/actions.ts
@@ -22,6 +22,11 @@ export async function login(values: z.infer) {
const { data, error } = await supabase.auth.signInWithPassword(values);
if (error) {
+ if (error.status == 400) {
+ // A 400 status means the user does not exist, so send them to the signup page
+ redirect("/signup");
+ }
+
return error.message;
}
@@ -30,38 +35,6 @@ export async function login(values: z.infer) {
}
revalidatePath("/", "layout");
- redirect("/profile");
+ redirect("/");
});
}
-
-export async function signup(values: z.infer) {
- "use server";
- return await Sentry.withServerActionInstrumentation(
- "signup",
- {},
- async () => {
- const supabase = createServerClient();
-
- if (!supabase) {
- redirect("/error");
- }
-
- // We are sure that the values are of the correct type because zod validates the form
- const { data, error } = await supabase.auth.signUp(values);
-
- if (error) {
- if (error.message.includes("P0001")) {
- return "Please join our waitlist for your turn: https://agpt.co/waitlist";
- }
- return error.message;
- }
-
- if (data.session) {
- await supabase.auth.setSession(data.session);
- }
-
- revalidatePath("/", "layout");
- redirect("/profile");
- },
- );
-}
diff --git a/autogpt_platform/frontend/src/app/login/page.tsx b/autogpt_platform/frontend/src/app/login/page.tsx
index b21dd95e5c..1c2f3c28e8 100644
--- a/autogpt_platform/frontend/src/app/login/page.tsx
+++ b/autogpt_platform/frontend/src/app/login/page.tsx
@@ -27,7 +27,7 @@ const loginFormSchema = z.object({
email: z.string().email().min(2).max(64),
password: z.string().min(6).max(64),
agreeToTerms: z.boolean().refine((value) => value === true, {
- message: "You must agree to the Terms of Service and Privacy Policy",
+ message: "You must agree to the Terms of Use and Privacy Policy",
}),
});
@@ -48,8 +48,8 @@ export default function LoginPage() {
});
if (user) {
- console.log("User exists, redirecting to profile");
- router.push("/profile");
+ console.log("User exists, redirecting to home");
+ router.push("/");
}
if (isUserLoading || isSupabaseLoading || user) {
@@ -98,23 +98,11 @@ export default function LoginPage() {
setFeedback(null);
};
- const onSignup = async (data: z.infer) => {
- if (await form.trigger()) {
- setIsLoading(true);
- const error = await signup(data);
- setIsLoading(false);
- if (error) {
- setFeedback(error);
- return;
- }
- setFeedback(null);
- }
- };
-
return (
-
+
Log in to your Account
+ {/*
-
+
*/}