Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-13 09:08:02 -05:00)

Compare commits: builder-re ... docker-qol
2 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 52de22469f | |
| | c27f163623 | |
@@ -1,18 +0,0 @@
version = 1

test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]

exclude_patterns = ["classic/**"]

[[analyzers]]
name = "javascript"

[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]

[[analyzers]]
name = "python"

[analyzers.meta]
runtime_version = "3.x.x"
@@ -1,61 +1,40 @@
# Ignore everything by default, selectively add things to context
*
classic/run

# Platform - Libs
!autogpt_platform/autogpt_libs/autogpt_libs/
!autogpt_platform/autogpt_libs/pyproject.toml
!autogpt_platform/autogpt_libs/poetry.lock
!autogpt_platform/autogpt_libs/README.md

# Platform - Backend
!autogpt_platform/backend/backend/
!autogpt_platform/backend/migrations/
!autogpt_platform/backend/schema.prisma
!autogpt_platform/backend/pyproject.toml
!autogpt_platform/backend/poetry.lock
!autogpt_platform/backend/README.md

# Platform - Market
!autogpt_platform/market/market/
!autogpt_platform/market/scripts.py
!autogpt_platform/market/schema.prisma
!autogpt_platform/market/pyproject.toml
!autogpt_platform/market/poetry.lock
!autogpt_platform/market/README.md

# Platform - Frontend
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/yarn.lock
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config
!autogpt_platform/frontend/*.config.*
!autogpt_platform/frontend/.env.*

# Classic - AutoGPT
# AutoGPT
!classic/original_autogpt/autogpt/
!classic/original_autogpt/pyproject.toml
!classic/original_autogpt/poetry.lock
!classic/original_autogpt/README.md
!classic/original_autogpt/tests/

# Classic - Benchmark
# Benchmark
!classic/benchmark/agbenchmark/
!classic/benchmark/pyproject.toml
!classic/benchmark/poetry.lock
!classic/benchmark/README.md

# Classic - Forge
# Forge
!classic/forge/
!classic/forge/pyproject.toml
!classic/forge/poetry.lock
!classic/forge/README.md

# Classic - Frontend
# Frontend
!classic/frontend/build/web/

# Platform
!autogpt_platform/

# Explicitly re-ignore some folders
.*
**/__pycache__

autogpt_platform/frontend/.next/
autogpt_platform/frontend/node_modules
autogpt_platform/frontend/.env.example
autogpt_platform/frontend/.env.local
autogpt_platform/backend/.env
autogpt_platform/backend/.venv/

autogpt_platform/market/.env
52 .github/PULL_REQUEST_TEMPLATE.md vendored
@@ -1,38 +1,36 @@
### Background

<!-- Clearly explain the need for these changes: -->

### Changes 🏗️

<!-- Concisely describe all of the changes made in this pull request: -->

### Checklist 📋

#### For code changes:
- [ ] I have clearly listed my changes in the PR description
- [ ] I have made a test plan
- [ ] I have tested my changes according to the test plan:
<!-- Put your test plan here: -->
- [ ] ...
### Testing 🔍
> [!NOTE]
Only for the new autogpt platform, currently in autogpt_platform/

<details>
<summary>Example test plan</summary>

- [ ] Create from scratch and execute an agent with at least 3 blocks
- [ ] Import an agent from file upload, and confirm it executes correctly
- [ ] Upload agent to marketplace
- [ ] Import an agent from marketplace and confirm it executes correctly
- [ ] Edit an agent from monitor, and confirm it executes correctly
</details>
<!--
Please make sure your changes have been tested and are in good working condition.
Here is a list of our critical paths, if you need some inspiration on what and how to test:
-->

#### For configuration changes:
- [ ] `.env.example` is updated or already compatible with my changes
- [ ] `docker-compose.yml` is updated or already compatible with my changes
- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
- Create from scratch and execute an agent with at least 3 blocks
- Import an agent from file upload, and confirm it executes correctly
- Upload agent to marketplace
- Import an agent from marketplace and confirm it executes correctly
- Edit an agent from monitor, and confirm it executes correctly

<details>
<summary>Examples of configuration changes</summary>
### Configuration Changes 📝
> [!NOTE]
Only for the new autogpt platform, currently in autogpt_platform/

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
</details>
If you're making configuration or infrastructure changes, please remember to check you've updated the related infrastructure code in the autogpt_platform/infra folder.

Examples of such changes might include:

- Changing ports
- Adding new services that need to communicate with each other
- Secrets or environment variable changes
- New or infrastructure changes such as databases
38 .github/dependabot.yml vendored
@@ -7,9 +7,6 @@ updates:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
@@ -29,9 +26,6 @@ updates:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
@@ -44,6 +38,7 @@ updates:
- "minor"
- "patch"

# frontend (Next.js project)
- package-ecosystem: "npm"
directory: "autogpt_platform/frontend"
@@ -51,9 +46,6 @@ updates:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
commit-message:
prefix: "chore(frontend/deps)"
prefix-development: "chore(frontend/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
@@ -66,6 +58,7 @@ updates:
- "minor"
- "patch"

# infra (Terraform)
- package-ecosystem: "terraform"
directory: "autogpt_platform/infra"
@@ -73,10 +66,26 @@ updates:
interval: "weekly"
open-pull-requests-limit: 5
target-branch: "dev"
commit-message:
prefix: "chore(infra/deps)"
prefix-development: "chore(infra/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"

# market (Poetry project)
- package-ecosystem: "pip"
directory: "autogpt_platform/market"
schedule:
interval: "weekly"
open-pull-requests-limit: 10
target-branch: "dev"
groups:
production-dependencies:
dependency-type: "production"
@@ -137,9 +146,6 @@ updates:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
commit-message:
prefix: "chore(platform/deps)"
prefix-development: "chore(platform/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
@@ -160,8 +166,6 @@ updates:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
commit-message:
prefix: "chore(docs/deps)"
groups:
production-dependencies:
dependency-type: "production"
@@ -5,7 +5,7 @@ on:
- cron: 20 4 * * 1,4

env:
BASE_BRANCH: dev
BASE_BRANCH: development
IMAGE_NAME: auto-gpt

jobs:
@@ -15,46 +15,46 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
load: true # save to docker images
# use GHA cache as read-only
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.schedule }}

build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}

prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
push_forced_label:

new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ null }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
92 .github/workflows/classic-autogpt-docker-ci.yml vendored
@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker CI

on:
push:
branches: [master, dev]
branches: [ master, development ]
paths:
- '.github/workflows/classic-autogpt-docker-ci.yml'
- 'classic/original_autogpt/**'
@@ -34,58 +34,58 @@ jobs:
matrix:
build-type: [release, dev]
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- if: runner.debug
run: |
ls -al
du -hs *
- if: runner.debug
run: |
ls -al
du -hs *

- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=${{ matrix.build-type }}
tags: ${{ env.IMAGE_NAME }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}

build_type: ${{ matrix.build-type }}
build_type: ${{ matrix.build-type }}

prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.event.after }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
new_commits_json: ${{ toJSON(github.event.commits) }}
compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

test:
runs-on: ubuntu-latest
@@ -117,16 +117,16 @@ jobs:

- id: build
name: Build image
uses: docker/build-push-action@v6
uses: docker/build-push-action@v5
with:
context: classic/
file: classic/Dockerfile.autogpt
build-args: BUILD_TYPE=dev # include pytest
build-args: BUILD_TYPE=dev # include pytest
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
labels: GIT_REVISION=${{ github.sha }}
load: true # save to docker images
load: true # save to docker images
# cache layers in GitHub Actions cache to speed up builds
cache-from: type=gha,scope=autogpt-docker-dev
cache-to: type=gha,scope=autogpt-docker-dev,mode=max
112 .github/workflows/classic-autogpt-docker-release.yml vendored
@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker Release

on:
release:
types: [published, edited]
types: [ published, edited ]

workflow_dispatch:
inputs:
@@ -19,69 +19,69 @@ jobs:
if: startsWith(github.ref, 'refs/tags/autogpt-')
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Checkout repository
uses: actions/checkout@v4

- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to Docker hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}
# slashes are not allowed in image tags, but can appear in git branch or tag names
- id: sanitize_tag
name: Sanitize image tag
run: |
tag=${raw_tag//\//-}
echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
env:
raw_tag: ${{ github.ref_name }}

- id: build
name: Build image
uses: docker/build-push-action@v6
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}
- id: build
name: Build image
uses: docker/build-push-action@v5
with:
context: classic/
file: Dockerfile.autogpt
build-args: BUILD_TYPE=release
load: true # save to docker images
# push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
tags: >
${{ env.IMAGE_NAME }},
${{ env.DEPLOY_IMAGE_NAME }}:latest,
${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
labels: GIT_REVISION=${{ github.sha }}

# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max
# cache layers in GitHub Actions cache to speed up builds
cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
cache-to: type=gha,scope=autogpt-docker-release,mode=max

- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
- name: Push image to Docker Hub
run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}

- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}
- name: Generate build report
env:
event_name: ${{ github.event_name }}
event_ref: ${{ github.event.ref }}
event_ref_type: ${{ github.event.ref}}
inputs_no_cache: ${{ inputs.no_cache }}

prod_branch: master
dev_branch: dev
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}
prod_branch: master
dev_branch: development
repository: ${{ github.repository }}
base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}

ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
ref_type: ${{ github.ref_type }}
current_ref: ${{ github.ref_name }}
commit_hash: ${{ github.sha }}
source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}

github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}
github_context_json: ${{ toJSON(github) }}
job_env_json: ${{ toJSON(env) }}
vars_json: ${{ toJSON(vars) }}

run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true
24 .github/workflows/classic-benchmark-ci.yml vendored
@@ -102,7 +102,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
agent-name: [forge]
agent-name: [ forge ]
fail-fast: false
timeout-minutes: 20
steps:
@@ -146,23 +146,23 @@ jobs:
echo "Running the following command: poetry run agbenchmark --mock --category=coding"
poetry run agbenchmark --mock --category=coding

# echo "Running the following command: poetry run agbenchmark --test=WriteFile"
# poetry run agbenchmark --test=WriteFile
echo "Running the following command: poetry run agbenchmark --test=WriteFile"
poetry run agbenchmark --test=WriteFile
cd ../benchmark
poetry install
echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
export BUILD_SKILL_TREE=true

# poetry run agbenchmark --mock
poetry run agbenchmark --mock

# CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
# if [ ! -z "$CHANGED" ]; then
# echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
# echo "$CHANGED"
# exit 1
# else
# echo "No unstaged changes."
# fi
CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
if [ ! -z "$CHANGED" ]; then
echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
echo "$CHANGED"
exit 1
else
echo "No unstaged changes."
fi
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
62 .github/workflows/classic-frontend-ci.yml vendored
@@ -4,7 +4,7 @@ on:
push:
branches:
- master
- dev
- development
- 'ci-test*' # This will match any branch that starts with "ci-test"
paths:
- 'classic/frontend/**'
@@ -24,37 +24,37 @@ jobs:
BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}

steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Checkout Repo
uses: actions/checkout@v4

- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'
- name: Setup Flutter
uses: subosito/flutter-action@v2
with:
flutter-version: '3.13.2'

- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/
- name: Build Flutter to Web
run: |
cd classic/frontend
flutter build web --base-href /app/

# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
#   if: github.event_name == 'push'
#   run: |
#     git config --local user.email "action@github.com"
#     git config --local user.name "GitHub Action"
#     git add classic/frontend/build/web
#     git checkout -B ${{ env.BUILD_BRANCH }}
#     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
#     git push -f origin ${{ env.BUILD_BRANCH }}
# - name: Commit and Push to ${{ env.BUILD_BRANCH }}
#   if: github.event_name == 'push'
#   run: |
#     git config --local user.email "action@github.com"
#     git config --local user.name "GitHub Action"
#     git add classic/frontend/build/web
#     git checkout -B ${{ env.BUILD_BRANCH }}
#     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
#     git push -f origin ${{ env.BUILD_BRANCH }}

- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"
- name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
if: github.event_name == 'push'
uses: peter-evans/create-pull-request@v7
with:
add-paths: classic/frontend/build/web
base: ${{ github.ref_name }}
branch: ${{ env.BUILD_BRANCH }}
delete-branch: true
title: "Update frontend build in `${{ github.ref_name }}`"
body: "This PR updates the frontend build based on commit ${{ github.sha }}."
commit-message: "Update frontend build based on commit ${{ github.sha }}"
5 .github/workflows/codeql.yml vendored
@@ -13,10 +13,9 @@ name: "CodeQL"

on:
push:
branches: [ "master", "release-*", "dev" ]
branches: [ "master", "release-*" ]
pull_request:
branches: [ "master", "release-*", "dev" ]
merge_group:
branches: [ "master", "release-*" ]
schedule:
- cron: '15 4 * * 0'
153 .github/workflows/platform-autgpt-deploy-prod.yml vendored
@@ -1,4 +1,4 @@
name: AutoGPT Platform - Deploy Prod Environment
name: AutoGPT Platform - Build, Push, and Deploy Prod Environment

on:
release:
@@ -8,6 +8,12 @@ permissions:
contents: 'read'
id-token: 'write'

env:
PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
GKE_CLUSTER: prod-gke-cluster
GKE_ZONE: us-central1-a
NAMESPACE: prod-agpt

jobs:
migrate:
environment: production
@@ -35,15 +41,142 @@ jobs:
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

- name: Run Market Migrations
working-directory: ./autogpt_platform/market
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

trigger:
needs: migrate
build-push-deploy:
environment: production
name: Build, Push, and Deploy
runs-on: ubuntu-latest

steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_prod
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- id: 'auth'
uses: 'google-github-actions/auth@v2'
with:
workload_identity_provider: 'projects/1021527134101/locations/global/workloadIdentityPools/prod-pool/providers/github'
service_account: 'prod-github-actions-sa@agpt-prod.iam.gserviceaccount.com'
token_format: 'access_token'
create_credentials_file: true

- name: 'Set up Cloud SDK'
uses: 'google-github-actions/setup-gcloud@v2'

- name: 'Configure Docker'
run: |
gcloud auth configure-docker us-east1-docker.pkg.dev

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-

- name: Check for changes
id: check_changes
run: |
git fetch origin master
BACKEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false")
FRONTEND_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false")
MARKET_CHANGED=$(git diff --name-only origin/master HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false")
echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT
echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT
echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT

- name: Get GKE credentials
uses: 'google-github-actions/get-gke-credentials@v2'
with:
cluster_name: ${{ env.GKE_CLUSTER }}
location: ${{ env.GKE_ZONE }}

- name: Build and Push Backend
if: steps.check_changes.outputs.backend_changed == 'true'
uses: docker/build-push-action@v2
with:
context: .
file: ./autogpt_platform/backend/Dockerfile
push: true
tags: us-east1-docker.pkg.dev/agpt-prod/agpt-backend-prod/agpt-backend-prod:${{ github.sha }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Build and Push Frontend
if: steps.check_changes.outputs.frontend_changed == 'true'
uses: docker/build-push-action@v2
with:
context: .
file: ./autogpt_platform/frontend/Dockerfile
push: true
tags: us-east1-docker.pkg.dev/agpt-prod/agpt-frontend-prod/agpt-frontend-prod:${{ github.sha }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Build and Push Market
if: steps.check_changes.outputs.market_changed == 'true'
uses: docker/build-push-action@v2
with:
context: .
file: ./autogpt_platform/market/Dockerfile
push: true
tags: us-east1-docker.pkg.dev/agpt-prod/agpt-market-prod/agpt-market-prod:${{ github.sha }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache

- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.4.0

- name: Deploy Backend
if: steps.check_changes.outputs.backend_changed == 'true'
run: |
helm upgrade autogpt-server ./autogpt-server \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-server/values.yaml \
-f autogpt-server/values.prod.yaml \
--set image.tag=${{ github.sha }}

- name: Deploy Websocket
if: steps.check_changes.outputs.backend_changed == 'true'
run: |
helm upgrade autogpt-websocket-server ./autogpt-websocket-server \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-websocket-server/values.yaml \
-f autogpt-websocket-server/values.prod.yaml \
--set image.tag=${{ github.sha }}

- name: Deploy Market
if: steps.check_changes.outputs.market_changed == 'true'
run: |
helm upgrade autogpt-market ./autogpt-market \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-market/values.yaml \
-f autogpt-market/values.prod.yaml \
--set image.tag=${{ github.sha }}

- name: Deploy Frontend
if: steps.check_changes.outputs.frontend_changed == 'true'
run: |
helm upgrade autogpt-builder ./autogpt-builder \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-builder/values.yaml \
-f autogpt-builder/values.prod.yaml \
--set image.tag=${{ github.sha }}
@@ -1,50 +0,0 @@
name: AutoGPT Platform - Deploy Dev Environment

on:
push:
branches: [ dev ]
paths:
- 'autogpt_platform/**'

permissions:
contents: 'read'
id-token: 'write'

jobs:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma

- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

trigger:
needs: migrate
runs-on: ubuntu-latest
steps:
- name: Trigger deploy workflow
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DEPLOY_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: build_deploy_dev
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
186 .github/workflows/platform-autogpt-deploy.yaml vendored Normal file
@@ -0,0 +1,186 @@
name: AutoGPT Platform - Build, Push, and Deploy Dev Environment

on:
push:
branches: [ dev ]
paths:
- 'autogpt_platform/backend/**'
- 'autogpt_platform/frontend/**'
- 'autogpt_platform/market/**'

permissions:
contents: 'read'
id-token: 'write'

env:
PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }}
GKE_CLUSTER: dev-gke-cluster
GKE_ZONE: us-central1-a
NAMESPACE: dev-agpt

jobs:
migrate:
environment: develop
name: Run migrations for AutoGPT Platform
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'

- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install prisma

- name: Run Backend Migrations
working-directory: ./autogpt_platform/backend
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

- name: Run Market Migrations
working-directory: ./autogpt_platform/market
run: |
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

build-push-deploy:
name: Build, Push, and Deploy
needs: migrate
runs-on: ubuntu-latest

steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- id: 'auth'
uses: 'google-github-actions/auth@v2'
with:
workload_identity_provider: 'projects/638488734936/locations/global/workloadIdentityPools/dev-pool/providers/github'
service_account: 'dev-github-actions-sa@agpt-dev.iam.gserviceaccount.com'
token_format: 'access_token'
create_credentials_file: true

- name: 'Set up Cloud SDK'
uses: 'google-github-actions/setup-gcloud@v2'

- name: 'Configure Docker'
run: |
gcloud auth configure-docker us-east1-docker.pkg.dev

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Cache Docker layers
uses: actions/cache@v4
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-

- name: Check for changes
id: check_changes
run: |
git fetch origin dev
BACKEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/backend/" && echo "true" || echo "false")
FRONTEND_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/frontend/" && echo "true" || echo "false")
MARKET_CHANGED=$(git diff --name-only origin/dev HEAD | grep "^autogpt_platform/market/" && echo "true" || echo "false")
echo "backend_changed=$BACKEND_CHANGED" >> $GITHUB_OUTPUT
echo "frontend_changed=$FRONTEND_CHANGED" >> $GITHUB_OUTPUT
echo "market_changed=$MARKET_CHANGED" >> $GITHUB_OUTPUT

- name: Get GKE credentials
uses: 'google-github-actions/get-gke-credentials@v2'
with:
cluster_name: ${{ env.GKE_CLUSTER }}
location: ${{ env.GKE_ZONE }}

- name: Build and Push Backend
if: steps.check_changes.outputs.backend_changed == 'true'
uses: docker/build-push-action@v2
with:
context: .
file: ./autogpt_platform/backend/Dockerfile
push: true
tags: us-east1-docker.pkg.dev/agpt-dev/agpt-backend-dev/agpt-backend-dev:${{ github.sha }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Build and Push Frontend
if: steps.check_changes.outputs.frontend_changed == 'true'
uses: docker/build-push-action@v2
with:
context: .
file: ./autogpt_platform/frontend/Dockerfile
push: true
tags: us-east1-docker.pkg.dev/agpt-dev/agpt-frontend-dev/agpt-frontend-dev:${{ github.sha }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Build and Push Market
if: steps.check_changes.outputs.market_changed == 'true'
uses: docker/build-push-action@v2
with:
context: .
file: ./autogpt_platform/market/Dockerfile
push: true
tags: us-east1-docker.pkg.dev/agpt-dev/agpt-market-dev/agpt-market-dev:${{ github.sha }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new,mode=max

- name: Move cache
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache

- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.4.0

- name: Deploy Backend
if: steps.check_changes.outputs.backend_changed == 'true'
run: |
helm upgrade autogpt-server ./autogpt-server \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-server/values.yaml \
-f autogpt-server/values.dev.yaml \
--set image.tag=${{ github.sha }}

- name: Deploy Websocket
if: steps.check_changes.outputs.backend_changed == 'true'
run: |
helm upgrade autogpt-websocket-server ./autogpt-websocket-server \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-websocket-server/values.yaml \
-f autogpt-websocket-server/values.dev.yaml \
--set image.tag=${{ github.sha }}

- name: Deploy Market
if: steps.check_changes.outputs.market_changed == 'true'
run: |
helm upgrade autogpt-market ./autogpt-market \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-market/values.yaml \
-f autogpt-market/values.dev.yaml \
--set image.tag=${{ github.sha }}

- name: Deploy Frontend
if: steps.check_changes.outputs.frontend_changed == 'true'
run: |
helm upgrade autogpt-builder ./autogpt-builder \
--namespace ${{ env.NAMESPACE }} \
-f autogpt-builder/values.yaml \
-f autogpt-builder/values.dev.yaml \
--set image.tag=${{ github.sha }}
56 .github/workflows/platform-autogpt-infra-ci.yml vendored Normal file
@@ -0,0 +1,56 @@
name: AutoGPT Platform - Infra

on:
push:
branches: [ master, dev ]
paths:
- '.github/workflows/platform-autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'
pull_request:
paths:
- '.github/workflows/platform-autogpt-infra-ci.yml'
- 'autogpt_platform/infra/**'

defaults:
run:
shell: bash
working-directory: autogpt_platform/infra

jobs:
lint:
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: TFLint
uses: pauloconnor/tflint-action@v0.0.2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tflint_path: terraform/
tflint_recurse: true
tflint_changed_only: false

- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: v3.14.4

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1

- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
if [[ -n "$changed" ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi

- name: Run chart-testing (lint)
if: steps.list-changed.outputs.changed == 'true'
run: ct lint --target-branch ${{ github.event.repository.default_branch }}
29 .github/workflows/platform-backend-ci.yml vendored
@@ -6,14 +6,11 @@ on:
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
- "autogpt_platform/autogpt_libs/**"
pull_request:
branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-backend-ci.yml"
- "autogpt_platform/backend/**"
- "autogpt_platform/autogpt_libs/**"
merge_group:

concurrency:
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -42,14 +39,6 @@ jobs:
REDIS_PASSWORD: testpassword
ports:
- 6379:6379
rabbitmq:
image: rabbitmq:3.12-management
ports:
- 5672:5672
- 15672:15672
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}

steps:
- name: Checkout repository
@@ -87,17 +76,6 @@ jobs:
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi

- name: Check poetry.lock
run: |
poetry lock

if ! git diff --quiet poetry.lock; then
echo "Error: poetry.lock not up to date."
echo
git diff poetry.lock
exit 1
fi

- name: Install Python dependencies
run: poetry install

@@ -147,13 +125,6 @@ jobs:
RUN_ENV: local
PORT: 8080
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# We know these are here, don't report this as a security vulnerability
# This is used as the default credential for the entire system's RabbitMQ instance
# If you want to replace this, you can do so by making our entire system generate
# new credentials for each local user and update the environment variables in
# the backend service, docker composes, and examples
RABBITMQ_DEFAULT_USER: 'rabbitmq_user_default'
RABBITMQ_DEFAULT_PASS: 'k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7'

# - name: Upload coverage reports to Codecov
#   uses: codecov/codecov-action@v4
65 .github/workflows/platform-frontend-ci.yml vendored
@@ -10,7 +10,6 @@ on:
paths:
- ".github/workflows/platform-frontend-ci.yml"
- "autogpt_platform/frontend/**"
merge_group:

defaults:
run:
@@ -23,7 +22,6 @@ jobs:

steps:
- uses: actions/checkout@v4

- name: Set up Node.js
uses: actions/setup-node@v4
with:
@@ -37,33 +35,26 @@ jobs:
run: |
yarn lint

type-check:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4

- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"

- name: Install dependencies
run: |
yarn install --frozen-lockfile

- name: Run tsc check
run: |
yarn type-check

test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
browser: [chromium, webkit]

steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false

# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: false
dotnet: false
haskell: false
large-packages: true
docker-images: true
swap-storage: true

- name: Checkout repository
uses: actions/checkout@v4
with:
@@ -74,20 +65,10 @@ jobs:
with:
node-version: "21"

- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
large-packages: false # slow
docker-images: false # limited benefit

- name: Copy default supabase .env
run: |
cp ../supabase/docker/.env.example ../.env

- name: Copy backend .env
run: |
cp ../backend/.env.example ../backend/.env

- name: Run docker compose
run: |
docker compose -f ../docker-compose.yml up -d
@@ -100,22 +81,16 @@ jobs:
run: |
cp .env.example .env

- name: Install Browser '${{ matrix.browser }}'
run: yarn playwright install --with-deps ${{ matrix.browser }}
- name: Install Playwright Browsers
run: yarn playwright install --with-deps

- name: Run tests
timeout-minutes: 20
run: |
yarn test --project=${{ matrix.browser }}

- name: Print Final Docker Compose logs
if: always()
run: |
docker compose -f ../docker-compose.yml logs
yarn test

- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}
with:
name: playwright-report-${{ matrix.browser }}
name: playwright-report
path: playwright-report/
retention-days: 30
125 .github/workflows/platform-market-ci.yml vendored Normal file
@@ -0,0 +1,125 @@
name: AutoGPT Platform - Backend CI

on:
push:
branches: [master, dev, ci-test*]
paths:
- ".github/workflows/platform-market-ci.yml"
- "autogpt_platform/market/**"
pull_request:
branches: [master, dev, release-*]
paths:
- ".github/workflows/platform-market-ci.yml"
- "autogpt_platform/market/**"

concurrency:
group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
run:
shell: bash
working-directory: autogpt_platform/market

jobs:
test:
permissions:
contents: read
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
python-version: ["3.10"]
runs-on: ubuntu-latest

steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: latest

- id: get_date
name: Get date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Set up Python dependency cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/market/poetry.lock') }}

- name: Install Poetry (Unix)
run: |
curl -sSL https://install.python-poetry.org | python3 -

if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi

- name: Install Python dependencies
run: poetry install

- name: Generate Prisma Client
run: poetry run prisma generate

- id: supabase
name: Start Supabase
working-directory: .
run: |
supabase init
supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}

- id: lint
name: Run Linter
run: poetry run lint

# Tests comment out because they do not work with prisma mock, nor have they been updated since they were created
# - name: Run pytest with coverage
#   run: |
#     if [[ "${{ runner.debug }}" == "1" ]]; then
#       poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
#     else
#       poetry run pytest -s -vv test
#     fi
#   if: success() || (failure() && steps.lint.outcome == 'failure')
#   env:
#     LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
#     DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
#     SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
#     SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
#     SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
#     REDIS_HOST: 'localhost'
#     REDIS_PORT: '6379'
#     REDIS_PASSWORD: 'testpassword'

env:
CI: true
PLAIN_OUTPUT: True
RUN_ENV: local
PORT: 8080

# - name: Upload coverage reports to Codecov
#   uses: codecov/codecov-action@v4
#   with:
#     token: ${{ secrets.CODECOV_TOKEN }}
#     flags: backend,${{ runner.os }}
@@ -25,7 +25,7 @@ jobs:
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 100
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management
1 .github/workflows/repo-workflow-checker.yml vendored
@@ -2,7 +2,6 @@ name: Repo - PR Status Checker
on:
pull_request:
types: [opened, synchronize, reopened]
merge_group:

jobs:
status-check:
@@ -7,18 +7,13 @@ from typing import Dict, List, Tuple

CHECK_INTERVAL = 30

def get_environment_variables() -> Tuple[str, str, str, str, str]:
"""Retrieve and return necessary environment variables."""
try:
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
event = json.load(f)

# Handle both PR and merge group events
if "pull_request" in event:
sha = event["pull_request"]["head"]["sha"]
else:
sha = os.environ["GITHUB_SHA"]
sha = event["pull_request"]["head"]["sha"]

return (
os.environ["GITHUB_API_URL"],
5 .gitignore vendored
@@ -171,8 +171,3 @@ ig*
.github_access_token
LICENSE.rtf
autogpt_platform/backend/settings.py
/.auth
/autogpt_platform/frontend/.auth

*.ign.*
.test-contents
@@ -9,7 +9,7 @@ repos:
- id: check-merge-conflict
- id: check-symlinks
- id: debug-statements

- repo: https://github.com/Yelp/detect-secrets
rev: v1.5.0
hooks:
@@ -19,122 +19,27 @@ repos:
files: ^autogpt_platform/
stages: [push]

- repo: local
# For proper type checking, all dependencies need to be up-to-date.
# It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
hooks:
- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Backend
alias: poetry-install-platform-backend
entry: poetry -C autogpt_platform/backend install
# include autogpt_libs source (since it's a path dependency)
files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
types: [file]
language: system
pass_filenames: false

- id: poetry-install
name: Check & Install dependencies - AutoGPT Platform - Libs
alias: poetry-install-platform-libs
entry: poetry -C autogpt_platform/autogpt_libs install
files: ^autogpt_platform/autogpt_libs/poetry\.lock$
types: [file]
language: system
pass_filenames: false

- id: poetry-install
name: Check & Install dependencies - Classic - AutoGPT
alias: poetry-install-classic-autogpt
entry: poetry -C classic/original_autogpt install
# include forge source (since it's a path dependency)
files: ^classic/(original_autogpt|forge)/poetry\.lock$
types: [file]
language: system
pass_filenames: false

- id: poetry-install
name: Check & Install dependencies - Classic - Forge
alias: poetry-install-classic-forge
entry: poetry -C classic/forge install
files: ^classic/forge/poetry\.lock$
types: [file]
language: system
pass_filenames: false

- id: poetry-install
name: Check & Install dependencies - Classic - Benchmark
alias: poetry-install-classic-benchmark
entry: poetry -C classic/benchmark install
files: ^classic/benchmark/poetry\.lock$
types: [file]
language: system
pass_filenames: false

- repo: local
# For proper type checking, Prisma client must be up-to-date.
hooks:
- id: prisma-generate
name: Prisma Generate - AutoGPT Platform - Backend
alias: prisma-generate-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
# include everything that triggers poetry install + the prisma schema
files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
types: [file]
language: system
pass_filenames: false

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.2
hooks:
- id: ruff
name: Lint (Ruff) - AutoGPT Platform - Backend
alias: ruff-lint-platform-backend
files: ^autogpt_platform/backend/
args: [--fix]

- id: ruff
name: Lint (Ruff) - AutoGPT Platform - Libs
alias: ruff-lint-platform-libs
files: ^autogpt_platform/autogpt_libs/
args: [--fix]

- id: ruff-format
name: Format (Ruff) - AutoGPT Platform - Libs
alias: ruff-lint-platform-libs
files: ^autogpt_platform/autogpt_libs/

- repo: local
# isort needs the context of which packages are installed to function, so we
# can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
hooks:
- id: isort
name: Lint (isort) - AutoGPT Platform - Backend
alias: isort-platform-backend
entry: poetry -P autogpt_platform/backend run isort -p backend
files: ^autogpt_platform/backend/
types: [file, python]
language: system

- id: isort
name: Lint (isort) - Classic - AutoGPT
alias: isort-classic-autogpt
entry: poetry -P classic/original_autogpt run isort -p autogpt
- id: isort-autogpt
name: Lint (isort) - AutoGPT
entry: poetry -C classic/original_autogpt run isort
files: ^classic/original_autogpt/
types: [file, python]
language: system

- id: isort
name: Lint (isort) - Classic - Forge
alias: isort-classic-forge
entry: poetry -P classic/forge run isort -p forge
- id: isort-forge
name: Lint (isort) - Forge
entry: poetry -C classic/forge run isort
files: ^classic/forge/
|
||||
types: [file, python]
|
||||
language: system
|
||||
|
||||
- id: isort
|
||||
name: Lint (isort) - Classic - Benchmark
|
||||
alias: isort-classic-benchmark
|
||||
entry: poetry -P classic/benchmark run isort -p agbenchmark
|
||||
- id: isort-benchmark
|
||||
name: Lint (isort) - Benchmark
|
||||
entry: poetry -C classic/benchmark run isort
|
||||
files: ^classic/benchmark/
|
||||
types: [file, python]
|
||||
language: system
|
||||
@@ -145,7 +50,8 @@ repos:
|
||||
# everything in .gitignore, so it works fine without any config or arguments.
|
||||
hooks:
|
||||
- id: black
|
||||
name: Format (Black)
|
||||
name: Lint (Black)
|
||||
language_version: python3.12
|
||||
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: 7.0.0
|
||||
@@ -153,79 +59,53 @@ repos:
|
||||
# them separately.
|
||||
hooks:
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - AutoGPT
|
||||
alias: flake8-classic-autogpt
|
||||
name: Lint (Flake8) - AutoGPT
|
||||
alias: flake8-autogpt
|
||||
files: ^classic/original_autogpt/(autogpt|scripts|tests)/
|
||||
args: [--config=classic/original_autogpt/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Forge
|
||||
alias: flake8-classic-forge
|
||||
name: Lint (Flake8) - Forge
|
||||
alias: flake8-forge
|
||||
files: ^classic/forge/(forge|tests)/
|
||||
args: [--config=classic/forge/.flake8]
|
||||
|
||||
- id: flake8
|
||||
name: Lint (Flake8) - Classic - Benchmark
|
||||
alias: flake8-classic-benchmark
|
||||
name: Lint (Flake8) - Benchmark
|
||||
alias: flake8-benchmark
|
||||
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
|
||||
args: [--config=classic/benchmark/.flake8]
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: prettier
|
||||
name: Format (Prettier) - AutoGPT Platform - Frontend
|
||||
alias: format-platform-frontend
|
||||
entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
|
||||
files: ^autogpt_platform/frontend/
|
||||
types: [file]
|
||||
language: system
|
||||
|
||||
- repo: local
|
||||
# To have watertight type checking, we check *all* the files in an affected
|
||||
# project. To trigger on poetry.lock we also reset the file `types` filter.
|
||||
hooks:
|
||||
- id: pyright
|
||||
name: Typecheck - AutoGPT Platform - Backend
|
||||
alias: pyright-platform-backend
|
||||
entry: poetry -C autogpt_platform/backend run pyright
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - AutoGPT Platform - Libs
|
||||
alias: pyright-platform-libs
|
||||
entry: poetry -C autogpt_platform/autogpt_libs run pyright
|
||||
files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - AutoGPT
|
||||
alias: pyright-classic-autogpt
|
||||
name: Typecheck - AutoGPT
|
||||
alias: pyright-autogpt
|
||||
entry: poetry -C classic/original_autogpt run pyright
|
||||
args: [-p, autogpt, autogpt]
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Forge
|
||||
alias: pyright-classic-forge
|
||||
name: Typecheck - Forge
|
||||
alias: pyright-forge
|
||||
entry: poetry -C classic/forge run pyright
|
||||
files: ^classic/forge/(forge/|poetry\.lock$)
|
||||
args: [-p, forge, forge]
|
||||
files: ^classic/forge/(classic/forge/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pyright
|
||||
name: Typecheck - Classic - Benchmark
|
||||
alias: pyright-classic-benchmark
|
||||
name: Typecheck - Benchmark
|
||||
alias: pyright-benchmark
|
||||
entry: poetry -C classic/benchmark run pyright
|
||||
args: [-p, benchmark, benchmark]
|
||||
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
types: [file]
|
||||
language: system
|
||||
@@ -233,45 +113,23 @@ repos:
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: tsc
|
||||
name: Typecheck - AutoGPT Platform - Frontend
|
||||
entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
|
||||
files: ^autogpt_platform/frontend/
|
||||
types: [file]
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: pytest
|
||||
name: Run tests - AutoGPT Platform - Backend
|
||||
alias: pytest-platform-backend
|
||||
entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
|
||||
# include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pytest
|
||||
name: Run tests - Classic - AutoGPT (excl. slow tests)
|
||||
alias: pytest-classic-autogpt
|
||||
- id: pytest-autogpt
|
||||
name: Run tests - AutoGPT (excl. slow tests)
|
||||
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
|
||||
# include forge source (since it's a path dependency) but exclude *_test.py files:
|
||||
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pytest
|
||||
name: Run tests - Classic - Forge (excl. slow tests)
|
||||
alias: pytest-classic-forge
|
||||
- id: pytest-forge
|
||||
name: Run tests - Forge (excl. slow tests)
|
||||
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
|
||||
files: ^classic/forge/(forge/|tests/|poetry\.lock$)
|
||||
files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- id: pytest
|
||||
name: Run tests - Classic - Benchmark
|
||||
alias: pytest-classic-benchmark
|
||||
- id: pytest-benchmark
|
||||
name: Run tests - Benchmark
|
||||
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
|
||||
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
|
||||
language: system
|
||||
|
||||
67  .vscode/launch.json  vendored
@@ -1,67 +0,0 @@
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Frontend: Server Side",
|
||||
"type": "node-terminal",
|
||||
"request": "launch",
|
||||
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
|
||||
"command": "yarn dev"
|
||||
},
|
||||
{
|
||||
"name": "Frontend: Client Side",
|
||||
"type": "msedge",
|
||||
"request": "launch",
|
||||
"url": "http://localhost:3000"
|
||||
},
|
||||
{
|
||||
"name": "Frontend: Full Stack",
|
||||
"type": "node-terminal",
|
||||
|
||||
"request": "launch",
|
||||
"command": "yarn dev",
|
||||
"cwd": "${workspaceFolder}/autogpt_platform/frontend",
|
||||
"serverReadyAction": {
|
||||
"pattern": "- Local:.+(https?://.+)",
|
||||
"uriFormat": "%s",
|
||||
"action": "debugWithEdge"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Backend",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "backend.app",
|
||||
// "env": {
|
||||
// "ENV": "dev"
|
||||
// },
|
||||
"envFile": "${workspaceFolder}/backend/.env",
|
||||
"justMyCode": false,
|
||||
"cwd": "${workspaceFolder}/autogpt_platform/backend"
|
||||
},
|
||||
{
|
||||
"name": "Marketplace",
|
||||
"type": "debugpy",
|
||||
"request": "launch",
|
||||
"module": "autogpt_platform.market.main",
|
||||
"env": {
|
||||
"ENV": "dev"
|
||||
},
|
||||
"envFile": "${workspaceFolder}/market/.env",
|
||||
"justMyCode": false,
|
||||
"cwd": "${workspaceFolder}/market"
|
||||
}
|
||||
],
|
||||
"compounds": [
|
||||
{
|
||||
"name": "Everything",
|
||||
"configurations": ["Backend", "Frontend: Full Stack"],
|
||||
// "preLaunchTask": "${defaultBuildTask}",
|
||||
"stopAll": true,
|
||||
"presentation": {
|
||||
"hidden": false,
|
||||
"order": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -35,7 +35,7 @@ The AutoGPT frontend is where users interact with our powerful AI automation pla

**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.

[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.
[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.

### 💽 AutoGPT Server
47  SECURITY.md
@@ -1,47 +0,0 @@
# Security Policy

## Reporting Security Issues

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
<!--- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty-->

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process

### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- Share any potential mitigations or workarounds if known

## Supported Versions
Only the following versions are eligible for security updates:

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).

---
Last updated: November 2024
2  autogpt_platform/.gitignore  vendored
@@ -1,2 +0,0 @@
*.ignore.*
*.ign.*

@@ -22,7 +22,7 @@ To run the AutoGPT Platform, follow these steps:

2. Run the following command:
   ```
   git submodule update --init --recursive --progress
   git submodule update --init --recursive
   ```
   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
@@ -1,34 +0,0 @@
import hashlib
import secrets
from typing import NamedTuple


class APIKeyContainer(NamedTuple):
    """Container for API key parts."""

    raw: str
    prefix: str
    postfix: str
    hash: str


class APIKeyManager:
    PREFIX: str = "agpt_"
    PREFIX_LENGTH: int = 8
    POSTFIX_LENGTH: int = 8

    def generate_api_key(self) -> APIKeyContainer:
        """Generate a new API key with all its parts."""
        raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
        return APIKeyContainer(
            raw=raw_key,
            prefix=raw_key[: self.PREFIX_LENGTH],
            postfix=raw_key[-self.POSTFIX_LENGTH :],
            hash=hashlib.sha256(raw_key.encode()).hexdigest(),
        )

    def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
        """Verify if a provided API key matches the stored hash."""
        if not provided_key.startswith(self.PREFIX):
            return False
        return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
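For orientation while reading this removed module: `generate_api_key` returns the plaintext key once, only the SHA-256 hash (plus prefix/postfix for display) is meant to be persisted, and `verify_api_key` re-hashes whatever key a caller presents. A minimal usage sketch based on the class above (the persistence call is a hypothetical placeholder, not an API from this repo):

```python
# Sketch only: APIKeyManager/APIKeyContainer are the classes shown above;
# save_api_key_record() is a hypothetical placeholder for whatever storage is used.
manager = APIKeyManager()
container = manager.generate_api_key()

# save_api_key_record(prefix=container.prefix, postfix=container.postfix, hash=container.hash)
print("Show this to the user exactly once:", container.raw)

# Later, on an incoming request carrying a raw key:
assert manager.verify_api_key(container.raw, container.hash)
assert not manager.verify_api_key("agpt_not-the-right-key", container.hash)
```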
@@ -1,8 +1,7 @@
import fastapi

from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
from .models import User


def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
@@ -17,12 +16,8 @@ def requires_admin_user(

def verify_user(payload: dict | None, admin_only: bool) -> User:
    if not payload:
        if Settings.ENABLE_AUTH:
            raise fastapi.HTTPException(
                status_code=401, detail="Authorization header is missing"
            )
        # This handles the case when authentication is disabled
        payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
        payload = {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "admin"}

    user_id = payload.get("sub")

@@ -35,12 +30,3 @@ def verify_user(payload: dict | None, admin_only: bool) -> User:
        raise fastapi.HTTPException(status_code=403, detail="Admin access required")

    return User.from_payload(payload)


def get_user_id(payload: dict = fastapi.Depends(auth_middleware)) -> str:
    user_id = payload.get("sub")
    if not user_id:
        raise fastapi.HTTPException(
            status_code=401, detail="User ID not found in token"
        )
    return user_id
@@ -1,8 +1,5 @@
from dataclasses import dataclass

DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"


# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
@@ -1,166 +0,0 @@
|
||||
import asyncio
|
||||
import contextlib
|
||||
import logging
|
||||
from functools import wraps
|
||||
from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cast
|
||||
|
||||
import ldclient
|
||||
from fastapi import HTTPException
|
||||
from ldclient import Context, LDClient
|
||||
from ldclient.config import Config
|
||||
from typing_extensions import ParamSpec
|
||||
|
||||
from .config import SETTINGS
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
P = ParamSpec("P")
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
def get_client() -> LDClient:
|
||||
"""Get the LaunchDarkly client singleton."""
|
||||
return ldclient.get()
|
||||
|
||||
|
||||
def initialize_launchdarkly() -> None:
|
||||
sdk_key = SETTINGS.launch_darkly_sdk_key
|
||||
logger.debug(
|
||||
f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}"
|
||||
)
|
||||
|
||||
if not sdk_key:
|
||||
logger.warning("LaunchDarkly SDK key not configured")
|
||||
return
|
||||
|
||||
config = Config(sdk_key)
|
||||
ldclient.set_config(config)
|
||||
|
||||
if ldclient.get().is_initialized():
|
||||
logger.info("LaunchDarkly client initialized successfully")
|
||||
else:
|
||||
logger.error("LaunchDarkly client failed to initialize")
|
||||
|
||||
|
||||
def shutdown_launchdarkly() -> None:
|
||||
"""Shutdown the LaunchDarkly client."""
|
||||
if ldclient.get().is_initialized():
|
||||
ldclient.get().close()
|
||||
logger.info("LaunchDarkly client closed successfully")
|
||||
|
||||
|
||||
def create_context(
|
||||
user_id: str, additional_attributes: Optional[Dict[str, Any]] = None
|
||||
) -> Context:
|
||||
"""Create LaunchDarkly context with optional additional attributes."""
|
||||
builder = Context.builder(str(user_id)).kind("user")
|
||||
if additional_attributes:
|
||||
for key, value in additional_attributes.items():
|
||||
builder.set(key, value)
|
||||
return builder.build()
|
||||
|
||||
|
||||
def feature_flag(
|
||||
flag_key: str,
|
||||
default: bool = False,
|
||||
) -> Callable[
|
||||
[Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
|
||||
]:
|
||||
"""
|
||||
Decorator for feature flag protected endpoints.
|
||||
"""
|
||||
|
||||
def decorator(
|
||||
func: Callable[P, Union[T, Awaitable[T]]],
|
||||
) -> Callable[P, Union[T, Awaitable[T]]]:
|
||||
@wraps(func)
|
||||
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
|
||||
try:
|
||||
user_id = kwargs.get("user_id")
|
||||
if not user_id:
|
||||
raise ValueError("user_id is required")
|
||||
|
||||
if not get_client().is_initialized():
|
||||
logger.warning(
|
||||
f"LaunchDarkly not initialized, using default={default}"
|
||||
)
|
||||
is_enabled = default
|
||||
else:
|
||||
context = create_context(str(user_id))
|
||||
is_enabled = get_client().variation(flag_key, context, default)
|
||||
|
||||
if not is_enabled:
|
||||
raise HTTPException(status_code=404, detail="Feature not available")
|
||||
|
||||
result = func(*args, **kwargs)
|
||||
if asyncio.iscoroutine(result):
|
||||
return await result
|
||||
return cast(T, result)
|
||||
except Exception as e:
|
||||
logger.error(f"Error evaluating feature flag {flag_key}: {e}")
|
||||
raise
|
||||
|
||||
@wraps(func)
|
||||
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
|
||||
try:
|
||||
user_id = kwargs.get("user_id")
|
||||
if not user_id:
|
||||
raise ValueError("user_id is required")
|
||||
|
||||
if not get_client().is_initialized():
|
||||
logger.warning(
|
||||
f"LaunchDarkly not initialized, using default={default}"
|
||||
)
|
||||
is_enabled = default
|
||||
else:
|
||||
context = create_context(str(user_id))
|
||||
is_enabled = get_client().variation(flag_key, context, default)
|
||||
|
||||
if not is_enabled:
|
||||
raise HTTPException(status_code=404, detail="Feature not available")
|
||||
|
||||
return cast(T, func(*args, **kwargs))
|
||||
except Exception as e:
|
||||
logger.error(f"Error evaluating feature flag {flag_key}: {e}")
|
||||
raise
|
||||
|
||||
return cast(
|
||||
Callable[P, Union[T, Awaitable[T]]],
|
||||
async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper,
|
||||
)
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def percentage_rollout(
|
||||
flag_key: str,
|
||||
default: bool = False,
|
||||
) -> Callable[
|
||||
[Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
|
||||
]:
|
||||
"""Decorator for percentage-based rollouts."""
|
||||
return feature_flag(flag_key, default)
|
||||
|
||||
|
||||
def beta_feature(
|
||||
flag_key: Optional[str] = None,
|
||||
unauthorized_response: Any = {"message": "Not available in beta"},
|
||||
) -> Callable[
|
||||
[Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
|
||||
]:
|
||||
"""Decorator for beta features."""
|
||||
actual_key = f"beta-{flag_key}" if flag_key else "beta"
|
||||
return feature_flag(actual_key, False)
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def mock_flag_variation(flag_key: str, return_value: Any):
|
||||
"""Context manager for testing feature flags."""
|
||||
original_variation = get_client().variation
|
||||
get_client().variation = lambda key, context, default: (
|
||||
return_value if key == flag_key else original_variation(key, context, default)
|
||||
)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
get_client().variation = original_variation
|
||||
@@ -1,45 +0,0 @@
|
||||
import pytest
|
||||
from ldclient import LDClient
|
||||
|
||||
from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def ld_client(mocker):
|
||||
client = mocker.Mock(spec=LDClient)
|
||||
mocker.patch("ldclient.get", return_value=client)
|
||||
client.is_initialized.return_value = True
|
||||
return client
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_feature_flag_enabled(ld_client):
|
||||
ld_client.variation.return_value = True
|
||||
|
||||
@feature_flag("test-flag")
|
||||
async def test_function(user_id: str):
|
||||
return "success"
|
||||
|
||||
result = test_function(user_id="test-user")
|
||||
assert result == "success"
|
||||
ld_client.variation.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_feature_flag_unauthorized_response(ld_client):
|
||||
ld_client.variation.return_value = False
|
||||
|
||||
@feature_flag("test-flag")
|
||||
async def test_function(user_id: str):
|
||||
return "success"
|
||||
|
||||
result = test_function(user_id="test-user")
|
||||
assert result == {"error": "disabled"}
|
||||
|
||||
|
||||
def test_mock_flag_variation(ld_client):
|
||||
with mock_flag_variation("test-flag", True):
|
||||
assert ld_client.variation("test-flag", None, False)
|
||||
|
||||
with mock_flag_variation("test-flag", False):
|
||||
assert ld_client.variation("test-flag", None, False)
|
||||
@@ -1,15 +0,0 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    launch_darkly_sdk_key: str = Field(
        default="",
        description="The Launch Darkly SDK key",
        validation_alias="LAUNCH_DARKLY_SDK_KEY",
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


SETTINGS = Settings()
@@ -6,7 +6,6 @@ from pathlib import Path

from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

from .filters import BelowLevelFilter
from .formatters import AGPTFormatter, StructuredLoggingFormatter

@@ -18,11 +17,12 @@ ERROR_LOG_FILE = "error.log"
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"

DEBUG_LOG_FORMAT = (
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(title)s%(message)s"
    "%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s"
)


class LoggingConfig(BaseSettings):

    level: str = Field(
        default="INFO",
        description="Logging level",
@@ -24,10 +24,10 @@ from .utils import remove_color_codes
        ),
        ("", ""),
        ("hello", "hello"),
        ("hello\x1b[31m world", "hello world"),
        ("\x1b[36mHello,\x1b[32m World!", "Hello, World!"),
        ("hello\x1B[31m world", "hello world"),
        ("\x1B[36mHello,\x1B[32m World!", "Hello, World!"),
        (
            "\x1b[1m\x1b[31mError:\x1b[0m\x1b[31m file not found",
            "\x1B[1m\x1B[31mError:\x1B[0m\x1B[31m file not found",
            "Error: file not found",
        ),
    ],
@@ -1,31 +0,0 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class RateLimitSettings(BaseSettings):
    redis_host: str = Field(
        default="redis://localhost:6379",
        description="Redis host",
        validation_alias="REDIS_HOST",
    )

    redis_port: str = Field(
        default="6379", description="Redis port", validation_alias="REDIS_PORT"
    )

    redis_password: str = Field(
        default="password",
        description="Redis password",
        validation_alias="REDIS_PASSWORD",
    )

    requests_per_minute: int = Field(
        default=60,
        description="Maximum number of requests allowed per minute per API key",
        validation_alias="RATE_LIMIT_REQUESTS_PER_MINUTE",
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


RATE_LIMIT_SETTINGS = RateLimitSettings()
@@ -1,51 +0,0 @@
import time
from typing import Tuple

from redis import Redis

from .config import RATE_LIMIT_SETTINGS


class RateLimiter:
    def __init__(
        self,
        redis_host: str = RATE_LIMIT_SETTINGS.redis_host,
        redis_port: str = RATE_LIMIT_SETTINGS.redis_port,
        redis_password: str = RATE_LIMIT_SETTINGS.redis_password,
        requests_per_minute: int = RATE_LIMIT_SETTINGS.requests_per_minute,
    ):
        self.redis = Redis(
            host=redis_host,
            port=int(redis_port),
            password=redis_password,
            decode_responses=True,
        )
        self.window = 60
        self.max_requests = requests_per_minute

    async def check_rate_limit(self, api_key_id: str) -> Tuple[bool, int, int]:
        """
        Check if request is within rate limits.

        Args:
            api_key_id: The API key identifier to check

        Returns:
            Tuple of (is_allowed, remaining_requests, reset_time)
        """
        now = time.time()
        window_start = now - self.window
        key = f"ratelimit:{api_key_id}:1min"

        pipe = self.redis.pipeline()
        pipe.zremrangebyscore(key, 0, window_start)
        pipe.zadd(key, {str(now): now})
        pipe.zcount(key, window_start, now)
        pipe.expire(key, self.window)

        _, _, request_count, _ = pipe.execute()

        remaining = max(0, self.max_requests - request_count)
        reset_time = int(now + self.window)

        return request_count <= self.max_requests, remaining, reset_time
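The limiter above is a sliding-window counter on a Redis sorted set: stale timestamps are trimmed, the current request is recorded, and the count within the 60-second window decides the outcome. A small sketch of calling it, assuming a Redis instance matching `RATE_LIMIT_SETTINGS` is reachable:

```python
# Sketch only: requires a running Redis server that matches RATE_LIMIT_SETTINGS.
import asyncio


async def demo() -> None:
    limiter = RateLimiter(requests_per_minute=60)
    is_allowed, remaining, reset_time = await limiter.check_rate_limit("api-key-123")
    if is_allowed:
        print(f"Allowed; {remaining} requests left until {reset_time}")
    else:
        print(f"Throttled; window resets at {reset_time}")


asyncio.run(demo())
```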
@@ -1,32 +0,0 @@
from fastapi import HTTPException, Request
from starlette.middleware.base import RequestResponseEndpoint

from .limiter import RateLimiter


async def rate_limit_middleware(request: Request, call_next: RequestResponseEndpoint):
    """FastAPI middleware for rate limiting API requests."""
    limiter = RateLimiter()

    if not request.url.path.startswith("/api"):
        return await call_next(request)

    api_key = request.headers.get("Authorization")
    if not api_key:
        return await call_next(request)

    api_key = api_key.replace("Bearer ", "")

    is_allowed, remaining, reset_time = await limiter.check_rate_limit(api_key)

    if not is_allowed:
        raise HTTPException(
            status_code=429, detail="Rate limit exceeded. Please try again later."
        )

    response = await call_next(request)
    response.headers["X-RateLimit-Limit"] = str(limiter.max_requests)
    response.headers["X-RateLimit-Remaining"] = str(remaining)
    response.headers["X-RateLimit-Reset"] = str(reset_time)

    return response
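As written, `rate_limit_middleware` creates its own `RateLimiter` per request, only guards paths under `/api`, and passes the rate-limit headers through on success. Registering it on an app would look roughly like this (a sketch; the import path is an assumption based on the package layout shown in this diff):

```python
# Sketch only: the import path below is assumed from the autogpt_libs layout above.
from fastapi import FastAPI

from autogpt_libs.rate_limit.middleware import rate_limit_middleware

app = FastAPI()
# FastAPI calls the registered function with (request, call_next) for every HTTP request.
app.middleware("http")(rate_limit_middleware)
```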
@@ -0,0 +1,9 @@
from .store import SupabaseIntegrationCredentialsStore
from .types import Credentials, APIKeyCredentials, OAuth2Credentials

__all__ = [
    "SupabaseIntegrationCredentialsStore",
    "Credentials",
    "APIKeyCredentials",
    "OAuth2Credentials",
]
@@ -1,37 +1,28 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import secrets
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from redis import Redis
|
||||
from backend.executor.database import DatabaseManager
|
||||
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
from autogpt_libs.utils.synchronize import RedisKeyedMutex
|
||||
|
||||
from backend.data.model import (
|
||||
from .types import (
|
||||
APIKeyCredentials,
|
||||
Credentials,
|
||||
OAuth2Credentials,
|
||||
OAuthState,
|
||||
UserIntegrations,
|
||||
)
|
||||
|
||||
from backend.util.settings import Settings
|
||||
|
||||
settings = Settings()
|
||||
|
||||
# This is an override since Ollama doesn't actually require an API key, but the credential system enforces that one be attached
|
||||
ollama_credentials = APIKeyCredentials(
|
||||
id="744fdc56-071a-4761-b5a5-0af0ce10a2b5",
|
||||
provider="ollama",
|
||||
api_key=SecretStr("FAKE_API_KEY"),
|
||||
title="Use Credits for Ollama",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
revid_credentials = APIKeyCredentials(
|
||||
id="fdb7f412-f519-48d1-9b5f-d2f73d0e01fe",
|
||||
provider="revid",
|
||||
@@ -55,21 +46,21 @@ replicate_credentials = APIKeyCredentials(
|
||||
)
|
||||
openai_credentials = APIKeyCredentials(
|
||||
id="53c25cb8-e3ee-465c-a4d1-e75a4c899c2a",
|
||||
provider="openai",
|
||||
provider="llm",
|
||||
api_key=SecretStr(settings.secrets.openai_api_key),
|
||||
title="Use Credits for OpenAI",
|
||||
expires_at=None,
|
||||
)
|
||||
anthropic_credentials = APIKeyCredentials(
|
||||
id="24e5d942-d9e3-4798-8151-90143ee55629",
|
||||
provider="anthropic",
|
||||
provider="llm",
|
||||
api_key=SecretStr(settings.secrets.anthropic_api_key),
|
||||
title="Use Credits for Anthropic",
|
||||
expires_at=None,
|
||||
)
|
||||
groq_credentials = APIKeyCredentials(
|
||||
id="4ec22295-8f97-4dd1-b42b-2c6957a02545",
|
||||
provider="groq",
|
||||
provider="llm",
|
||||
api_key=SecretStr(settings.secrets.groq_api_key),
|
||||
title="Use Credits for Groq",
|
||||
expires_at=None,
|
||||
@@ -81,96 +72,8 @@ did_credentials = APIKeyCredentials(
|
||||
title="Use Credits for D-ID",
|
||||
expires_at=None,
|
||||
)
|
||||
jina_credentials = APIKeyCredentials(
|
||||
id="7f26de70-ba0d-494e-ba76-238e65e7b45f",
|
||||
provider="jina",
|
||||
api_key=SecretStr(settings.secrets.jina_api_key),
|
||||
title="Use Credits for Jina",
|
||||
expires_at=None,
|
||||
)
|
||||
unreal_credentials = APIKeyCredentials(
|
||||
id="66f20754-1b81-48e4-91d0-f4f0dd82145f",
|
||||
provider="unreal",
|
||||
api_key=SecretStr(settings.secrets.unreal_speech_api_key),
|
||||
title="Use Credits for Unreal",
|
||||
expires_at=None,
|
||||
)
|
||||
open_router_credentials = APIKeyCredentials(
|
||||
id="b5a0e27d-0c98-4df3-a4b9-10193e1f3c40",
|
||||
provider="open_router",
|
||||
api_key=SecretStr(settings.secrets.open_router_api_key),
|
||||
title="Use Credits for Open Router",
|
||||
expires_at=None,
|
||||
)
|
||||
fal_credentials = APIKeyCredentials(
|
||||
id="6c0f5bd0-9008-4638-9d79-4b40b631803e",
|
||||
provider="fal",
|
||||
api_key=SecretStr(settings.secrets.fal_api_key),
|
||||
title="Use Credits for FAL",
|
||||
expires_at=None,
|
||||
)
|
||||
exa_credentials = APIKeyCredentials(
|
||||
id="96153e04-9c6c-4486-895f-5bb683b1ecec",
|
||||
provider="exa",
|
||||
api_key=SecretStr(settings.secrets.exa_api_key),
|
||||
title="Use Credits for Exa search",
|
||||
expires_at=None,
|
||||
)
|
||||
e2b_credentials = APIKeyCredentials(
|
||||
id="78d19fd7-4d59-4a16-8277-3ce310acf2b7",
|
||||
provider="e2b",
|
||||
api_key=SecretStr(settings.secrets.e2b_api_key),
|
||||
title="Use Credits for E2B",
|
||||
expires_at=None,
|
||||
)
|
||||
nvidia_credentials = APIKeyCredentials(
|
||||
id="96b83908-2789-4dec-9968-18f0ece4ceb3",
|
||||
provider="nvidia",
|
||||
api_key=SecretStr(settings.secrets.nvidia_api_key),
|
||||
title="Use Credits for Nvidia",
|
||||
expires_at=None,
|
||||
)
|
||||
screenshotone_credentials = APIKeyCredentials(
|
||||
id="3b1bdd16-8818-4bc2-8cbb-b23f9a3439ed",
|
||||
provider="screenshotone",
|
||||
api_key=SecretStr(settings.secrets.screenshotone_api_key),
|
||||
title="Use Credits for ScreenshotOne",
|
||||
expires_at=None,
|
||||
)
|
||||
mem0_credentials = APIKeyCredentials(
|
||||
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
|
||||
provider="mem0",
|
||||
api_key=SecretStr(settings.secrets.mem0_api_key),
|
||||
title="Use Credits for Mem0",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
apollo_credentials = APIKeyCredentials(
|
||||
id="544c62b5-1d0f-4156-8fb4-9525f11656eb",
|
||||
provider="apollo",
|
||||
api_key=SecretStr(settings.secrets.apollo_api_key),
|
||||
title="Use Credits for Apollo",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
smartlead_credentials = APIKeyCredentials(
|
||||
id="3bcdbda3-84a3-46af-8fdb-bfd2472298b8",
|
||||
provider="smartlead",
|
||||
api_key=SecretStr(settings.secrets.smartlead_api_key),
|
||||
title="Use Credits for SmartLead",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
zerobounce_credentials = APIKeyCredentials(
|
||||
id="63a6e279-2dc2-448e-bf57-85776f7176dc",
|
||||
provider="zerobounce",
|
||||
api_key=SecretStr(settings.secrets.zerobounce_api_key),
|
||||
title="Use Credits for ZeroBounce",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
DEFAULT_CREDENTIALS = [
|
||||
ollama_credentials,
|
||||
revid_credentials,
|
||||
ideogram_credentials,
|
||||
replicate_credentials,
|
||||
@@ -178,26 +81,12 @@ DEFAULT_CREDENTIALS = [
|
||||
anthropic_credentials,
|
||||
groq_credentials,
|
||||
did_credentials,
|
||||
jina_credentials,
|
||||
unreal_credentials,
|
||||
open_router_credentials,
|
||||
fal_credentials,
|
||||
exa_credentials,
|
||||
e2b_credentials,
|
||||
mem0_credentials,
|
||||
nvidia_credentials,
|
||||
screenshotone_credentials,
|
||||
apollo_credentials,
|
||||
smartlead_credentials,
|
||||
zerobounce_credentials,
|
||||
]
|
||||
|
||||
|
||||
class IntegrationCredentialsStore:
|
||||
def __init__(self):
|
||||
from backend.data.redis import get_redis
|
||||
|
||||
self.locks = RedisKeyedMutex(get_redis())
|
||||
class SupabaseIntegrationCredentialsStore:
|
||||
def __init__(self, redis: "Redis"):
|
||||
self.locks = RedisKeyedMutex(redis)
|
||||
|
||||
@property
|
||||
@thread_cached
|
||||
@@ -221,10 +110,6 @@ class IntegrationCredentialsStore:
|
||||
def get_all_creds(self, user_id: str) -> list[Credentials]:
|
||||
users_credentials = self._get_user_integrations(user_id).credentials
|
||||
all_credentials = users_credentials
|
||||
# These will always be added
|
||||
all_credentials.append(ollama_credentials)
|
||||
|
||||
# These will only be added if the API key is set
|
||||
if settings.secrets.revid_api_key:
|
||||
all_credentials.append(revid_credentials)
|
||||
if settings.secrets.ideogram_api_key:
|
||||
@@ -239,30 +124,6 @@ class IntegrationCredentialsStore:
|
||||
all_credentials.append(anthropic_credentials)
|
||||
if settings.secrets.did_api_key:
|
||||
all_credentials.append(did_credentials)
|
||||
if settings.secrets.jina_api_key:
|
||||
all_credentials.append(jina_credentials)
|
||||
if settings.secrets.unreal_speech_api_key:
|
||||
all_credentials.append(unreal_credentials)
|
||||
if settings.secrets.open_router_api_key:
|
||||
all_credentials.append(open_router_credentials)
|
||||
if settings.secrets.fal_api_key:
|
||||
all_credentials.append(fal_credentials)
|
||||
if settings.secrets.exa_api_key:
|
||||
all_credentials.append(exa_credentials)
|
||||
if settings.secrets.e2b_api_key:
|
||||
all_credentials.append(e2b_credentials)
|
||||
if settings.secrets.nvidia_api_key:
|
||||
all_credentials.append(nvidia_credentials)
|
||||
if settings.secrets.screenshotone_api_key:
|
||||
all_credentials.append(screenshotone_credentials)
|
||||
if settings.secrets.mem0_api_key:
|
||||
all_credentials.append(mem0_credentials)
|
||||
if settings.secrets.apollo_api_key:
|
||||
all_credentials.append(apollo_credentials)
|
||||
if settings.secrets.smartlead_api_key:
|
||||
all_credentials.append(smartlead_credentials)
|
||||
if settings.secrets.zerobounce_api_key:
|
||||
all_credentials.append(zerobounce_credentials)
|
||||
return all_credentials
|
||||
|
||||
def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
|
||||
@@ -318,24 +179,18 @@ class IntegrationCredentialsStore:
|
||||
]
|
||||
self._set_user_integration_creds(user_id, filtered_credentials)
|
||||
|
||||
def store_state_token(
|
||||
self, user_id: str, provider: str, scopes: list[str], use_pkce: bool = False
|
||||
) -> tuple[str, str]:
|
||||
def store_state_token(self, user_id: str, provider: str, scopes: list[str]) -> str:
|
||||
token = secrets.token_urlsafe(32)
|
||||
expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)
|
||||
|
||||
(code_challenge, code_verifier) = self._generate_code_challenge()
|
||||
|
||||
state = OAuthState(
|
||||
token=token,
|
||||
provider=provider,
|
||||
code_verifier=code_verifier,
|
||||
expires_at=int(expires_at.timestamp()),
|
||||
scopes=scopes,
|
||||
)
|
||||
|
||||
with self.locked_user_integrations(user_id):
|
||||
|
||||
user_integrations = self._get_user_integrations(user_id)
|
||||
oauth_states = user_integrations.oauth_states
|
||||
oauth_states.append(state)
|
||||
@@ -345,21 +200,39 @@ class IntegrationCredentialsStore:
|
||||
user_id=user_id, data=user_integrations
|
||||
)
|
||||
|
||||
return token, code_challenge
|
||||
return token
|
||||
|
||||
def _generate_code_challenge(self) -> tuple[str, str]:
|
||||
"""
|
||||
Generate code challenge using SHA256 from the code verifier.
|
||||
Currently only SHA256 is supported. (If we want to support more methods in the future, they can be added here.)
|
||||
"""
|
||||
code_verifier = secrets.token_urlsafe(128)
|
||||
sha256_hash = hashlib.sha256(code_verifier.encode("utf-8")).digest()
|
||||
code_challenge = base64.urlsafe_b64encode(sha256_hash).decode("utf-8")
|
||||
return code_challenge.replace("=", ""), code_verifier
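The verifier/challenge pair built here follows the standard PKCE S256 transform, so the authorization server can later re-hash the verifier it receives during the token exchange and compare it to the stored challenge. A standalone sanity-check sketch of that relationship (not part of the store itself):

```python
# Standalone sketch of the PKCE S256 relationship used by _generate_code_challenge above.
import base64
import hashlib
import secrets

code_verifier = secrets.token_urlsafe(128)
digest = hashlib.sha256(code_verifier.encode("utf-8")).digest()
code_challenge = base64.urlsafe_b64encode(digest).decode("utf-8").replace("=", "")

# The server repeats the same transform on the verifier and checks it matches the challenge.
assert code_challenge == base64.urlsafe_b64encode(
    hashlib.sha256(code_verifier.encode("utf-8")).digest()
).decode("utf-8").rstrip("=")
```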
|
||||
|
||||
def verify_state_token(
|
||||
def get_any_valid_scopes_from_state_token(
|
||||
self, user_id: str, token: str, provider: str
|
||||
) -> Optional[OAuthState]:
|
||||
) -> list[str]:
|
||||
"""
|
||||
Get the valid scopes from the OAuth state token. This will return any valid scopes
|
||||
from any OAuth state token for the given provider. If no valid scopes are found,
|
||||
an empty list is returned. DO NOT RELY ON THIS TOKEN TO AUTHENTICATE A USER, AS IT
|
||||
IS TO CHECK IF THE USER HAS GIVEN PERMISSIONS TO THE APPLICATION BEFORE EXCHANGING
|
||||
THE CODE FOR TOKENS.
|
||||
"""
|
||||
user_integrations = self._get_user_integrations(user_id)
|
||||
oauth_states = user_integrations.oauth_states
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
valid_state = next(
|
||||
(
|
||||
state
|
||||
for state in oauth_states
|
||||
if state.token == token
|
||||
and state.provider == provider
|
||||
and state.expires_at > now.timestamp()
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
if valid_state:
|
||||
return valid_state.scopes
|
||||
|
||||
return []
|
||||
|
||||
def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
|
||||
with self.locked_user_integrations(user_id):
|
||||
user_integrations = self._get_user_integrations(user_id)
|
||||
oauth_states = user_integrations.oauth_states
|
||||
@@ -381,9 +254,9 @@ class IntegrationCredentialsStore:
|
||||
oauth_states.remove(valid_state)
|
||||
user_integrations.oauth_states = oauth_states
|
||||
self.db_manager.update_user_integrations(user_id, user_integrations)
|
||||
return valid_state
|
||||
return True
|
||||
|
||||
return None
|
||||
return False
|
||||
|
||||
def _set_user_integration_creds(
|
||||
self, user_id: str, credentials: list[Credentials]
|
||||
@@ -401,5 +274,5 @@ class IntegrationCredentialsStore:
|
||||
return integrations
|
||||
|
||||
def locked_user_integrations(self, user_id: str):
|
||||
key = (f"user:{user_id}", "integrations")
|
||||
key = (self.db_manager, f"user:{user_id}", "integrations")
|
||||
return self.locks.locked(key)
|
||||
@@ -56,7 +56,6 @@ class OAuthState(BaseModel):
    token: str
    provider: str
    expires_at: int
    code_verifier: Optional[str] = None
    scopes: list[str]
    """Unix timestamp (seconds) indicating when this OAuth state expires"""
@@ -1,5 +1,5 @@
from typing import Callable, TypeVar, ParamSpec
import threading
from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

@@ -31,8 +31,7 @@ class RedisKeyedMutex:
        try:
            yield
        finally:
            if lock.locked():
                lock.release()
            lock.release()

    def acquire(self, key: Any) -> "RedisLock":
        """Acquires and returns a lock with the given key"""
@@ -46,7 +45,7 @@ class RedisKeyedMutex:
        return lock

    def release(self, key: Any):
        if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
        if lock := self.locks.get(key):
            lock.release()

    def release_all_locks(self):
493  autogpt_platform/autogpt_libs/poetry.lock  generated
@@ -1,4 +1,4 @@
|
||||
# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "aiohappyeyeballs"
|
||||
@@ -6,7 +6,6 @@ version = "2.4.0"
|
||||
description = "Happy Eyeballs for asyncio"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"},
|
||||
{file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"},
|
||||
@@ -18,7 +17,6 @@ version = "3.10.5"
|
||||
description = "Async http client/server framework (asyncio)"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:18a01eba2574fb9edd5f6e5fb25f66e6ce061da5dab5db75e13fe1558142e0a3"},
|
||||
{file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94fac7c6e77ccb1ca91e9eb4cb0ac0270b9fb9b289738654120ba8cebb1189c6"},
|
||||
@@ -131,7 +129,6 @@ version = "1.3.1"
|
||||
description = "aiosignal: a list of registered asynchronous callbacks"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
|
||||
{file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
|
||||
@@ -146,7 +143,6 @@ version = "0.7.0"
|
||||
description = "Reusable constraint types to use with typing.Annotated"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
|
||||
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
|
||||
@@ -158,7 +154,6 @@ version = "4.4.0"
|
||||
description = "High level compatibility layer for multiple asynchronous event loop implementations"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
|
||||
{file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
|
||||
@@ -181,12 +176,10 @@ version = "4.0.3"
|
||||
description = "Timeout context manager for asyncio programs"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main", "dev"]
|
||||
files = [
|
||||
{file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
|
||||
{file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
|
||||
]
|
||||
markers = {main = "python_version < \"3.11\"", dev = "python_full_version < \"3.11.3\""}
|
||||
|
||||
[[package]]
|
||||
name = "attrs"
|
||||
@@ -194,7 +187,6 @@ version = "24.2.0"
|
||||
description = "Classes Without Boilerplate"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
|
||||
{file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
|
||||
@@ -214,7 +206,6 @@ version = "5.5.0"
|
||||
description = "Extensible memoizing collections and decorators"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"},
|
||||
{file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"},
|
||||
@@ -226,7 +217,6 @@ version = "2024.8.30"
|
||||
description = "Python package for providing Mozilla's CA Bundle."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
|
||||
{file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
|
||||
@@ -238,7 +228,6 @@ version = "3.3.2"
|
||||
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
|
||||
optional = false
|
||||
python-versions = ">=3.7.0"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
|
||||
{file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
|
||||
@@ -338,7 +327,6 @@ version = "0.4.6"
|
||||
description = "Cross-platform colored terminal text."
|
||||
optional = false
|
||||
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
|
||||
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
|
||||
@@ -350,7 +338,6 @@ version = "1.2.14"
|
||||
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
|
||||
{file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
|
||||
@@ -368,7 +355,6 @@ version = "2.1.0"
|
||||
description = "A library to handle automated deprecations"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"},
|
||||
{file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"},
|
||||
@@ -383,8 +369,6 @@ version = "1.2.2"
|
||||
description = "Backport of PEP 654 (exception groups)"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.11\""
|
||||
files = [
|
||||
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
|
||||
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
|
||||
@@ -399,7 +383,6 @@ version = "1.2.2"
|
||||
description = "Dictionary with auto-expiring values for caching purposes"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "expiringdict-1.2.2-py3-none-any.whl", hash = "sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8"},
|
||||
{file = "expiringdict-1.2.2.tar.gz", hash = "sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09"},
|
||||
@@ -414,7 +397,6 @@ version = "1.4.1"
|
||||
description = "A list-like structure which implements collections.abc.MutableSequence"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
{file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"},
{file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"},
@@ -501,7 +483,6 @@ version = "2.19.2"
description = "Google API client core library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_api_core-2.19.2-py3-none-any.whl", hash = "sha256:53ec0258f2837dd53bbd3d3df50f5359281b3cc13f800c941dd15a9b5a415af4"},
{file = "google_api_core-2.19.2.tar.gz", hash = "sha256:ca07de7e8aa1c98a8bfca9321890ad2340ef7f2eb136e558cee68f24b94b0a8f"},
@@ -533,7 +514,6 @@ version = "2.34.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_auth-2.34.0-py2.py3-none-any.whl", hash = "sha256:72fd4733b80b6d777dcde515628a9eb4a577339437012874ea286bca7261ee65"},
{file = "google_auth-2.34.0.tar.gz", hash = "sha256:8eb87396435c19b20d32abd2f984e31c191a15284af72eb922f10e5bde9c04cc"},
@@ -557,7 +537,6 @@ version = "1.4.5"
description = "Google Cloud Appengine Logging API client library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_cloud_appengine_logging-1.4.5-py2.py3-none-any.whl", hash = "sha256:344e0244404049b42164e4d6dc718ca2c81b393d066956e7cb85fd9407ed9c48"},
{file = "google_cloud_appengine_logging-1.4.5.tar.gz", hash = "sha256:de7d766e5d67b19fc5833974b505b32d2a5bbdfb283fd941e320e7cfdae4cb83"},
@@ -575,7 +554,6 @@ version = "0.3.0"
description = "Google Cloud Audit Protos"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_cloud_audit_log-0.3.0-py2.py3-none-any.whl", hash = "sha256:8340793120a1d5aa143605def8704ecdcead15106f754ef1381ae3bab533722f"},
{file = "google_cloud_audit_log-0.3.0.tar.gz", hash = "sha256:901428b257020d8c1d1133e0fa004164a555e5a395c7ca3cdbb8486513df3a65"},
@@ -591,7 +569,6 @@ version = "2.4.1"
description = "Google Cloud API client core library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"},
{file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"},
@@ -606,14 +583,13 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]

[[package]]
name = "google-cloud-logging"
version = "3.11.4"
version = "3.11.3"
description = "Stackdriver Logging API client library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_cloud_logging-3.11.4-py2.py3-none-any.whl", hash = "sha256:1d465ac62df29fb94bba4d6b4891035e57d573d84541dd8a40eebbc74422b2f0"},
{file = "google_cloud_logging-3.11.4.tar.gz", hash = "sha256:32305d989323f3c58603044e2ac5d9cf23e9465ede511bbe90b4309270d3195c"},
{file = "google_cloud_logging-3.11.3-py2.py3-none-any.whl", hash = "sha256:b8ec23f2998f76a58f8492db26a0f4151dd500425c3f08448586b85972f3c494"},
{file = "google_cloud_logging-3.11.3.tar.gz", hash = "sha256:0a73cd94118875387d4535371d9e9426861edef8e44fba1261e86782d5b8d54f"},
]

[package.dependencies]
@@ -625,8 +601,7 @@ google-cloud-core = ">=2.0.0,<3.0.0dev"
grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev"
opentelemetry-api = ">=1.9.0"
proto-plus = [
{version = ">=1.25.0,<2.0.0dev", markers = "python_version >= \"3.13\""},
{version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
{version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\""},
{version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""},
]
protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
@@ -637,7 +612,6 @@ version = "1.65.0"
description = "Common protobufs used in Google APIs"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"},
{file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"},
@@ -652,18 +626,17 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]

[[package]]
name = "gotrue"
version = "2.11.1"
version = "2.9.3"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "gotrue-2.11.1-py3-none-any.whl", hash = "sha256:1b2d915bdc65fd0ad608532759ce9c72fa2e910145c1e6901f2188519e7bcd2d"},
{file = "gotrue-2.11.1.tar.gz", hash = "sha256:5594ceee60bd873e5f4fdd028b08dece3906f6013b6ed08e7786b71c0092fed0"},
{file = "gotrue-2.9.3-py3-none-any.whl", hash = "sha256:9d2e9c74405d879f4828e0a7b94daf167a6e109c10ae6e5c59a0e21446f6e423"},
{file = "gotrue-2.9.3.tar.gz", hash = "sha256:051551d80e642bdd2ab42cac78207745d89a2a08f429a1512d82624e675d8255"},
]

[package.dependencies]
httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
pydantic = ">=1.10,<3"

[[package]]
@@ -672,7 +645,6 @@ version = "0.13.1"
description = "IAM API client library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "grpc-google-iam-v1-0.13.1.tar.gz", hash = "sha256:3ff4b2fd9d990965e410965253c0da6f66205d5a8291c4c31c6ebecca18a9001"},
{file = "grpc_google_iam_v1-0.13.1-py2.py3-none-any.whl", hash = "sha256:c3e86151a981811f30d5e7330f271cee53e73bb87755e88cc3b6f0c7b5fe374e"},
@@ -689,7 +661,6 @@ version = "1.66.1"
description = "HTTP/2-based RPC framework"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"},
{file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"},
@@ -748,7 +719,6 @@ version = "1.66.1"
description = "Status proto mapping for gRPC"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "grpcio_status-1.66.1-py3-none-any.whl", hash = "sha256:cf9ed0b4a83adbe9297211c95cb5488b0cd065707e812145b842c85c4782ff02"},
{file = "grpcio_status-1.66.1.tar.gz", hash = "sha256:b3f7d34ccc46d83fea5261eea3786174459f763c31f6e34f1d24eba6d515d024"},
@@ -765,7 +735,6 @@ version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
@@ -777,7 +746,6 @@ version = "4.1.0"
description = "HTTP/2 State-Machine based protocol implementation"
optional = false
python-versions = ">=3.6.1"
groups = ["main"]
files = [
{file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"},
{file = "h2-4.1.0.tar.gz", hash = "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb"},
@@ -793,7 +761,6 @@ version = "4.0.0"
description = "Pure-Python HPACK header compression"
optional = false
python-versions = ">=3.6.1"
groups = ["main"]
files = [
{file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"},
{file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"},
@@ -805,7 +772,6 @@ version = "1.0.5"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
{file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
@@ -827,7 +793,6 @@ version = "0.27.2"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
{file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
@@ -854,7 +819,6 @@ version = "6.0.1"
description = "HTTP/2 framing layer for Python"
optional = false
python-versions = ">=3.6.1"
groups = ["main"]
files = [
{file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"},
{file = "hyperframe-6.0.1.tar.gz", hash = "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914"},
@@ -866,7 +830,6 @@ version = "3.8"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
{file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"},
{file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"},
@@ -878,7 +841,6 @@ version = "8.4.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
{file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
@@ -892,25 +854,12 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke
perf = ["ipython"]
test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]

[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]

[[package]]
name = "multidict"
version = "6.1.0"
description = "multidict implementation"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"},
{file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"},
@@ -1015,7 +964,6 @@ version = "1.27.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"},
{file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"},
@@ -1031,56 +979,37 @@ version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]

[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]

[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "postgrest"
version = "0.19.1"
version = "0.17.2"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "postgrest-0.19.1-py3-none-any.whl", hash = "sha256:a8e7be4e1abc69fd8eee5a49d7dc3a76dfbffbd778beed0b2bd7accb3f4f3a2a"},
{file = "postgrest-0.19.1.tar.gz", hash = "sha256:d8fa88953cced4f45efa0f412056c364f64ece8a35b5b35f458a7e58c133fbca"},
{file = "postgrest-0.17.2-py3-none-any.whl", hash = "sha256:f7c4f448e5a5e2d4c1dcf192edae9d1007c4261e9a6fb5116783a0046846ece2"},
{file = "postgrest-0.17.2.tar.gz", hash = "sha256:445cd4e4a191e279492549df0c4e827d32f9d01d0852599bb8a6efb0f07fcf78"},
]

[package.dependencies]
deprecation = ">=2.1.0,<3.0.0"
httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
pydantic = ">=1.9,<3.0"
strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""}

[[package]]
name = "proto-plus"
version = "1.26.0"
description = "Beautiful, Pythonic protocol buffers"
version = "1.24.0"
description = "Beautiful, Pythonic protocol buffers."
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "proto_plus-1.26.0-py3-none-any.whl", hash = "sha256:bf2dfaa3da281fc3187d12d224c707cb57214fb2c22ba854eb0c105a3fb2d4d7"},
{file = "proto_plus-1.26.0.tar.gz", hash = "sha256:6e93d5f5ca267b54300880fff156b6a3386b3fa3f43b1da62e680fc0c586ef22"},
{file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"},
{file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"},
]

[package.dependencies]
@@ -1095,7 +1024,6 @@ version = "5.28.0"
description = ""
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "protobuf-5.28.0-cp310-abi3-win32.whl", hash = "sha256:66c3edeedb774a3508ae70d87b3a19786445fe9a068dd3585e0cefa8a77b83d0"},
{file = "protobuf-5.28.0-cp310-abi3-win_amd64.whl", hash = "sha256:6d7cc9e60f976cf3e873acb9a40fed04afb5d224608ed5c1a105db4a3f09c5b6"},
@@ -1116,7 +1044,6 @@ version = "0.6.1"
description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
{file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
@@ -1128,7 +1055,6 @@ version = "0.4.1"
description = "A collection of ASN.1-based protocols modules"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
{file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
@@ -1139,20 +1065,22 @@ pyasn1 = ">=0.4.6,<0.7.0"

[[package]]
name = "pydantic"
version = "2.10.6"
version = "2.9.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"},
{file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"},
{file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
{file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
]

[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.27.2"
typing-extensions = ">=4.12.2"
pydantic-core = "2.23.4"
typing-extensions = [
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
]

[package.extras]
email = ["email-validator (>=2.0.0)"]
@@ -1160,112 +1088,100 @@ timezone = ["tzdata"]

[[package]]
name = "pydantic-core"
version = "2.27.2"
version = "2.23.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"},
{file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"},
{file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"},
{file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"},
{file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"},
{file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"},
{file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"},
{file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"},
{file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"},
{file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"},
{file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"},
{file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"},
{file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"},
{file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"},
{file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"},
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"},
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"},
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"},
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"},
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"},
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"},
{file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"},
{file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"},
{file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"},
{file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"},
{file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"},
{file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"},
{file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"},
{file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"},
{file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"},
{file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"},
{file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"},
{file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"},
{file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"},
{file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"},
{file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"},
{file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"},
{file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"},
{file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"},
{file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"},
{file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"},
{file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"},
{file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"},
{file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"},
{file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"},
{file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"},
{file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"},
{file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"},
{file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"},
{file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"},
{file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"},
{file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"},
{file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"},
{file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"},
{file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"},
{file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"},
{file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"},
{file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"},
{file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"},
{file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"},
{file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"},
{file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"},
{file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"},
{file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"},
{file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"},
{file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"},
{file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"},
{file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"},
{file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"},
{file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"},
{file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"},
{file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"},
{file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"},
{file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"},
{file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"},
{file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"},
{file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"},
{file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"},
{file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"},
{file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"},
{file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"},
{file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"},
{file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"},
{file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"},
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
|
||||
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
|
||||
{file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
|
||||
{file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
|
||||
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
|
||||
{file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
|
||||
{file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
|
||||
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
|
||||
{file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
|
||||
{file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
|
||||
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
|
||||
{file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
|
||||
{file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
|
||||
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
|
||||
{file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
|
||||
{file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
|
||||
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
|
||||
{file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
|
||||
{file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
|
||||
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
|
||||
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
|
||||
{file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1273,14 +1189,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
||||
|
||||
[[package]]
name = "pydantic-settings"
version = "2.7.1"
version = "2.6.1"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pydantic_settings-2.7.1-py3-none-any.whl", hash = "sha256:590be9e6e24d06db33a4262829edef682500ef008565a969c73d39d5f8bfb3fd"},
{file = "pydantic_settings-2.7.1.tar.gz", hash = "sha256:10c9caad35e64bfb3c2fbf70a078c0e25cc92499782e5200747f942a065dec93"},
{file = "pydantic_settings-2.6.1-py3-none-any.whl", hash = "sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87"},
{file = "pydantic_settings-2.6.1.tar.gz", hash = "sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0"},
]

[package.dependencies]
@@ -1294,14 +1209,13 @@ yaml = ["pyyaml (>=6.0.1)"]

[[package]]
name = "pyjwt"
version = "2.10.1"
version = "2.9.0"
description = "JSON Web Token implementation in Python"
optional = false
python-versions = ">=3.9"
groups = ["main"]
python-versions = ">=3.8"
files = [
{file = "PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb"},
{file = "pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953"},
{file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"},
{file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"},
]

[package.extras]
@@ -1310,73 +1224,12 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]

[[package]]
name = "pytest"
version = "8.3.3"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
{file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
]

[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.5,<2"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}

[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]

[[package]]
name = "pytest-asyncio"
version = "0.25.3"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"},
{file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"},
]

[package.dependencies]
pytest = ">=8.2,<9"

[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]

[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]

[package.dependencies]
pytest = ">=6.2.5"

[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]

[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
groups = ["main"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1391,7 +1244,6 @@ version = "1.0.1"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
{file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
@@ -1406,7 +1258,6 @@ version = "2.0.2"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "realtime-2.0.2-py3-none-any.whl", hash = "sha256:2634c915bc38807f2013f21e8bcc4d2f79870dfd81460ddb9393883d0489928a"},
{file = "realtime-2.0.2.tar.gz", hash = "sha256:519da9325b3b8102139d51785013d592f6b2403d81fa21d838a0b0234723ed7d"},
@@ -1420,14 +1271,13 @@ websockets = ">=11,<13"

[[package]]
name = "redis"
version = "5.2.1"
version = "5.2.0"
description = "Python client for Redis database and key-value store"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
files = [
{file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"},
{file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"},
{file = "redis-5.2.0-py3-none-any.whl", hash = "sha256:ae174f2bb3b1bf2b09d54bf3e51fbc1469cf6c10aa03e21141f51969801a7897"},
{file = "redis-5.2.0.tar.gz", hash = "sha256:0b1087665a771b1ff2e003aa5bdd354f15a70c9e25d5a7dbf9c722c16528a7b0"},
]

[package.dependencies]
@@ -1443,7 +1293,6 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1465,7 +1314,6 @@ version = "4.9"
description = "Pure-Python RSA implementation"
optional = false
python-versions = ">=3.6,<4"
groups = ["main"]
files = [
{file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"},
{file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"},
@@ -1474,41 +1322,12 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.9.3"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624"},
{file = "ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c"},
{file = "ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4"},
{file = "ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6"},
{file = "ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730"},
{file = "ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2"},
{file = "ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519"},
{file = "ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b"},
{file = "ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c"},
{file = "ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4"},
{file = "ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b"},
{file = "ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a"},
]

[[package]]
|
||||
name = "six"
|
||||
version = "1.16.0"
|
||||
description = "Python 2 and 3 compatibility utilities"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
|
||||
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
|
||||
@@ -1520,7 +1339,6 @@ version = "1.3.1"
|
||||
description = "Sniff out which async library your code is running under"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
|
||||
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
|
||||
@@ -1528,19 +1346,19 @@ files = [
|
||||
|
||||
[[package]]
|
||||
name = "storage3"
|
||||
version = "0.11.0"
|
||||
version = "0.8.2"
|
||||
description = "Supabase Storage client for Python."
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "storage3-0.11.0-py3-none-any.whl", hash = "sha256:de2d8f9c9103ca91a9a9d0d69d80b07a3ab6f647b93e023e6a1a97d3607b9728"},
|
||||
{file = "storage3-0.11.0.tar.gz", hash = "sha256:243583f2180686c0f0a19e6117d8a9796fd60c0ca72ec567d62b75a5af0d57a1"},
|
||||
{file = "storage3-0.8.2-py3-none-any.whl", hash = "sha256:f2e995b18c77a2a9265d1a33047d43e4d6abb11eb3ca5067959f68281c305de3"},
|
||||
{file = "storage3-0.8.2.tar.gz", hash = "sha256:db05d3fe8fb73bd30c814c4c4749664f37a5dfc78b629e8c058ef558c2b89f5a"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
|
||||
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
|
||||
python-dateutil = ">=2.8.2,<3.0.0"
|
||||
typing-extensions = ">=4.2.0,<5.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "strenum"
|
||||
@@ -1548,7 +1366,6 @@ version = "0.4.15"
|
||||
description = "An Enum that inherits from str."
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"},
|
||||
{file = "StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff"},
|
||||
@@ -1561,52 +1378,36 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]
|
||||
|
||||
[[package]]
|
||||
name = "supabase"
|
||||
version = "2.13.0"
|
||||
version = "2.9.1"
|
||||
description = "Supabase client for Python."
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supabase-2.13.0-py3-none-any.whl", hash = "sha256:6cfccc055be21dab311afc5e9d5b37f3a4966f8394703763fbc8f8e86f36eaa6"},
|
||||
{file = "supabase-2.13.0.tar.gz", hash = "sha256:452574d34bd978c8d11b5f02b0182b48e8854e511c969483c83875ec01495f11"},
|
||||
{file = "supabase-2.9.1-py3-none-any.whl", hash = "sha256:a96f857a465712cb551679c1df66ba772c834f861756ce4aa2aa4cb703f6aeb7"},
|
||||
{file = "supabase-2.9.1.tar.gz", hash = "sha256:51fce39c9eb50573126dabb342541ec5e1f13e7476938768f4b0ccfdb8c522cd"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
gotrue = ">=2.11.0,<3.0.0"
|
||||
httpx = ">=0.26,<0.29"
|
||||
postgrest = ">=0.19,<0.20"
|
||||
gotrue = ">=2.9.0,<3.0.0"
|
||||
httpx = ">=0.26,<0.28"
|
||||
postgrest = ">=0.17.0,<0.18.0"
|
||||
realtime = ">=2.0.0,<3.0.0"
|
||||
storage3 = ">=0.10,<0.12"
|
||||
supafunc = ">=0.9,<0.10"
|
||||
storage3 = ">=0.8.0,<0.9.0"
|
||||
supafunc = ">=0.6.0,<0.7.0"
|
||||
|
||||
[[package]]
|
||||
name = "supafunc"
|
||||
version = "0.9.2"
|
||||
version = "0.6.2"
|
||||
description = "Library for Supabase Functions"
|
||||
optional = false
|
||||
python-versions = "<4.0,>=3.9"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "supafunc-0.9.2-py3-none-any.whl", hash = "sha256:be5ee9f53842c4b0ba5f4abfb5bddf9f9e37e69e755ec0526852bb15af9d2ff5"},
|
||||
{file = "supafunc-0.9.2.tar.gz", hash = "sha256:f5164114a3e65e7e552539f3f1050aa3d4970885abdd7405555c17fd216e2da1"},
|
||||
{file = "supafunc-0.6.2-py3-none-any.whl", hash = "sha256:101b30616b0a1ce8cf938eca1df362fa4cf1deacb0271f53ebbd674190fb0da5"},
|
||||
{file = "supafunc-0.6.2.tar.gz", hash = "sha256:c7dfa20db7182f7fe4ae436e94e05c06cd7ed98d697fed75d68c7b9792822adc"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
|
||||
strenum = ">=0.4.15,<0.5.0"
|
||||
|
||||
[[package]]
|
||||
name = "tomli"
|
||||
version = "2.1.0"
|
||||
description = "A lil' TOML parser"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
markers = "python_version < \"3.11\""
|
||||
files = [
|
||||
{file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
|
||||
{file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
|
||||
]
|
||||
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
|
||||
|
||||
[[package]]
|
||||
name = "typing-extensions"
|
||||
@@ -1614,7 +1415,6 @@ version = "4.12.2"
|
||||
description = "Backported and Experimental Type Hints for Python 3.8+"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
|
||||
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
|
||||
@@ -1626,7 +1426,6 @@ version = "2.2.2"
|
||||
description = "HTTP library with thread-safe connection pooling, file post, and more."
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
|
||||
{file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
|
||||
@@ -1644,7 +1443,6 @@ version = "12.0"
|
||||
description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"},
|
||||
{file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"},
|
||||
@@ -1726,7 +1524,6 @@ version = "1.16.0"
|
||||
description = "Module for decorators, wrappers and monkey patching."
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"},
|
||||
{file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"},
|
||||
@@ -1806,7 +1603,6 @@ version = "1.11.1"
|
||||
description = "Yet another URL library"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "yarl-1.11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:400cd42185f92de559d29eeb529e71d80dfbd2f45c36844914a4a34297ca6f00"},
|
||||
{file = "yarl-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8258c86f47e080a258993eed877d579c71da7bda26af86ce6c2d2d072c11320d"},
|
||||
@@ -1912,7 +1708,6 @@ version = "3.20.1"
|
||||
description = "Backport of pathlib-compatible object wrapper for zip files"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "zipp-3.20.1-py3-none-any.whl", hash = "sha256:9960cd8967c8f85a56f920d5d507274e74f9ff813a0ab8889a5b5be2daf44064"},
|
||||
{file = "zipp-3.20.1.tar.gz", hash = "sha256:c22b14cc4763c5a5b04134207736c107db42e9d3ef2d9779d465f5f1bcba572b"},
|
||||
@@ -1927,6 +1722,6 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
type = ["pytest-mypy"]

[metadata]
lock-version = "2.1"
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
content-hash = "a4d81b3b55a67036ca7a441793e13e8fbe20af973fcf1623f36cdee7bc82999f"
content-hash = "f80654aae542b1f2f3a44a01f197f87ffbaea52f474dd2cc2b72b8d56b155563"

@@ -9,26 +9,17 @@ packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
colorama = "^0.4.6"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.11.4"
pydantic = "^2.10.6"
pydantic-settings = "^2.7.1"
pyjwt = "^2.10.1"
pytest-asyncio = "^0.25.3"
pytest-mock = "^3.14.0"
google-cloud-logging = "^3.11.3"
pydantic = "^2.9.2"
pydantic-settings = "^2.6.1"
pyjwt = "^2.8.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.13.0"
supabase = "^2.9.1"

[tool.poetry.group.dev.dependencies]
redis = "^5.2.1"
ruff = "^0.9.3"
redis = "^5.2.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
line-length = 88

[tool.ruff.lint]
extend-select = ["I"] # sort dependencies

@@ -15,9 +15,6 @@ REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_CREDIT=false
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=

# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
@@ -25,31 +22,14 @@ BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=

# Email for Postmark so we can send emails
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com

## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long

# RabbitMQ credentials -- Used for communication between services
RABBITMQ_HOST=localhost
RABBITMQ_PORT=5672
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7

## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000

## PLATFORM_BASE_URL must be set to a *publicly accessible* URL pointing to your backend
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000
# For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow for integrations to work.
FRONTEND_BASE_URL=http://localhost:3000

## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
@@ -71,50 +51,18 @@ GITHUB_CLIENT_SECRET=
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=

# Twitter (X) OAuth 2.0 with PKCE Configuration
# 1. Create a Twitter Developer Account:
# - Visit https://developer.x.com/en and sign up
# 2. Set up your application:
# - Navigate to Developer Portal > Projects > Create Project
# - Add a new app to your project
# 3. Configure app settings:
# - App Permissions: Read + Write + Direct Messages
# - App Type: Web App, Automated App or Bot
# - OAuth 2.0 Callback URL: http://localhost:3000/auth/integrations/oauth_callback
# - Save your Client ID and Client Secret below
TWITTER_CLIENT_ID=
TWITTER_CLIENT_SECRET=

# Linear App
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=

# To obtain Todoist API credentials:
# 1. Create a Todoist account at todoist.com
# 2. Visit the Developer Console: https://developer.todoist.com/appconsole.html
# 3. Click "Create new app"
# 4. Once created, copy your Client ID and Client Secret below
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=

# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app
# Choose "script" for the type
# Fill in the redirect uri as <your_frontend_url>/auth/integrations/oauth_callback, e.g. http://localhost:3000/auth/integrations/oauth_callback
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)"
REDDIT_USERNAME=
REDDIT_PASSWORD=

# Discord
DISCORD_BOT_TOKEN=
@@ -150,30 +98,6 @@ REPLICATE_API_KEY=
# Ideogram
IDEOGRAM_API_KEY=

# Fal
FAL_API_KEY=

# Exa
EXA_API_KEY=

# E2B
E2B_API_KEY=

# Mem0
MEM0_API_KEY=

# Nvidia
NVIDIA_API_KEY=

# Apollo
APOLLO_API_KEY=

# SmartLead
SMARTLEAD_API_KEY=

# ZeroBounce
ZEROBOUNCE_API_KEY=

# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false

autogpt_platform/backend/.gitignore (vendored, 5 changes)
@@ -5,7 +5,4 @@ dev.db-journal
build/
config.json
secrets/*
!secrets/.gitkeep

*.ignore.*
*.ign.*
!secrets/.gitkeep
@@ -1,4 +1,4 @@
FROM python:3.11.10-slim-bookworm AS builder
FROM python:3.11-slim-buster AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
@@ -6,21 +6,17 @@ ENV PYTHONUNBUFFERED 1

WORKDIR /app

RUN echo 'Acquire::http::Pipeline-Depth 0;\nAcquire::http::No-Cache true;\nAcquire::BrokenProxy true;\n' > /etc/apt/apt.conf.d/99fixbadproxy

RUN apt-get update --allow-releaseinfo-change --fix-missing

# Install build dependencies
RUN apt-get install -y build-essential
RUN apt-get install -y libpq5
RUN apt-get install -y libz-dev
RUN apt-get install -y libssl-dev
RUN apt-get install -y postgresql-client
RUN apt-get update \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev libpq5 gettext libz-dev libssl-dev postgresql-client git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

ENV POETRY_HOME=/opt/poetry
ENV POETRY_NO_INTERACTION=1
ENV POETRY_VIRTUALENVS_CREATE=false
ENV PATH=/opt/poetry/bin:$PATH
ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"

# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools
@@ -31,20 +27,24 @@ RUN pip3 install poetry
COPY autogpt_platform/autogpt_libs /app/autogpt_platform/autogpt_libs
COPY autogpt_platform/backend/poetry.lock autogpt_platform/backend/pyproject.toml /app/autogpt_platform/backend/
WORKDIR /app/autogpt_platform/backend
RUN poetry install --no-ansi --no-root
RUN poetry config virtualenvs.create false \
    && poetry install --no-interaction --no-ansi

# Generate Prisma client
COPY autogpt_platform/backend/schema.prisma ./
RUN poetry run prisma generate
RUN poetry config virtualenvs.create false \
    && poetry run prisma generate

FROM python:3.11.10-slim-bookworm AS server_dependencies
FROM python:3.11-slim-buster AS server_dependencies

WORKDIR /app

ENV POETRY_HOME=/opt/poetry \
ENV POETRY_VERSION=1.8.3 \
    POETRY_HOME="/opt/poetry" \
    POETRY_NO_INTERACTION=1 \
    POETRY_VIRTUALENVS_CREATE=false
ENV PATH=/opt/poetry/bin:$PATH
    POETRY_VIRTUALENVS_CREATE=false \
    PATH="$POETRY_HOME/bin:$PATH"


# Upgrade pip and setuptools to fix security vulnerabilities
RUN pip3 install --upgrade pip setuptools
@@ -71,7 +71,6 @@ WORKDIR /app/autogpt_platform/backend
FROM server_dependencies AS server

COPY autogpt_platform/backend /app/autogpt_platform/backend
RUN poetry install --no-ansi --only-root

ENV DATABASE_URL=""
ENV PORT=8000

@@ -66,17 +66,10 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes

### Starting the server without Docker

To run the server locally, start in the autogpt_platform folder:

```sh
cd ..
```

Run the following command to run the database in Docker while running the application locally:

```sh
docker compose --profile local up deps --build --detach
cd backend
poetry run app
```

@@ -207,4 +200,4 @@ To add a new agent block, you need to create a new class that inherits from `Blo
* `run` method: the main logic of the block.
* `test_input` & `test_output`: the sample input and output data for the block, which will be used to auto-test the block.
* You can mock the functions declared in the block using the `test_mock` field for your unit tests.
* Once you finish creating the block, you can test it by running `poetry run pytest -s test/block/test_block.py`.
* Once you finish creating the block, you can test it by running `pytest -s test/block/test_block.py`.

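For orientation, the checklist above can be read together as a small sketch. The example below is not part of this diff; the class name, field names, and placeholder ID are invented, and the exact `Block`/`BlockSchema` API should be checked against `backend/data/block.py`:

```python
from backend.data.block import Block, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class ReverseTextBlock(Block):
    """Hypothetical example block that reverses a string."""

    class Input(BlockSchema):
        text: str = SchemaField(description="Text to reverse")

    class Output(BlockSchema):
        reversed_text: str = SchemaField(description="The input text, reversed")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Reverses the given text",
            input_schema=ReverseTextBlock.Input,
            output_schema=ReverseTextBlock.Output,
            test_input={"text": "abc"},
            test_output=[("reversed_text", "cba")],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # The main logic of the block; each yield is (output_name, value).
        yield "reversed_text", input_data.text[::-1]
```
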
@@ -25,7 +25,6 @@ def main(**kwargs):
    """

    from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
    from backend.notifications import NotificationManager
    from backend.server.rest_api import AgentServer
    from backend.server.ws_api import WebsocketServer

@@ -33,7 +32,6 @@ def main(**kwargs):
        DatabaseManager(),
        ExecutionManager(),
        ExecutionScheduler(),
        NotificationManager(),
        WebsocketServer(),
        AgentServer(),
        **kwargs,

@@ -15,10 +15,10 @@ modules = [
    if f.is_file() and f.name != "__init__.py"
]
for module in modules:
    if not re.match("^[a-z0-9_.]+$", module):
    if not re.match("^[a-z_.]+$", module):
        raise ValueError(
            f"Block module {module} error: module name must be lowercase, "
            "and contain only alphanumeric characters and underscores."
            "separated by underscores, and contain only alphabet characters"
        )

    importlib.import_module(f".{module}", package=__name__)
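As a quick illustration of what the updated check accepts (this snippet is mine, not part of the diff), the new `^[a-z0-9_.]+$` pattern allows digits in block module names while still rejecting uppercase letters and dashes:

```python
import re

MODULE_NAME_PATTERN = re.compile(r"^[a-z0-9_.]+$")

# Module names containing digits now pass the check:
assert MODULE_NAME_PATTERN.match("sd3_5_image_block")
# Uppercase letters and dashes are still rejected:
assert not MODULE_NAME_PATTERN.match("AIImageGenerator")
assert not MODULE_NAME_PATTERN.match("ai-image-generator")
```
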
@@ -60,6 +60,13 @@ for block_cls in all_subclasses(Block):
    input_schema = block.input_schema.model_fields
    output_schema = block.output_schema.model_fields

    # Prevent duplicate field name in input_schema and output_schema
    duplicate_field_names = set(input_schema.keys()) & set(output_schema.keys())
    if duplicate_field_names:
        raise ValueError(
            f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
        )

    # Make sure `error` field is a string in the output schema
    if "error" in output_schema and output_schema["error"].annotation is not str:
        raise ValueError(

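To show what the new duplicate-field validation guards against (a hypothetical illustration, not code from this diff), a block whose input and output schemas both declare a field called `text` would now be rejected when blocks are loaded:

```python
from backend.data.block import BlockSchema
from backend.data.model import SchemaField


class Input(BlockSchema):
    text: str = SchemaField(description="Input text")


class Output(BlockSchema):
    text: str = SchemaField(description="Output text")  # same name as an input field


# During block loading, set(Input.model_fields) & set(Output.model_fields) == {"text"},
# so the loader raises a ValueError naming the offending block instead of failing later.
```
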
@@ -1,104 +0,0 @@
|
||||
import logging
|
||||
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockInput,
|
||||
BlockOutput,
|
||||
BlockSchema,
|
||||
BlockType,
|
||||
get_block,
|
||||
)
|
||||
from backend.data.execution import ExecutionStatus
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@thread_cached
|
||||
def get_executor_manager_client():
|
||||
from backend.executor import ExecutionManager
|
||||
from backend.util.service import get_service_client
|
||||
|
||||
return get_service_client(ExecutionManager)
|
||||
|
||||
|
||||
@thread_cached
|
||||
def get_event_bus():
|
||||
from backend.data.execution import RedisExecutionEventBus
|
||||
|
||||
return RedisExecutionEventBus()
|
||||
|
||||
|
||||
class AgentExecutorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
user_id: str = SchemaField(description="User ID")
|
||||
graph_id: str = SchemaField(description="Graph ID")
|
||||
graph_version: int = SchemaField(description="Graph Version")
|
||||
|
||||
data: BlockInput = SchemaField(description="Input data for the graph")
|
||||
input_schema: dict = SchemaField(description="Input schema for the graph")
|
||||
output_schema: dict = SchemaField(description="Output schema for the graph")
|
||||
|
||||
class Output(BlockSchema):
|
||||
pass
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="e189baac-8c20-45a1-94a7-55177ea42565",
|
||||
description="Executes an existing agent inside your agent",
|
||||
input_schema=AgentExecutorBlock.Input,
|
||||
output_schema=AgentExecutorBlock.Output,
|
||||
block_type=BlockType.AGENT,
|
||||
categories={BlockCategory.AGENT},
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
executor_manager = get_executor_manager_client()
|
||||
event_bus = get_event_bus()
|
||||
|
||||
graph_exec = executor_manager.add_execution(
|
||||
graph_id=input_data.graph_id,
|
||||
graph_version=input_data.graph_version,
|
||||
user_id=input_data.user_id,
|
||||
data=input_data.data,
|
||||
)
|
||||
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
|
||||
logger.info(f"Starting execution of {log_id}")
|
||||
|
||||
for event in event_bus.listen(
|
||||
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
|
||||
):
|
||||
logger.info(
|
||||
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
|
||||
)
|
||||
|
||||
if not event.node_id:
|
||||
if event.status in [
|
||||
ExecutionStatus.COMPLETED,
|
||||
ExecutionStatus.TERMINATED,
|
||||
ExecutionStatus.FAILED,
|
||||
]:
|
||||
logger.info(f"Execution {log_id} ended with status {event.status}")
|
||||
break
|
||||
else:
|
||||
continue
|
||||
|
||||
if not event.block_id:
|
||||
logger.warning(f"{log_id} received event without block_id {event}")
|
||||
continue
|
||||
|
||||
block = get_block(event.block_id)
|
||||
if not block or block.block_type != BlockType.OUTPUT:
|
||||
continue
|
||||
|
||||
output_name = event.input_data.get("name")
|
||||
if not output_name:
|
||||
logger.warning(f"{log_id} produced an output with no name {event}")
|
||||
continue
|
||||
|
||||
for output_data in event.output_data.get("output", []):
|
||||
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
|
||||
yield output_name, output_data
|
||||
@@ -1,325 +0,0 @@
|
||||
from enum import Enum
|
||||
from typing import Literal
|
||||
|
||||
import replicate
|
||||
from pydantic import SecretStr
|
||||
from replicate.helpers import FileOutput
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockSchema
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
|
||||
class ImageSize(str, Enum):
|
||||
"""
|
||||
Semantic sizes that map reliably across all models
|
||||
"""
|
||||
|
||||
SQUARE = "square" # For profile pictures, icons, etc.
|
||||
LANDSCAPE = "landscape" # For traditional photos, scenes
|
||||
PORTRAIT = "portrait" # For vertical photos, portraits
|
||||
WIDE = "wide" # For cinematic, desktop wallpapers
|
||||
TALL = "tall" # For mobile wallpapers, stories
|
||||
|
||||
|
||||
# Mapping semantic sizes to model-specific formats
|
||||
SIZE_TO_SD_RATIO = {
|
||||
ImageSize.SQUARE: "1:1",
|
||||
ImageSize.LANDSCAPE: "4:3",
|
||||
ImageSize.PORTRAIT: "3:4",
|
||||
ImageSize.WIDE: "16:9",
|
||||
ImageSize.TALL: "9:16",
|
||||
}
|
||||
|
||||
SIZE_TO_FLUX_RATIO = {
|
||||
ImageSize.SQUARE: "1:1",
|
||||
ImageSize.LANDSCAPE: "4:3",
|
||||
ImageSize.PORTRAIT: "3:4",
|
||||
ImageSize.WIDE: "16:9",
|
||||
ImageSize.TALL: "9:16",
|
||||
}
|
||||
|
||||
SIZE_TO_FLUX_DIMENSIONS = {
|
||||
ImageSize.SQUARE: (1024, 1024),
|
||||
ImageSize.LANDSCAPE: (1365, 1024),
|
||||
ImageSize.PORTRAIT: (1024, 1365),
|
||||
ImageSize.WIDE: (1440, 810), # Adjusted to maintain 16:9 within 1440 limit
|
||||
ImageSize.TALL: (810, 1440), # Adjusted to maintain 9:16 within 1440 limit
|
||||
}
|
||||
|
||||
SIZE_TO_RECRAFT_DIMENSIONS = {
|
||||
ImageSize.SQUARE: "1024x1024",
|
||||
ImageSize.LANDSCAPE: "1365x1024",
|
||||
ImageSize.PORTRAIT: "1024x1365",
|
||||
ImageSize.WIDE: "1536x1024",
|
||||
ImageSize.TALL: "1024x1536",
|
||||
}
|
||||
|
||||
|
||||
class ImageStyle(str, Enum):
|
||||
"""
|
||||
Complete set of supported styles
|
||||
"""
|
||||
|
||||
ANY = "any"
|
||||
# Realistic image styles
|
||||
REALISTIC = "realistic_image"
|
||||
REALISTIC_BW = "realistic_image/b_and_w"
|
||||
REALISTIC_HDR = "realistic_image/hdr"
|
||||
REALISTIC_NATURAL = "realistic_image/natural_light"
|
||||
REALISTIC_STUDIO = "realistic_image/studio_portrait"
|
||||
REALISTIC_ENTERPRISE = "realistic_image/enterprise"
|
||||
REALISTIC_HARD_FLASH = "realistic_image/hard_flash"
|
||||
REALISTIC_MOTION_BLUR = "realistic_image/motion_blur"
|
||||
# Digital illustration styles
|
||||
DIGITAL_ART = "digital_illustration"
|
||||
PIXEL_ART = "digital_illustration/pixel_art"
|
||||
HAND_DRAWN = "digital_illustration/hand_drawn"
|
||||
GRAIN = "digital_illustration/grain"
|
||||
SKETCH = "digital_illustration/infantile_sketch"
|
||||
POSTER = "digital_illustration/2d_art_poster"
|
||||
POSTER_2 = "digital_illustration/2d_art_poster_2"
|
||||
HANDMADE_3D = "digital_illustration/handmade_3d"
|
||||
HAND_DRAWN_OUTLINE = "digital_illustration/hand_drawn_outline"
|
||||
ENGRAVING_COLOR = "digital_illustration/engraving_color"
|
||||
|
||||
|
||||
class ImageGenModel(str, Enum):
|
||||
"""
|
||||
Available model providers
|
||||
"""
|
||||
|
||||
FLUX = "Flux 1.1 Pro"
|
||||
FLUX_ULTRA = "Flux 1.1 Pro Ultra"
|
||||
RECRAFT = "Recraft v3"
|
||||
SD3_5 = "Stable Diffusion 3.5 Medium"
|
||||
|
||||
|
||||
class AIImageGeneratorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.REPLICATE], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
description="Enter your Replicate API key to access the image generation API. You can obtain an API key from https://replicate.com/account/api-tokens.",
|
||||
)
|
||||
prompt: str = SchemaField(
|
||||
description="Text prompt for image generation",
|
||||
placeholder="e.g., 'A red panda using a laptop in a snowy forest'",
|
||||
title="Prompt",
|
||||
)
|
||||
model: ImageGenModel = SchemaField(
|
||||
description="The AI model to use for image generation",
|
||||
default=ImageGenModel.SD3_5,
|
||||
title="Model",
|
||||
)
|
||||
size: ImageSize = SchemaField(
|
||||
description=(
|
||||
"Format of the generated image:\n"
|
||||
"- Square: Perfect for profile pictures, icons\n"
|
||||
"- Landscape: Traditional photo format\n"
|
||||
"- Portrait: Vertical photos, portraits\n"
|
||||
"- Wide: Cinematic format, desktop wallpapers\n"
|
||||
"- Tall: Mobile wallpapers, social media stories"
|
||||
),
|
||||
default=ImageSize.SQUARE,
|
||||
title="Image Format",
|
||||
)
|
||||
style: ImageStyle = SchemaField(
|
||||
description="Visual style for the generated image",
|
||||
default=ImageStyle.ANY,
|
||||
title="Image Style",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
image_url: str = SchemaField(description="URL of the generated image")
|
||||
error: str = SchemaField(description="Error message if generation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ed1ae7a0-b770-4089-b520-1f0005fad19a",
|
||||
description="Generate images using various AI models through a unified interface",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AIImageGeneratorBlock.Input,
|
||||
output_schema=AIImageGeneratorBlock.Output,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"prompt": "An octopus using a laptop in a snowy forest with 'AutoGPT' clearly visible on the screen",
|
||||
"model": ImageGenModel.RECRAFT,
|
||||
"size": ImageSize.SQUARE,
|
||||
"style": ImageStyle.REALISTIC,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
(
|
||||
"image_url",
|
||||
"https://replicate.delivery/generated-image.webp",
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"_run_client": lambda *args, **kwargs: "https://replicate.delivery/generated-image.webp"
|
||||
},
|
||||
)
|
||||
|
||||
def _run_client(
|
||||
self, credentials: APIKeyCredentials, model_name: str, input_params: dict
|
||||
):
|
||||
try:
|
||||
# Initialize Replicate client
|
||||
client = replicate.Client(api_token=credentials.api_key.get_secret_value())
|
||||
|
||||
# Run the model with input parameters
|
||||
output = client.run(model_name, input=input_params, wait=False)
|
||||
|
||||
# Process output
|
||||
if isinstance(output, list) and len(output) > 0:
|
||||
if isinstance(output[0], FileOutput):
|
||||
result_url = output[0].url
|
||||
else:
|
||||
result_url = output[0]
|
||||
elif isinstance(output, FileOutput):
|
||||
result_url = output.url
|
||||
elif isinstance(output, str):
|
||||
result_url = output
|
||||
else:
|
||||
result_url = None
|
||||
|
||||
return result_url
|
||||
|
||||
except TypeError as e:
|
||||
raise TypeError(f"Error during model execution: {e}")
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Unexpected error during model execution: {e}")
|
||||
|
||||
def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
|
||||
try:
|
||||
# Handle style-based prompt modification for models without native style support
|
||||
modified_prompt = input_data.prompt
|
||||
if input_data.model not in [ImageGenModel.RECRAFT]:
|
||||
style_prefix = self._style_to_prompt_prefix(input_data.style)
|
||||
modified_prompt = f"{style_prefix} {modified_prompt}".strip()
|
||||
|
||||
if input_data.model == ImageGenModel.SD3_5:
|
||||
# Use Stable Diffusion 3.5 with aspect ratio
|
||||
input_params = {
|
||||
"prompt": modified_prompt,
|
||||
"aspect_ratio": SIZE_TO_SD_RATIO[input_data.size],
|
||||
"output_format": "webp",
|
||||
"output_quality": 90,
|
||||
"steps": 40,
|
||||
"cfg_scale": 7.0,
|
||||
}
|
||||
output = self._run_client(
|
||||
credentials,
|
||||
"stability-ai/stable-diffusion-3.5-medium",
|
||||
input_params,
|
||||
)
|
||||
return output
|
||||
|
||||
elif input_data.model == ImageGenModel.FLUX:
|
||||
# Use Flux-specific dimensions with 'jpg' format to avoid ReplicateError
|
||||
width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size]
|
||||
input_params = {
|
||||
"prompt": modified_prompt,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size],
|
||||
"output_format": "jpg", # Set to jpg for Flux models
|
||||
"output_quality": 90,
|
||||
}
|
||||
output = self._run_client(
|
||||
credentials, "black-forest-labs/flux-1.1-pro", input_params
|
||||
)
|
||||
return output
|
||||
|
||||
elif input_data.model == ImageGenModel.FLUX_ULTRA:
|
||||
width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size]
|
||||
input_params = {
|
||||
"prompt": modified_prompt,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size],
|
||||
"output_format": "jpg",
|
||||
"output_quality": 90,
|
||||
}
|
||||
output = self._run_client(
|
||||
credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
|
||||
)
|
||||
return output
|
||||
|
||||
elif input_data.model == ImageGenModel.RECRAFT:
|
||||
input_params = {
|
||||
"prompt": input_data.prompt,
|
||||
"size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
|
||||
"style": input_data.style.value,
|
||||
}
|
||||
output = self._run_client(
|
||||
credentials, "recraft-ai/recraft-v3", input_params
|
||||
)
|
||||
return output
|
||||
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to generate image: {str(e)}")
|
||||
|
||||
def _style_to_prompt_prefix(self, style: ImageStyle) -> str:
|
||||
"""
|
||||
Convert a style enum to a prompt prefix for models without native style support.
|
||||
"""
|
||||
if style == ImageStyle.ANY:
|
||||
return ""
|
||||
|
||||
style_map = {
|
||||
ImageStyle.REALISTIC: "photorealistic",
|
||||
ImageStyle.REALISTIC_BW: "black and white photograph",
|
||||
ImageStyle.REALISTIC_HDR: "HDR photograph",
|
||||
ImageStyle.REALISTIC_NATURAL: "natural light photograph",
|
||||
ImageStyle.REALISTIC_STUDIO: "studio portrait photograph",
|
||||
ImageStyle.REALISTIC_ENTERPRISE: "enterprise photograph",
|
||||
ImageStyle.REALISTIC_HARD_FLASH: "hard flash photograph",
|
||||
ImageStyle.REALISTIC_MOTION_BLUR: "motion blur photograph",
|
||||
ImageStyle.DIGITAL_ART: "digital art",
|
||||
ImageStyle.PIXEL_ART: "pixel art",
|
||||
ImageStyle.HAND_DRAWN: "hand drawn illustration",
|
||||
ImageStyle.GRAIN: "grainy digital illustration",
|
||||
ImageStyle.SKETCH: "sketchy illustration",
|
||||
ImageStyle.POSTER: "2D art poster",
|
||||
ImageStyle.POSTER_2: "alternate 2D art poster",
|
||||
ImageStyle.HANDMADE_3D: "handmade 3D illustration",
|
||||
ImageStyle.HAND_DRAWN_OUTLINE: "hand drawn outline illustration",
|
||||
ImageStyle.ENGRAVING_COLOR: "color engraving illustration",
|
||||
}
|
||||
|
||||
style_text = style_map.get(style, "")
|
||||
return f"{style_text} of" if style_text else ""
|
||||
|
||||
def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
|
||||
try:
|
||||
url = self.generate_image(input_data, credentials)
|
||||
if url:
|
||||
yield "image_url", url
|
||||
else:
|
||||
yield "error", "Image generation returned an empty result."
|
||||
except Exception as e:
|
||||
# Capture and return only the message of the exception, avoiding serialization of non-serializable objects
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
# Test credentials stay the same
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="replicate",
|
||||
api_key=SecretStr("mock-replicate-api-key"),
|
||||
title="Mock Replicate API key",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
@@ -1,227 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from enum import Enum
|
||||
from typing import Literal
|
||||
|
||||
import replicate
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="replicate",
|
||||
api_key=SecretStr("mock-replicate-api-key"),
|
||||
title="Mock Replicate API key",
|
||||
expires_at=None,
|
||||
)
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.type,
|
||||
}
|
||||
|
||||
|
||||
# Model version enum
|
||||
class MusicGenModelVersion(str, Enum):
|
||||
STEREO_LARGE = "stereo-large"
|
||||
MELODY_LARGE = "melody-large"
|
||||
LARGE = "large"
|
||||
|
||||
|
||||
# Audio format enum
|
||||
class AudioFormat(str, Enum):
|
||||
WAV = "wav"
|
||||
MP3 = "mp3"
|
||||
|
||||
|
||||
# Normalization strategy enum
|
||||
class NormalizationStrategy(str, Enum):
|
||||
LOUDNESS = "loudness"
|
||||
CLIP = "clip"
|
||||
PEAK = "peak"
|
||||
RMS = "rms"
|
||||
|
||||
|
||||
class AIMusicGeneratorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.REPLICATE], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
description="The Replicate integration can be used with "
|
||||
"any API key with sufficient permissions for the blocks it is used on.",
|
||||
)
|
||||
prompt: str = SchemaField(
|
||||
description="A description of the music you want to generate",
|
||||
placeholder="e.g., 'An upbeat electronic dance track with heavy bass'",
|
||||
title="Prompt",
|
||||
)
|
||||
music_gen_model_version: MusicGenModelVersion = SchemaField(
|
||||
description="Model to use for generation",
|
||||
default=MusicGenModelVersion.STEREO_LARGE,
|
||||
title="Model Version",
|
||||
)
|
||||
duration: int = SchemaField(
|
||||
description="Duration of the generated audio in seconds",
|
||||
default=8,
|
||||
title="Duration",
|
||||
)
|
||||
temperature: float = SchemaField(
|
||||
description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity",
|
||||
default=1.0,
|
||||
title="Temperature",
|
||||
)
|
||||
top_k: int = SchemaField(
|
||||
description="Reduces sampling to the k most likely tokens",
|
||||
default=250,
|
||||
title="Top K",
|
||||
)
|
||||
top_p: float = SchemaField(
|
||||
description="Reduces sampling to tokens with cumulative probability of p. When set to 0 (default), top_k sampling is used",
|
||||
default=0.0,
|
||||
title="Top P",
|
||||
)
|
||||
classifier_free_guidance: int = SchemaField(
|
||||
description="Increases the influence of inputs on the output. Higher values produce lower-variance outputs that adhere more closely to inputs",
|
||||
default=3,
|
||||
title="Classifier Free Guidance",
|
||||
)
|
||||
output_format: AudioFormat = SchemaField(
|
||||
description="Output format for generated audio",
|
||||
default=AudioFormat.WAV,
|
||||
title="Output Format",
|
||||
)
|
||||
normalization_strategy: NormalizationStrategy = SchemaField(
|
||||
description="Strategy for normalizing audio",
|
||||
default=NormalizationStrategy.LOUDNESS,
|
||||
title="Normalization Strategy",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
result: str = SchemaField(description="URL of the generated audio file")
|
||||
error: str = SchemaField(description="Error message if the model run failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="44f6c8ad-d75c-4ae1-8209-aad1c0326928",
|
||||
description="This block generates music using Meta's MusicGen model on Replicate.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=AIMusicGeneratorBlock.Input,
|
||||
output_schema=AIMusicGeneratorBlock.Output,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"prompt": "An upbeat electronic dance track with heavy bass",
|
||||
"music_gen_model_version": MusicGenModelVersion.STEREO_LARGE,
|
||||
"duration": 8,
|
||||
"temperature": 1.0,
|
||||
"top_k": 250,
|
||||
"top_p": 0.0,
|
||||
"classifier_free_guidance": 3,
|
||||
"output_format": AudioFormat.WAV,
|
||||
"normalization_strategy": NormalizationStrategy.LOUDNESS,
|
||||
},
|
||||
test_output=[
|
||||
(
|
||||
"result",
|
||||
"https://replicate.com/output/generated-audio-url.wav",
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: "https://replicate.com/output/generated-audio-url.wav",
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
max_retries = 3
|
||||
retry_delay = 5 # seconds
|
||||
last_error = None
|
||||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
logger.debug(
|
||||
f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
|
||||
)
|
||||
result = self.run_model(
|
||||
api_key=credentials.api_key,
|
||||
music_gen_model_version=input_data.music_gen_model_version,
|
||||
prompt=input_data.prompt,
|
||||
duration=input_data.duration,
|
||||
temperature=input_data.temperature,
|
||||
top_k=input_data.top_k,
|
||||
top_p=input_data.top_p,
|
||||
classifier_free_guidance=input_data.classifier_free_guidance,
|
||||
output_format=input_data.output_format,
|
||||
normalization_strategy=input_data.normalization_strategy,
|
||||
)
|
||||
if result and result != "No output received":
|
||||
yield "result", result
|
||||
return
|
||||
else:
|
||||
last_error = "Model returned empty or invalid response"
|
||||
raise ValueError(last_error)
|
||||
except Exception as e:
|
||||
last_error = f"Unexpected error: {str(e)}"
|
||||
logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
|
||||
if attempt < max_retries - 1:
|
||||
time.sleep(retry_delay)
|
||||
continue
|
||||
|
||||
# If we've exhausted all retries, yield the error
|
||||
yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"
|
||||
|
||||
def run_model(
|
||||
self,
|
||||
api_key: SecretStr,
|
||||
music_gen_model_version: MusicGenModelVersion,
|
||||
prompt: str,
|
||||
duration: int,
|
||||
temperature: float,
|
||||
top_k: int,
|
||||
top_p: float,
|
||||
classifier_free_guidance: int,
|
||||
output_format: AudioFormat,
|
||||
normalization_strategy: NormalizationStrategy,
|
||||
):
|
||||
# Initialize Replicate client with the API key
|
||||
client = replicate.Client(api_token=api_key.get_secret_value())
|
||||
|
||||
# Run the model with parameters
|
||||
output = client.run(
|
||||
"meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
|
||||
input={
|
||||
"prompt": prompt,
|
||||
"music_gen_model_version": music_gen_model_version,
|
||||
"duration": duration,
|
||||
"temperature": temperature,
|
||||
"top_k": top_k,
|
||||
"top_p": top_p,
|
||||
"classifier_free_guidance": classifier_free_guidance,
|
||||
"output_format": output_format,
|
||||
"normalization_strategy": normalization_strategy,
|
||||
},
|
||||
)
|
||||
|
||||
# Handle the output
|
||||
if isinstance(output, list) and len(output) > 0:
|
||||
result_url = output[0] # If output is a list, get the first element
|
||||
elif isinstance(output, str):
|
||||
result_url = output # If output is a string, use it directly
|
||||
else:
|
||||
result_url = (
|
||||
"No output received" # Fallback message if output is not as expected
|
||||
)
|
||||
|
||||
return result_url
|
||||
@@ -3,17 +3,12 @@ import time
|
||||
from enum import Enum
|
||||
from typing import Literal
|
||||
|
||||
import requests
|
||||
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util.request import requests
|
||||
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
@@ -141,11 +136,13 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
class AIShortformVideoCreatorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.REVID], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
description="The revid.ai integration can be used with "
|
||||
"any API key with sufficient permissions for the blocks it is used on.",
|
||||
credentials: CredentialsMetaInput[Literal["revid"], Literal["api_key"]] = (
|
||||
CredentialsField(
|
||||
provider="revid",
|
||||
supported_credential_types={"api_key"},
|
||||
description="The revid.ai integration can be used with "
|
||||
"any API key with sufficient permissions for the blocks it is used on.",
|
||||
)
|
||||
)
|
||||
script: str = SchemaField(
|
||||
description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. For example, [close-up of a cat] will show a close-up of a cat.""",
|
||||
@@ -220,6 +217,7 @@ class AIShortformVideoCreatorBlock(Block):
|
||||
url = "https://webhook.site/token"
|
||||
headers = {"Accept": "application/json", "Content-Type": "application/json"}
|
||||
response = requests.post(url, headers=headers)
|
||||
response.raise_for_status()
|
||||
webhook_data = response.json()
|
||||
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
|
||||
|
||||
@@ -230,12 +228,14 @@ class AIShortformVideoCreatorBlock(Block):
|
||||
logger.debug(
|
||||
f"API Response Status Code: {response.status_code}, Content: {response.text}"
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
|
||||
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
|
||||
headers = {"key": api_key.get_secret_value()}
|
||||
response = requests.get(url, headers=headers)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def wait_for_video(
|
||||
|
||||
@@ -1,108 +0,0 @@
|
||||
import logging
|
||||
from typing import List
|
||||
|
||||
from backend.blocks.apollo._auth import ApolloCredentials
|
||||
from backend.blocks.apollo.models import (
|
||||
Contact,
|
||||
Organization,
|
||||
SearchOrganizationsRequest,
|
||||
SearchOrganizationsResponse,
|
||||
SearchPeopleRequest,
|
||||
SearchPeopleResponse,
|
||||
)
|
||||
from backend.util.request import Requests
|
||||
|
||||
logger = logging.getLogger(name=__name__)
|
||||
|
||||
|
||||
class ApolloClient:
|
||||
"""Client for the Apollo API"""
|
||||
|
||||
API_URL = "https://api.apollo.io/api/v1"
|
||||
|
||||
def __init__(self, credentials: ApolloCredentials):
|
||||
self.credentials = credentials
|
||||
self.requests = Requests()
|
||||
|
||||
def _get_headers(self) -> dict[str, str]:
|
||||
return {"x-api-key": self.credentials.api_key.get_secret_value()}
|
||||
|
||||
def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
|
||||
"""Search for people in Apollo"""
|
||||
response = self.requests.get(
|
||||
f"{self.API_URL}/mixed_people/search",
|
||||
headers=self._get_headers(),
|
||||
params=query.model_dump(exclude={"credentials", "max_results"}),
|
||||
)
|
||||
parsed_response = SearchPeopleResponse(**response.json())
|
||||
if parsed_response.pagination.total_entries == 0:
|
||||
return []
|
||||
|
||||
people = parsed_response.people
|
||||
|
||||
# handle pagination
|
||||
if (
|
||||
query.max_results is not None
|
||||
and query.max_results < parsed_response.pagination.total_entries
|
||||
and len(people) < query.max_results
|
||||
):
|
||||
while (
|
||||
len(people) < query.max_results
|
||||
and query.page < parsed_response.pagination.total_pages
|
||||
and len(parsed_response.people) > 0
|
||||
):
|
||||
query.page += 1
|
||||
response = self.requests.get(
|
||||
f"{self.API_URL}/mixed_people/search",
|
||||
headers=self._get_headers(),
|
||||
params=query.model_dump(exclude={"credentials", "max_results"}),
|
||||
)
|
||||
parsed_response = SearchPeopleResponse(**response.json())
|
||||
people.extend(parsed_response.people[: query.max_results - len(people)])
|
||||
|
||||
logger.info(f"Found {len(people)} people")
|
||||
return people[: query.max_results] if query.max_results else people
|
||||
|
||||
def search_organizations(
|
||||
self, query: SearchOrganizationsRequest
|
||||
) -> List[Organization]:
|
||||
"""Search for organizations in Apollo"""
|
||||
response = self.requests.get(
|
||||
f"{self.API_URL}/mixed_companies/search",
|
||||
headers=self._get_headers(),
|
||||
params=query.model_dump(exclude={"credentials", "max_results"}),
|
||||
)
|
||||
parsed_response = SearchOrganizationsResponse(**response.json())
|
||||
if parsed_response.pagination.total_entries == 0:
|
||||
return []
|
||||
|
||||
organizations = parsed_response.organizations
|
||||
|
||||
# handle pagination
|
||||
if (
|
||||
query.max_results is not None
|
||||
and query.max_results < parsed_response.pagination.total_entries
|
||||
and len(organizations) < query.max_results
|
||||
):
|
||||
while (
|
||||
len(organizations) < query.max_results
|
||||
and query.page < parsed_response.pagination.total_pages
|
||||
and len(parsed_response.organizations) > 0
|
||||
):
|
||||
query.page += 1
|
||||
response = self.requests.get(
|
||||
f"{self.API_URL}/mixed_companies/search",
|
||||
headers=self._get_headers(),
|
||||
params=query.model_dump(exclude={"credentials", "max_results"}),
|
||||
)
|
||||
parsed_response = SearchOrganizationsResponse(**response.json())
|
||||
organizations.extend(
|
||||
parsed_response.organizations[
|
||||
: query.max_results - len(organizations)
|
||||
]
|
||||
)
|
||||
|
||||
logger.info(f"Found {len(organizations)} organizations")
|
||||
return (
|
||||
organizations[: query.max_results] if query.max_results else organizations
|
||||
)
|
||||
@@ -1,35 +0,0 @@
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
ApolloCredentials = APIKeyCredentials
|
||||
ApolloCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.APOLLO],
|
||||
Literal["api_key"],
|
||||
]
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="apollo",
|
||||
api_key=SecretStr("mock-apollo-api-key"),
|
||||
title="Mock Apollo API key",
|
||||
expires_at=None,
|
||||
)
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
|
||||
|
||||
def ApolloCredentialsField() -> ApolloCredentialsInput:
|
||||
"""
|
||||
Creates an Apollo credentials input on a block.
|
||||
"""
|
||||
return CredentialsField(
|
||||
description="The Apollo integration can be used with an API Key.",
|
||||
)
|
||||
@@ -1,543 +0,0 @@
|
||||
from enum import Enum
|
||||
from typing import Any, Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class PrimaryPhone(BaseModel):
|
||||
"""A primary phone in Apollo"""
|
||||
|
||||
number: str
|
||||
source: str
|
||||
sanitized_number: str
|
||||
|
||||
|
||||
class SenorityLevels(str, Enum):
|
||||
"""Seniority levels in Apollo"""
|
||||
|
||||
OWNER = "owner"
|
||||
FOUNDER = "founder"
|
||||
C_SUITE = "c_suite"
|
||||
PARTNER = "partner"
|
||||
VP = "vp"
|
||||
HEAD = "head"
|
||||
DIRECTOR = "director"
|
||||
MANAGER = "manager"
|
||||
SENIOR = "senior"
|
||||
ENTRY = "entry"
|
||||
INTERN = "intern"
|
||||
|
||||
|
||||
class ContactEmailStatuses(str, Enum):
|
||||
"""Contact email statuses in Apollo"""
|
||||
|
||||
VERIFIED = "verified"
|
||||
UNVERIFIED = "unverified"
|
||||
LIKELY_TO_ENGAGE = "likely_to_engage"
|
||||
UNAVAILABLE = "unavailable"
|
||||
|
||||
|
||||
class RuleConfigStatus(BaseModel):
|
||||
"""A rule config status in Apollo"""
|
||||
|
||||
_id: str
|
||||
created_at: str
|
||||
rule_action_config_id: str
|
||||
rule_config_id: str
|
||||
status_cd: str
|
||||
updated_at: str
|
||||
id: str
|
||||
key: str
|
||||
|
||||
|
||||
class ContactCampaignStatus(BaseModel):
|
||||
"""A contact campaign status in Apollo"""
|
||||
|
||||
id: str
|
||||
emailer_campaign_id: str
|
||||
send_email_from_user_id: str
|
||||
inactive_reason: str
|
||||
status: str
|
||||
added_at: str
|
||||
added_by_user_id: str
|
||||
finished_at: str
|
||||
paused_at: str
|
||||
auto_unpause_at: str
|
||||
send_email_from_email_address: str
|
||||
send_email_from_email_account_id: str
|
||||
manually_set_unpause: str
|
||||
failure_reason: str
|
||||
current_step_id: str
|
||||
in_response_to_emailer_message_id: str
|
||||
cc_emails: str
|
||||
bcc_emails: str
|
||||
to_emails: str
|
||||
|
||||
|
||||
class Account(BaseModel):
|
||||
"""An account in Apollo"""
|
||||
|
||||
id: str
|
||||
name: str
|
||||
website_url: str
|
||||
blog_url: str
|
||||
angellist_url: str
|
||||
linkedin_url: str
|
||||
twitter_url: str
|
||||
facebook_url: str
|
||||
primary_phone: PrimaryPhone
|
||||
languages: list[str]
|
||||
alexa_ranking: int
|
||||
phone: str
|
||||
linkedin_uid: str
|
||||
founded_year: int
|
||||
publicly_traded_symbol: str
|
||||
publicly_traded_exchange: str
|
||||
logo_url: str
|
||||
chrunchbase_url: str
|
||||
primary_domain: str
|
||||
domain: str
|
||||
team_id: str
|
||||
organization_id: str
|
||||
account_stage_id: str
|
||||
source: str
|
||||
original_source: str
|
||||
creator_id: str
|
||||
owner_id: str
|
||||
created_at: str
|
||||
phone_status: str
|
||||
hubspot_id: str
|
||||
salesforce_id: str
|
||||
crm_owner_id: str
|
||||
parent_account_id: str
|
||||
sanitized_phone: str
|
||||
# no listed type on the API docs
|
||||
account_playbook_statues: list[Any]
|
||||
account_rule_config_statuses: list[RuleConfigStatus]
|
||||
existence_level: str
|
||||
label_ids: list[str]
|
||||
typed_custom_fields: Any
|
||||
custom_field_errors: Any
|
||||
modality: str
|
||||
source_display_name: str
|
||||
salesforce_record_id: str
|
||||
crm_record_url: str
|
||||
|
||||
|
||||
class ContactEmail(BaseModel):
|
||||
"""A contact email in Apollo"""
|
||||
|
||||
email: str = ""
|
||||
email_md5: str = ""
|
||||
email_sha256: str = ""
|
||||
email_status: str = ""
|
||||
email_source: str = ""
|
||||
extrapolated_email_confidence: str = ""
|
||||
position: int = 0
|
||||
email_from_customer: str = ""
|
||||
free_domain: bool = True
|
||||
|
||||
|
||||
class EmploymentHistory(BaseModel):
|
||||
"""An employment history in Apollo"""
|
||||
|
||||
class Config:
|
||||
extra = "allow"
|
||||
arbitrary_types_allowed = True
|
||||
from_attributes = True
|
||||
populate_by_name = True
|
||||
|
||||
_id: Optional[str] = None
|
||||
created_at: Optional[str] = None
|
||||
current: Optional[bool] = None
|
||||
degree: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
emails: Optional[str] = None
|
||||
end_date: Optional[str] = None
|
||||
grade_level: Optional[str] = None
|
||||
kind: Optional[str] = None
|
||||
major: Optional[str] = None
|
||||
organization_id: Optional[str] = None
|
||||
organization_name: Optional[str] = None
|
||||
raw_address: Optional[str] = None
|
||||
start_date: Optional[str] = None
|
||||
title: Optional[str] = None
|
||||
updated_at: Optional[str] = None
|
||||
id: Optional[str] = None
|
||||
key: Optional[str] = None
|
||||
|
||||
|
||||
class Breadcrumb(BaseModel):
|
||||
"""A breadcrumb in Apollo"""
|
||||
|
||||
label: Optional[str] = "N/A"
|
||||
signal_field_name: Optional[str] = "N/A"
|
||||
value: str | list | None = "N/A"
|
||||
display_name: Optional[str] = "N/A"
|
||||
|
||||
|
||||
class TypedCustomField(BaseModel):
|
||||
"""A typed custom field in Apollo"""
|
||||
|
||||
id: Optional[str] = "N/A"
|
||||
value: Optional[str] = "N/A"
|
||||
|
||||
|
||||
class Pagination(BaseModel):
|
||||
"""Pagination in Apollo"""
|
||||
|
||||
class Config:
|
||||
extra = "allow" # Allow extra fields
|
||||
arbitrary_types_allowed = True # Allow any type
|
||||
from_attributes = True # Allow from_orm
|
||||
populate_by_name = True # Allow field aliases to work both ways
|
||||
|
||||
page: int = 0
|
||||
per_page: int = 0
|
||||
total_entries: int = 0
|
||||
total_pages: int = 0
|
||||
|
||||
|
||||
class DialerFlags(BaseModel):
|
||||
"""A dialer flags in Apollo"""
|
||||
|
||||
country_name: str
|
||||
country_enabled: bool
|
||||
high_risk_calling_enabled: bool
|
||||
potential_high_risk_number: bool
|
||||
|
||||
|
||||
class PhoneNumber(BaseModel):
|
||||
"""A phone number in Apollo"""
|
||||
|
||||
raw_number: str = ""
|
||||
sanitized_number: str = ""
|
||||
type: str = ""
|
||||
position: int = 0
|
||||
status: str = ""
|
||||
dnc_status: str = ""
|
||||
dnc_other_info: str = ""
|
||||
dailer_flags: DialerFlags = DialerFlags(
|
||||
country_name="",
|
||||
country_enabled=True,
|
||||
high_risk_calling_enabled=True,
|
||||
potential_high_risk_number=True,
|
||||
)
|
||||
|
||||
|
||||
class Organization(BaseModel):
|
||||
"""An organization in Apollo"""
|
||||
|
||||
class Config:
|
||||
extra = "allow"
|
||||
arbitrary_types_allowed = True
|
||||
from_attributes = True
|
||||
populate_by_name = True
|
||||
|
||||
id: Optional[str] = "N/A"
|
||||
name: Optional[str] = "N/A"
|
||||
website_url: Optional[str] = "N/A"
|
||||
blog_url: Optional[str] = "N/A"
|
||||
angellist_url: Optional[str] = "N/A"
|
||||
linkedin_url: Optional[str] = "N/A"
|
||||
twitter_url: Optional[str] = "N/A"
|
||||
facebook_url: Optional[str] = "N/A"
|
||||
primary_phone: Optional[PrimaryPhone] = PrimaryPhone(
|
||||
number="N/A", source="N/A", sanitized_number="N/A"
|
||||
)
|
||||
languages: list[str] = []
|
||||
alexa_ranking: Optional[int] = 0
|
||||
phone: Optional[str] = "N/A"
|
||||
linkedin_uid: Optional[str] = "N/A"
|
||||
founded_year: Optional[int] = 0
|
||||
publicly_traded_symbol: Optional[str] = "N/A"
|
||||
publicly_traded_exchange: Optional[str] = "N/A"
|
||||
logo_url: Optional[str] = "N/A"
|
||||
chrunchbase_url: Optional[str] = "N/A"
|
||||
primary_domain: Optional[str] = "N/A"
|
||||
sanitized_phone: Optional[str] = "N/A"
|
||||
owned_by_organization_id: Optional[str] = "N/A"
|
||||
intent_strength: Optional[str] = "N/A"
|
||||
show_intent: bool = True
|
||||
has_intent_signal_account: Optional[bool] = True
|
||||
intent_signal_account: Optional[str] = "N/A"
|
||||
|
||||
|
||||
class Contact(BaseModel):
|
||||
"""A contact in Apollo"""
|
||||
|
||||
class Config:
|
||||
extra = "allow"
|
||||
arbitrary_types_allowed = True
|
||||
from_attributes = True
|
||||
populate_by_name = True
|
||||
|
||||
contact_roles: list[Any] = []
|
||||
id: Optional[str] = None
|
||||
first_name: Optional[str] = None
|
||||
last_name: Optional[str] = None
|
||||
name: Optional[str] = None
|
||||
linkedin_url: Optional[str] = None
|
||||
title: Optional[str] = None
|
||||
contact_stage_id: Optional[str] = None
|
||||
owner_id: Optional[str] = None
|
||||
creator_id: Optional[str] = None
|
||||
person_id: Optional[str] = None
|
||||
email_needs_tickling: bool = True
|
||||
organization_name: Optional[str] = None
|
||||
source: Optional[str] = None
|
||||
original_source: Optional[str] = None
|
||||
organization_id: Optional[str] = None
|
||||
headline: Optional[str] = None
|
||||
photo_url: Optional[str] = None
|
||||
present_raw_address: Optional[str] = None
|
||||
linkededin_uid: Optional[str] = None
|
||||
extrapolated_email_confidence: Optional[float] = None
|
||||
salesforce_id: Optional[str] = None
|
||||
salesforce_lead_id: Optional[str] = None
|
||||
salesforce_contact_id: Optional[str] = None
|
||||
saleforce_account_id: Optional[str] = None
|
||||
crm_owner_id: Optional[str] = None
|
||||
created_at: Optional[str] = None
|
||||
emailer_campaign_ids: list[str] = []
|
||||
direct_dial_status: Optional[str] = None
|
||||
direct_dial_enrichment_failed_at: Optional[str] = None
|
||||
email_status: Optional[str] = None
|
||||
email_source: Optional[str] = None
|
||||
account_id: Optional[str] = None
|
||||
last_activity_date: Optional[str] = None
|
||||
hubspot_vid: Optional[str] = None
|
||||
hubspot_company_id: Optional[str] = None
|
||||
crm_id: Optional[str] = None
|
||||
sanitized_phone: Optional[str] = None
|
||||
merged_crm_ids: Optional[str] = None
|
||||
updated_at: Optional[str] = None
|
||||
queued_for_crm_push: bool = True
|
||||
suggested_from_rule_engine_config_id: Optional[str] = None
|
||||
email_unsubscribed: Optional[str] = None
|
||||
label_ids: list[Any] = []
|
||||
has_pending_email_arcgate_request: bool = True
|
||||
has_email_arcgate_request: bool = True
|
||||
existence_level: Optional[str] = None
|
||||
email: Optional[str] = None
|
||||
email_from_customer: Optional[str] = None
|
||||
typed_custom_fields: list[TypedCustomField] = []
|
||||
custom_field_errors: Any = None
|
||||
salesforce_record_id: Optional[str] = None
|
||||
crm_record_url: Optional[str] = None
|
||||
email_status_unavailable_reason: Optional[str] = None
|
||||
email_true_status: Optional[str] = None
|
||||
updated_email_true_status: bool = True
|
||||
contact_rule_config_statuses: list[RuleConfigStatus] = []
|
||||
source_display_name: Optional[str] = None
|
||||
twitter_url: Optional[str] = None
|
||||
contact_campaign_statuses: list[ContactCampaignStatus] = []
|
||||
state: Optional[str] = None
|
||||
city: Optional[str] = None
|
||||
country: Optional[str] = None
|
||||
account: Optional[Account] = None
|
||||
contact_emails: list[ContactEmail] = []
|
||||
organization: Optional[Organization] = None
|
||||
employment_history: list[EmploymentHistory] = []
|
||||
time_zone: Optional[str] = None
|
||||
intent_strength: Optional[str] = None
|
||||
show_intent: bool = True
|
||||
phone_numbers: list[PhoneNumber] = []
|
||||
account_phone_note: Optional[str] = None
|
||||
free_domain: bool = True
|
||||
is_likely_to_engage: bool = True
|
||||
email_domain_catchall: bool = True
|
||||
contact_job_change_event: Optional[str] = None
|
||||
|
||||
|
||||
class SearchOrganizationsRequest(BaseModel):
|
||||
"""Request for Apollo's search organizations API"""
|
||||
|
||||
organization_num_empoloyees_range: list[int] = SchemaField(
|
||||
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
|
||||
|
||||
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
|
||||
default=[0, 1000000],
|
||||
)
|
||||
|
||||
organization_locations: list[str] = SchemaField(
|
||||
description="""The location of the company headquarters. You can search across cities, US states, and countries.
|
||||
|
||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters.
|
||||
|
||||
To exclude companies based on location, use the organization_not_locations parameter.
|
||||
""",
|
||||
default=[],
|
||||
)
|
||||
organizations_not_locations: list[str] = SchemaField(
|
||||
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
|
||||
|
||||
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
|
||||
""",
|
||||
default=[],
|
||||
)
|
||||
q_organization_keyword_tags: list[str] = SchemaField(
|
||||
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry."""
|
||||
)
|
||||
q_organization_name: str = SchemaField(
|
||||
description="""Filter search results to include a specific company name.
|
||||
|
||||
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible."""
|
||||
)
|
||||
organization_ids: list[str] = SchemaField(
|
||||
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
|
||||
|
||||
To find IDs, identify the values for organization_id when you call this endpoint.""",
|
||||
default=[],
|
||||
)
|
||||
max_results: int = SchemaField(
|
||||
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
|
||||
default=100,
|
||||
ge=1,
|
||||
le=50000,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
page: int = SchemaField(
|
||||
description="""The page number of the Apollo data that you want to retrieve.
|
||||
|
||||
Use this parameter in combination with the per_page parameter to make search results for navigable and improve the performance of the endpoint.""",
|
||||
default=1,
|
||||
)
|
||||
per_page: int = SchemaField(
|
||||
description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.
|
||||
|
||||
Use the page parameter to search the different pages of data.""",
|
||||
default=100,
|
||||
)
|
||||
|
||||
|
||||
class SearchOrganizationsResponse(BaseModel):
|
||||
"""Response from Apollo's search organizations API"""
|
||||
|
||||
breadcrumbs: list[Breadcrumb] = []
|
||||
partial_results_only: bool = True
|
||||
has_join: bool = True
|
||||
disable_eu_prospecting: bool = True
|
||||
partial_results_limit: int = 0
|
||||
pagination: Pagination = Pagination(
|
||||
page=0, per_page=0, total_entries=0, total_pages=0
|
||||
)
|
||||
# no listed type on the API docs
|
||||
accounts: list[Any] = []
|
||||
organizations: list[Organization] = []
|
||||
models_ids: list[str] = []
|
||||
num_fetch_result: Optional[str] = "N/A"
|
||||
derived_params: Optional[str] = "N/A"
|
||||
|
||||
|
||||
class SearchPeopleRequest(BaseModel):
|
||||
"""Request for Apollo's search people API"""
|
||||
|
||||
person_titles: list[str] = SchemaField(
|
||||
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
|
||||
|
||||
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
|
||||
|
||||
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
|
||||
""",
|
||||
default=[],
|
||||
placeholder="marketing manager",
|
||||
)
|
||||
person_locations: list[str] = SchemaField(
|
||||
description="""The location where people live. You can search across cities, US states, and countries.
|
||||
|
||||
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
|
||||
default=[],
|
||||
)
|
||||
person_seniorities: list[SenorityLevels] = SchemaField(
|
||||
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
|
||||
|
||||
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
|
||||
|
||||
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
|
||||
|
||||
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
|
||||
default=[],
|
||||
)
|
||||
organization_locations: list[str] = SchemaField(
|
||||
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
|
||||
|
||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
|
||||
|
||||
To find people based on their personal location, use the person_locations parameter.""",
|
||||
default=[],
|
||||
)
|
||||
q_organization_domains: list[str] = SchemaField(
|
||||
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
|
||||
|
||||
You can add multiple domains to search across companies.
|
||||
|
||||
Examples: apollo.io and microsoft.com""",
|
||||
default=[],
|
||||
)
|
||||
contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
|
||||
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
|
||||
default=[],
|
||||
)
|
||||
organization_ids: list[str] = SchemaField(
|
||||
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
|
||||
|
||||
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
|
||||
default=[],
|
||||
)
|
||||
organization_num_empoloyees_range: list[int] = SchemaField(
|
||||
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
|
||||
|
||||
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
|
||||
default=[],
|
||||
)
|
||||
q_keywords: str = SchemaField(
|
||||
description="""A string of words over which we want to filter the results""",
|
||||
default="",
|
||||
)
|
||||
page: int = SchemaField(
|
||||
description="""The page number of the Apollo data that you want to retrieve.
|
||||
|
||||
Use this parameter in combination with the per_page parameter to make search results for navigable and improve the performance of the endpoint.""",
|
||||
default=1,
|
||||
)
|
||||
per_page: int = SchemaField(
|
||||
description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.
|
||||
|
||||
Use the page parameter to search the different pages of data.""",
|
||||
default=100,
|
||||
)
|
||||
max_results: int = SchemaField(
|
||||
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
|
||||
default=100,
|
||||
ge=1,
|
||||
le=50000,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
|
||||
class SearchPeopleResponse(BaseModel):
|
||||
"""Response from Apollo's search people API"""
|
||||
|
||||
class Config:
|
||||
extra = "allow" # Allow extra fields
|
||||
arbitrary_types_allowed = True # Allow any type
|
||||
from_attributes = True # Allow from_orm
|
||||
populate_by_name = True # Allow field aliases to work both ways
|
||||
|
||||
breadcrumbs: list[Breadcrumb] = []
|
||||
partial_results_only: bool = True
|
||||
has_join: bool = True
|
||||
disable_eu_prospecting: bool = True
|
||||
partial_results_limit: int = 0
|
||||
pagination: Pagination = Pagination(
|
||||
page=0, per_page=0, total_entries=0, total_pages=0
|
||||
)
|
||||
contacts: list[Contact] = []
|
||||
people: list[Contact] = []
|
||||
model_ids: list[str] = []
|
||||
num_fetch_result: Optional[str] = "N/A"
|
||||
derived_params: Optional[str] = "N/A"
|
||||
@@ -1,219 +0,0 @@
|
||||
from backend.blocks.apollo._api import ApolloClient
|
||||
from backend.blocks.apollo._auth import (
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
ApolloCredentials,
|
||||
ApolloCredentialsInput,
|
||||
)
|
||||
from backend.blocks.apollo.models import (
|
||||
Organization,
|
||||
PrimaryPhone,
|
||||
SearchOrganizationsRequest,
|
||||
)
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class SearchOrganizationsBlock(Block):
|
||||
"""Search for organizations in Apollo"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
organization_num_empoloyees_range: list[int] = SchemaField(
|
||||
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
|
||||
|
||||
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
|
||||
default=[0, 1000000],
|
||||
)
|
||||
|
||||
organization_locations: list[str] = SchemaField(
|
||||
description="""The location of the company headquarters. You can search across cities, US states, and countries.
|
||||
|
||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appearch in your search results, even if they match other parameters.
|
||||
|
||||
To exclude companies based on location, use the organization_not_locations parameter.
|
||||
""",
|
||||
default=[],
|
||||
)
|
||||
organizations_not_locations: list[str] = SchemaField(
|
||||
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
|
||||
|
||||
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
|
||||
""",
|
||||
default=[],
|
||||
)
|
||||
q_organization_keyword_tags: list[str] = SchemaField(
|
||||
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
|
||||
default=[],
|
||||
)
|
||||
q_organization_name: str = SchemaField(
|
||||
description="""Filter search results to include a specific company name.
|
||||
|
||||
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
organization_ids: list[str] = SchemaField(
|
||||
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
|
||||
|
||||
To find IDs, identify the values for organization_id when you call this endpoint.""",
|
||||
default=[],
|
||||
)
|
||||
max_results: int = SchemaField(
|
||||
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
|
||||
default=100,
|
||||
ge=1,
|
||||
le=50000,
|
||||
advanced=True,
|
||||
)
|
||||
credentials: ApolloCredentialsInput = SchemaField(
|
||||
description="Apollo credentials",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
organizations: list[Organization] = SchemaField(
|
||||
description="List of organizations found",
|
||||
default=[],
|
||||
)
|
||||
organization: Organization = SchemaField(
|
||||
description="Each found organization, one at a time",
|
||||
)
|
||||
error: str = SchemaField(
|
||||
description="Error message if the search failed",
|
||||
default="",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="3d71270d-599e-4148-9b95-71b35d2f44f0",
|
||||
description="Search for organizations in Apollo",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=SearchOrganizationsBlock.Input,
|
||||
output_schema=SearchOrganizationsBlock.Output,
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_input={"query": "Google", "credentials": TEST_CREDENTIALS_INPUT},
|
||||
test_output=[
|
||||
(
|
||||
"organization",
|
||||
Organization(
|
||||
id="1",
|
||||
name="Google",
|
||||
website_url="https://google.com",
|
||||
blog_url="https://google.com/blog",
|
||||
angellist_url="https://angel.co/google",
|
||||
linkedin_url="https://linkedin.com/company/google",
|
||||
twitter_url="https://twitter.com/google",
|
||||
facebook_url="https://facebook.com/google",
|
||||
primary_phone=PrimaryPhone(
|
||||
source="google",
|
||||
number="1234567890",
|
||||
sanitized_number="1234567890",
|
||||
),
|
||||
languages=["en"],
|
||||
alexa_ranking=1000,
|
||||
phone="1234567890",
|
||||
linkedin_uid="1234567890",
|
||||
founded_year=2000,
|
||||
publicly_traded_symbol="GOOGL",
|
||||
publicly_traded_exchange="NASDAQ",
|
||||
logo_url="https://google.com/logo.png",
|
||||
chrunchbase_url="https://chrunchbase.com/google",
|
||||
primary_domain="google.com",
|
||||
sanitized_phone="1234567890",
|
||||
owned_by_organization_id="1",
|
||||
intent_strength="strong",
|
||||
show_intent=True,
|
||||
has_intent_signal_account=True,
|
||||
intent_signal_account="1",
|
||||
),
|
||||
),
|
||||
(
|
||||
"organizations",
|
||||
[
|
||||
Organization(
|
||||
id="1",
|
||||
name="Google",
|
||||
website_url="https://google.com",
|
||||
blog_url="https://google.com/blog",
|
||||
angellist_url="https://angel.co/google",
|
||||
linkedin_url="https://linkedin.com/company/google",
|
||||
twitter_url="https://twitter.com/google",
|
||||
facebook_url="https://facebook.com/google",
|
||||
primary_phone=PrimaryPhone(
|
||||
source="google",
|
||||
number="1234567890",
|
||||
sanitized_number="1234567890",
|
||||
),
|
||||
languages=["en"],
|
||||
alexa_ranking=1000,
|
||||
phone="1234567890",
|
||||
linkedin_uid="1234567890",
|
||||
founded_year=2000,
|
||||
publicly_traded_symbol="GOOGL",
|
||||
publicly_traded_exchange="NASDAQ",
|
||||
logo_url="https://google.com/logo.png",
|
||||
chrunchbase_url="https://chrunchbase.com/google",
|
||||
primary_domain="google.com",
|
||||
sanitized_phone="1234567890",
|
||||
owned_by_organization_id="1",
|
||||
intent_strength="strong",
|
||||
show_intent=True,
|
||||
has_intent_signal_account=True,
|
||||
intent_signal_account="1",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"search_organizations": lambda *args, **kwargs: [
|
||||
Organization(
|
||||
id="1",
|
||||
name="Google",
|
||||
website_url="https://google.com",
|
||||
blog_url="https://google.com/blog",
|
||||
angellist_url="https://angel.co/google",
|
||||
linkedin_url="https://linkedin.com/company/google",
|
||||
twitter_url="https://twitter.com/google",
|
||||
facebook_url="https://facebook.com/google",
|
||||
primary_phone=PrimaryPhone(
|
||||
source="google",
|
||||
number="1234567890",
|
||||
sanitized_number="1234567890",
|
||||
),
|
||||
languages=["en"],
|
||||
alexa_ranking=1000,
|
||||
phone="1234567890",
|
||||
linkedin_uid="1234567890",
|
||||
founded_year=2000,
|
||||
publicly_traded_symbol="GOOGL",
|
||||
publicly_traded_exchange="NASDAQ",
|
||||
logo_url="https://google.com/logo.png",
|
||||
chrunchbase_url="https://chrunchbase.com/google",
|
||||
primary_domain="google.com",
|
||||
sanitized_phone="1234567890",
|
||||
owned_by_organization_id="1",
|
||||
intent_strength="strong",
|
||||
show_intent=True,
|
||||
has_intent_signal_account=True,
|
||||
intent_signal_account="1",
|
||||
)
|
||||
]
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def search_organizations(
|
||||
query: SearchOrganizationsRequest, credentials: ApolloCredentials
|
||||
) -> list[Organization]:
|
||||
client = ApolloClient(credentials)
|
||||
return client.search_organizations(query)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
query = SearchOrganizationsRequest(
|
||||
**input_data.model_dump(exclude={"credentials"})
|
||||
)
|
||||
organizations = self.search_organizations(query, credentials)
|
||||
for organization in organizations:
|
||||
yield "organization", organization
|
||||
yield "organizations", organizations
|
||||
@@ -1,394 +0,0 @@
|
||||
from backend.blocks.apollo._api import ApolloClient
|
||||
from backend.blocks.apollo._auth import (
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
ApolloCredentials,
|
||||
ApolloCredentialsInput,
|
||||
)
|
||||
from backend.blocks.apollo.models import (
|
||||
Contact,
|
||||
ContactEmailStatuses,
|
||||
SearchPeopleRequest,
|
||||
SenorityLevels,
|
||||
)
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class SearchPeopleBlock(Block):
|
||||
"""Search for people in Apollo"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
person_titles: list[str] = SchemaField(
|
||||
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
|
||||
|
||||
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
|
||||
|
||||
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
|
||||
""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
person_locations: list[str] = SchemaField(
|
||||
description="""The location where people live. You can search across cities, US states, and countries.
|
||||
|
||||
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
person_seniorities: list[SenorityLevels] = SchemaField(
|
||||
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
|
||||
|
||||
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
|
||||
|
||||
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
|
||||
|
||||
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
organization_locations: list[str] = SchemaField(
|
||||
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
|
||||
|
||||
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
|
||||
|
||||
To find people based on their personal location, use the person_locations parameter.""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
q_organization_domains: list[str] = SchemaField(
|
||||
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
|
||||
|
||||
You can add multiple domains to search across companies.
|
||||
|
||||
Examples: apollo.io and microsoft.com""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
|
||||
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
organization_ids: list[str] = SchemaField(
|
||||
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
|
||||
|
||||
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
organization_num_empoloyees_range: list[int] = SchemaField(
|
||||
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
|
||||
|
||||
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
q_keywords: str = SchemaField(
|
||||
description="""A string of words over which we want to filter the results""",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
max_results: int = SchemaField(
|
||||
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
|
||||
default=100,
|
||||
ge=1,
|
||||
le=50000,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
credentials: ApolloCredentialsInput = SchemaField(
|
||||
description="Apollo credentials",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
people: list[Contact] = SchemaField(
|
||||
description="List of people found",
|
||||
default=[],
|
||||
)
|
||||
person: Contact = SchemaField(
|
||||
description="Each found person, one at a time",
|
||||
)
|
||||
error: str = SchemaField(
|
||||
description="Error message if the search failed",
|
||||
default="",
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6",
|
||||
description="Search for people in Apollo",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=SearchPeopleBlock.Input,
|
||||
output_schema=SearchPeopleBlock.Output,
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_input={"credentials": TEST_CREDENTIALS_INPUT},
|
||||
test_output=[
|
||||
(
|
||||
"person",
|
||||
Contact(
|
||||
contact_roles=[],
|
||||
id="1",
|
||||
name="John Doe",
|
||||
first_name="John",
|
||||
last_name="Doe",
|
||||
linkedin_url="https://www.linkedin.com/in/johndoe",
|
||||
title="Software Engineer",
|
||||
organization_name="Google",
|
||||
organization_id="123456",
|
||||
contact_stage_id="1",
|
||||
owner_id="1",
|
||||
creator_id="1",
|
||||
person_id="1",
|
||||
email_needs_tickling=True,
|
||||
source="apollo",
|
||||
original_source="apollo",
|
||||
headline="Software Engineer",
|
||||
photo_url="https://www.linkedin.com/in/johndoe",
|
||||
present_raw_address="123 Main St, Anytown, USA",
|
||||
linkededin_uid="123456",
|
||||
extrapolated_email_confidence=0.8,
|
||||
salesforce_id="123456",
|
||||
salesforce_lead_id="123456",
|
||||
salesforce_contact_id="123456",
|
||||
saleforce_account_id="123456",
|
||||
crm_owner_id="123456",
|
||||
created_at="2021-01-01",
|
||||
emailer_campaign_ids=[],
|
||||
direct_dial_status="active",
|
||||
direct_dial_enrichment_failed_at="2021-01-01",
|
||||
email_status="active",
|
||||
email_source="apollo",
|
||||
account_id="123456",
|
||||
last_activity_date="2021-01-01",
|
||||
hubspot_vid="123456",
|
||||
hubspot_company_id="123456",
|
||||
crm_id="123456",
|
||||
sanitized_phone="123456",
|
||||
merged_crm_ids="123456",
|
||||
updated_at="2021-01-01",
|
||||
queued_for_crm_push=True,
|
||||
suggested_from_rule_engine_config_id="123456",
|
||||
email_unsubscribed=None,
|
||||
label_ids=[],
|
||||
has_pending_email_arcgate_request=True,
|
||||
has_email_arcgate_request=True,
|
||||
existence_level=None,
|
||||
email=None,
|
||||
email_from_customer=None,
|
||||
typed_custom_fields=[],
|
||||
custom_field_errors=None,
|
||||
salesforce_record_id=None,
|
||||
crm_record_url=None,
|
||||
email_status_unavailable_reason=None,
|
||||
email_true_status=None,
|
||||
updated_email_true_status=True,
|
||||
contact_rule_config_statuses=[],
|
||||
source_display_name=None,
|
||||
twitter_url=None,
|
||||
contact_campaign_statuses=[],
|
||||
state=None,
|
||||
city=None,
|
||||
country=None,
|
||||
account=None,
|
||||
contact_emails=[],
|
||||
organization=None,
|
||||
employment_history=[],
|
||||
time_zone=None,
|
||||
intent_strength=None,
|
||||
show_intent=True,
|
||||
phone_numbers=[],
|
||||
account_phone_note=None,
|
||||
free_domain=True,
|
||||
is_likely_to_engage=True,
|
||||
email_domain_catchall=True,
|
||||
contact_job_change_event=None,
|
||||
),
|
||||
),
|
||||
(
|
||||
"people",
|
||||
[
|
||||
Contact(
|
||||
contact_roles=[],
|
||||
id="1",
|
||||
name="John Doe",
|
||||
first_name="John",
|
||||
last_name="Doe",
|
||||
linkedin_url="https://www.linkedin.com/in/johndoe",
|
||||
title="Software Engineer",
|
||||
organization_name="Google",
|
||||
organization_id="123456",
|
||||
contact_stage_id="1",
|
||||
owner_id="1",
|
||||
creator_id="1",
|
||||
person_id="1",
|
||||
email_needs_tickling=True,
|
||||
source="apollo",
|
||||
original_source="apollo",
|
||||
headline="Software Engineer",
|
||||
photo_url="https://www.linkedin.com/in/johndoe",
|
||||
present_raw_address="123 Main St, Anytown, USA",
|
||||
linkededin_uid="123456",
|
||||
extrapolated_email_confidence=0.8,
|
||||
salesforce_id="123456",
|
||||
salesforce_lead_id="123456",
|
||||
salesforce_contact_id="123456",
|
||||
saleforce_account_id="123456",
|
||||
crm_owner_id="123456",
|
||||
created_at="2021-01-01",
|
||||
emailer_campaign_ids=[],
|
||||
direct_dial_status="active",
|
||||
direct_dial_enrichment_failed_at="2021-01-01",
|
||||
email_status="active",
|
||||
email_source="apollo",
|
||||
account_id="123456",
|
||||
last_activity_date="2021-01-01",
|
||||
hubspot_vid="123456",
|
||||
hubspot_company_id="123456",
|
||||
crm_id="123456",
|
||||
sanitized_phone="123456",
|
||||
merged_crm_ids="123456",
|
||||
updated_at="2021-01-01",
|
||||
queued_for_crm_push=True,
|
||||
suggested_from_rule_engine_config_id="123456",
|
||||
email_unsubscribed=None,
|
||||
label_ids=[],
|
||||
has_pending_email_arcgate_request=True,
|
||||
has_email_arcgate_request=True,
|
||||
existence_level=None,
|
||||
email=None,
|
||||
email_from_customer=None,
|
||||
typed_custom_fields=[],
|
||||
custom_field_errors=None,
|
||||
salesforce_record_id=None,
|
||||
crm_record_url=None,
|
||||
email_status_unavailable_reason=None,
|
||||
email_true_status=None,
|
||||
updated_email_true_status=True,
|
||||
contact_rule_config_statuses=[],
|
||||
source_display_name=None,
|
||||
twitter_url=None,
|
||||
contact_campaign_statuses=[],
|
||||
state=None,
|
||||
city=None,
|
||||
country=None,
|
||||
account=None,
|
||||
contact_emails=[],
|
||||
organization=None,
|
||||
employment_history=[],
|
||||
time_zone=None,
|
||||
intent_strength=None,
|
||||
show_intent=True,
|
||||
phone_numbers=[],
|
||||
account_phone_note=None,
|
||||
free_domain=True,
|
||||
is_likely_to_engage=True,
|
||||
email_domain_catchall=True,
|
||||
contact_job_change_event=None,
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"search_people": lambda query, credentials: [
|
||||
Contact(
|
||||
id="1",
|
||||
name="John Doe",
|
||||
first_name="John",
|
||||
last_name="Doe",
|
||||
linkedin_url="https://www.linkedin.com/in/johndoe",
|
||||
title="Software Engineer",
|
||||
organization_name="Google",
|
||||
organization_id="123456",
|
||||
contact_stage_id="1",
|
||||
owner_id="1",
|
||||
creator_id="1",
|
||||
person_id="1",
|
||||
email_needs_tickling=True,
|
||||
source="apollo",
|
||||
original_source="apollo",
|
||||
headline="Software Engineer",
|
||||
photo_url="https://www.linkedin.com/in/johndoe",
|
||||
present_raw_address="123 Main St, Anytown, USA",
|
||||
linkededin_uid="123456",
|
||||
extrapolated_email_confidence=0.8,
|
||||
salesforce_id="123456",
|
||||
salesforce_lead_id="123456",
|
||||
salesforce_contact_id="123456",
|
||||
saleforce_account_id="123456",
|
||||
crm_owner_id="123456",
|
||||
created_at="2021-01-01",
|
||||
emailer_campaign_ids=[],
|
||||
direct_dial_status="active",
|
||||
direct_dial_enrichment_failed_at="2021-01-01",
|
||||
email_status="active",
|
||||
email_source="apollo",
|
||||
account_id="123456",
|
||||
last_activity_date="2021-01-01",
|
||||
hubspot_vid="123456",
|
||||
hubspot_company_id="123456",
|
||||
crm_id="123456",
|
||||
sanitized_phone="123456",
|
||||
merged_crm_ids="123456",
|
||||
updated_at="2021-01-01",
|
||||
queued_for_crm_push=True,
|
||||
suggested_from_rule_engine_config_id="123456",
|
||||
email_unsubscribed=None,
|
||||
label_ids=[],
|
||||
has_pending_email_arcgate_request=True,
|
||||
has_email_arcgate_request=True,
|
||||
existence_level=None,
|
||||
email=None,
|
||||
email_from_customer=None,
|
||||
typed_custom_fields=[],
|
||||
custom_field_errors=None,
|
||||
salesforce_record_id=None,
|
||||
crm_record_url=None,
|
||||
email_status_unavailable_reason=None,
|
||||
email_true_status=None,
|
||||
updated_email_true_status=True,
|
||||
contact_rule_config_statuses=[],
|
||||
source_display_name=None,
|
||||
twitter_url=None,
|
||||
contact_campaign_statuses=[],
|
||||
state=None,
|
||||
city=None,
|
||||
country=None,
|
||||
account=None,
|
||||
contact_emails=[],
|
||||
organization=None,
|
||||
employment_history=[],
|
||||
time_zone=None,
|
||||
intent_strength=None,
|
||||
show_intent=True,
|
||||
phone_numbers=[],
|
||||
account_phone_note=None,
|
||||
free_domain=True,
|
||||
is_likely_to_engage=True,
|
||||
email_domain_catchall=True,
|
||||
contact_job_change_event=None,
|
||||
),
|
||||
]
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def search_people(
|
||||
query: SearchPeopleRequest, credentials: ApolloCredentials
|
||||
) -> list[Contact]:
|
||||
client = ApolloClient(credentials)
|
||||
return client.search_people(query)
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: ApolloCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
|
||||
query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
|
||||
people = self.search_people(query, credentials)
|
||||
for person in people:
|
||||
yield "person", person
|
||||
yield "people", people
|
||||
@@ -1,50 +1,13 @@
|
||||
import enum
|
||||
import re
|
||||
from typing import Any, List
|
||||
|
||||
from jinja2 import BaseLoader, Environment
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.file import MediaFile, store_media_file
|
||||
from backend.util.mock import MockObject
|
||||
from backend.util.text import TextFormatter
|
||||
from backend.util.type import convert
|
||||
|
||||
formatter = TextFormatter()
|
||||
|
||||
|
||||
class FileStoreBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
file_in: MediaFile = SchemaField(
|
||||
description="The file to store in the temporary directory, it can be a URL, data URI, or local path."
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
file_out: MediaFile = SchemaField(
|
||||
description="The relative path to the stored file in the temporary directory."
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="cbb50872-625b-42f0-8203-a2ae78242d8a",
|
||||
description="Stores the input file in the temporary directory.",
|
||||
categories={BlockCategory.BASIC, BlockCategory.MULTIMEDIA},
|
||||
input_schema=FileStoreBlock.Input,
|
||||
output_schema=FileStoreBlock.Output,
|
||||
static_output=True,
|
||||
)
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
graph_exec_id: str,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
file_path = store_media_file(
|
||||
graph_exec_id=graph_exec_id,
|
||||
file=input_data.file_in,
|
||||
return_content=False,
|
||||
)
|
||||
yield "file_out", file_path
|
||||
jinja = Environment(loader=BaseLoader())
|
||||
|
||||
|
||||
class StoreValueBlock(Block):
|
||||
@@ -185,12 +148,9 @@ class AgentInputBlock(Block):
|
||||
description="The value to be passed as input.",
|
||||
default=None,
|
||||
)
|
||||
title: str | None = SchemaField(
|
||||
description="The title of the input.", default=None, advanced=True
|
||||
)
|
||||
description: str | None = SchemaField(
|
||||
description: str = SchemaField(
|
||||
description="The description of the input.",
|
||||
default=None,
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
placeholder_values: List[Any] = SchemaField(
|
||||
@@ -203,16 +163,6 @@ class AgentInputBlock(Block):
|
||||
default=False,
|
||||
advanced=True,
|
||||
)
|
||||
advanced: bool = SchemaField(
|
||||
description="Whether to show the input in the advanced section, if the field is not required.",
|
||||
default=False,
|
||||
advanced=True,
|
||||
)
|
||||
secret: bool = SchemaField(
|
||||
description="Whether the input should be treated as a secret.",
|
||||
default=False,
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
result: Any = SchemaField(description="The value passed as input.")
|
||||
@@ -245,7 +195,6 @@ class AgentInputBlock(Block):
|
||||
],
|
||||
categories={BlockCategory.INPUT, BlockCategory.BASIC},
|
||||
block_type=BlockType.INPUT,
|
||||
static_output=True,
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
@@ -256,53 +205,43 @@ class AgentOutputBlock(Block):
|
||||
"""
|
||||
Records the output of the graph for users to see.
|
||||
|
||||
Attributes:
|
||||
recorded_value: The value to be recorded as output.
|
||||
name: The name of the output.
|
||||
description: The description of the output.
|
||||
fmt_string: The format string to be used to format the recorded_value.
|
||||
|
||||
Outputs:
|
||||
output: The formatted recorded_value if fmt_string is provided and the recorded_value
|
||||
can be formatted, otherwise the raw recorded_value.
|
||||
|
||||
Behavior:
|
||||
If `format` is provided and the `value` is of a type that can be formatted,
|
||||
the block attempts to format the recorded_value using the `format`.
|
||||
If formatting fails or no `format` is provided, the raw `value` is output.
|
||||
If fmt_string is provided and the recorded_value is of a type that can be formatted,
|
||||
the block attempts to format the recorded_value using the fmt_string.
|
||||
If formatting fails or no fmt_string is provided, the raw recorded_value is output.
|
||||
"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
value: Any = SchemaField(
|
||||
description="The value to be recorded as output.",
|
||||
default=None,
|
||||
advanced=False,
|
||||
)
|
||||
value: Any = SchemaField(description="The value to be recorded as output.")
|
||||
name: str = SchemaField(description="The name of the output.")
|
||||
title: str | None = SchemaField(
|
||||
description="The title of the output.",
|
||||
default=None,
|
||||
advanced=True,
|
||||
)
|
||||
description: str | None = SchemaField(
|
||||
description: str = SchemaField(
|
||||
description="The description of the output.",
|
||||
default=None,
|
||||
advanced=True,
|
||||
)
|
||||
format: str = SchemaField(
|
||||
description="The format string to be used to format the recorded_value. Use Jinja2 syntax.",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
advanced: bool = SchemaField(
|
||||
description="Whether to treat the output as advanced.",
|
||||
default=False,
|
||||
advanced=True,
|
||||
)
|
||||
secret: bool = SchemaField(
|
||||
description="Whether the output should be treated as a secret.",
|
||||
default=False,
|
||||
format: str = SchemaField(
|
||||
description="The format string to be used to format the recorded_value.",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
output: Any = SchemaField(description="The value recorded as output.")
|
||||
name: Any = SchemaField(description="The name of the value recorded as output.")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="363ae599-353e-4804-937e-b2ee3cef3da4",
|
||||
description="Stores the output of the graph for users to see.",
|
||||
description=("Stores the output of the graph for users to see."),
|
||||
input_schema=AgentOutputBlock.Input,
|
||||
output_schema=AgentOutputBlock.Output,
|
||||
test_input=[
|
||||
@@ -332,7 +271,6 @@ class AgentOutputBlock(Block):
|
||||
],
|
||||
categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
|
||||
block_type=BlockType.OUTPUT,
|
||||
static_output=True,
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
@@ -342,38 +280,27 @@ class AgentOutputBlock(Block):
|
||||
"""
|
||||
if input_data.format:
|
||||
try:
|
||||
yield "output", formatter.format_string(
|
||||
input_data.format, {input_data.name: input_data.value}
|
||||
)
|
||||
fmt = re.sub(r"(?<!{){[ a-zA-Z0-9_]+}", r"{\g<0>}", input_data.format)
|
||||
template = jinja.from_string(fmt)
|
||||
yield "output", template.render({input_data.name: input_data.value})
|
||||
except Exception as e:
|
||||
yield "output", f"Error: {e}, {input_data.value}"
|
||||
else:
|
||||
yield "output", input_data.value
|
||||
yield "name", input_data.name
|
||||
|
||||
|
||||
class AddToDictionaryBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
dictionary: dict[Any, Any] = SchemaField(
|
||||
default={},
|
||||
dictionary: dict | None = SchemaField(
|
||||
default=None,
|
||||
description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
|
||||
placeholder='{"key1": "value1", "key2": "value2"}',
|
||||
)
|
||||
key: str = SchemaField(
|
||||
default="",
|
||||
description="The key for the new entry.",
|
||||
placeholder="new_key",
|
||||
advanced=False,
|
||||
description="The key for the new entry.", placeholder="new_key"
|
||||
)
|
||||
value: Any = SchemaField(
|
||||
default=None,
|
||||
description="The value for the new entry.",
|
||||
placeholder="new_value",
|
||||
advanced=False,
|
||||
)
|
||||
entries: dict[Any, Any] = SchemaField(
|
||||
default={},
|
||||
description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
|
||||
advanced=True,
|
||||
description="The value for the new entry.", placeholder="new_value"
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
@@ -396,10 +323,6 @@ class AddToDictionaryBlock(Block):
|
||||
"value": "new_value",
|
||||
},
|
||||
{"key": "first_key", "value": "first_value"},
|
||||
{
|
||||
"dictionary": {"existing_key": "existing_value"},
|
||||
"entries": {"new_key": "new_value", "first_key": "first_value"},
|
||||
},
|
||||
],
|
||||
test_output=[
|
||||
(
|
||||
@@ -407,49 +330,38 @@ class AddToDictionaryBlock(Block):
|
||||
{"existing_key": "existing_value", "new_key": "new_value"},
|
||||
),
|
||||
("updated_dictionary", {"first_key": "first_value"}),
|
||||
(
|
||||
"updated_dictionary",
|
||||
{
|
||||
"existing_key": "existing_value",
|
||||
"new_key": "new_value",
|
||||
"first_key": "first_value",
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
updated_dict = input_data.dictionary.copy()
|
||||
# If no dictionary is provided, create a new one
|
||||
if input_data.dictionary is None:
|
||||
updated_dict = {}
|
||||
else:
|
||||
# Create a copy of the input dictionary to avoid modifying the original
|
||||
updated_dict = input_data.dictionary.copy()
|
||||
|
||||
if input_data.value is not None and input_data.key:
|
||||
updated_dict[input_data.key] = input_data.value
|
||||
|
||||
for key, value in input_data.entries.items():
|
||||
updated_dict[key] = value
|
||||
# Add the new key-value pair
|
||||
updated_dict[input_data.key] = input_data.value
|
||||
|
||||
yield "updated_dictionary", updated_dict
|
||||
|
||||
|
||||
class AddToListBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
list: List[Any] = SchemaField(
|
||||
default=[],
|
||||
advanced=False,
|
||||
list: List[Any] | None = SchemaField(
|
||||
default=None,
|
||||
description="The list to add the entry to. If not provided, a new list will be created.",
|
||||
placeholder='[1, "string", {"key": "value"}]',
|
||||
)
|
||||
entry: Any = SchemaField(
|
||||
description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
|
||||
advanced=False,
|
||||
default=None,
|
||||
)
|
||||
entries: List[Any] = SchemaField(
|
||||
default=[],
|
||||
description="The entries to add to the list. This is the batch version of the `entry` field.",
|
||||
advanced=True,
|
||||
placeholder='{"new_key": "new_value"}',
|
||||
)
|
||||
position: int | None = SchemaField(
|
||||
default=None,
|
||||
description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
|
||||
placeholder="0",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
@@ -473,12 +385,6 @@ class AddToListBlock(Block):
|
||||
},
|
||||
{"entry": "first_entry"},
|
||||
{"list": ["a", "b", "c"], "entry": "d"},
|
||||
{
|
||||
"entry": "e",
|
||||
"entries": ["f", "g"],
|
||||
"list": ["a", "b"],
|
||||
"position": 1,
|
||||
},
|
||||
],
|
||||
test_output=[
|
||||
(
|
||||
@@ -492,66 +398,26 @@ class AddToListBlock(Block):
|
||||
),
|
||||
("updated_list", ["first_entry"]),
|
||||
("updated_list", ["a", "b", "c", "d"]),
|
||||
("updated_list", ["a", "f", "g", "e", "b"]),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
entries_added = input_data.entries.copy()
|
||||
if input_data.entry:
|
||||
entries_added.append(input_data.entry)
|
||||
|
||||
updated_list = input_data.list.copy()
|
||||
if (pos := input_data.position) is not None:
|
||||
updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
|
||||
# If no list is provided, create a new one
|
||||
if input_data.list is None:
|
||||
updated_list = []
|
||||
else:
|
||||
updated_list += entries_added
|
||||
# Create a copy of the input list to avoid modifying the original
|
||||
updated_list = input_data.list.copy()
|
||||
|
||||
# Add the new entry
|
||||
if input_data.position is None:
|
||||
updated_list.append(input_data.entry)
|
||||
else:
|
||||
updated_list.insert(input_data.position, input_data.entry)
|
||||
|
||||
yield "updated_list", updated_list
|
||||
|
||||
|
||||
class FindInListBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
list: List[Any] = SchemaField(description="The list to search in.")
|
||||
value: Any = SchemaField(description="The value to search for.")
|
||||
|
||||
class Output(BlockSchema):
|
||||
index: int = SchemaField(description="The index of the value in the list.")
|
||||
found: bool = SchemaField(
|
||||
description="Whether the value was found in the list."
|
||||
)
|
||||
not_found_value: Any = SchemaField(
|
||||
description="The value that was not found in the list."
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
|
||||
description="Finds the index of the value in the list.",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=FindInListBlock.Input,
|
||||
output_schema=FindInListBlock.Output,
|
||||
test_input=[
|
||||
{"list": [1, 2, 3, 4, 5], "value": 3},
|
||||
{"list": [1, 2, 3, 4, 5], "value": 6},
|
||||
],
|
||||
test_output=[
|
||||
("index", 2),
|
||||
("found", True),
|
||||
("found", False),
|
||||
("not_found_value", 6),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
try:
|
||||
yield "index", input_data.list.index(input_data.value)
|
||||
yield "found", True
|
||||
except ValueError:
|
||||
yield "found", False
|
||||
yield "not_found_value", input_data.value
|
||||
|
||||
|
||||
class NoteBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
text: str = SchemaField(description="The text to display in the sticky note.")
|
||||
@@ -575,145 +441,3 @@ class NoteBlock(Block):
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
yield "output", input_data.text
|
||||
|
||||
|
||||
class CreateDictionaryBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
values: dict[str, Any] = SchemaField(
|
||||
description="Key-value pairs to create the dictionary with",
|
||||
placeholder="e.g., {'name': 'Alice', 'age': 25}",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
dictionary: dict[str, Any] = SchemaField(
|
||||
description="The created dictionary containing the specified key-value pairs"
|
||||
)
|
||||
error: str = SchemaField(
|
||||
description="Error message if dictionary creation failed"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="b924ddf4-de4f-4b56-9a85-358930dcbc91",
|
||||
description="Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.",
|
||||
categories={BlockCategory.DATA},
|
||||
input_schema=CreateDictionaryBlock.Input,
|
||||
output_schema=CreateDictionaryBlock.Output,
|
||||
test_input=[
|
||||
{
|
||||
"values": {"name": "Alice", "age": 25, "city": "New York"},
|
||||
},
|
||||
{
|
||||
"values": {"numbers": [1, 2, 3], "active": True, "score": 95.5},
|
||||
},
|
||||
],
|
||||
test_output=[
|
||||
(
|
||||
"dictionary",
|
||||
{"name": "Alice", "age": 25, "city": "New York"},
|
||||
),
|
||||
(
|
||||
"dictionary",
|
||||
{"numbers": [1, 2, 3], "active": True, "score": 95.5},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
try:
|
||||
# The values are already validated by Pydantic schema
|
||||
yield "dictionary", input_data.values
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to create dictionary: {str(e)}"
|
||||
|
||||
|
||||
class CreateListBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
values: List[Any] = SchemaField(
|
||||
description="A list of values to be combined into a new list.",
|
||||
placeholder="e.g., ['Alice', 25, True]",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
list: List[Any] = SchemaField(
|
||||
description="The created list containing the specified values."
|
||||
)
|
||||
error: str = SchemaField(description="Error message if list creation failed.")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a912d5c7-6e00-4542-b2a9-8034136930e4",
|
||||
description="Creates a list with the specified values. Use this when you know all the values you want to add upfront.",
|
||||
categories={BlockCategory.DATA},
|
||||
input_schema=CreateListBlock.Input,
|
||||
output_schema=CreateListBlock.Output,
|
||||
test_input=[
|
||||
{
|
||||
"values": ["Alice", 25, True],
|
||||
},
|
||||
{
|
||||
"values": [1, 2, 3, "four", {"key": "value"}],
|
||||
},
|
||||
],
|
||||
test_output=[
|
||||
(
|
||||
"list",
|
||||
["Alice", 25, True],
|
||||
),
|
||||
(
|
||||
"list",
|
||||
[1, 2, 3, "four", {"key": "value"}],
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
try:
|
||||
# The values are already validated by Pydantic schema
|
||||
yield "list", input_data.values
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to create list: {str(e)}"
|
||||
|
||||
|
||||
class TypeOptions(enum.Enum):
|
||||
STRING = "string"
|
||||
NUMBER = "number"
|
||||
BOOLEAN = "boolean"
|
||||
LIST = "list"
|
||||
DICTIONARY = "dictionary"
|
||||
|
||||
|
||||
class UniversalTypeConverterBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
value: Any = SchemaField(
|
||||
description="The value to convert to a universal type."
|
||||
)
|
||||
type: TypeOptions = SchemaField(description="The type to convert the value to.")
|
||||
|
||||
class Output(BlockSchema):
|
||||
value: Any = SchemaField(description="The converted value.")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="95d1b990-ce13-4d88-9737-ba5c2070c97b",
|
||||
description="This block is used to convert a value to a universal type.",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=UniversalTypeConverterBlock.Input,
|
||||
output_schema=UniversalTypeConverterBlock.Output,
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
try:
|
||||
converted_value = convert(
|
||||
input_data.value,
|
||||
{
|
||||
TypeOptions.STRING: str,
|
||||
TypeOptions.NUMBER: float,
|
||||
TypeOptions.BOOLEAN: bool,
|
||||
TypeOptions.LIST: list,
|
||||
TypeOptions.DICTIONARY: dict,
|
||||
}[input_data.type],
|
||||
)
|
||||
yield "value", converted_value
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to convert value: {str(e)}"
|
||||
|
||||
@@ -71,24 +71,11 @@ class ConditionBlock(Block):
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
operator = input_data.operator
|
||||
|
||||
value1 = input_data.value1
|
||||
if isinstance(value1, str):
|
||||
try:
|
||||
value1 = float(value1.strip())
|
||||
except ValueError:
|
||||
value1 = value1.strip()
|
||||
|
||||
operator = input_data.operator
|
||||
value2 = input_data.value2
|
||||
if isinstance(value2, str):
|
||||
try:
|
||||
value2 = float(value2.strip())
|
||||
except ValueError:
|
||||
value2 = value2.strip()
|
||||
|
||||
yes_value = input_data.yes_value if input_data.yes_value is not None else value1
|
||||
no_value = input_data.no_value if input_data.no_value is not None else value2
|
||||
no_value = input_data.no_value if input_data.no_value is not None else value1
|
||||
|
||||
comparison_funcs = {
|
||||
ComparisonOperator.EQUAL: lambda a, b: a == b,
|
||||
@@ -99,91 +86,17 @@ class ConditionBlock(Block):
|
||||
ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
|
||||
}
|
||||
|
||||
result = comparison_funcs[operator](value1, value2)
|
||||
try:
|
||||
result = comparison_funcs[operator](value1, value2)
|
||||
|
||||
yield "result", result
|
||||
yield "result", result
|
||||
|
||||
if result:
|
||||
yield "yes_output", yes_value
|
||||
else:
|
||||
yield "no_output", no_value
|
||||
if result:
|
||||
yield "yes_output", yes_value
|
||||
else:
|
||||
yield "no_output", no_value
|
||||
|
||||
|
||||
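Aside, not part of the diff: the ConditionBlock hunk above first tries to treat string operands as numbers and otherwise compares them as trimmed text. A minimal standalone sketch of that coercion (helper name and sample values are illustrative):

```python
def coerce_operand(value):
    """Best-effort coercion used before comparison: float if possible, else trimmed string."""
    if isinstance(value, str):
        stripped = value.strip()
        try:
            return float(stripped)
        except ValueError:
            return stripped
    return value

assert coerce_operand(" 3.5 ") == 3.5          # numeric string becomes a float
assert coerce_operand(" hello ") == "hello"    # non-numeric string is just trimmed
assert coerce_operand(7) == 7                  # non-strings pass through unchanged
```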
class IfInputMatchesBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
input: Any = SchemaField(
|
||||
description="The input to match against",
|
||||
placeholder="For example: 10 or 'hello' or True",
|
||||
)
|
||||
value: Any = SchemaField(
|
||||
description="The value to output if the input matches",
|
||||
placeholder="For example: 'Greater' or 20 or False",
|
||||
)
|
||||
yes_value: Any = SchemaField(
|
||||
description="The value to output if the input matches",
|
||||
placeholder="For example: 'Greater' or 20 or False",
|
||||
default=None,
|
||||
)
|
||||
no_value: Any = SchemaField(
|
||||
description="The value to output if the input does not match",
|
||||
placeholder="For example: 'Greater' or 20 or False",
|
||||
default=None,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
result: bool = SchemaField(
|
||||
description="The result of the condition evaluation (True or False)"
|
||||
)
|
||||
yes_output: Any = SchemaField(
|
||||
description="The output value if the condition is true"
|
||||
)
|
||||
no_output: Any = SchemaField(
|
||||
description="The output value if the condition is false"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="6dbbc4b3-ca6c-42b6-b508-da52d23e13f2",
|
||||
input_schema=IfInputMatchesBlock.Input,
|
||||
output_schema=IfInputMatchesBlock.Output,
|
||||
description="Handles conditional logic based on comparison operators",
|
||||
categories={BlockCategory.LOGIC},
|
||||
test_input=[
|
||||
{
|
||||
"input": 10,
|
||||
"value": 10,
|
||||
"yes_value": "Greater",
|
||||
"no_value": "Not greater",
|
||||
},
|
||||
{
|
||||
"input": 10,
|
||||
"value": 20,
|
||||
"yes_value": "Greater",
|
||||
"no_value": "Not greater",
|
||||
},
|
||||
{
|
||||
"input": 10,
|
||||
"value": None,
|
||||
"yes_value": "Yes",
|
||||
"no_value": "No",
|
||||
},
|
||||
],
|
||||
test_output=[
|
||||
("result", True),
|
||||
("yes_output", "Greater"),
|
||||
("result", False),
|
||||
("no_output", "Not greater"),
|
||||
("result", False),
|
||||
("no_output", "No"),
|
||||
# ("result", True),
|
||||
# ("yes_output", "Yes"),
|
||||
],
|
||||
)
|
||||
|
||||
    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            if input_data.input == input_data.value or input_data.input is input_data.value:
                yield "result", True
                yield "yes_output", input_data.yes_value
            else:
                yield "result", False
                yield "no_output", input_data.no_value
        except Exception:
            yield "result", None
            yield "yes_output", None
            yield "no_output", None
|
||||
|
||||
@@ -1,457 +0,0 @@
|
||||
from enum import Enum
|
||||
from typing import Literal
|
||||
|
||||
from e2b_code_interpreter import Sandbox
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="e2b",
    api_key=SecretStr("mock-e2b-api-key"),
    title="Mock E2B API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
|
||||
|
||||
|
||||
class ProgrammingLanguage(Enum):
|
||||
PYTHON = "python"
|
||||
JAVASCRIPT = "js"
|
||||
BASH = "bash"
|
||||
R = "r"
|
||||
JAVA = "java"
|
||||
|
||||
|
||||
class CodeExecutionBlock(Block):
|
||||
    # TODO: Add support to upload and download files
    # Currently, you can customize the CPU and memory only by creating a pre-customized sandbox template
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.E2B], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
|
||||
)
|
||||
|
||||
# TODO: Option to run the command in the background
|
||||
setup_commands: list[str] = SchemaField(
|
||||
description=(
|
||||
"Shell commands to set up the sandbox before running the code. "
|
||||
"You can use `curl` or `git` to install your desired Debian based "
|
||||
"package manager. `pip` and `npm` are pre-installed.\n\n"
|
||||
"These commands are executed with `sh`, in the foreground."
|
||||
),
|
||||
placeholder="pip install cowsay",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
code: str = SchemaField(
|
||||
description="Code to execute in the sandbox",
|
||||
placeholder="print('Hello, World!')",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
language: ProgrammingLanguage = SchemaField(
|
||||
description="Programming language to execute",
|
||||
default=ProgrammingLanguage.PYTHON,
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
timeout: int = SchemaField(
|
||||
description="Execution timeout in seconds", default=300
|
||||
)
|
||||
|
||||
template_id: str = SchemaField(
|
||||
description=(
|
||||
"You can use an E2B sandbox template by entering its ID here. "
|
||||
"Check out the E2B docs for more details: "
|
||||
"[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)"
|
||||
),
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
response: str = SchemaField(description="Response from code execution")
|
||||
stdout_logs: str = SchemaField(
|
||||
description="Standard output logs from execution"
|
||||
)
|
||||
stderr_logs: str = SchemaField(description="Standard error logs from execution")
|
||||
error: str = SchemaField(description="Error message if execution failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="0b02b072-abe7-11ef-8372-fb5d162dd712",
|
||||
description="Executes code in an isolated sandbox environment with internet access.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=CodeExecutionBlock.Input,
|
||||
output_schema=CodeExecutionBlock.Output,
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"code": "print('Hello World')",
|
||||
"language": ProgrammingLanguage.PYTHON.value,
|
||||
"setup_commands": [],
|
||||
"timeout": 300,
|
||||
"template_id": "",
|
||||
},
|
||||
test_output=[
|
||||
("response", "Hello World"),
|
||||
("stdout_logs", "Hello World\n"),
|
||||
],
|
||||
test_mock={
|
||||
"execute_code": lambda code, language, setup_commands, timeout, api_key, template_id: (
|
||||
"Hello World",
|
||||
"Hello World\n",
|
||||
"",
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
def execute_code(
|
||||
self,
|
||||
code: str,
|
||||
language: ProgrammingLanguage,
|
||||
setup_commands: list[str],
|
||||
timeout: int,
|
||||
api_key: str,
|
||||
template_id: str,
|
||||
):
|
||||
try:
|
||||
sandbox = None
|
||||
if template_id:
|
||||
sandbox = Sandbox(
|
||||
template=template_id, api_key=api_key, timeout=timeout
|
||||
)
|
||||
else:
|
||||
sandbox = Sandbox(api_key=api_key, timeout=timeout)
|
||||
|
||||
if not sandbox:
|
||||
raise Exception("Sandbox not created")
|
||||
|
||||
# Running setup commands
|
||||
for cmd in setup_commands:
|
||||
sandbox.commands.run(cmd)
|
||||
|
||||
# Executing the code
|
||||
execution = sandbox.run_code(
|
||||
code,
|
||||
language=language.value,
|
||||
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
|
||||
)
|
||||
|
||||
if execution.error:
|
||||
raise Exception(execution.error)
|
||||
|
||||
response = execution.text
|
||||
stdout_logs = "".join(execution.logs.stdout)
|
||||
stderr_logs = "".join(execution.logs.stderr)
|
||||
|
||||
return response, stdout_logs, stderr_logs
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
response, stdout_logs, stderr_logs = self.execute_code(
|
||||
input_data.code,
|
||||
input_data.language,
|
||||
input_data.setup_commands,
|
||||
input_data.timeout,
|
||||
credentials.api_key.get_secret_value(),
|
||||
input_data.template_id,
|
||||
)
|
||||
|
||||
if response:
|
||||
yield "response", response
|
||||
if stdout_logs:
|
||||
yield "stdout_logs", stdout_logs
|
||||
if stderr_logs:
|
||||
yield "stderr_logs", stderr_logs
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class InstantiationBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.E2B], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
|
||||
)
|
||||
|
||||
# TODO: Option to run the command in the background
|
||||
setup_commands: list[str] = SchemaField(
|
||||
description=(
|
||||
"Shell commands to set up the sandbox before running the code. "
|
||||
"You can use `curl` or `git` to install your desired Debian based "
|
||||
"package manager. `pip` and `npm` are pre-installed.\n\n"
|
||||
"These commands are executed with `sh`, in the foreground."
|
||||
),
|
||||
placeholder="pip install cowsay",
|
||||
default=[],
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
setup_code: str = SchemaField(
|
||||
description="Code to execute in the sandbox",
|
||||
placeholder="print('Hello, World!')",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
language: ProgrammingLanguage = SchemaField(
|
||||
description="Programming language to execute",
|
||||
default=ProgrammingLanguage.PYTHON,
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
timeout: int = SchemaField(
|
||||
description="Execution timeout in seconds", default=300
|
||||
)
|
||||
|
||||
template_id: str = SchemaField(
|
||||
description=(
|
||||
"You can use an E2B sandbox template by entering its ID here. "
|
||||
"Check out the E2B docs for more details: "
|
||||
"[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)"
|
||||
),
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
sandbox_id: str = SchemaField(description="ID of the sandbox instance")
|
||||
response: str = SchemaField(description="Response from code execution")
|
||||
stdout_logs: str = SchemaField(
|
||||
description="Standard output logs from execution"
|
||||
)
|
||||
stderr_logs: str = SchemaField(description="Standard error logs from execution")
|
||||
error: str = SchemaField(description="Error message if execution failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ff0861c9-1726-4aec-9e5b-bf53f3622112",
|
||||
description="Instantiate an isolated sandbox environment with internet access where to execute code in.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=InstantiationBlock.Input,
|
||||
output_schema=InstantiationBlock.Output,
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"setup_code": "print('Hello World')",
|
||||
"language": ProgrammingLanguage.PYTHON.value,
|
||||
"setup_commands": [],
|
||||
"timeout": 300,
|
||||
"template_id": "",
|
||||
},
|
||||
test_output=[
|
||||
("sandbox_id", str),
|
||||
("response", "Hello World"),
|
||||
("stdout_logs", "Hello World\n"),
|
||||
],
|
||||
test_mock={
|
||||
"execute_code": lambda setup_code, language, setup_commands, timeout, api_key, template_id: (
|
||||
"sandbox_id",
|
||||
"Hello World",
|
||||
"Hello World\n",
|
||||
"",
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
sandbox_id, response, stdout_logs, stderr_logs = self.execute_code(
|
||||
input_data.setup_code,
|
||||
input_data.language,
|
||||
input_data.setup_commands,
|
||||
input_data.timeout,
|
||||
credentials.api_key.get_secret_value(),
|
||||
input_data.template_id,
|
||||
)
|
||||
if sandbox_id:
|
||||
yield "sandbox_id", sandbox_id
|
||||
else:
|
||||
yield "error", "Sandbox ID not found"
|
||||
if response:
|
||||
yield "response", response
|
||||
if stdout_logs:
|
||||
yield "stdout_logs", stdout_logs
|
||||
if stderr_logs:
|
||||
yield "stderr_logs", stderr_logs
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
def execute_code(
|
||||
self,
|
||||
code: str,
|
||||
language: ProgrammingLanguage,
|
||||
setup_commands: list[str],
|
||||
timeout: int,
|
||||
api_key: str,
|
||||
template_id: str,
|
||||
):
|
||||
try:
|
||||
sandbox = None
|
||||
if template_id:
|
||||
sandbox = Sandbox(
|
||||
template=template_id, api_key=api_key, timeout=timeout
|
||||
)
|
||||
else:
|
||||
sandbox = Sandbox(api_key=api_key, timeout=timeout)
|
||||
|
||||
if not sandbox:
|
||||
raise Exception("Sandbox not created")
|
||||
|
||||
# Running setup commands
|
||||
for cmd in setup_commands:
|
||||
sandbox.commands.run(cmd)
|
||||
|
||||
# Executing the code
|
||||
execution = sandbox.run_code(
|
||||
code,
|
||||
language=language.value,
|
||||
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
|
||||
)
|
||||
|
||||
if execution.error:
|
||||
raise Exception(execution.error)
|
||||
|
||||
response = execution.text
|
||||
stdout_logs = "".join(execution.logs.stdout)
|
||||
stderr_logs = "".join(execution.logs.stderr)
|
||||
|
||||
return sandbox.sandbox_id, response, stdout_logs, stderr_logs
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
|
||||
class StepExecutionBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.E2B], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
|
||||
)
|
||||
|
||||
sandbox_id: str = SchemaField(
|
||||
description="ID of the sandbox instance to execute the code in",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
step_code: str = SchemaField(
|
||||
description="Code to execute in the sandbox",
|
||||
placeholder="print('Hello, World!')",
|
||||
default="",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
language: ProgrammingLanguage = SchemaField(
|
||||
description="Programming language to execute",
|
||||
default=ProgrammingLanguage.PYTHON,
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
response: str = SchemaField(description="Response from code execution")
|
||||
stdout_logs: str = SchemaField(
|
||||
description="Standard output logs from execution"
|
||||
)
|
||||
stderr_logs: str = SchemaField(description="Standard error logs from execution")
|
||||
error: str = SchemaField(description="Error message if execution failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="82b59b8e-ea10-4d57-9161-8b169b0adba6",
|
||||
description="Execute code in a previously instantiated sandbox environment.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=StepExecutionBlock.Input,
|
||||
output_schema=StepExecutionBlock.Output,
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_input={
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"sandbox_id": "sandbox_id",
|
||||
"step_code": "print('Hello World')",
|
||||
"language": ProgrammingLanguage.PYTHON.value,
|
||||
},
|
||||
test_output=[
|
||||
("response", "Hello World"),
|
||||
("stdout_logs", "Hello World\n"),
|
||||
],
|
||||
test_mock={
|
||||
"execute_step_code": lambda sandbox_id, step_code, language, api_key: (
|
||||
"Hello World",
|
||||
"Hello World\n",
|
||||
"",
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
def execute_step_code(
|
||||
self,
|
||||
sandbox_id: str,
|
||||
code: str,
|
||||
language: ProgrammingLanguage,
|
||||
api_key: str,
|
||||
):
|
||||
try:
|
||||
sandbox = Sandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
|
||||
if not sandbox:
|
||||
raise Exception("Sandbox not found")
|
||||
|
||||
# Executing the code
|
||||
execution = sandbox.run_code(code, language=language.value)
|
||||
|
||||
if execution.error:
|
||||
raise Exception(execution.error)
|
||||
|
||||
response = execution.text
|
||||
stdout_logs = "".join(execution.logs.stdout)
|
||||
stderr_logs = "".join(execution.logs.stderr)
|
||||
|
||||
return response, stdout_logs, stderr_logs
|
||||
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
response, stdout_logs, stderr_logs = self.execute_step_code(
|
||||
input_data.sandbox_id,
|
||||
input_data.step_code,
|
||||
input_data.language,
|
||||
credentials.api_key.get_secret_value(),
|
||||
)
|
||||
|
||||
if response:
|
||||
yield "response", response
|
||||
if stdout_logs:
|
||||
yield "stdout_logs", stdout_logs
|
||||
if stderr_logs:
|
||||
yield "stderr_logs", stderr_logs
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
@@ -1,110 +0,0 @@
|
||||
import re
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class CodeExtractionBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
text: str = SchemaField(
|
||||
description="Text containing code blocks to extract (e.g., AI response)",
|
||||
placeholder="Enter text containing code blocks",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
html: str = SchemaField(description="Extracted HTML code")
|
||||
css: str = SchemaField(description="Extracted CSS code")
|
||||
javascript: str = SchemaField(description="Extracted JavaScript code")
|
||||
python: str = SchemaField(description="Extracted Python code")
|
||||
sql: str = SchemaField(description="Extracted SQL code")
|
||||
java: str = SchemaField(description="Extracted Java code")
|
||||
cpp: str = SchemaField(description="Extracted C++ code")
|
||||
csharp: str = SchemaField(description="Extracted C# code")
|
||||
json_code: str = SchemaField(description="Extracted JSON code")
|
||||
bash: str = SchemaField(description="Extracted Bash code")
|
||||
php: str = SchemaField(description="Extracted PHP code")
|
||||
ruby: str = SchemaField(description="Extracted Ruby code")
|
||||
yaml: str = SchemaField(description="Extracted YAML code")
|
||||
markdown: str = SchemaField(description="Extracted Markdown code")
|
||||
typescript: str = SchemaField(description="Extracted TypeScript code")
|
||||
xml: str = SchemaField(description="Extracted XML code")
|
||||
remaining_text: str = SchemaField(
|
||||
description="Remaining text after code extraction"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="d3a7d896-3b78-4f44-8b4b-48fbf4f0bcd8",
|
||||
description="Extracts code blocks from text and identifies their programming languages",
|
||||
categories={BlockCategory.TEXT},
|
||||
input_schema=CodeExtractionBlock.Input,
|
||||
output_schema=CodeExtractionBlock.Output,
|
||||
test_input={
|
||||
"text": "Here's a Python example:\n```python\nprint('Hello World')\n```\nAnd some HTML:\n```html\n<h1>Title</h1>\n```"
|
||||
},
|
||||
test_output=[
|
||||
("html", "<h1>Title</h1>"),
|
||||
("python", "print('Hello World')"),
|
||||
("remaining_text", "Here's a Python example:\nAnd some HTML:"),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
# List of supported programming languages with mapped aliases
|
||||
language_aliases = {
|
||||
"html": ["html", "htm"],
|
||||
"css": ["css"],
|
||||
"javascript": ["javascript", "js"],
|
||||
"python": ["python", "py"],
|
||||
"sql": ["sql"],
|
||||
"java": ["java"],
|
||||
"cpp": ["cpp", "c++"],
|
||||
"csharp": ["csharp", "c#", "cs"],
|
||||
"json_code": ["json"],
|
||||
"bash": ["bash", "shell", "sh"],
|
||||
"php": ["php"],
|
||||
"ruby": ["ruby", "rb"],
|
||||
"yaml": ["yaml", "yml"],
|
||||
"markdown": ["markdown", "md"],
|
||||
"typescript": ["typescript", "ts"],
|
||||
"xml": ["xml"],
|
||||
}
|
||||
|
||||
# Extract code for each language
|
||||
for canonical_name, aliases in language_aliases.items():
|
||||
code = ""
|
||||
# Try each alias for the language
|
||||
for alias in aliases:
|
||||
code_for_alias = self.extract_code(input_data.text, alias)
|
||||
if code_for_alias:
|
||||
code = code + "\n\n" + code_for_alias if code else code_for_alias
|
||||
|
||||
if code: # Only yield if there's actual code content
|
||||
yield canonical_name, code
|
||||
|
||||
# Remove all code blocks from the text to get remaining text
|
||||
pattern = (
|
||||
r"```(?:"
|
||||
+ "|".join(
|
||||
re.escape(alias)
|
||||
for aliases in language_aliases.values()
|
||||
for alias in aliases
|
||||
)
|
||||
+ r")\s+[\s\S]*?```"
|
||||
)
|
||||
|
||||
remaining_text = re.sub(pattern, "", input_data.text).strip()
|
||||
remaining_text = re.sub(r"\n\s*\n", "\n", remaining_text)
|
||||
|
||||
if remaining_text: # Only yield if there's remaining text
|
||||
yield "remaining_text", remaining_text
|
||||
|
||||
def extract_code(self, text: str, language: str) -> str:
|
||||
# Escape special regex characters in the language string
|
||||
language = re.escape(language)
|
||||
# Extract all code blocks enclosed in ```language``` blocks
|
||||
pattern = re.compile(rf"```{language}\s+(.*?)```", re.DOTALL | re.IGNORECASE)
|
||||
matches = pattern.finditer(text)
|
||||
# Combine all code blocks for this language with newlines between them
|
||||
code_blocks = [match.group(1).strip() for match in matches]
|
||||
return "\n\n".join(code_blocks) if code_blocks else ""
|
||||
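Aside, not part of the diff: the extract_code method above pulls fenced code blocks for a given language tag out of free text with a single regex. A self-contained sketch of the same idea (function name and sample text are illustrative):

```python
import re

def extract_fenced(text: str, language: str) -> str:
    """Collect every fenced block tagged with `language` and join them with blank lines."""
    pattern = re.compile(rf"```{re.escape(language)}\s+(.*?)```", re.DOTALL | re.IGNORECASE)
    blocks = [match.group(1).strip() for match in pattern.finditer(text)]
    return "\n\n".join(blocks)

sample = "Intro text\n```python\nprint('hi')\n```\nMore text"
assert extract_fenced(sample, "python") == "print('hi')"
```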
@@ -1,59 +0,0 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockManualWebhookConfig,
|
||||
BlockOutput,
|
||||
BlockSchema,
|
||||
)
|
||||
from backend.data.model import SchemaField
|
||||
from backend.integrations.webhooks.compass import CompassWebhookType
|
||||
|
||||
|
||||
class Transcription(BaseModel):
|
||||
text: str
|
||||
speaker: str
|
||||
end: float
|
||||
start: float
|
||||
duration: float
|
||||
|
||||
|
||||
class TranscriptionDataModel(BaseModel):
|
||||
date: str
|
||||
transcription: str
|
||||
transcriptions: list[Transcription]
|
||||
|
||||
|
||||
class CompassAITriggerBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
payload: TranscriptionDataModel = SchemaField(hidden=True)
|
||||
|
||||
class Output(BlockSchema):
|
||||
transcription: str = SchemaField(
|
||||
description="The contents of the compass transcription."
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="9464a020-ed1d-49e1-990f-7f2ac924a2b7",
|
||||
description="This block will output the contents of the compass transcription.",
|
||||
categories={BlockCategory.HARDWARE},
|
||||
input_schema=CompassAITriggerBlock.Input,
|
||||
output_schema=CompassAITriggerBlock.Output,
|
||||
webhook_config=BlockManualWebhookConfig(
|
||||
provider="compass",
|
||||
webhook_type=CompassWebhookType.TRANSCRIPTION,
|
||||
),
|
||||
test_input=[
|
||||
{"input": "Hello, World!"},
|
||||
{"input": "Hello, World!", "data": "Existing Data"},
|
||||
],
|
||||
# test_output=[
|
||||
# ("output", "Hello, World!"), # No data provided, so trigger is returned
|
||||
# ("output", "Existing Data"), # Data is provided, so data is returned.
|
||||
# ],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
yield "transcription", input_data.payload.transcription
|
||||
@@ -1,43 +0,0 @@
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class WordCharacterCountBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
text: str = SchemaField(
|
||||
description="Input text to count words and characters",
|
||||
placeholder="Enter your text here",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
word_count: int = SchemaField(description="Number of words in the input text")
|
||||
character_count: int = SchemaField(
|
||||
description="Number of characters in the input text"
|
||||
)
|
||||
error: str = SchemaField(
|
||||
description="Error message if the counting operation failed"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="ab2a782d-22cf-4587-8a70-55b59b3f9f90",
|
||||
description="Counts the number of words and characters in a given text.",
|
||||
categories={BlockCategory.TEXT},
|
||||
input_schema=WordCharacterCountBlock.Input,
|
||||
output_schema=WordCharacterCountBlock.Output,
|
||||
test_input={"text": "Hello, how are you?"},
|
||||
test_output=[("word_count", 4), ("character_count", 19)],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
try:
|
||||
text = input_data.text
|
||||
word_count = len(text.split())
|
||||
character_count = len(text)
|
||||
|
||||
yield "word_count", word_count
|
||||
yield "character_count", character_count
|
||||
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
@@ -3,24 +3,21 @@ from typing import Literal
|
||||
|
||||
import aiohttp
|
||||
import discord
|
||||
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField
|
||||
|
||||
DiscordCredentials = CredentialsMetaInput[
|
||||
Literal[ProviderName.DISCORD], Literal["api_key"]
|
||||
]
|
||||
DiscordCredentials = CredentialsMetaInput[Literal["discord"], Literal["api_key"]]
|
||||
|
||||
|
||||
def DiscordCredentialsField() -> DiscordCredentials:
|
||||
return CredentialsField(description="Discord bot token")
|
||||
return CredentialsField(
|
||||
description="Discord bot token",
|
||||
provider="discord",
|
||||
supported_credential_types={"api_key"},
|
||||
)
|
||||
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
|
||||
@@ -1,53 +1,22 @@
|
||||
import smtplib
|
||||
from email.mime.multipart import MIMEMultipart
|
||||
from email.mime.text import MIMEText
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, SecretStr
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import (
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
UserPasswordCredentials,
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
TEST_CREDENTIALS = UserPasswordCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="smtp",
|
||||
username=SecretStr("mock-smtp-username"),
|
||||
password=SecretStr("mock-smtp-password"),
|
||||
title="Mock SMTP credentials",
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
SMTPCredentials = UserPasswordCredentials
|
||||
SMTPCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.SMTP],
|
||||
Literal["user_password"],
|
||||
]
|
||||
from backend.data.model import BlockSecret, SchemaField, SecretField
|
||||
|
||||
|
||||
def SMTPCredentialsField() -> SMTPCredentialsInput:
|
||||
return CredentialsField(
|
||||
description="The SMTP integration requires a username and password.",
|
||||
)
|
||||
|
||||
|
||||
class SMTPConfig(BaseModel):
|
||||
class EmailCredentials(BaseModel):
|
||||
smtp_server: str = SchemaField(
|
||||
default="smtp.example.com", description="SMTP server address"
|
||||
default="smtp.gmail.com", description="SMTP server address"
|
||||
)
|
||||
smtp_port: int = SchemaField(default=25, description="SMTP port number")
|
||||
smtp_username: BlockSecret = SecretField(key="smtp_username")
|
||||
smtp_password: BlockSecret = SecretField(key="smtp_password")
|
||||
|
||||
model_config = ConfigDict(title="SMTP Config")
|
||||
model_config = ConfigDict(title="Email Credentials")
|
||||
|
||||
|
||||
class SendEmailBlock(Block):
|
||||
@@ -61,11 +30,10 @@ class SendEmailBlock(Block):
|
||||
body: str = SchemaField(
|
||||
description="Body of the email", placeholder="Enter the email body"
|
||||
)
|
||||
config: SMTPConfig = SchemaField(
|
||||
description="SMTP Config",
|
||||
default=SMTPConfig(),
|
||||
creds: EmailCredentials = SchemaField(
|
||||
description="SMTP credentials",
|
||||
default=EmailCredentials(),
|
||||
)
|
||||
credentials: SMTPCredentialsInput = SMTPCredentialsField()
|
||||
|
||||
class Output(BlockSchema):
|
||||
status: str = SchemaField(description="Status of the email sending operation")
|
||||
@@ -75,6 +43,7 @@ class SendEmailBlock(Block):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
disabled=True,
|
||||
id="4335878a-394e-4e67-adf2-919877ff49ae",
|
||||
description="This block sends an email using the provided SMTP credentials.",
|
||||
categories={BlockCategory.OUTPUT},
|
||||
@@ -84,29 +53,25 @@ class SendEmailBlock(Block):
|
||||
"to_email": "recipient@example.com",
|
||||
"subject": "Test Email",
|
||||
"body": "This is a test email.",
|
||||
"config": {
|
||||
"creds": {
|
||||
"smtp_server": "smtp.gmail.com",
|
||||
"smtp_port": 25,
|
||||
"smtp_username": "your-email@gmail.com",
|
||||
"smtp_password": "your-gmail-password",
|
||||
},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[("status", "Email sent successfully")],
|
||||
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def send_email(
|
||||
config: SMTPConfig,
|
||||
to_email: str,
|
||||
subject: str,
|
||||
body: str,
|
||||
credentials: SMTPCredentials,
|
||||
creds: EmailCredentials, to_email: str, subject: str, body: str
|
||||
) -> str:
|
||||
smtp_server = config.smtp_server
|
||||
smtp_port = config.smtp_port
|
||||
smtp_username = credentials.username.get_secret_value()
|
||||
smtp_password = credentials.password.get_secret_value()
|
||||
smtp_server = creds.smtp_server
|
||||
smtp_port = creds.smtp_port
|
||||
smtp_username = creds.smtp_username.get_secret_value()
|
||||
smtp_password = creds.smtp_password.get_secret_value()
|
||||
|
||||
msg = MIMEMultipart()
|
||||
msg["From"] = smtp_username
|
||||
@@ -121,13 +86,10 @@ class SendEmailBlock(Block):
|
||||
|
||||
return "Email sent successfully"
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
yield "status", self.send_email(
|
||||
config=input_data.config,
|
||||
to_email=input_data.to_email,
|
||||
subject=input_data.subject,
|
||||
body=input_data.body,
|
||||
credentials=credentials,
|
||||
input_data.creds,
|
||||
input_data.to_email,
|
||||
input_data.subject,
|
||||
input_data.body,
|
||||
)
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
ExaCredentials = APIKeyCredentials
|
||||
ExaCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.EXA],
|
||||
Literal["api_key"],
|
||||
]
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="exa",
|
||||
api_key=SecretStr("mock-exa-api-key"),
|
||||
title="Mock Exa API key",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
|
||||
|
||||
def ExaCredentialsField() -> ExaCredentialsInput:
|
||||
"""Creates an Exa credentials input on a block."""
|
||||
return CredentialsField(description="The Exa integration requires an API Key.")
|
||||
@@ -1,87 +0,0 @@
|
||||
from typing import List
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.blocks.exa._auth import (
|
||||
ExaCredentials,
|
||||
ExaCredentialsField,
|
||||
ExaCredentialsInput,
|
||||
)
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.request import requests
|
||||
|
||||
|
||||
class ContentRetrievalSettings(BaseModel):
|
||||
text: dict = SchemaField(
|
||||
description="Text content settings",
|
||||
default={"maxCharacters": 1000, "includeHtmlTags": False},
|
||||
advanced=True,
|
||||
)
|
||||
highlights: dict = SchemaField(
|
||||
description="Highlight settings",
|
||||
default={
|
||||
"numSentences": 3,
|
||||
"highlightsPerUrl": 3,
|
||||
"query": "",
|
||||
},
|
||||
advanced=True,
|
||||
)
|
||||
summary: dict = SchemaField(
|
||||
description="Summary settings",
|
||||
default={"query": ""},
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
|
||||
class ExaContentsBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: ExaCredentialsInput = ExaCredentialsField()
|
||||
ids: List[str] = SchemaField(
|
||||
description="Array of document IDs obtained from searches",
|
||||
)
|
||||
contents: ContentRetrievalSettings = SchemaField(
|
||||
description="Content retrieval settings",
|
||||
default=ContentRetrievalSettings(),
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
results: list = SchemaField(
|
||||
description="List of document contents",
|
||||
default=[],
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="c52be83f-f8cd-4180-b243-af35f986b461",
|
||||
description="Retrieves document contents using Exa's contents API",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=ExaContentsBlock.Input,
|
||||
output_schema=ExaContentsBlock.Output,
|
||||
)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
url = "https://api.exa.ai/contents"
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"x-api-key": credentials.api_key.get_secret_value(),
|
||||
}
|
||||
|
||||
payload = {
|
||||
"ids": input_data.ids,
|
||||
"text": input_data.contents.text,
|
||||
"highlights": input_data.contents.highlights,
|
||||
"summary": input_data.contents.summary,
|
||||
}
|
||||
|
||||
try:
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
yield "results", data.get("results", [])
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
yield "results", []
|
||||
@@ -1,54 +0,0 @@
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
|
||||
class TextSettings(BaseModel):
|
||||
max_characters: int = SchemaField(
|
||||
default=1000,
|
||||
description="Maximum number of characters to return",
|
||||
placeholder="1000",
|
||||
)
|
||||
include_html_tags: bool = SchemaField(
|
||||
default=False,
|
||||
description="Whether to include HTML tags in the text",
|
||||
placeholder="False",
|
||||
)
|
||||
|
||||
|
||||
class HighlightSettings(BaseModel):
|
||||
num_sentences: int = SchemaField(
|
||||
default=3,
|
||||
description="Number of sentences per highlight",
|
||||
placeholder="3",
|
||||
)
|
||||
highlights_per_url: int = SchemaField(
|
||||
default=3,
|
||||
description="Number of highlights per URL",
|
||||
placeholder="3",
|
||||
)
|
||||
|
||||
|
||||
class SummarySettings(BaseModel):
|
||||
query: Optional[str] = SchemaField(
|
||||
default="",
|
||||
description="Query string for summarization",
|
||||
placeholder="Enter query",
|
||||
)
|
||||
|
||||
|
||||
class ContentSettings(BaseModel):
|
||||
text: TextSettings = SchemaField(
|
||||
default=TextSettings(),
|
||||
description="Text content settings",
|
||||
)
|
||||
highlights: HighlightSettings = SchemaField(
|
||||
default=HighlightSettings(),
|
||||
description="Highlight settings",
|
||||
)
|
||||
summary: SummarySettings = SchemaField(
|
||||
default=SummarySettings(),
|
||||
description="Summary settings",
|
||||
)
|
||||
@@ -1,143 +0,0 @@
|
||||
from datetime import datetime
|
||||
from typing import List
|
||||
|
||||
from backend.blocks.exa._auth import (
|
||||
ExaCredentials,
|
||||
ExaCredentialsField,
|
||||
ExaCredentialsInput,
|
||||
)
|
||||
from backend.blocks.exa.helpers import ContentSettings
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.request import requests
|
||||
|
||||
|
||||
class ExaSearchBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: ExaCredentialsInput = ExaCredentialsField()
|
||||
query: str = SchemaField(description="The search query")
|
||||
use_auto_prompt: bool = SchemaField(
|
||||
description="Whether to use autoprompt",
|
||||
default=True,
|
||||
advanced=True,
|
||||
)
|
||||
type: str = SchemaField(
|
||||
description="Type of search",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
category: str = SchemaField(
|
||||
description="Category to search within",
|
||||
default="",
|
||||
advanced=True,
|
||||
)
|
||||
number_of_results: int = SchemaField(
|
||||
description="Number of results to return",
|
||||
default=10,
|
||||
advanced=True,
|
||||
)
|
||||
include_domains: List[str] = SchemaField(
|
||||
description="Domains to include in search",
|
||||
default=[],
|
||||
)
|
||||
exclude_domains: List[str] = SchemaField(
|
||||
description="Domains to exclude from search",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
start_crawl_date: datetime = SchemaField(
|
||||
description="Start date for crawled content",
|
||||
)
|
||||
end_crawl_date: datetime = SchemaField(
|
||||
description="End date for crawled content",
|
||||
)
|
||||
start_published_date: datetime = SchemaField(
|
||||
description="Start date for published content",
|
||||
)
|
||||
end_published_date: datetime = SchemaField(
|
||||
description="End date for published content",
|
||||
)
|
||||
include_text: List[str] = SchemaField(
|
||||
description="Text patterns to include",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
exclude_text: List[str] = SchemaField(
|
||||
description="Text patterns to exclude",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
contents: ContentSettings = SchemaField(
|
||||
description="Content retrieval settings",
|
||||
default=ContentSettings(),
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
results: list = SchemaField(
|
||||
description="List of search results",
|
||||
default=[],
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="996cec64-ac40-4dde-982f-b0dc60a5824d",
|
||||
description="Searches the web using Exa's advanced search API",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=ExaSearchBlock.Input,
|
||||
output_schema=ExaSearchBlock.Output,
|
||||
)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
url = "https://api.exa.ai/search"
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"x-api-key": credentials.api_key.get_secret_value(),
|
||||
}
|
||||
|
||||
payload = {
|
||||
"query": input_data.query,
|
||||
"useAutoprompt": input_data.use_auto_prompt,
|
||||
"numResults": input_data.number_of_results,
|
||||
"contents": input_data.contents.dict(),
|
||||
}
|
||||
|
||||
date_field_mapping = {
|
||||
"start_crawl_date": "startCrawlDate",
|
||||
"end_crawl_date": "endCrawlDate",
|
||||
"start_published_date": "startPublishedDate",
|
||||
"end_published_date": "endPublishedDate",
|
||||
}
|
||||
|
||||
# Add dates if they exist
|
||||
for input_field, api_field in date_field_mapping.items():
|
||||
value = getattr(input_data, input_field, None)
|
||||
if value:
|
||||
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
|
||||
|
||||
optional_field_mapping = {
|
||||
"type": "type",
|
||||
"category": "category",
|
||||
"include_domains": "includeDomains",
|
||||
"exclude_domains": "excludeDomains",
|
||||
"include_text": "includeText",
|
||||
"exclude_text": "excludeText",
|
||||
}
|
||||
|
||||
# Add other fields
|
||||
for input_field, api_field in optional_field_mapping.items():
|
||||
value = getattr(input_data, input_field)
|
||||
if value: # Only add non-empty values
|
||||
payload[api_field] = value
|
||||
|
||||
try:
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
# Extract just the results array from the response
|
||||
yield "results", data.get("results", [])
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
yield "results", []
|
||||
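Aside, not part of the diff: the ExaSearchBlock run above renames snake_case inputs to the camelCase keys the Exa API expects and formats optional datetimes before adding them to the payload. A tiny sketch of that step (the mapping keys come from the block; the sample inputs are illustrative):

```python
from datetime import datetime

date_field_mapping = {"start_crawl_date": "startCrawlDate", "end_crawl_date": "endCrawlDate"}

inputs = {"start_crawl_date": datetime(2024, 1, 2, 3, 4, 5), "end_crawl_date": None}
payload = {}
for input_field, api_field in date_field_mapping.items():
    value = inputs.get(input_field)
    if value:  # only non-empty values are sent
        payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")

print(payload)  # {'startCrawlDate': '2024-01-02T03:04:05.000Z'}
```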
@@ -1,128 +0,0 @@
|
||||
from datetime import datetime
|
||||
from typing import Any, List
|
||||
|
||||
from backend.blocks.exa._auth import (
|
||||
ExaCredentials,
|
||||
ExaCredentialsField,
|
||||
ExaCredentialsInput,
|
||||
)
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.request import requests
|
||||
|
||||
from .helpers import ContentSettings
|
||||
|
||||
|
||||
class ExaFindSimilarBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: ExaCredentialsInput = ExaCredentialsField()
|
||||
url: str = SchemaField(
|
||||
description="The url for which you would like to find similar links"
|
||||
)
|
||||
number_of_results: int = SchemaField(
|
||||
description="Number of results to return",
|
||||
default=10,
|
||||
advanced=True,
|
||||
)
|
||||
include_domains: List[str] = SchemaField(
|
||||
description="Domains to include in search",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
exclude_domains: List[str] = SchemaField(
|
||||
description="Domains to exclude from search",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
start_crawl_date: datetime = SchemaField(
|
||||
description="Start date for crawled content",
|
||||
)
|
||||
end_crawl_date: datetime = SchemaField(
|
||||
description="End date for crawled content",
|
||||
)
|
||||
start_published_date: datetime = SchemaField(
|
||||
description="Start date for published content",
|
||||
)
|
||||
end_published_date: datetime = SchemaField(
|
||||
description="End date for published content",
|
||||
)
|
||||
include_text: List[str] = SchemaField(
|
||||
description="Text patterns to include (max 1 string, up to 5 words)",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
exclude_text: List[str] = SchemaField(
|
||||
description="Text patterns to exclude (max 1 string, up to 5 words)",
|
||||
default=[],
|
||||
advanced=True,
|
||||
)
|
||||
contents: ContentSettings = SchemaField(
|
||||
description="Content retrieval settings",
|
||||
default=ContentSettings(),
|
||||
advanced=True,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
results: List[Any] = SchemaField(
|
||||
description="List of similar documents with title, URL, published date, author, and score",
|
||||
default=[],
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
|
||||
description="Finds similar links using Exa's findSimilar API",
|
||||
categories={BlockCategory.SEARCH},
|
||||
input_schema=ExaFindSimilarBlock.Input,
|
||||
output_schema=ExaFindSimilarBlock.Output,
|
||||
)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
url = "https://api.exa.ai/findSimilar"
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"x-api-key": credentials.api_key.get_secret_value(),
|
||||
}
|
||||
|
||||
payload = {
|
||||
"url": input_data.url,
|
||||
"numResults": input_data.number_of_results,
|
||||
"contents": input_data.contents.dict(),
|
||||
}
|
||||
|
||||
optional_field_mapping = {
|
||||
"include_domains": "includeDomains",
|
||||
"exclude_domains": "excludeDomains",
|
||||
"include_text": "includeText",
|
||||
"exclude_text": "excludeText",
|
||||
}
|
||||
|
||||
# Add optional fields if they have values
|
||||
for input_field, api_field in optional_field_mapping.items():
|
||||
value = getattr(input_data, input_field)
|
||||
if value: # Only add non-empty values
|
||||
payload[api_field] = value
|
||||
|
||||
date_field_mapping = {
|
||||
"start_crawl_date": "startCrawlDate",
|
||||
"end_crawl_date": "endCrawlDate",
|
||||
"start_published_date": "startPublishedDate",
|
||||
"end_published_date": "endPublishedDate",
|
||||
}
|
||||
|
||||
# Add dates if they exist
|
||||
for input_field, api_field in date_field_mapping.items():
|
||||
value = getattr(input_data, input_field, None)
|
||||
if value:
|
||||
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
|
||||
|
||||
try:
|
||||
response = requests.post(url, headers=headers, json=payload)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
yield "results", data.get("results", [])
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
yield "results", []
|
||||
@@ -1,35 +0,0 @@
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
FalCredentials = APIKeyCredentials
|
||||
FalCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.FAL],
|
||||
Literal["api_key"],
|
||||
]
|
||||
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="fal",
|
||||
api_key=SecretStr("mock-fal-api-key"),
|
||||
title="Mock FAL API key",
|
||||
expires_at=None,
|
||||
)
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
|
||||
|
||||
def FalCredentialsField() -> FalCredentialsInput:
|
||||
"""
|
||||
Creates a FAL credentials input on a block.
|
||||
"""
|
||||
return CredentialsField(
|
||||
description="The FAL integration can be used with an API Key.",
|
||||
)
|
||||
@@ -1,199 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
|
||||
from backend.blocks.fal._auth import (
|
||||
TEST_CREDENTIALS,
|
||||
TEST_CREDENTIALS_INPUT,
|
||||
FalCredentials,
|
||||
FalCredentialsField,
|
||||
FalCredentialsInput,
|
||||
)
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FalModel(str, Enum):
|
||||
MOCHI = "fal-ai/mochi-v1"
|
||||
LUMA = "fal-ai/luma-dream-machine"
|
||||
|
||||
|
||||
class AIVideoGeneratorBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
prompt: str = SchemaField(
|
||||
description="Description of the video to generate.",
|
||||
placeholder="A dog running in a field.",
|
||||
)
|
||||
model: FalModel = SchemaField(
|
||||
title="FAL Model",
|
||||
default=FalModel.MOCHI,
|
||||
description="The FAL model to use for video generation.",
|
||||
)
|
||||
credentials: FalCredentialsInput = FalCredentialsField()
|
||||
|
||||
class Output(BlockSchema):
|
||||
video_url: str = SchemaField(description="The URL of the generated video.")
|
||||
error: str = SchemaField(
|
||||
description="Error message if video generation failed."
|
||||
)
|
||||
logs: list[str] = SchemaField(
|
||||
description="Generation progress logs.", optional=True
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="530cf046-2ce0-4854-ae2c-659db17c7a46",
|
||||
description="Generate videos using FAL AI models.",
|
||||
categories={BlockCategory.AI},
|
||||
input_schema=self.Input,
|
||||
output_schema=self.Output,
|
||||
test_input={
|
||||
"prompt": "A dog running in a field.",
|
||||
"model": FalModel.MOCHI,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[("video_url", "https://fal.media/files/example/video.mp4")],
|
||||
test_mock={
|
||||
"generate_video": lambda *args, **kwargs: "https://fal.media/files/example/video.mp4"
|
||||
},
|
||||
)
|
||||
|
||||
    def _get_headers(self, api_key: str) -> dict[str, str]:
        """Get headers for FAL API requests."""
        return {
            "Authorization": f"Key {api_key}",
            "Content-Type": "application/json",
        }

    def _submit_request(
        self, url: str, headers: dict[str, str], data: dict[str, Any]
    ) -> dict[str, Any]:
        """Submit a request to the FAL API."""
        try:
            response = httpx.post(url, headers=headers, json=data)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPError as e:
            logger.error(f"FAL API request failed: {str(e)}")
            raise RuntimeError(f"Failed to submit request: {str(e)}")

    def _poll_status(self, status_url: str, headers: dict[str, str]) -> dict[str, Any]:
        """Poll the status endpoint until completion or failure."""
        try:
            response = httpx.get(status_url, headers=headers)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPError as e:
            logger.error(f"Failed to get status: {str(e)}")
            raise RuntimeError(f"Failed to get status: {str(e)}")

    def generate_video(self, input_data: Input, credentials: FalCredentials) -> str:
        """Generate video using the specified FAL model."""
        base_url = "https://queue.fal.run"
        api_key = credentials.api_key.get_secret_value()
        headers = self._get_headers(api_key)

        # Submit generation request
        submit_url = f"{base_url}/{input_data.model.value}"
        submit_data = {"prompt": input_data.prompt}

        seen_logs = set()

        try:
            # Submit request to queue
            submit_response = httpx.post(submit_url, headers=headers, json=submit_data)
            submit_response.raise_for_status()
            request_data = submit_response.json()

            # Get request_id and urls from initial response
            request_id = request_data.get("request_id")
            status_url = request_data.get("status_url")
            result_url = request_data.get("response_url")

            if not all([request_id, status_url, result_url]):
                raise ValueError("Missing required data in submission response")

            # Poll for status with exponential backoff
            max_attempts = 30
            attempt = 0
            base_wait_time = 5

            while attempt < max_attempts:
                status_response = httpx.get(f"{status_url}?logs=1", headers=headers)
                status_response.raise_for_status()
                status_data = status_response.json()

                # Process new logs only
                logs = status_data.get("logs", [])
                if logs and isinstance(logs, list):
                    for log in logs:
                        if isinstance(log, dict):
                            # Create a unique key for this log entry
                            log_key = (
                                f"{log.get('timestamp', '')}-{log.get('message', '')}"
                            )
                            if log_key not in seen_logs:
                                seen_logs.add(log_key)
                                message = log.get("message", "")
                                if message:
                                    logger.debug(
                                        f"[FAL Generation] [{log.get('level', 'INFO')}] [{log.get('source', '')}] [{log.get('timestamp', '')}] {message}"
                                    )

                status = status_data.get("status")
                if status == "COMPLETED":
                    # Get the final result
                    result_response = httpx.get(result_url, headers=headers)
                    result_response.raise_for_status()
                    result_data = result_response.json()

                    if "video" not in result_data or not isinstance(
                        result_data["video"], dict
                    ):
                        raise ValueError("Invalid response format - missing video data")

                    video_url = result_data["video"].get("url")
                    if not video_url:
                        raise ValueError("No video URL in response")

                    return video_url

                elif status == "FAILED":
                    error_msg = status_data.get("error", "No error details provided")
                    raise RuntimeError(f"Video generation failed: {error_msg}")
                elif status == "IN_QUEUE":
                    position = status_data.get("queue_position", "unknown")
                    logger.debug(
                        f"[FAL Generation] Status: In queue, position: {position}"
                    )
                elif status == "IN_PROGRESS":
                    logger.debug(
                        "[FAL Generation] Status: Request is being processed..."
                    )
                else:
                    logger.info(f"[FAL Generation] Status: Unknown status: {status}")

                wait_time = min(base_wait_time * (2**attempt), 60)  # Cap at 60 seconds
                time.sleep(wait_time)
                attempt += 1

            raise RuntimeError("Maximum polling attempts reached")

        except httpx.HTTPError as e:
            raise RuntimeError(f"API request failed: {str(e)}")

    def run(
        self, input_data: Input, *, credentials: FalCredentials, **kwargs
    ) -> BlockOutput:
        try:
            video_url = self.generate_video(input_data, credentials)
            yield "video_url", video_url
        except Exception as e:
            error_message = str(e)
            yield "error", error_message
@@ -1,49 +0,0 @@
from urllib.parse import urlparse

from backend.blocks.github._auth import (
    GithubCredentials,
    GithubFineGrainedAPICredentials,
)
from backend.util.request import Requests


def _convert_to_api_url(url: str) -> str:
    """
    Converts a standard GitHub URL to the corresponding GitHub API URL.
    Handles repository URLs, issue URLs, pull request URLs, and more.
    """
    parsed_url = urlparse(url)
    path_parts = parsed_url.path.strip("/").split("/")

    if len(path_parts) >= 2:
        owner, repo = path_parts[0], path_parts[1]
        api_base = f"https://api.github.com/repos/{owner}/{repo}"

        if len(path_parts) > 2:
            additional_path = "/".join(path_parts[2:])
            api_url = f"{api_base}/{additional_path}"
        else:
            # Repository base URL
            api_url = api_base
    else:
        raise ValueError("Invalid GitHub URL format.")

    return api_url


def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
    return {
        "Authorization": credentials.auth_header(),
        "Accept": "application/vnd.github.v3+json",
    }


def get_api(
    credentials: GithubCredentials | GithubFineGrainedAPICredentials,
    convert_urls: bool = True,
) -> Requests:
    return Requests(
        trusted_origins=["https://api.github.com", "https://github.com"],
        extra_url_validator=_convert_to_api_url if convert_urls else None,
        extra_headers=_get_headers(credentials),
    )
@@ -1,14 +1,12 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import (
from autogpt_libs.supabase_integration_credentials_store.types import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from pydantic import SecretStr

from backend.data.model import CredentialsField, CredentialsMetaInput
from backend.util.settings import Secrets

secrets = Secrets()
@@ -18,15 +16,10 @@ GITHUB_OAUTH_IS_CONFIGURED = bool(

GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.GITHUB],
    Literal["github"],
    Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]

GithubFineGrainedAPICredentials = APIKeyCredentials
GithubFineGrainedAPICredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.GITHUB], Literal["api_key"]
]


def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
    """
@@ -36,16 +29,10 @@ def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
        scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
    """  # noqa
    return CredentialsField(
        required_scopes={scope},
        description="The GitHub integration can be used with OAuth, "
        "or any API key with sufficient permissions for the blocks it is used on.",
    )


def GithubFineGrainedAPICredentialsField(
    scope: str,
) -> GithubFineGrainedAPICredentialsInput:
    return CredentialsField(
        provider="github",
        supported_credential_types=(
            {"api_key", "oauth2"} if GITHUB_OAUTH_IS_CONFIGURED else {"api_key"}
        ),
        required_scopes={scope},
        description="The GitHub integration can be used with OAuth, "
        "or any API key with sufficient permissions for the blocks it is used on.",
@@ -65,18 +52,3 @@ TEST_CREDENTIALS_INPUT = {
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.type,
}

TEST_FINE_GRAINED_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="github",
    api_key=SecretStr("mock-github-api-key"),
    title="Mock GitHub API key",
    expires_at=None,
)

TEST_FINE_GRAINED_CREDENTIALS_INPUT = {
    "provider": TEST_FINE_GRAINED_CREDENTIALS.provider,
    "id": TEST_FINE_GRAINED_CREDENTIALS.id,
    "type": TEST_FINE_GRAINED_CREDENTIALS.type,
    "title": TEST_FINE_GRAINED_CREDENTIALS.type,
}
@@ -1,356 +0,0 @@
from enum import Enum
from typing import Optional

from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GithubCredentials,
    GithubCredentialsField,
    GithubCredentialsInput,
)


# queued, in_progress, completed, waiting, requested, pending
class ChecksStatus(Enum):
    QUEUED = "queued"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    WAITING = "waiting"
    REQUESTED = "requested"
    PENDING = "pending"


class ChecksConclusion(Enum):
    SUCCESS = "success"
    FAILURE = "failure"
    NEUTRAL = "neutral"
    CANCELLED = "cancelled"
    TIMED_OUT = "timed_out"
    ACTION_REQUIRED = "action_required"
    SKIPPED = "skipped"


class GithubCreateCheckRunBlock(Block):
    """Block for creating a new check run on a GitHub repository."""

    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo:status")
        repo_url: str = SchemaField(
            description="URL of the GitHub repository",
            placeholder="https://github.com/owner/repo",
        )
        name: str = SchemaField(
            description="The name of the check run (e.g., 'code-coverage')",
        )
        head_sha: str = SchemaField(
            description="The SHA of the commit to check",
        )
        status: ChecksStatus = SchemaField(
            description="Current status of the check run",
            default=ChecksStatus.QUEUED,
        )
        conclusion: Optional[ChecksConclusion] = SchemaField(
            description="The final conclusion of the check (required if status is completed)",
            default=None,
        )
        details_url: str = SchemaField(
            description="The URL for the full details of the check",
            default="",
        )
        output_title: str = SchemaField(
            description="Title of the check run output",
            default="",
        )
        output_summary: str = SchemaField(
            description="Summary of the check run output",
            default="",
        )
        output_text: str = SchemaField(
            description="Detailed text of the check run output",
            default="",
        )

    class Output(BlockSchema):
        class CheckRunResult(BaseModel):
            id: int
            html_url: str
            status: str

        check_run: CheckRunResult = SchemaField(
            description="Details of the created check run"
        )
        error: str = SchemaField(
            description="Error message if check run creation failed"
        )

    def __init__(self):
        super().__init__(
            id="2f45e89a-3b7d-4f22-b89e-6c4f5c7e1234",
            description="Creates a new check run for a specific commit in a GitHub repository",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=GithubCreateCheckRunBlock.Input,
            output_schema=GithubCreateCheckRunBlock.Output,
            test_input={
                "repo_url": "https://github.com/owner/repo",
                "name": "test-check",
                "head_sha": "ce587453ced02b1526dfb4cb910479d431683101",
                "status": ChecksStatus.COMPLETED.value,
                "conclusion": ChecksConclusion.SUCCESS.value,
                "output_title": "Test Results",
                "output_summary": "All tests passed",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            # requires a github app not available to oauth in our current system
            disabled=True,
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "check_run",
                    {
                        "id": 4,
                        "html_url": "https://github.com/owner/repo/runs/4",
                        "status": "completed",
                    },
                ),
            ],
            test_mock={
                "create_check_run": lambda *args, **kwargs: {
                    "id": 4,
                    "html_url": "https://github.com/owner/repo/runs/4",
                    "status": "completed",
                }
            },
        )

    @staticmethod
    def create_check_run(
        credentials: GithubCredentials,
        repo_url: str,
        name: str,
        head_sha: str,
        status: ChecksStatus,
        conclusion: Optional[ChecksConclusion] = None,
        details_url: Optional[str] = None,
        output_title: Optional[str] = None,
        output_summary: Optional[str] = None,
        output_text: Optional[str] = None,
    ) -> dict:
        api = get_api(credentials)

        class CheckRunData(BaseModel):
            name: str
            head_sha: str
            status: str
            conclusion: Optional[str] = None
            details_url: Optional[str] = None
            output: Optional[dict[str, str]] = None

        data = CheckRunData(
            name=name,
            head_sha=head_sha,
            status=status.value,
        )

        if conclusion:
            data.conclusion = conclusion.value

        if details_url:
            data.details_url = details_url

        if output_title or output_summary or output_text:
            output_data = {
                "title": output_title or "",
                "summary": output_summary or "",
                "text": output_text or "",
            }
            data.output = output_data

        check_runs_url = f"{repo_url}/check-runs"
        response = api.post(check_runs_url)
        result = response.json()

        return {
            "id": result["id"],
            "html_url": result["html_url"],
            "status": result["status"],
        }

    def run(
        self,
        input_data: Input,
        *,
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            result = self.create_check_run(
                credentials=credentials,
                repo_url=input_data.repo_url,
                name=input_data.name,
                head_sha=input_data.head_sha,
                status=input_data.status,
                conclusion=input_data.conclusion,
                details_url=input_data.details_url,
                output_title=input_data.output_title,
                output_summary=input_data.output_summary,
                output_text=input_data.output_text,
            )
            yield "check_run", result
        except Exception as e:
            yield "error", str(e)


class GithubUpdateCheckRunBlock(Block):
    """Block for updating an existing check run on a GitHub repository."""

    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo:status")
        repo_url: str = SchemaField(
            description="URL of the GitHub repository",
            placeholder="https://github.com/owner/repo",
        )
        check_run_id: int = SchemaField(
            description="The ID of the check run to update",
        )
        status: ChecksStatus = SchemaField(
            description="New status of the check run",
        )
        conclusion: ChecksConclusion = SchemaField(
            description="The final conclusion of the check (required if status is completed)",
        )
        output_title: Optional[str] = SchemaField(
            description="New title of the check run output",
            default=None,
        )
        output_summary: Optional[str] = SchemaField(
            description="New summary of the check run output",
            default=None,
        )
        output_text: Optional[str] = SchemaField(
            description="New detailed text of the check run output",
            default=None,
        )

    class Output(BlockSchema):
        class CheckRunResult(BaseModel):
            id: int
            html_url: str
            status: str
            conclusion: Optional[str]

        check_run: CheckRunResult = SchemaField(
            description="Details of the updated check run"
        )
        error: str = SchemaField(description="Error message if check run update failed")

    def __init__(self):
        super().__init__(
            id="8a23c567-9d01-4e56-b789-0c12d3e45678",  # Generated UUID
            description="Updates an existing check run in a GitHub repository",
            categories={BlockCategory.DEVELOPER_TOOLS},
            input_schema=GithubUpdateCheckRunBlock.Input,
            output_schema=GithubUpdateCheckRunBlock.Output,
            # requires a github app not available to oauth in our current system
            disabled=True,
            test_input={
                "repo_url": "https://github.com/owner/repo",
                "check_run_id": 4,
                "status": ChecksStatus.COMPLETED.value,
                "conclusion": ChecksConclusion.SUCCESS.value,
                "output_title": "Updated Results",
                "output_summary": "All tests passed after retry",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "check_run",
                    {
                        "id": 4,
                        "html_url": "https://github.com/owner/repo/runs/4",
                        "status": "completed",
                        "conclusion": "success",
                    },
                ),
            ],
            test_mock={
                "update_check_run": lambda *args, **kwargs: {
                    "id": 4,
                    "html_url": "https://github.com/owner/repo/runs/4",
                    "status": "completed",
                    "conclusion": "success",
                }
            },
        )

    @staticmethod
    def update_check_run(
        credentials: GithubCredentials,
        repo_url: str,
        check_run_id: int,
        status: ChecksStatus,
        conclusion: Optional[ChecksConclusion] = None,
        output_title: Optional[str] = None,
        output_summary: Optional[str] = None,
        output_text: Optional[str] = None,
    ) -> dict:
        api = get_api(credentials)

        class UpdateCheckRunData(BaseModel):
            status: str
            conclusion: Optional[str] = None
            output: Optional[dict[str, str]] = None

        data = UpdateCheckRunData(
            status=status.value,
        )

        if conclusion:
            data.conclusion = conclusion.value

        if output_title or output_summary or output_text:
            output_data = {
                "title": output_title or "",
                "summary": output_summary or "",
                "text": output_text or "",
            }
            data.output = output_data

        check_run_url = f"{repo_url}/check-runs/{check_run_id}"
        response = api.patch(check_run_url)
        result = response.json()

        return {
            "id": result["id"],
            "html_url": result["html_url"],
            "status": result["status"],
            "conclusion": result.get("conclusion"),
        }

    def run(
        self,
        input_data: Input,
        *,
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            result = self.update_check_run(
                credentials=credentials,
                repo_url=input_data.repo_url,
                check_run_id=input_data.check_run_id,
                status=input_data.status,
                conclusion=input_data.conclusion,
                output_title=input_data.output_title,
                output_summary=input_data.output_summary,
                output_text=input_data.output_text,
            )
            yield "check_run", result
        except Exception as e:
            yield "error", str(e)
@@ -1,700 +0,0 @@
|
||||
{
|
||||
"action": "synchronize",
|
||||
"number": 8358,
|
||||
"pull_request": {
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358",
|
||||
"id": 2128918491,
|
||||
"node_id": "PR_kwDOJKSTjM5-5Lfb",
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358",
|
||||
"diff_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358.diff",
|
||||
"patch_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358.patch",
|
||||
"issue_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358",
|
||||
"number": 8358,
|
||||
"state": "open",
|
||||
"locked": false,
|
||||
"title": "feat(platform, blocks): Webhook-triggered blocks",
|
||||
"user": {
|
||||
"login": "Pwuts",
|
||||
"id": 12185583,
|
||||
"node_id": "MDQ6VXNlcjEyMTg1NTgz",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Pwuts",
|
||||
"html_url": "https://github.com/Pwuts",
|
||||
"followers_url": "https://api.github.com/users/Pwuts/followers",
|
||||
"following_url": "https://api.github.com/users/Pwuts/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Pwuts/orgs",
|
||||
"repos_url": "https://api.github.com/users/Pwuts/repos",
|
||||
"events_url": "https://api.github.com/users/Pwuts/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Pwuts/received_events",
|
||||
"type": "User",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"body": "- Resolves #8352\r\n\r\n## Changes 🏗️\r\n\r\n- feat(blocks): Add GitHub Pull Request Trigger block\r\n\r\n### feat(platform): Add support for Webhook-triggered blocks\r\n- ⚠️ Add `PLATFORM_BASE_URL` setting\r\n\r\n- Add webhook config option and `BlockType.WEBHOOK` to `Block`\r\n - Add check to `Block.__init__` to enforce type and shape of webhook event filter\r\n - Add check to `Block.__init__` to enforce `payload` input on webhook blocks\r\n\r\n- Add `Webhook` model + CRUD functions in `backend.data.integrations` to represent webhooks created by our system\r\n - Add `IntegrationWebhook` to DB schema + reference `AgentGraphNode.webhook_id`\r\n - Add `set_node_webhook(..)` in `backend.data.graph`\r\n\r\n- Add webhook-related endpoints:\r\n - `POST /integrations/{provider}/webhooks/{webhook_id}/ingress` endpoint, to receive webhook payloads, and for all associated nodes create graph executions\r\n - Add `Node.is_triggered_by_event_type(..)` helper method\r\n - `POST /integrations/{provider}/webhooks/{webhook_id}/ping` endpoint, to allow testing a webhook\r\n - Add `WebhookEvent` + pub/sub functions in `backend.data.integrations`\r\n\r\n- Add `backend.integrations.webhooks` module, including:\r\n - `graph_lifecycle_hooks`, e.g. `on_graph_activate(..)`, to handle corresponding webhook creation etc.\r\n - Add calls to these hooks in the graph create/update endpoints\r\n - `BaseWebhooksManager` + `GithubWebhooksManager` to handle creating + registering, removing + deregistering, and retrieving existing webhooks, and validating incoming payloads\r\n\r\n### Other improvements\r\n- fix(blocks): Allow having an input and output pin with the same name\r\n- feat(blocks): Allow hiding inputs (e.g. `payload`) with `SchemaField(hidden=True)`\r\n- feat(backend/data): Add `graph_id`, `graph_version` to `Node`; `user_id` to `GraphMeta`\r\n - Add `Creatable` versions of `Node`, `GraphMeta` and `Graph` without these properties\r\n - Add `graph_from_creatable(..)` helper function in `backend.data.graph`\r\n- refactor(backend/data): Make `RedisEventQueue` generic\r\n- refactor(frontend): Deduplicate & clean up code for different block types in `generateInputHandles(..)` in `CustomNode`\r\n- refactor(backend): Remove unused subgraph functionality\r\n\r\n## How it works\r\n- When a graph is created, the `on_graph_activate` and `on_node_activate` hooks are called on the graph and its nodes\r\n- If a webhook-triggered node has presets for all the relevant inputs, `on_node_activate` will get/create a suitable webhook and link it by setting `AgentGraphNode.webhook_id`\r\n - `on_node_activate` uses `webhook_manager.get_suitable_webhook(..)`, which tries to find a suitable webhook (with matching requirements) or creates it if none exists yet\r\n- When a graph is deactivated (in favor of a newer/other version) or deleted, `on_graph_deactivate` and `on_node_deactivate` are called on the graph and its nodes to clean up webhooks that are no longer in use\r\n- When a valid webhook payload is received, two things happen:\r\n 1. It is broadcast on the Redis channel `webhooks/{webhook_id}/{event_type}`\r\n 2. 
Graph executions are initiated for all nodes triggered by this webhook\r\n\r\n## TODO\r\n- [ ] #8537\r\n- [x] #8538\r\n- [ ] #8357\r\n- [ ] ~~#8554~~ can be done in a follow-up PR\r\n- [ ] Test test test!\r\n- [ ] Add note on `repo` input of webhook blocks that the credentials used must have the right permissions for the given organization/repo\r\n- [x] Implement proper detection and graceful handling of webhook creation failing due to insufficient permissions. This should give a clear message to the user to e.g. \"give the app access to this organization in your settings\".\r\n- [ ] Nice-to-have: make a button on webhook blocks to trigger a ping and check its result. The API endpoints for this is already implemented.",
|
||||
"created_at": "2024-10-16T22:13:47Z",
|
||||
"updated_at": "2024-11-11T18:34:54Z",
|
||||
"closed_at": null,
|
||||
"merged_at": null,
|
||||
"merge_commit_sha": "cbfd0cdd8db52cdd5a3b7ce088fc0ab4617a652e",
|
||||
"assignee": {
|
||||
"login": "Pwuts",
|
||||
"id": 12185583,
|
||||
"node_id": "MDQ6VXNlcjEyMTg1NTgz",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Pwuts",
|
||||
"html_url": "https://github.com/Pwuts",
|
||||
"followers_url": "https://api.github.com/users/Pwuts/followers",
|
||||
"following_url": "https://api.github.com/users/Pwuts/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Pwuts/orgs",
|
||||
"repos_url": "https://api.github.com/users/Pwuts/repos",
|
||||
"events_url": "https://api.github.com/users/Pwuts/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Pwuts/received_events",
|
||||
"type": "User",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"assignees": [
|
||||
{
|
||||
"login": "Pwuts",
|
||||
"id": 12185583,
|
||||
"node_id": "MDQ6VXNlcjEyMTg1NTgz",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Pwuts",
|
||||
"html_url": "https://github.com/Pwuts",
|
||||
"followers_url": "https://api.github.com/users/Pwuts/followers",
|
||||
"following_url": "https://api.github.com/users/Pwuts/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Pwuts/orgs",
|
||||
"repos_url": "https://api.github.com/users/Pwuts/repos",
|
||||
"events_url": "https://api.github.com/users/Pwuts/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Pwuts/received_events",
|
||||
"type": "User",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
}
|
||||
],
|
||||
"requested_reviewers": [
|
||||
{
|
||||
"login": "kcze",
|
||||
"id": 34861343,
|
||||
"node_id": "MDQ6VXNlcjM0ODYxMzQz",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/34861343?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/kcze",
|
||||
"html_url": "https://github.com/kcze",
|
||||
"followers_url": "https://api.github.com/users/kcze/followers",
|
||||
"following_url": "https://api.github.com/users/kcze/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/kcze/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/kcze/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/kcze/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/kcze/orgs",
|
||||
"repos_url": "https://api.github.com/users/kcze/repos",
|
||||
"events_url": "https://api.github.com/users/kcze/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/kcze/received_events",
|
||||
"type": "User",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
}
|
||||
],
|
||||
"requested_teams": [
|
||||
{
|
||||
"name": "DevOps",
|
||||
"id": 9547361,
|
||||
"node_id": "T_kwDOB8roIc4Aka5h",
|
||||
"slug": "devops",
|
||||
"description": "",
|
||||
"privacy": "closed",
|
||||
"notification_setting": "notifications_enabled",
|
||||
"url": "https://api.github.com/organizations/130738209/team/9547361",
|
||||
"html_url": "https://github.com/orgs/Significant-Gravitas/teams/devops",
|
||||
"members_url": "https://api.github.com/organizations/130738209/team/9547361/members{/member}",
|
||||
"repositories_url": "https://api.github.com/organizations/130738209/team/9547361/repos",
|
||||
"permission": "pull",
|
||||
"parent": null
|
||||
}
|
||||
],
|
||||
"labels": [
|
||||
{
|
||||
"id": 5272676214,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABOkandg",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/documentation",
|
||||
"name": "documentation",
|
||||
"color": "0075ca",
|
||||
"default": true,
|
||||
"description": "Improvements or additions to documentation"
|
||||
},
|
||||
{
|
||||
"id": 5410633769,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABQn-4KQ",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/size/xl",
|
||||
"name": "size/xl",
|
||||
"color": "E751DD",
|
||||
"default": false,
|
||||
"description": ""
|
||||
},
|
||||
{
|
||||
"id": 6892322271,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABmtB93w",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/Review%20effort%20[1-5]:%204",
|
||||
"name": "Review effort [1-5]: 4",
|
||||
"color": "d1bcf9",
|
||||
"default": false,
|
||||
"description": null
|
||||
},
|
||||
{
|
||||
"id": 7218433025,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABrkCMAQ",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/frontend",
|
||||
"name": "platform/frontend",
|
||||
"color": "033C07",
|
||||
"default": false,
|
||||
"description": "AutoGPT Platform - Front end"
|
||||
},
|
||||
{
|
||||
"id": 7219356193,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABrk6iIQ",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/backend",
|
||||
"name": "platform/backend",
|
||||
"color": "ededed",
|
||||
"default": false,
|
||||
"description": "AutoGPT Platform - Back end"
|
||||
},
|
||||
{
|
||||
"id": 7515330106,
|
||||
"node_id": "LA_kwDOJKSTjM8AAAABv_LWOg",
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/blocks",
|
||||
"name": "platform/blocks",
|
||||
"color": "eb5757",
|
||||
"default": false,
|
||||
"description": null
|
||||
}
|
||||
],
|
||||
"milestone": null,
|
||||
"draft": false,
|
||||
"commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/commits",
|
||||
"review_comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/comments",
|
||||
"review_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/comments{/number}",
|
||||
"comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358/comments",
|
||||
"statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/8f708a2b60463eec10747d8f45dead35b5a45bd0",
|
||||
"head": {
|
||||
"label": "Significant-Gravitas:reinier/open-1961-implement-github-on-pull-request-block",
|
||||
"ref": "reinier/open-1961-implement-github-on-pull-request-block",
|
||||
"sha": "8f708a2b60463eec10747d8f45dead35b5a45bd0",
|
||||
"user": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"followers_url": "https://api.github.com/users/Significant-Gravitas/followers",
|
||||
"following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs",
|
||||
"repos_url": "https://api.github.com/users/Significant-Gravitas/repos",
|
||||
"events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events",
|
||||
"type": "Organization",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"repo": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"followers_url": "https://api.github.com/users/Significant-Gravitas/followers",
|
||||
"following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs",
|
||||
"repos_url": "https://api.github.com/users/Significant-Gravitas/repos",
|
||||
"events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events",
|
||||
"type": "Organization",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks",
|
||||
"keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}",
|
||||
"collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}",
|
||||
"teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams",
|
||||
"hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks",
|
||||
"issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}",
|
||||
"events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events",
|
||||
"assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}",
|
||||
"branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}",
|
||||
"tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags",
|
||||
"blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}",
|
||||
"git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}",
|
||||
"git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}",
|
||||
"trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}",
|
||||
"statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}",
|
||||
"languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages",
|
||||
"stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers",
|
||||
"contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors",
|
||||
"subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers",
|
||||
"subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription",
|
||||
"commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}",
|
||||
"git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}",
|
||||
"comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}",
|
||||
"issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}",
|
||||
"contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}",
|
||||
"compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}",
|
||||
"merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges",
|
||||
"archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}",
|
||||
"downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads",
|
||||
"issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}",
|
||||
"pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}",
|
||||
"milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}",
|
||||
"notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}",
|
||||
"labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}",
|
||||
"releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}",
|
||||
"deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-11-11T18:16:29Z",
|
||||
"pushed_at": "2024-11-11T18:34:52Z",
|
||||
"git_url": "git://github.com/Significant-Gravitas/AutoGPT.git",
|
||||
"ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git",
|
||||
"clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
|
||||
"svn_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"homepage": "https://agpt.co",
|
||||
"size": 181894,
|
||||
"stargazers_count": 168203,
|
||||
"watchers_count": 168203,
|
||||
"language": "Python",
|
||||
"has_issues": true,
|
||||
"has_projects": true,
|
||||
"has_downloads": true,
|
||||
"has_wiki": true,
|
||||
"has_pages": false,
|
||||
"has_discussions": true,
|
||||
"forks_count": 44376,
|
||||
"mirror_url": null,
|
||||
"archived": false,
|
||||
"disabled": false,
|
||||
"open_issues_count": 189,
|
||||
"license": {
|
||||
"key": "other",
|
||||
"name": "Other",
|
||||
"spdx_id": "NOASSERTION",
|
||||
"url": null,
|
||||
"node_id": "MDc6TGljZW5zZTA="
|
||||
},
|
||||
"allow_forking": true,
|
||||
"is_template": false,
|
||||
"web_commit_signoff_required": false,
|
||||
"topics": [
|
||||
"ai",
|
||||
"artificial-intelligence",
|
||||
"autonomous-agents",
|
||||
"gpt-4",
|
||||
"openai",
|
||||
"python"
|
||||
],
|
||||
"visibility": "public",
|
||||
"forks": 44376,
|
||||
"open_issues": 189,
|
||||
"watchers": 168203,
|
||||
"default_branch": "master",
|
||||
"allow_squash_merge": true,
|
||||
"allow_merge_commit": false,
|
||||
"allow_rebase_merge": false,
|
||||
"allow_auto_merge": true,
|
||||
"delete_branch_on_merge": true,
|
||||
"allow_update_branch": true,
|
||||
"use_squash_pr_title_as_default": true,
|
||||
"squash_merge_commit_message": "COMMIT_MESSAGES",
|
||||
"squash_merge_commit_title": "PR_TITLE",
|
||||
"merge_commit_message": "BLANK",
|
||||
"merge_commit_title": "PR_TITLE"
|
||||
}
|
||||
},
|
||||
"base": {
|
||||
"label": "Significant-Gravitas:dev",
|
||||
"ref": "dev",
|
||||
"sha": "0b5b95eff5e18c1e162d2b30b66a7be2bed1cbc2",
|
||||
"user": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"followers_url": "https://api.github.com/users/Significant-Gravitas/followers",
|
||||
"following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs",
|
||||
"repos_url": "https://api.github.com/users/Significant-Gravitas/repos",
|
||||
"events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events",
|
||||
"type": "Organization",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"repo": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"followers_url": "https://api.github.com/users/Significant-Gravitas/followers",
|
||||
"following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs",
|
||||
"repos_url": "https://api.github.com/users/Significant-Gravitas/repos",
|
||||
"events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events",
|
||||
"type": "Organization",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks",
|
||||
"keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}",
|
||||
"collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}",
|
||||
"teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams",
|
||||
"hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks",
|
||||
"issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}",
|
||||
"events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events",
|
||||
"assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}",
|
||||
"branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}",
|
||||
"tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags",
|
||||
"blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}",
|
||||
"git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}",
|
||||
"git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}",
|
||||
"trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}",
|
||||
"statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}",
|
||||
"languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages",
|
||||
"stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers",
|
||||
"contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors",
|
||||
"subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers",
|
||||
"subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription",
|
||||
"commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}",
|
||||
"git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}",
|
||||
"comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}",
|
||||
"issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}",
|
||||
"contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}",
|
||||
"compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}",
|
||||
"merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges",
|
||||
"archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}",
|
||||
"downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads",
|
||||
"issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}",
|
||||
"pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}",
|
||||
"milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}",
|
||||
"notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}",
|
||||
"labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}",
|
||||
"releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}",
|
||||
"deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-11-11T18:16:29Z",
|
||||
"pushed_at": "2024-11-11T18:34:52Z",
|
||||
"git_url": "git://github.com/Significant-Gravitas/AutoGPT.git",
|
||||
"ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git",
|
||||
"clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
|
||||
"svn_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"homepage": "https://agpt.co",
|
||||
"size": 181894,
|
||||
"stargazers_count": 168203,
|
||||
"watchers_count": 168203,
|
||||
"language": "Python",
|
||||
"has_issues": true,
|
||||
"has_projects": true,
|
||||
"has_downloads": true,
|
||||
"has_wiki": true,
|
||||
"has_pages": false,
|
||||
"has_discussions": true,
|
||||
"forks_count": 44376,
|
||||
"mirror_url": null,
|
||||
"archived": false,
|
||||
"disabled": false,
|
||||
"open_issues_count": 189,
|
||||
"license": {
|
||||
"key": "other",
|
||||
"name": "Other",
|
||||
"spdx_id": "NOASSERTION",
|
||||
"url": null,
|
||||
"node_id": "MDc6TGljZW5zZTA="
|
||||
},
|
||||
"allow_forking": true,
|
||||
"is_template": false,
|
||||
"web_commit_signoff_required": false,
|
||||
"topics": [
|
||||
"ai",
|
||||
"artificial-intelligence",
|
||||
"autonomous-agents",
|
||||
"gpt-4",
|
||||
"openai",
|
||||
"python"
|
||||
],
|
||||
"visibility": "public",
|
||||
"forks": 44376,
|
||||
"open_issues": 189,
|
||||
"watchers": 168203,
|
||||
"default_branch": "master",
|
||||
"allow_squash_merge": true,
|
||||
"allow_merge_commit": false,
|
||||
"allow_rebase_merge": false,
|
||||
"allow_auto_merge": true,
|
||||
"delete_branch_on_merge": true,
|
||||
"allow_update_branch": true,
|
||||
"use_squash_pr_title_as_default": true,
|
||||
"squash_merge_commit_message": "COMMIT_MESSAGES",
|
||||
"squash_merge_commit_title": "PR_TITLE",
|
||||
"merge_commit_message": "BLANK",
|
||||
"merge_commit_title": "PR_TITLE"
|
||||
}
|
||||
},
|
||||
"_links": {
|
||||
"self": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358"
|
||||
},
|
||||
"html": {
|
||||
"href": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358"
|
||||
},
|
||||
"issue": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358"
|
||||
},
|
||||
"comments": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358/comments"
|
||||
},
|
||||
"review_comments": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/comments"
|
||||
},
|
||||
"review_comment": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/comments{/number}"
|
||||
},
|
||||
"commits": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/commits"
|
||||
},
|
||||
"statuses": {
|
||||
"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/8f708a2b60463eec10747d8f45dead35b5a45bd0"
|
||||
}
|
||||
},
|
||||
"author_association": "MEMBER",
|
||||
"auto_merge": null,
|
||||
"active_lock_reason": null,
|
||||
"merged": false,
|
||||
"mergeable": null,
|
||||
"rebaseable": null,
|
||||
"mergeable_state": "unknown",
|
||||
"merged_by": null,
|
||||
"comments": 12,
|
||||
"review_comments": 29,
|
||||
"maintainer_can_modify": false,
|
||||
"commits": 62,
|
||||
"additions": 1674,
|
||||
"deletions": 331,
|
||||
"changed_files": 36
|
||||
},
|
||||
"before": "f40aef87672203f47bbbd53f83fae0964c5624da",
|
||||
"after": "8f708a2b60463eec10747d8f45dead35b5a45bd0",
|
||||
"repository": {
|
||||
"id": 614765452,
|
||||
"node_id": "R_kgDOJKSTjA",
|
||||
"name": "AutoGPT",
|
||||
"full_name": "Significant-Gravitas/AutoGPT",
|
||||
"private": false,
|
||||
"owner": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Significant-Gravitas",
|
||||
"html_url": "https://github.com/Significant-Gravitas",
|
||||
"followers_url": "https://api.github.com/users/Significant-Gravitas/followers",
|
||||
"following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs",
|
||||
"repos_url": "https://api.github.com/users/Significant-Gravitas/repos",
|
||||
"events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events",
|
||||
"type": "Organization",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
},
|
||||
"html_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.",
|
||||
"fork": false,
|
||||
"url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
|
||||
"forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks",
|
||||
"keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}",
|
||||
"collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}",
|
||||
"teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams",
|
||||
"hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks",
|
||||
"issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}",
|
||||
"events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events",
|
||||
"assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}",
|
||||
"branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}",
|
||||
"tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags",
|
||||
"blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}",
|
||||
"git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}",
|
||||
"git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}",
|
||||
"trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}",
|
||||
"statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}",
|
||||
"languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages",
|
||||
"stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers",
|
||||
"contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors",
|
||||
"subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers",
|
||||
"subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription",
|
||||
"commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}",
|
||||
"git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}",
|
||||
"comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}",
|
||||
"issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}",
|
||||
"contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}",
|
||||
"compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}",
|
||||
"merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges",
|
||||
"archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}",
|
||||
"downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads",
|
||||
"issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}",
|
||||
"pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}",
|
||||
"milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}",
|
||||
"notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}",
|
||||
"labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}",
|
||||
"releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}",
|
||||
"deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments",
|
||||
"created_at": "2023-03-16T09:21:07Z",
|
||||
"updated_at": "2024-11-11T18:16:29Z",
|
||||
"pushed_at": "2024-11-11T18:34:52Z",
|
||||
"git_url": "git://github.com/Significant-Gravitas/AutoGPT.git",
|
||||
"ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git",
|
||||
"clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
|
||||
"svn_url": "https://github.com/Significant-Gravitas/AutoGPT",
|
||||
"homepage": "https://agpt.co",
|
||||
"size": 181894,
|
||||
"stargazers_count": 168203,
|
||||
"watchers_count": 168203,
|
||||
"language": "Python",
|
||||
"has_issues": true,
|
||||
"has_projects": true,
|
||||
"has_downloads": true,
|
||||
"has_wiki": true,
|
||||
"has_pages": false,
|
||||
"has_discussions": true,
|
||||
"forks_count": 44376,
|
||||
"mirror_url": null,
|
||||
"archived": false,
|
||||
"disabled": false,
|
||||
"open_issues_count": 189,
|
||||
"license": {
|
||||
"key": "other",
|
||||
"name": "Other",
|
||||
"spdx_id": "NOASSERTION",
|
||||
"url": null,
|
||||
"node_id": "MDc6TGljZW5zZTA="
|
||||
},
|
||||
"allow_forking": true,
|
||||
"is_template": false,
|
||||
"web_commit_signoff_required": false,
|
||||
"topics": [
|
||||
"ai",
|
||||
"artificial-intelligence",
|
||||
"autonomous-agents",
|
||||
"gpt-4",
|
||||
"openai",
|
||||
"python"
|
||||
],
|
||||
"visibility": "public",
|
||||
"forks": 44376,
|
||||
"open_issues": 189,
|
||||
"watchers": 168203,
|
||||
"default_branch": "master",
|
||||
"custom_properties": {
|
||||
|
||||
}
|
||||
},
|
||||
"organization": {
|
||||
"login": "Significant-Gravitas",
|
||||
"id": 130738209,
|
||||
"node_id": "O_kgDOB8roIQ",
|
||||
"url": "https://api.github.com/orgs/Significant-Gravitas",
|
||||
"repos_url": "https://api.github.com/orgs/Significant-Gravitas/repos",
|
||||
"events_url": "https://api.github.com/orgs/Significant-Gravitas/events",
|
||||
"hooks_url": "https://api.github.com/orgs/Significant-Gravitas/hooks",
|
||||
"issues_url": "https://api.github.com/orgs/Significant-Gravitas/issues",
|
||||
"members_url": "https://api.github.com/orgs/Significant-Gravitas/members{/member}",
|
||||
"public_members_url": "https://api.github.com/orgs/Significant-Gravitas/public_members{/member}",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
|
||||
"description": ""
|
||||
},
|
||||
"enterprise": {
|
||||
"id": 149607,
|
||||
"slug": "significant-gravitas",
|
||||
"name": "Significant Gravitas",
|
||||
"node_id": "E_kgDOAAJIZw",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/b/149607?v=4",
|
||||
"description": "The creators of AutoGPT",
|
||||
"website_url": "discord.gg/autogpt",
|
||||
"html_url": "https://github.com/enterprises/significant-gravitas",
|
||||
"created_at": "2024-04-18T17:43:53Z",
|
||||
"updated_at": "2024-10-23T16:59:55Z"
|
||||
},
|
||||
"sender": {
|
||||
"login": "Pwuts",
|
||||
"id": 12185583,
|
||||
"node_id": "MDQ6VXNlcjEyMTg1NTgz",
|
||||
"avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4",
|
||||
"gravatar_id": "",
|
||||
"url": "https://api.github.com/users/Pwuts",
|
||||
"html_url": "https://github.com/Pwuts",
|
||||
"followers_url": "https://api.github.com/users/Pwuts/followers",
|
||||
"following_url": "https://api.github.com/users/Pwuts/following{/other_user}",
|
||||
"gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}",
|
||||
"starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}",
|
||||
"subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions",
|
||||
"organizations_url": "https://api.github.com/users/Pwuts/orgs",
|
||||
"repos_url": "https://api.github.com/users/Pwuts/repos",
|
||||
"events_url": "https://api.github.com/users/Pwuts/events{/privacy}",
|
||||
"received_events_url": "https://api.github.com/users/Pwuts/received_events",
|
||||
"type": "User",
|
||||
"user_view_type": "public",
|
||||
"site_admin": false
|
||||
}
|
||||
}
|
||||
@@ -1,11 +1,9 @@
from urllib.parse import urlparse

import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -15,10 +13,6 @@ from ._auth import (
)


def is_github_url(url: str) -> bool:
    return urlparse(url).netloc == "github.com"


# --8<-- [start:GithubCommentBlockExample]
class GithubCommentBlock(Block):
    class Input(BlockSchema):
@@ -46,27 +40,15 @@ class GithubCommentBlock(Block):
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubCommentBlock.Input,
|
||||
output_schema=GithubCommentBlock.Output,
|
||||
test_input=[
|
||||
{
|
||||
"issue_url": "https://github.com/owner/repo/issues/1",
|
||||
"comment": "This is a test comment.",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
{
|
||||
"issue_url": "https://github.com/owner/repo/pull/1",
|
||||
"comment": "This is a test comment.",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
],
|
||||
test_input={
|
||||
"issue_url": "https://github.com/owner/repo/issues/1",
|
||||
"comment": "This is a test comment.",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("id", 1337),
|
||||
("url", "https://github.com/owner/repo/issues/1#issuecomment-1337"),
|
||||
("id", 1337),
|
||||
(
|
||||
"url",
|
||||
"https://github.com/owner/repo/issues/1#issuecomment-1337",
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"post_comment": lambda *args, **kwargs: (
|
||||
@@ -80,12 +62,27 @@ class GithubCommentBlock(Block):
    def post_comment(
        credentials: GithubCredentials, issue_url: str, body_text: str
    ) -> tuple[int, str]:
        api = get_api(credentials)
        if "/pull/" in issue_url:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos").replace(
                    "/pull/", "/issues/"
                )
                + "/comments"
            )
        else:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos") + "/comments"
            )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        data = {"body": body_text}
        if "pull" in issue_url:
            issue_url = issue_url.replace("pull", "issues")
        comments_url = issue_url + "/comments"
        response = api.post(comments_url, json=data)

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        comment = response.json()
        return comment["id"], comment["html_url"]
|
||||
@@ -159,10 +156,16 @@ class GithubMakeIssueBlock(Block):
|
||||
def create_issue(
|
||||
credentials: GithubCredentials, repo_url: str, title: str, body: str
|
||||
) -> tuple[int, str]:
|
||||
api = get_api(credentials)
|
||||
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/issues"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"title": title, "body": body}
|
||||
issues_url = repo_url + "/issues"
|
||||
response = api.post(issues_url, json=data)
|
||||
|
||||
response = requests.post(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
issue = response.json()
|
||||
return issue["number"], issue["html_url"]
|
||||
|
||||
@@ -229,12 +232,21 @@ class GithubReadIssueBlock(Block):
|
||||
def read_issue(
|
||||
credentials: GithubCredentials, issue_url: str
|
||||
) -> tuple[str, str, str]:
|
||||
api = get_api(credentials)
|
||||
response = api.get(issue_url)
|
||||
api_url = issue_url.replace("github.com", "api.github.com/repos")
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
title = data.get("title", "No title found")
|
||||
body = data.get("body", "No body content found")
|
||||
user = data.get("user", {}).get("login", "No user found")
|
||||
|
||||
return title, body, user
|
||||
|
||||
def run(
|
||||
@@ -248,12 +260,9 @@ class GithubReadIssueBlock(Block):
|
||||
credentials,
|
||||
input_data.issue_url,
|
||||
)
|
||||
if title:
|
||||
yield "title", title
|
||||
if body:
|
||||
yield "body", body
|
||||
if user:
|
||||
yield "user", user
|
||||
yield "title", title
|
||||
yield "body", body
|
||||
yield "user", user
|
||||
|
||||
|
||||
class GithubListIssuesBlock(Block):
|
||||
@@ -309,13 +318,20 @@ class GithubListIssuesBlock(Block):
|
||||
def list_issues(
|
||||
credentials: GithubCredentials, repo_url: str
|
||||
) -> list[Output.IssueItem]:
|
||||
api = get_api(credentials)
|
||||
issues_url = repo_url + "/issues"
|
||||
response = api.get(issues_url)
|
||||
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/issues"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
issues: list[GithubListIssuesBlock.Output.IssueItem] = [
|
||||
{"title": issue["title"], "url": issue["html_url"]} for issue in data
|
||||
]
|
||||
|
||||
return issues
|
||||
|
||||
def run(
|
||||
@@ -369,10 +385,28 @@ class GithubAddLabelBlock(Block):
|
||||
|
||||
@staticmethod
|
||||
def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
|
||||
api = get_api(credentials)
|
||||
# Convert the provided GitHub URL to the API URL
|
||||
if "/pull/" in issue_url:
|
||||
api_url = (
|
||||
issue_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/issues/"
|
||||
)
|
||||
+ "/labels"
|
||||
)
|
||||
else:
|
||||
api_url = (
|
||||
issue_url.replace("github.com", "api.github.com/repos") + "/labels"
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"labels": [label]}
|
||||
labels_url = issue_url + "/labels"
|
||||
api.post(labels_url, json=data)
|
||||
|
||||
response = requests.post(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Label added successfully"
|
||||
|
||||
def run(
|
||||
@@ -429,9 +463,31 @@ class GithubRemoveLabelBlock(Block):
|
||||
|
||||
@staticmethod
|
||||
def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
|
||||
api = get_api(credentials)
|
||||
label_url = issue_url + f"/labels/{label}"
|
||||
api.delete(label_url)
|
||||
# Convert the provided GitHub URL to the API URL
|
||||
if "/pull/" in issue_url:
|
||||
api_url = (
|
||||
issue_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/issues/"
|
||||
)
|
||||
+ f"/labels/{label}"
|
||||
)
|
||||
else:
|
||||
api_url = (
|
||||
issue_url.replace("github.com", "api.github.com/repos")
|
||||
+ f"/labels/{label}"
|
||||
)
|
||||
|
||||
# Log the constructed API URL for debugging
|
||||
print(f"Constructed API URL: {api_url}")
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.delete(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Label removed successfully"
|
||||
|
||||
def run(
|
||||
@@ -494,10 +550,23 @@ class GithubAssignIssueBlock(Block):
|
||||
issue_url: str,
|
||||
assignee: str,
|
||||
) -> str:
|
||||
api = get_api(credentials)
|
||||
assignees_url = issue_url + "/assignees"
|
||||
# Extracting repo path and issue number from the issue URL
|
||||
repo_path, issue_number = issue_url.replace("https://github.com/", "").split(
|
||||
"/issues/"
|
||||
)
|
||||
api_url = (
|
||||
f"https://api.github.com/repos/{repo_path}/issues/{issue_number}/assignees"
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"assignees": [assignee]}
|
||||
api.post(assignees_url, json=data)
|
||||
|
||||
response = requests.post(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Issue assigned successfully"
|
||||
|
||||
def run(
|
||||
@@ -560,10 +629,23 @@ class GithubUnassignIssueBlock(Block):
|
||||
issue_url: str,
|
||||
assignee: str,
|
||||
) -> str:
|
||||
api = get_api(credentials)
|
||||
assignees_url = issue_url + "/assignees"
|
||||
# Extracting repo path and issue number from the issue URL
|
||||
repo_path, issue_number = issue_url.replace("https://github.com/", "").split(
|
||||
"/issues/"
|
||||
)
|
||||
api_url = (
|
||||
f"https://api.github.com/repos/{repo_path}/issues/{issue_number}/assignees"
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"assignees": [assignee]}
|
||||
api.delete(assignees_url, json=data)
|
||||
|
||||
response = requests.delete(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Issue unassigned successfully"
|
||||
|
||||
def run(
|
||||
|
||||
@@ -1,11 +1,9 @@
import re

import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -66,13 +64,20 @@ class GithubListPullRequestsBlock(Block):
|
||||
|
||||
@staticmethod
|
||||
def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]:
|
||||
api = get_api(credentials)
|
||||
pulls_url = repo_url + "/pulls"
|
||||
response = api.get(pulls_url)
|
||||
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/pulls"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [
|
||||
{"title": pr["title"], "url": pr["html_url"]} for pr in data
|
||||
]
|
||||
|
||||
return pull_requests
|
||||
|
||||
def run(
|
||||
@@ -105,11 +110,7 @@ class GithubMakePullRequestBlock(Block):
|
||||
placeholder="Enter the pull request body",
|
||||
)
|
||||
head: str = SchemaField(
|
||||
description=(
|
||||
"The name of the branch where your changes are implemented. "
|
||||
"For cross-repository pull requests in the same network, "
|
||||
"namespace head with a user like this: username:branch."
|
||||
),
|
||||
description="The name of the branch where your changes are implemented. For cross-repository pull requests in the same network, namespace head with a user like this: username:branch.",
|
||||
placeholder="Enter the head branch",
|
||||
)
|
||||
base: str = SchemaField(
|
||||
@@ -161,10 +162,17 @@ class GithubMakePullRequestBlock(Block):
|
||||
head: str,
|
||||
base: str,
|
||||
) -> tuple[int, str]:
|
||||
api = get_api(credentials)
|
||||
pulls_url = repo_url + "/pulls"
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
api_url = f"https://api.github.com/repos/{repo_path}/pulls"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"title": title, "body": body, "head": head, "base": base}
|
||||
response = api.post(pulls_url, json=data)
|
||||
|
||||
response = requests.post(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
pr_data = response.json()
|
||||
return pr_data["number"], pr_data["html_url"]
|
||||
|
||||
@@ -186,8 +194,13 @@ class GithubMakePullRequestBlock(Block):
|
||||
)
|
||||
yield "number", number
|
||||
yield "url", url
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
except requests.exceptions.HTTPError as http_err:
|
||||
if http_err.response.status_code == 422:
|
||||
error_details = http_err.response.json()
|
||||
error_message = error_details.get("message", "Unknown error")
|
||||
else:
|
||||
error_message = str(http_err)
|
||||
raise RuntimeError(f"Failed to create pull request: {error_message}")
|
||||
|
||||
|
||||
class GithubReadPullRequestBlock(Block):
|
||||
@@ -200,7 +213,6 @@ class GithubReadPullRequestBlock(Block):
|
||||
include_pr_changes: bool = SchemaField(
|
||||
description="Whether to include the changes made in the pull request",
|
||||
default=False,
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
@@ -243,21 +255,42 @@ class GithubReadPullRequestBlock(Block):
|
||||
|
||||
@staticmethod
|
||||
def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]:
|
||||
api = get_api(credentials)
|
||||
# Adjust the URL to access the issue endpoint for PR metadata
|
||||
issue_url = pr_url.replace("/pull/", "/issues/")
|
||||
response = api.get(issue_url)
|
||||
api_url = pr_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/issues/"
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
title = data.get("title", "No title found")
|
||||
body = data.get("body", "No body content found")
|
||||
author = data.get("user", {}).get("login", "No user found")
|
||||
|
||||
return title, body, author
|
||||
|
||||
@staticmethod
|
||||
def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
|
||||
api = get_api(credentials)
|
||||
files_url = prepare_pr_api_url(pr_url=pr_url, path="files")
|
||||
response = api.get(files_url)
|
||||
api_url = (
|
||||
pr_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/pulls/"
|
||||
)
|
||||
+ "/files"
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
files = response.json()
|
||||
changes = []
|
||||
for file in files:
|
||||
@@ -265,6 +298,7 @@ class GithubReadPullRequestBlock(Block):
|
||||
patch = file.get("patch")
|
||||
if filename and patch:
|
||||
changes.append(f"File: {filename}\n{patch}")
|
||||
|
||||
return "\n\n".join(changes)
|
||||
|
||||
def run(
|
||||
@@ -333,10 +367,23 @@ class GithubAssignPRReviewerBlock(Block):
|
||||
def assign_reviewer(
|
||||
credentials: GithubCredentials, pr_url: str, reviewer: str
|
||||
) -> str:
|
||||
api = get_api(credentials)
|
||||
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
|
||||
# Convert the PR URL to the appropriate API endpoint
|
||||
api_url = (
|
||||
pr_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/pulls/"
|
||||
)
|
||||
+ "/requested_reviewers"
|
||||
)
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"reviewers": [reviewer]}
|
||||
api.post(reviewers_url, json=data)
|
||||
|
||||
response = requests.post(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Reviewer assigned successfully"
|
||||
|
||||
def run(
|
||||
@@ -353,8 +400,17 @@ class GithubAssignPRReviewerBlock(Block):
|
||||
input_data.reviewer,
|
||||
)
|
||||
yield "status", status
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
except requests.exceptions.HTTPError as http_err:
|
||||
if http_err.response.status_code == 422:
|
||||
error_msg = (
|
||||
"Failed to assign reviewer: "
|
||||
f"The reviewer '{input_data.reviewer}' may not have permission "
|
||||
"or the pull request is not in a valid state. "
|
||||
f"Detailed error: {http_err.response.text}"
|
||||
)
|
||||
else:
|
||||
error_msg = f"HTTP error: {http_err} - {http_err.response.text}"
|
||||
raise RuntimeError(error_msg)
|
||||
|
||||
|
||||
class GithubUnassignPRReviewerBlock(Block):
|
||||
@@ -400,10 +456,21 @@ class GithubUnassignPRReviewerBlock(Block):
|
||||
def unassign_reviewer(
|
||||
credentials: GithubCredentials, pr_url: str, reviewer: str
|
||||
) -> str:
|
||||
api = get_api(credentials)
|
||||
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
|
||||
api_url = (
|
||||
pr_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/pulls/"
|
||||
)
|
||||
+ "/requested_reviewers"
|
||||
)
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
data = {"reviewers": [reviewer]}
|
||||
api.delete(reviewers_url, json=data)
|
||||
|
||||
response = requests.delete(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Reviewer unassigned successfully"
|
||||
|
||||
def run(
|
||||
@@ -413,15 +480,12 @@ class GithubUnassignPRReviewerBlock(Block):
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
status = self.unassign_reviewer(
|
||||
credentials,
|
||||
input_data.pr_url,
|
||||
input_data.reviewer,
|
||||
)
|
||||
yield "status", status
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
status = self.unassign_reviewer(
|
||||
credentials,
|
||||
input_data.pr_url,
|
||||
input_data.reviewer,
|
||||
)
|
||||
yield "status", status
|
||||
|
||||
|
||||
class GithubListPRReviewersBlock(Block):
|
||||
@@ -480,14 +544,26 @@ class GithubListPRReviewersBlock(Block):
|
||||
def list_reviewers(
|
||||
credentials: GithubCredentials, pr_url: str
|
||||
) -> list[Output.ReviewerItem]:
|
||||
api = get_api(credentials)
|
||||
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
|
||||
response = api.get(reviewers_url)
|
||||
api_url = (
|
||||
pr_url.replace("github.com", "api.github.com/repos").replace(
|
||||
"/pull/", "/pulls/"
|
||||
)
|
||||
+ "/requested_reviewers"
|
||||
)
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [
|
||||
{"username": reviewer["login"], "url": reviewer["html_url"]}
|
||||
for reviewer in data.get("users", [])
|
||||
]
|
||||
|
||||
return reviewers
|
||||
|
||||
def run(
|
||||
@@ -502,14 +578,3 @@ class GithubListPRReviewersBlock(Block):
            input_data.pr_url,
        )
        yield from (("reviewer", reviewer) for reviewer in reviewers)


def prepare_pr_api_url(pr_url: str, path: str) -> str:
    # Pattern to capture the base repository URL and the pull request number
    pattern = r"^(?:https?://)?([^/]+/[^/]+/[^/]+)/pull/(\d+)"
    match = re.match(pattern, pr_url)
    if not match:
        return pr_url

    base_url, pr_number = match.groups()
    return f"{base_url}/pulls/{pr_number}/{path}"
@@ -1,11 +1,11 @@
import base64

import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -68,11 +68,17 @@ class GithubListTagsBlock(Block):
|
||||
def list_tags(
|
||||
credentials: GithubCredentials, repo_url: str
|
||||
) -> list[Output.TagItem]:
|
||||
api = get_api(credentials)
|
||||
tags_url = repo_url + "/tags"
|
||||
response = api.get(tags_url)
|
||||
data = response.json()
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
api_url = f"https://api.github.com/repos/{repo_path}/tags"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
tags: list[GithubListTagsBlock.Output.TagItem] = [
|
||||
{
|
||||
"name": tag["name"],
|
||||
@@ -80,6 +86,7 @@ class GithubListTagsBlock(Block):
|
||||
}
|
||||
for tag in data
|
||||
]
|
||||
|
||||
return tags
|
||||
|
||||
def run(
|
||||
@@ -150,18 +157,20 @@ class GithubListBranchesBlock(Block):
|
||||
def list_branches(
|
||||
credentials: GithubCredentials, repo_url: str
|
||||
) -> list[Output.BranchItem]:
|
||||
api = get_api(credentials)
|
||||
branches_url = repo_url + "/branches"
|
||||
response = api.get(branches_url)
|
||||
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/branches"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
branches: list[GithubListBranchesBlock.Output.BranchItem] = [
|
||||
{
|
||||
"name": branch["name"],
|
||||
"url": f"https://github.com/{repo_path}/tree/{branch['name']}",
|
||||
}
|
||||
for branch in data
|
||||
{"name": branch["name"], "url": branch["commit"]["url"]} for branch in data
|
||||
]
|
||||
|
||||
return branches
|
||||
|
||||
def run(
|
||||
@@ -237,8 +246,6 @@ class GithubListDiscussionsBlock(Block):
|
||||
def list_discussions(
|
||||
credentials: GithubCredentials, repo_url: str, num_discussions: int
|
||||
) -> list[Output.DiscussionItem]:
|
||||
api = get_api(credentials)
|
||||
# GitHub GraphQL API endpoint is different; we'll use api.post with custom URL
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
owner, repo = repo_path.split("/")
|
||||
query = """
|
||||
@@ -254,15 +261,24 @@ class GithubListDiscussionsBlock(Block):
|
||||
}
|
||||
"""
|
||||
variables = {"owner": owner, "repo": repo, "num": num_discussions}
|
||||
response = api.post(
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
"https://api.github.com/graphql",
|
||||
json={"query": query, "variables": variables},
|
||||
headers=headers,
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
discussions: list[GithubListDiscussionsBlock.Output.DiscussionItem] = [
|
||||
{"title": discussion["title"], "url": discussion["url"]}
|
||||
for discussion in data["data"]["repository"]["discussions"]["nodes"]
|
||||
]
|
||||
|
||||
return discussions
|
||||
|
||||
def run(
|
||||
@@ -332,13 +348,21 @@ class GithubListReleasesBlock(Block):
|
||||
def list_releases(
|
||||
credentials: GithubCredentials, repo_url: str
|
||||
) -> list[Output.ReleaseItem]:
|
||||
api = get_api(credentials)
|
||||
releases_url = repo_url + "/releases"
|
||||
response = api.get(releases_url)
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
api_url = f"https://api.github.com/repos/{repo_path}/releases"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [
|
||||
{"name": release["name"], "url": release["html_url"]} for release in data
|
||||
]
|
||||
|
||||
return releases
|
||||
|
||||
def run(
|
||||
@@ -408,9 +432,16 @@ class GithubReadFileBlock(Block):
|
||||
def read_file(
|
||||
credentials: GithubCredentials, repo_url: str, file_path: str, branch: str
|
||||
) -> tuple[str, int]:
|
||||
api = get_api(credentials)
|
||||
content_url = repo_url + f"/contents/{file_path}?ref={branch}"
|
||||
response = api.get(content_url)
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
api_url = f"https://api.github.com/repos/{repo_path}/contents/{file_path}?ref={branch}"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
content = response.json()
|
||||
|
||||
if isinstance(content, list):
|
||||
@@ -518,33 +549,46 @@ class GithubReadFolderBlock(Block):
|
||||
def read_folder(
|
||||
credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str
|
||||
) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]:
|
||||
api = get_api(credentials)
|
||||
contents_url = repo_url + f"/contents/{folder_path}?ref={branch}"
|
||||
response = api.get(contents_url)
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
api_url = f"https://api.github.com/repos/{repo_path}/contents/{folder_path}?ref={branch}"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
content = response.json()
|
||||
|
||||
if not isinstance(content, list):
|
||||
if isinstance(content, list):
|
||||
# Multiple entries of different types exist at this path
|
||||
if not (dir := next((d for d in content if d["type"] == "dir"), None)):
|
||||
raise TypeError("Not a folder")
|
||||
content = dir
|
||||
|
||||
if content["type"] != "dir":
|
||||
raise TypeError("Not a folder")
|
||||
|
||||
files = [
|
||||
GithubReadFolderBlock.Output.FileEntry(
|
||||
name=entry["name"],
|
||||
path=entry["path"],
|
||||
size=entry["size"],
|
||||
)
|
||||
for entry in content
|
||||
if entry["type"] == "file"
|
||||
]
|
||||
dirs = [
|
||||
GithubReadFolderBlock.Output.DirEntry(
|
||||
name=entry["name"],
|
||||
path=entry["path"],
|
||||
)
|
||||
for entry in content
|
||||
if entry["type"] == "dir"
|
||||
]
|
||||
|
||||
return files, dirs
|
||||
return (
|
||||
[
|
||||
GithubReadFolderBlock.Output.FileEntry(
|
||||
name=entry["name"],
|
||||
path=entry["path"],
|
||||
size=entry["size"],
|
||||
)
|
||||
for entry in content["entries"]
|
||||
if entry["type"] == "file"
|
||||
],
|
||||
[
|
||||
GithubReadFolderBlock.Output.DirEntry(
|
||||
name=entry["name"],
|
||||
path=entry["path"],
|
||||
)
|
||||
for entry in content["entries"]
|
||||
if entry["type"] == "dir"
|
||||
],
|
||||
)
|
||||
|
||||
def run(
|
||||
self,
|
||||
@@ -612,16 +656,26 @@ class GithubMakeBranchBlock(Block):
|
||||
new_branch: str,
|
||||
source_branch: str,
|
||||
) -> str:
|
||||
api = get_api(credentials)
|
||||
# Get the SHA of the source branch
|
||||
ref_url = repo_url + f"/git/refs/heads/{source_branch}"
|
||||
response = api.get(ref_url)
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
ref_api_url = (
|
||||
f"https://api.github.com/repos/{repo_path}/git/refs/heads/{source_branch}"
|
||||
)
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(ref_api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
sha = response.json()["object"]["sha"]
|
||||
|
||||
# Create the new branch
|
||||
create_ref_url = repo_url + "/git/refs"
|
||||
create_branch_api_url = f"https://api.github.com/repos/{repo_path}/git/refs"
|
||||
data = {"ref": f"refs/heads/{new_branch}", "sha": sha}
|
||||
response = api.post(create_ref_url, json=data)
|
||||
|
||||
response = requests.post(create_branch_api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Branch created successfully"
|
||||
|
||||
def run(
|
||||
@@ -681,9 +735,16 @@ class GithubDeleteBranchBlock(Block):
|
||||
def delete_branch(
|
||||
credentials: GithubCredentials, repo_url: str, branch: str
|
||||
) -> str:
|
||||
api = get_api(credentials)
|
||||
ref_url = repo_url + f"/git/refs/heads/{branch}"
|
||||
api.delete(ref_url)
|
||||
repo_path = repo_url.replace("https://github.com/", "")
|
||||
api_url = f"https://api.github.com/repos/{repo_path}/git/refs/heads/{branch}"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.delete(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
return "Branch deleted successfully"
|
||||
|
||||
def run(
|
||||
@@ -699,420 +760,3 @@ class GithubDeleteBranchBlock(Block):
|
||||
input_data.branch,
|
||||
)
|
||||
yield "status", status
|
||||
|
||||
|
||||
class GithubCreateFileBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
|
||||
repo_url: str = SchemaField(
|
||||
description="URL of the GitHub repository",
|
||||
placeholder="https://github.com/owner/repo",
|
||||
)
|
||||
file_path: str = SchemaField(
|
||||
description="Path where the file should be created",
|
||||
placeholder="path/to/file.txt",
|
||||
)
|
||||
content: str = SchemaField(
|
||||
description="Content to write to the file",
|
||||
placeholder="File content here",
|
||||
)
|
||||
branch: str = SchemaField(
|
||||
description="Branch where the file should be created",
|
||||
default="main",
|
||||
)
|
||||
commit_message: str = SchemaField(
|
||||
description="Message for the commit",
|
||||
default="Create new file",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
url: str = SchemaField(description="URL of the created file")
|
||||
sha: str = SchemaField(description="SHA of the commit")
|
||||
error: str = SchemaField(
|
||||
description="Error message if the file creation failed"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="8fd132ac-b917-428a-8159-d62893e8a3fe",
|
||||
description="This block creates a new file in a GitHub repository.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubCreateFileBlock.Input,
|
||||
output_schema=GithubCreateFileBlock.Output,
|
||||
test_input={
|
||||
"repo_url": "https://github.com/owner/repo",
|
||||
"file_path": "test/file.txt",
|
||||
"content": "Test content",
|
||||
"branch": "main",
|
||||
"commit_message": "Create test file",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("url", "https://github.com/owner/repo/blob/main/test/file.txt"),
|
||||
("sha", "abc123"),
|
||||
],
|
||||
test_mock={
|
||||
"create_file": lambda *args, **kwargs: (
|
||||
"https://github.com/owner/repo/blob/main/test/file.txt",
|
||||
"abc123",
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def create_file(
|
||||
credentials: GithubCredentials,
|
||||
repo_url: str,
|
||||
file_path: str,
|
||||
content: str,
|
||||
branch: str,
|
||||
commit_message: str,
|
||||
) -> tuple[str, str]:
|
||||
api = get_api(credentials)
|
||||
# Convert content to base64
|
||||
content_bytes = content.encode("utf-8")
|
||||
content_base64 = base64.b64encode(content_bytes).decode("utf-8")
|
||||
|
||||
# Create the file using the GitHub API
|
||||
contents_url = f"{repo_url}/contents/{file_path}"
|
||||
data = {
|
||||
"message": commit_message,
|
||||
"content": content_base64,
|
||||
"branch": branch,
|
||||
}
|
||||
response = api.put(contents_url, json=data)
|
||||
result = response.json()
|
||||
|
||||
return result["content"]["html_url"], result["commit"]["sha"]
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
url, sha = self.create_file(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
input_data.file_path,
|
||||
input_data.content,
|
||||
input_data.branch,
|
||||
input_data.commit_message,
|
||||
)
|
||||
yield "url", url
|
||||
yield "sha", sha
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class GithubUpdateFileBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
|
||||
repo_url: str = SchemaField(
|
||||
description="URL of the GitHub repository",
|
||||
placeholder="https://github.com/owner/repo",
|
||||
)
|
||||
file_path: str = SchemaField(
|
||||
description="Path to the file to update",
|
||||
placeholder="path/to/file.txt",
|
||||
)
|
||||
content: str = SchemaField(
|
||||
description="New content for the file",
|
||||
placeholder="Updated content here",
|
||||
)
|
||||
branch: str = SchemaField(
|
||||
description="Branch containing the file",
|
||||
default="main",
|
||||
)
|
||||
commit_message: str = SchemaField(
|
||||
description="Message for the commit",
|
||||
default="Update file",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
url: str = SchemaField(description="URL of the updated file")
|
||||
sha: str = SchemaField(description="SHA of the commit")
|
||||
error: str = SchemaField(description="Error message if the file update failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="30be12a4-57cb-4aa4-baf5-fcc68d136076",
|
||||
description="This block updates an existing file in a GitHub repository.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubUpdateFileBlock.Input,
|
||||
output_schema=GithubUpdateFileBlock.Output,
|
||||
test_input={
|
||||
"repo_url": "https://github.com/owner/repo",
|
||||
"file_path": "test/file.txt",
|
||||
"content": "Updated content",
|
||||
"branch": "main",
|
||||
"commit_message": "Update test file",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("url", "https://github.com/owner/repo/blob/main/test/file.txt"),
|
||||
("sha", "def456"),
|
||||
],
|
||||
test_mock={
|
||||
"update_file": lambda *args, **kwargs: (
|
||||
"https://github.com/owner/repo/blob/main/test/file.txt",
|
||||
"def456",
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def update_file(
|
||||
credentials: GithubCredentials,
|
||||
repo_url: str,
|
||||
file_path: str,
|
||||
content: str,
|
||||
branch: str,
|
||||
commit_message: str,
|
||||
) -> tuple[str, str]:
|
||||
api = get_api(credentials)
|
||||
|
||||
# First get the current file to get its SHA
|
||||
contents_url = f"{repo_url}/contents/{file_path}"
|
||||
params = {"ref": branch}
|
||||
response = api.get(contents_url, params=params)
|
||||
current_file = response.json()
|
||||
|
||||
# Convert new content to base64
|
||||
content_bytes = content.encode("utf-8")
|
||||
content_base64 = base64.b64encode(content_bytes).decode("utf-8")
|
||||
|
||||
# Update the file
|
||||
data = {
|
||||
"message": commit_message,
|
||||
"content": content_base64,
|
||||
"sha": current_file["sha"],
|
||||
"branch": branch,
|
||||
}
|
||||
response = api.put(contents_url, json=data)
|
||||
result = response.json()
|
||||
|
||||
return result["content"]["html_url"], result["commit"]["sha"]
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
url, sha = self.update_file(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
input_data.file_path,
|
||||
input_data.content,
|
||||
input_data.branch,
|
||||
input_data.commit_message,
|
||||
)
|
||||
yield "url", url
|
||||
yield "sha", sha
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class GithubCreateRepositoryBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
|
||||
name: str = SchemaField(
|
||||
description="Name of the repository to create",
|
||||
placeholder="my-new-repo",
|
||||
)
|
||||
description: str = SchemaField(
|
||||
description="Description of the repository",
|
||||
placeholder="A description of the repository",
|
||||
default="",
|
||||
)
|
||||
private: bool = SchemaField(
|
||||
description="Whether the repository should be private",
|
||||
default=False,
|
||||
)
|
||||
auto_init: bool = SchemaField(
|
||||
description="Whether to initialize the repository with a README",
|
||||
default=True,
|
||||
)
|
||||
gitignore_template: str = SchemaField(
|
||||
description="Git ignore template to use (e.g., Python, Node, Java)",
|
||||
default="",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
url: str = SchemaField(description="URL of the created repository")
|
||||
clone_url: str = SchemaField(description="Git clone URL of the repository")
|
||||
error: str = SchemaField(
|
||||
description="Error message if the repository creation failed"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="029ec3b8-1cfd-46d3-b6aa-28e4a706efd1",
|
||||
description="This block creates a new GitHub repository.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubCreateRepositoryBlock.Input,
|
||||
output_schema=GithubCreateRepositoryBlock.Output,
|
||||
test_input={
|
||||
"name": "test-repo",
|
||||
"description": "A test repository",
|
||||
"private": False,
|
||||
"auto_init": True,
|
||||
"gitignore_template": "Python",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("url", "https://github.com/owner/test-repo"),
|
||||
("clone_url", "https://github.com/owner/test-repo.git"),
|
||||
],
|
||||
test_mock={
|
||||
"create_repository": lambda *args, **kwargs: (
|
||||
"https://github.com/owner/test-repo",
|
||||
"https://github.com/owner/test-repo.git",
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def create_repository(
|
||||
credentials: GithubCredentials,
|
||||
name: str,
|
||||
description: str,
|
||||
private: bool,
|
||||
auto_init: bool,
|
||||
gitignore_template: str,
|
||||
) -> tuple[str, str]:
|
||||
api = get_api(credentials, convert_urls=False) # Disable URL conversion
|
||||
data = {
|
||||
"name": name,
|
||||
"description": description,
|
||||
"private": private,
|
||||
"auto_init": auto_init,
|
||||
}
|
||||
|
||||
if gitignore_template:
|
||||
data["gitignore_template"] = gitignore_template
|
||||
|
||||
# Create repository using the user endpoint
|
||||
response = api.post("https://api.github.com/user/repos", json=data)
|
||||
result = response.json()
|
||||
|
||||
return result["html_url"], result["clone_url"]
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
url, clone_url = self.create_repository(
|
||||
credentials,
|
||||
input_data.name,
|
||||
input_data.description,
|
||||
input_data.private,
|
||||
input_data.auto_init,
|
||||
input_data.gitignore_template,
|
||||
)
|
||||
yield "url", url
|
||||
yield "clone_url", clone_url
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
class GithubListStargazersBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
|
||||
repo_url: str = SchemaField(
|
||||
description="URL of the GitHub repository",
|
||||
placeholder="https://github.com/owner/repo",
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
class StargazerItem(TypedDict):
|
||||
username: str
|
||||
url: str
|
||||
|
||||
stargazer: StargazerItem = SchemaField(
|
||||
title="Stargazer",
|
||||
description="Stargazers with their username and profile URL",
|
||||
)
|
||||
error: str = SchemaField(
|
||||
description="Error message if listing stargazers failed"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="a4b9c2d1-e5f6-4g7h-8i9j-0k1l2m3n4o5p", # Generated unique UUID
|
||||
description="This block lists all users who have starred a specified GitHub repository.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubListStargazersBlock.Input,
|
||||
output_schema=GithubListStargazersBlock.Output,
|
||||
test_input={
|
||||
"repo_url": "https://github.com/owner/repo",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
(
|
||||
"stargazer",
|
||||
{
|
||||
"username": "octocat",
|
||||
"url": "https://github.com/octocat",
|
||||
},
|
||||
)
|
||||
],
|
||||
test_mock={
|
||||
"list_stargazers": lambda *args, **kwargs: [
|
||||
{
|
||||
"username": "octocat",
|
||||
"url": "https://github.com/octocat",
|
||||
}
|
||||
]
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def list_stargazers(
|
||||
credentials: GithubCredentials, repo_url: str
|
||||
) -> list[Output.StargazerItem]:
|
||||
api = get_api(credentials)
|
||||
# Add /stargazers to the repo URL to get stargazers endpoint
|
||||
stargazers_url = f"{repo_url}/stargazers"
|
||||
# Set accept header to get starred_at timestamp
|
||||
headers = {"Accept": "application/vnd.github.star+json"}
|
||||
response = api.get(stargazers_url, headers=headers)
|
||||
data = response.json()
|
||||
|
||||
stargazers: list[GithubListStargazersBlock.Output.StargazerItem] = [
|
||||
{
|
||||
"username": stargazer["login"],
|
||||
"url": stargazer["html_url"],
|
||||
}
|
||||
for stargazer in data
|
||||
]
|
||||
return stargazers
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
stargazers = self.list_stargazers(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
)
|
||||
yield from (("stargazer", stargazer) for stargazer in stargazers)
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
|
||||
@@ -1,180 +0,0 @@
from enum import Enum
from typing import Optional

from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GithubFineGrainedAPICredentials,
    GithubFineGrainedAPICredentialsField,
    GithubFineGrainedAPICredentialsInput,
)


class StatusState(Enum):
    ERROR = "error"
    FAILURE = "failure"
    PENDING = "pending"
    SUCCESS = "success"
|
||||
|
||||
class GithubCreateStatusBlock(Block):
|
||||
"""Block for creating a commit status on a GitHub repository."""
|
||||
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubFineGrainedAPICredentialsInput = (
|
||||
GithubFineGrainedAPICredentialsField("repo:status")
|
||||
)
|
||||
repo_url: str = SchemaField(
|
||||
description="URL of the GitHub repository",
|
||||
placeholder="https://github.com/owner/repo",
|
||||
)
|
||||
sha: str = SchemaField(
|
||||
description="The SHA of the commit to set status for",
|
||||
)
|
||||
state: StatusState = SchemaField(
|
||||
description="The state of the status (error, failure, pending, success)",
|
||||
)
|
||||
target_url: Optional[str] = SchemaField(
|
||||
description="URL with additional details about this status",
|
||||
default=None,
|
||||
)
|
||||
description: Optional[str] = SchemaField(
|
||||
description="Short description of the status",
|
||||
default=None,
|
||||
)
|
||||
check_name: Optional[str] = SchemaField(
|
||||
description="Label to differentiate this status from others",
|
||||
default="AutoGPT Platform Checks",
|
||||
advanced=False,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
class StatusResult(BaseModel):
|
||||
id: int
|
||||
url: str
|
||||
state: str
|
||||
context: str
|
||||
description: Optional[str]
|
||||
target_url: Optional[str]
|
||||
created_at: str
|
||||
updated_at: str
|
||||
|
||||
status: StatusResult = SchemaField(description="Details of the created status")
|
||||
error: str = SchemaField(description="Error message if status creation failed")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="3d67f123-a4b5-4c89-9d01-2e34f5c67890", # Generated UUID
|
||||
description="Creates a new commit status in a GitHub repository",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS},
|
||||
input_schema=GithubCreateStatusBlock.Input,
|
||||
output_schema=GithubCreateStatusBlock.Output,
|
||||
test_input={
|
||||
"repo_url": "https://github.com/owner/repo",
|
||||
"sha": "ce587453ced02b1526dfb4cb910479d431683101",
|
||||
"state": StatusState.SUCCESS.value,
|
||||
"target_url": "https://example.com/build/status",
|
||||
"description": "The build succeeded!",
|
||||
"check_name": "continuous-integration/jenkins",
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
(
|
||||
"status",
|
||||
{
|
||||
"id": 1234567890,
|
||||
"url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101",
|
||||
"state": "success",
|
||||
"context": "continuous-integration/jenkins",
|
||||
"description": "The build succeeded!",
|
||||
"target_url": "https://example.com/build/status",
|
||||
"created_at": "2024-01-21T10:00:00Z",
|
||||
"updated_at": "2024-01-21T10:00:00Z",
|
||||
},
|
||||
),
|
||||
],
|
||||
test_mock={
|
||||
"create_status": lambda *args, **kwargs: {
|
||||
"id": 1234567890,
|
||||
"url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101",
|
||||
"state": "success",
|
||||
"context": "continuous-integration/jenkins",
|
||||
"description": "The build succeeded!",
|
||||
"target_url": "https://example.com/build/status",
|
||||
"created_at": "2024-01-21T10:00:00Z",
|
||||
"updated_at": "2024-01-21T10:00:00Z",
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def create_status(
|
||||
credentials: GithubFineGrainedAPICredentials,
|
||||
repo_url: str,
|
||||
sha: str,
|
||||
state: StatusState,
|
||||
target_url: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
context: str = "default",
|
||||
) -> dict:
|
||||
api = get_api(credentials)
|
||||
|
||||
class StatusData(BaseModel):
|
||||
state: str
|
||||
target_url: Optional[str] = None
|
||||
description: Optional[str] = None
|
||||
context: str
|
||||
|
||||
data = StatusData(
|
||||
state=state.value,
|
||||
context=context,
|
||||
)
|
||||
|
||||
if target_url:
|
||||
data.target_url = target_url
|
||||
|
||||
if description:
|
||||
data.description = description
|
||||
|
||||
status_url = f"{repo_url}/statuses/{sha}"
|
||||
response = api.post(status_url, json=data)
|
||||
result = response.json()
|
||||
|
||||
return {
|
||||
"id": result["id"],
|
||||
"url": result["url"],
|
||||
"state": result["state"],
|
||||
"context": result["context"],
|
||||
"description": result.get("description"),
|
||||
"target_url": result.get("target_url"),
|
||||
"created_at": result["created_at"],
|
||||
"updated_at": result["updated_at"],
|
||||
}
|
||||
|
||||
def run(
|
||||
self,
|
||||
input_data: Input,
|
||||
*,
|
||||
credentials: GithubFineGrainedAPICredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
result = self.create_status(
|
||||
credentials=credentials,
|
||||
repo_url=input_data.repo_url,
|
||||
sha=input_data.sha,
|
||||
state=input_data.state,
|
||||
target_url=input_data.target_url,
|
||||
description=input_data.description,
|
||||
context=input_data.check_name or "AutoGPT Platform Checks",
|
||||
)
|
||||
yield "status", result
|
||||
except Exception as e:
|
||||
yield "error", str(e)
|
||||
@@ -1,158 +0,0 @@
import json
import logging
from pathlib import Path

from pydantic import BaseModel

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchema,
    BlockWebhookConfig,
)
from backend.data.model import SchemaField

from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GithubCredentialsField,
    GithubCredentialsInput,
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# --8<-- [start:GithubTriggerExample]
|
||||
class GitHubTriggerBase:
|
||||
class Input(BlockSchema):
|
||||
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
|
||||
repo: str = SchemaField(
|
||||
description=(
|
||||
"Repository to subscribe to.\n\n"
|
||||
"**Note:** Make sure your GitHub credentials have permissions "
|
||||
"to create webhooks on this repo."
|
||||
),
|
||||
placeholder="{owner}/{repo}",
|
||||
)
|
||||
# --8<-- [start:example-payload-field]
|
||||
payload: dict = SchemaField(hidden=True, default={})
|
||||
# --8<-- [end:example-payload-field]
|
||||
|
||||
class Output(BlockSchema):
|
||||
payload: dict = SchemaField(
|
||||
description="The complete webhook payload that was received from GitHub. "
|
||||
"Includes information about the affected resource (e.g. pull request), "
|
||||
"the event, and the user who triggered the event."
|
||||
)
|
||||
triggered_by_user: dict = SchemaField(
|
||||
description="Object representing the GitHub user who triggered the event"
|
||||
)
|
||||
error: str = SchemaField(
|
||||
description="Error message if the payload could not be processed"
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
yield "payload", input_data.payload
|
||||
yield "triggered_by_user", input_data.payload["sender"]
|
||||
|
||||
|
||||
class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
|
||||
EXAMPLE_PAYLOAD_FILE = (
|
||||
Path(__file__).parent / "example_payloads" / "pull_request.synchronize.json"
|
||||
)
|
||||
|
||||
# --8<-- [start:example-event-filter]
|
||||
class Input(GitHubTriggerBase.Input):
|
||||
class EventsFilter(BaseModel):
|
||||
"""
|
||||
https://docs.github.com/en/webhooks/webhook-events-and-payloads#pull_request
|
||||
"""
|
||||
|
||||
opened: bool = False
|
||||
edited: bool = False
|
||||
closed: bool = False
|
||||
reopened: bool = False
|
||||
synchronize: bool = False
|
||||
assigned: bool = False
|
||||
unassigned: bool = False
|
||||
labeled: bool = False
|
||||
unlabeled: bool = False
|
||||
converted_to_draft: bool = False
|
||||
locked: bool = False
|
||||
unlocked: bool = False
|
||||
enqueued: bool = False
|
||||
dequeued: bool = False
|
||||
milestoned: bool = False
|
||||
demilestoned: bool = False
|
||||
ready_for_review: bool = False
|
||||
review_requested: bool = False
|
||||
review_request_removed: bool = False
|
||||
auto_merge_enabled: bool = False
|
||||
auto_merge_disabled: bool = False
|
||||
|
||||
events: EventsFilter = SchemaField(
|
||||
title="Events", description="The events to subscribe to"
|
||||
)
|
||||
# --8<-- [end:example-event-filter]
|
||||
|
||||
class Output(GitHubTriggerBase.Output):
|
||||
event: str = SchemaField(
|
||||
description="The PR event that triggered the webhook (e.g. 'opened')"
|
||||
)
|
||||
number: int = SchemaField(description="The number of the affected pull request")
|
||||
pull_request: dict = SchemaField(
|
||||
description="Object representing the affected pull request"
|
||||
)
|
||||
pull_request_url: str = SchemaField(
|
||||
description="The URL of the affected pull request"
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
from backend.integrations.webhooks.github import GithubWebhookType
|
||||
|
||||
example_payload = json.loads(
|
||||
self.EXAMPLE_PAYLOAD_FILE.read_text(encoding="utf-8")
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
id="6c60ec01-8128-419e-988f-96a063ee2fea",
|
||||
description="This block triggers on pull request events and outputs the event type and payload.",
|
||||
categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
|
||||
input_schema=GithubPullRequestTriggerBlock.Input,
|
||||
output_schema=GithubPullRequestTriggerBlock.Output,
|
||||
# --8<-- [start:example-webhook_config]
|
||||
webhook_config=BlockWebhookConfig(
|
||||
provider="github",
|
||||
webhook_type=GithubWebhookType.REPO,
|
||||
resource_format="{repo}",
|
||||
event_filter_input="events",
|
||||
event_format="pull_request.{event}",
|
||||
),
|
||||
# --8<-- [end:example-webhook_config]
|
||||
test_input={
|
||||
"repo": "Significant-Gravitas/AutoGPT",
|
||||
"events": {"opened": True, "synchronize": True},
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
"payload": example_payload,
|
||||
},
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
test_output=[
|
||||
("payload", example_payload),
|
||||
("triggered_by_user", example_payload["sender"]),
|
||||
("event", example_payload["action"]),
|
||||
("number", example_payload["number"]),
|
||||
("pull_request", example_payload["pull_request"]),
|
||||
("pull_request_url", example_payload["pull_request"]["html_url"]),
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
|
||||
yield from super().run(input_data, **kwargs)
|
||||
yield "event", input_data.payload["action"]
|
||||
yield "number", input_data.payload["number"]
|
||||
yield "pull_request", input_data.payload["pull_request"]
|
||||
yield "pull_request_url", input_data.payload["pull_request"]["html_url"]
|
||||
|
||||
|
||||
# --8<-- [end:GithubTriggerExample]
|
||||
@@ -1,9 +1,9 @@
from typing import Literal

from autogpt_libs.supabase_integration_credentials_store.types import OAuth2Credentials
from pydantic import SecretStr

from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.data.model import CredentialsField, CredentialsMetaInput
from backend.util.settings import Secrets

# --8<-- [start:GoogleOAuthIsConfigured]
@@ -13,9 +13,7 @@ GOOGLE_OAUTH_IS_CONFIGURED = bool(
)
# --8<-- [end:GoogleOAuthIsConfigured]
GoogleCredentials = OAuth2Credentials
GoogleCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.GOOGLE], Literal["oauth2"]
]
GoogleCredentialsInput = CredentialsMetaInput[Literal["google"], Literal["oauth2"]]


def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput:
@@ -26,6 +24,8 @@ def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput:
        scopes: The authorization scopes needed for the block to work.
    """
    return CredentialsField(
        provider="google",
        supported_credential_types={"oauth2"},
        required_scopes=set(scopes),
        description="The Google integration requires OAuth2 authentication.",
    )
@@ -1,16 +1,11 @@
from typing import Literal

import googlemaps
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import BaseModel, SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
@@ -39,8 +34,12 @@ class Place(BaseModel):
|
||||
class GoogleMapsSearchBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: CredentialsMetaInput[
|
||||
Literal[ProviderName.GOOGLE_MAPS], Literal["api_key"]
|
||||
] = CredentialsField(description="Google Maps API Key")
|
||||
Literal["google_maps"], Literal["api_key"]
|
||||
] = CredentialsField(
|
||||
provider="google_maps",
|
||||
supported_credential_types={"api_key"},
|
||||
description="Google Maps API Key",
|
||||
)
|
||||
query: str = SchemaField(
|
||||
description="Search query for local businesses",
|
||||
placeholder="e.g., 'restaurants in New York'",
|
||||
|
||||
@@ -1,14 +0,0 @@
from typing import Any, Optional

from backend.util.request import requests


class GetRequest:
    @classmethod
    def get_request(
        cls, url: str, headers: Optional[dict] = None, json: bool = False
    ) -> Any:
        if headers is None:
            headers = {}
        response = requests.get(url, headers=headers)
        return response.json() if json else response.text
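Note: the GetRequest helper removed above is the mixin that the Jina search blocks later in this diff inherit from. A minimal usage sketch (class name and fields are illustrative, not from the codebase), assuming the helper behaves exactly as shown:

# Illustrative only: get_request wraps backend.util.request.requests.get and
# returns parsed JSON when json=True, otherwise the raw response text.
class ExampleFetcher(GetRequest):
    def fetch_title(self, url: str) -> str:
        data = self.get_request(url, headers={"Accept": "application/json"}, json=True)
        return data.get("title", "")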
@@ -1,10 +1,10 @@
import json
from enum import Enum
from typing import Any

import requests

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HttpMethod(Enum):
@@ -31,14 +31,9 @@ class SendWebRequestBlock(Block):
            description="The headers to include in the request",
            default={},
        )
        json_format: bool = SchemaField(
            title="JSON format",
            description="Whether to send and receive body as JSON",
            default=True,
        )
        body: Any = SchemaField(
        body: object = SchemaField(
            description="The body of the request",
            default=None,
            default={},
        )

    class Output(BlockSchema):
@@ -56,32 +51,20 @@ class SendWebRequestBlock(Block):
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        body = input_data.body

        if input_data.json_format:
            if isinstance(body, str):
                try:
                    # Try to parse as JSON first
                    body = json.loads(body)
                except json.JSONDecodeError:
                    # If it's not valid JSON and just plain text,
                    # we should send it as plain text instead
                    input_data.json_format = False
        if isinstance(input_data.body, str):
            input_data.body = json.loads(input_data.body)

        response = requests.request(
            input_data.method.value,
            input_data.url,
            headers=input_data.headers,
            json=body if input_data.json_format else None,
            data=body if not input_data.json_format else None,
            json=input_data.body,
        )
        result = response.json() if input_data.json_format else response.text

        if response.status_code // 100 == 2:
            yield "response", result
            yield "response", response.json()
        elif response.status_code // 100 == 4:
            yield "client_error", result
            yield "client_error", response.json()
        elif response.status_code // 100 == 5:
            yield "server_error", result
            yield "server_error", response.json()
        else:
            raise ValueError(f"Unexpected status code: {response.status_code}")
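Note: a standalone sketch (function name is illustrative, not from the codebase) of the body-handling logic shown in the SendWebRequestBlock diff above: with json_format enabled, a string body is parsed as JSON if possible and otherwise falls back to being sent as plain text.

import json
from typing import Any


def prepare_body(body: Any, json_format: bool) -> tuple[Any, bool]:
    # Mirrors the logic in SendWebRequestBlock.run: try JSON first, fall back
    # to sending the raw string when it is not valid JSON.
    if json_format and isinstance(body, str):
        try:
            body = json.loads(body)
        except json.JSONDecodeError:
            json_format = False
    return body, json_format


# prepare_body('{"a": 1}', True)   -> ({"a": 1}, True)
# prepare_body("plain text", True) -> ("plain text", False)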
@@ -1,35 +0,0 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName

HubSpotCredentials = APIKeyCredentials
HubSpotCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.HUBSPOT],
    Literal["api_key"],
]


def HubSpotCredentialsField() -> HubSpotCredentialsInput:
    """Creates a HubSpot credentials input on a block."""
    return CredentialsField(
        description="The HubSpot integration requires an API Key.",
    )


TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="hubspot",
    api_key=SecretStr("mock-hubspot-api-key"),
    title="Mock HubSpot API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
@@ -1,106 +0,0 @@
from backend.blocks.hubspot._auth import (
    HubSpotCredentials,
    HubSpotCredentialsField,
    HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HubSpotCompanyBlock(Block):
    class Input(BlockSchema):
        credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
        operation: str = SchemaField(
            description="Operation to perform (create, update, get)", default="get"
        )
        company_data: dict = SchemaField(
            description="Company data for create/update operations", default={}
        )
        domain: str = SchemaField(
            description="Company domain for get/update operations", default=""
        )

    class Output(BlockSchema):
        company: dict = SchemaField(description="Company information")
        status: str = SchemaField(description="Operation status")

    def __init__(self):
        super().__init__(
            id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
            description="Manages HubSpot companies - create, update, and retrieve company information",
            categories={BlockCategory.CRM},
            input_schema=HubSpotCompanyBlock.Input,
            output_schema=HubSpotCompanyBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com/crm/v3/objects/companies"
        headers = {
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }

        if input_data.operation == "create":
            response = requests.post(
                base_url, headers=headers, json={"properties": input_data.company_data}
            )
            result = response.json()
            yield "company", result
            yield "status", "created"

        elif input_data.operation == "get":
            search_url = f"{base_url}/search"
            search_data = {
                "filterGroups": [
                    {
                        "filters": [
                            {
                                "propertyName": "domain",
                                "operator": "EQ",
                                "value": input_data.domain,
                            }
                        ]
                    }
                ]
            }
            response = requests.post(search_url, headers=headers, json=search_data)
            result = response.json()
            yield "company", result.get("results", [{}])[0]
            yield "status", "retrieved"

        elif input_data.operation == "update":
            # First get company ID by domain
            search_response = requests.post(
                f"{base_url}/search",
                headers=headers,
                json={
                    "filterGroups": [
                        {
                            "filters": [
                                {
                                    "propertyName": "domain",
                                    "operator": "EQ",
                                    "value": input_data.domain,
                                }
                            ]
                        }
                    ]
                },
            )
            company_id = search_response.json().get("results", [{}])[0].get("id")

            if company_id:
                response = requests.patch(
                    f"{base_url}/{company_id}",
                    headers=headers,
                    json={"properties": input_data.company_data},
                )
                result = response.json()
                yield "company", result
                yield "status", "updated"
            else:
                yield "company", {}
                yield "status", "company_not_found"
@@ -1,106 +0,0 @@
from backend.blocks.hubspot._auth import (
    HubSpotCredentials,
    HubSpotCredentialsField,
    HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HubSpotContactBlock(Block):
    class Input(BlockSchema):
        credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
        operation: str = SchemaField(
            description="Operation to perform (create, update, get)", default="get"
        )
        contact_data: dict = SchemaField(
            description="Contact data for create/update operations", default={}
        )
        email: str = SchemaField(
            description="Email address for get/update operations", default=""
        )

    class Output(BlockSchema):
        contact: dict = SchemaField(description="Contact information")
        status: str = SchemaField(description="Operation status")

    def __init__(self):
        super().__init__(
            id="5267326e-c4c1-4016-9f54-4e72ad02f813",
            description="Manages HubSpot contacts - create, update, and retrieve contact information",
            categories={BlockCategory.CRM},
            input_schema=HubSpotContactBlock.Input,
            output_schema=HubSpotContactBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
        headers = {
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }

        if input_data.operation == "create":
            response = requests.post(
                base_url, headers=headers, json={"properties": input_data.contact_data}
            )
            result = response.json()
            yield "contact", result
            yield "status", "created"

        elif input_data.operation == "get":
            # Search for contact by email
            search_url = f"{base_url}/search"
            search_data = {
                "filterGroups": [
                    {
                        "filters": [
                            {
                                "propertyName": "email",
                                "operator": "EQ",
                                "value": input_data.email,
                            }
                        ]
                    }
                ]
            }
            response = requests.post(search_url, headers=headers, json=search_data)
            result = response.json()
            yield "contact", result.get("results", [{}])[0]
            yield "status", "retrieved"

        elif input_data.operation == "update":
            search_response = requests.post(
                f"{base_url}/search",
                headers=headers,
                json={
                    "filterGroups": [
                        {
                            "filters": [
                                {
                                    "propertyName": "email",
                                    "operator": "EQ",
                                    "value": input_data.email,
                                }
                            ]
                        }
                    ]
                },
            )
            contact_id = search_response.json().get("results", [{}])[0].get("id")

            if contact_id:
                response = requests.patch(
                    f"{base_url}/{contact_id}",
                    headers=headers,
                    json={"properties": input_data.contact_data},
                )
                result = response.json()
                yield "contact", result
                yield "status", "updated"
            else:
                yield "contact", {}
                yield "status", "contact_not_found"
@@ -1,121 +0,0 @@
from datetime import datetime, timedelta

from backend.blocks.hubspot._auth import (
    HubSpotCredentials,
    HubSpotCredentialsField,
    HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HubSpotEngagementBlock(Block):
    class Input(BlockSchema):
        credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
        operation: str = SchemaField(
            description="Operation to perform (send_email, track_engagement)",
            default="send_email",
        )
        email_data: dict = SchemaField(
            description="Email data including recipient, subject, content",
            default={},
        )
        contact_id: str = SchemaField(
            description="Contact ID for engagement tracking", default=""
        )
        timeframe_days: int = SchemaField(
            description="Number of days to look back for engagement",
            default=30,
            optional=True,
        )

    class Output(BlockSchema):
        result: dict = SchemaField(description="Operation result")
        status: str = SchemaField(description="Operation status")

    def __init__(self):
        super().__init__(
            id="c6524385-7d87-49d6-a470-248bd29ca765",
            description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
            categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
            input_schema=HubSpotEngagementBlock.Input,
            output_schema=HubSpotEngagementBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com"
        headers = {
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }

        if input_data.operation == "send_email":
            # Using the email send API
            email_url = f"{base_url}/crm/v3/objects/emails"
            email_data = {
                "properties": {
                    "hs_timestamp": datetime.now().isoformat(),
                    "hubspot_owner_id": "1",  # This should be configurable
                    "hs_email_direction": "OUTBOUND",
                    "hs_email_status": "SEND",
                    "hs_email_subject": input_data.email_data.get("subject"),
                    "hs_email_text": input_data.email_data.get("content"),
                    "hs_email_to_email": input_data.email_data.get("recipient"),
                }
            }

            response = requests.post(email_url, headers=headers, json=email_data)
            result = response.json()
            yield "result", result
            yield "status", "email_sent"

        elif input_data.operation == "track_engagement":
            # Get engagement events for the contact
            from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
            engagement_url = (
                f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
            )

            params = {"limit": 100, "after": from_date.isoformat()}

            response = requests.get(engagement_url, headers=headers, params=params)
            engagements = response.json()

            # Process engagement metrics
            metrics = {
                "email_opens": 0,
                "email_clicks": 0,
                "email_replies": 0,
                "last_engagement": None,
                "engagement_score": 0,
            }

            for engagement in engagements.get("results", []):
                eng_type = engagement.get("properties", {}).get("hs_engagement_type")
                if eng_type == "EMAIL":
                    metrics["email_opens"] += 1
                elif eng_type == "EMAIL_CLICK":
                    metrics["email_clicks"] += 1
                elif eng_type == "EMAIL_REPLY":
                    metrics["email_replies"] += 1

                # Update last engagement time
                eng_time = engagement.get("properties", {}).get("hs_timestamp")
                if eng_time and (
                    not metrics["last_engagement"]
                    or eng_time > metrics["last_engagement"]
                ):
                    metrics["last_engagement"] = eng_time

            # Calculate simple engagement score
            metrics["engagement_score"] = (
                metrics["email_opens"]
                + metrics["email_clicks"] * 2
                + metrics["email_replies"] * 3
            )

            yield "result", metrics
            yield "status", "engagement_tracked"
@@ -1,18 +1,12 @@
from enum import Enum
from typing import Any, Dict, Literal, Optional

import requests
from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr
from requests.exceptions import RequestException

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import requests
from backend.data.model import CredentialsField, CredentialsMetaInput, SchemaField

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
@@ -84,10 +78,13 @@ class UpscaleOption(str, Enum):

class IdeogramModelBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[
            Literal[ProviderName.IDEOGRAM], Literal["api_key"]
        ] = CredentialsField(
            description="The Ideogram integration can be used with any API key with sufficient permissions for the blocks it is used on.",

        credentials: CredentialsMetaInput[Literal["ideogram"], Literal["api_key"]] = (
            CredentialsField(
                provider="ideogram",
                supported_credential_types={"api_key"},
                description="The Ideogram integration can be used with any API key with sufficient permissions for the blocks it is used on.",
            )
        )
        prompt: str = SchemaField(
            description="Text prompt for image generation",
@@ -151,7 +148,7 @@ class IdeogramModelBlock(Block):
        super().__init__(
            id="6ab085e2-20b3-4055-bc3e-08036e01eca6",
            description="This block runs Ideogram models with both simple and advanced settings.",
            categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
            categories={BlockCategory.AI},
            input_schema=IdeogramModelBlock.Input,
            output_schema=IdeogramModelBlock.Output,
            test_input={
@@ -245,8 +242,9 @@ class IdeogramModelBlock(Block):

        try:
            response = requests.post(url, json=data, headers=headers)
            response.raise_for_status()
            return response.json()["data"][0]["url"]
        except RequestException as e:
        except requests.exceptions.RequestException as e:
            raise Exception(f"Failed to fetch image: {str(e)}")

    def upscale_image(self, api_key: SecretStr, image_url: str):
@@ -258,6 +256,7 @@ class IdeogramModelBlock(Block):
        try:
            # Step 1: Download the image from the provided URL
            image_response = requests.get(image_url)
            image_response.raise_for_status()

            # Step 2: Send the downloaded image to the upscale API
            files = {
@@ -273,7 +272,8 @@ class IdeogramModelBlock(Block):
                files=files,
            )

            response.raise_for_status()
            return response.json()["data"][0]["url"]

        except RequestException as e:
        except requests.exceptions.RequestException as e:
            raise Exception(f"Failed to upscale image: {str(e)}")
@@ -2,28 +2,13 @@ from typing import Any

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json


class StepThroughItemsBlock(Block):
    class Input(BlockSchema):
        items: list = SchemaField(
            advanced=False,
        items: list | dict = SchemaField(
            description="The list or dictionary of items to iterate over",
            placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
            default=[],
        )
        items_object: dict = SchemaField(
            advanced=False,
            description="The list or dictionary of items to iterate over",
            placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
            default={},
        )
        items_str: str = SchemaField(
            advanced=False,
            description="The list or dictionary of items to iterate over",
            placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
            default="",
        )

    class Output(BlockSchema):
@@ -54,20 +39,14 @@ class StepThroughItemsBlock(Block):
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        for data in [input_data.items, input_data.items_object, input_data.items_str]:
            if not data:
                continue
            if isinstance(data, str):
                items = json.loads(data)
            else:
                items = data
            if isinstance(items, dict):
                # If items is a dictionary, iterate over its values
                for item in items.values():
                    yield "item", item
                    yield "key", item
            else:
                # If items is a list, iterate over the list
                for index, item in enumerate(items):
                    yield "item", item
                    yield "key", index
        items = input_data.items
        if isinstance(items, dict):
            # If items is a dictionary, iterate over its values
            for item in items.values():
                yield "item", item
                yield "key", item
        else:
            # If items is a list, iterate over the list
            for index, item in enumerate(items):
                yield "item", item
                yield "key", index
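Note: a small standalone sketch (function name is illustrative, not from the codebase) of the consolidated iteration behaviour shown above: lists yield each item with its index as the key, while dictionaries yield each value, and the block as written also yields the value in the "key" slot.

def step_through(items):
    # Mirrors StepThroughItemsBlock.run after the change above.
    if isinstance(items, dict):
        for item in items.values():
            yield "item", item
            yield "key", item  # note: the value, not the dict key, as in the diff
    else:
        for index, item in enumerate(items):
            yield "item", item
            yield "key", index


# list(step_through([10, 20])) -> [("item", 10), ("key", 0), ("item", 20), ("key", 1)]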
@@ -1,13 +1,13 @@
from typing import Literal

from autogpt_libs.supabase_integration_credentials_store.types import APIKeyCredentials
from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
from backend.data.model import CredentialsField, CredentialsMetaInput

JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
    Literal[ProviderName.JINA],
    Literal["jina"],
    Literal["api_key"],
]

@@ -18,6 +18,8 @@ def JinaCredentialsField() -> JinaCredentialsInput:

    """
    return CredentialsField(
        provider="jina",
        supported_credential_types={"api_key"},
        description="The Jina integration can be used with an API Key.",
    )
@@ -1,3 +1,5 @@
import requests

from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
@@ -5,7 +7,6 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class JinaChunkingBlock(Block):
@@ -56,6 +57,7 @@ class JinaChunkingBlock(Block):
        }

        response = requests.post(url, headers=headers, json=data)
        response.raise_for_status()
        result = response.json()

        all_chunks.extend(result.get("chunks", []))
@@ -1,3 +1,5 @@
import requests

from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
@@ -5,7 +7,6 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class JinaEmbeddingBlock(Block):
@@ -1,59 +0,0 @@
from urllib.parse import quote

import requests

from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
    JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class FactCheckerBlock(Block):
    class Input(BlockSchema):
        statement: str = SchemaField(
            description="The statement to check for factuality"
        )
        credentials: JinaCredentialsInput = JinaCredentialsField()

    class Output(BlockSchema):
        factuality: float = SchemaField(
            description="The factuality score of the statement"
        )
        result: bool = SchemaField(description="The result of the factuality check")
        reason: str = SchemaField(description="The reason for the factuality result")
        error: str = SchemaField(description="Error message if the check fails")

    def __init__(self):
        super().__init__(
            id="d38b6c5e-9968-4271-8423-6cfe60d6e7e6",
            description="This block checks the factuality of a given statement using Jina AI's Grounding API.",
            categories={BlockCategory.SEARCH},
            input_schema=FactCheckerBlock.Input,
            output_schema=FactCheckerBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        encoded_statement = quote(input_data.statement)
        url = f"https://g.jina.ai/{encoded_statement}"

        headers = {
            "Accept": "application/json",
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }

        response = requests.get(url, headers=headers)
        response.raise_for_status()
        data = response.json()

        if "data" in data:
            data = data["data"]
            yield "factuality", data["factuality"]
            yield "result", data["result"]
            yield "reason", data["reason"]
        else:
            raise RuntimeError(f"Expected 'data' key not found in response: {data}")
@@ -1,107 +0,0 @@
from groq._utils._utils import quote

from backend.blocks.jina._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    JinaCredentials,
    JinaCredentialsField,
    JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class SearchTheWebBlock(Block, GetRequest):
    class Input(BlockSchema):
        credentials: JinaCredentialsInput = JinaCredentialsField()
        query: str = SchemaField(description="The search query to search the web for")

    class Output(BlockSchema):
        results: str = SchemaField(
            description="The search results including content from top 5 URLs"
        )
        error: str = SchemaField(description="Error message if the search fails")

    def __init__(self):
        super().__init__(
            id="87840993-2053-44b7-8da4-187ad4ee518c",
            description="This block searches the internet for the given search query.",
            categories={BlockCategory.SEARCH},
            input_schema=SearchTheWebBlock.Input,
            output_schema=SearchTheWebBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "query": "Artificial Intelligence",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=("results", "search content"),
            test_mock={"get_request": lambda *args, **kwargs: "search content"},
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        # Encode the search query
        encoded_query = quote(input_data.query)
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }

        # Prepend the Jina Search URL to the encoded query
        jina_search_url = f"https://s.jina.ai/{encoded_query}"
        results = self.get_request(jina_search_url, headers=headers, json=False)

        # Output the search results
        yield "results", results


class ExtractWebsiteContentBlock(Block, GetRequest):
    class Input(BlockSchema):
        credentials: JinaCredentialsInput = JinaCredentialsField()
        url: str = SchemaField(description="The URL to scrape the content from")
        raw_content: bool = SchemaField(
            default=False,
            title="Raw Content",
            description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
            advanced=True,
        )

    class Output(BlockSchema):
        content: str = SchemaField(description="The scraped content from the given URL")
        error: str = SchemaField(
            description="Error message if the content cannot be retrieved"
        )

    def __init__(self):
        super().__init__(
            id="436c3984-57fd-4b85-8e9a-459b356883bd",
            description="This block scrapes the content from the given web URL.",
            categories={BlockCategory.SEARCH},
            input_schema=ExtractWebsiteContentBlock.Input,
            output_schema=ExtractWebsiteContentBlock.Output,
            test_input={
                "url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=("content", "scraped content"),
            test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        if input_data.raw_content:
            url = input_data.url
            headers = {}
        else:
            url = f"https://r.jina.ai/{input_data.url}"
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            }

        content = self.get_request(url, json=False, headers=headers)
        yield "content", content
Some files were not shown because too many files have changed in this diff.