Compare commits

..

40 Commits

Author SHA1 Message Date
Aarushi          c51b8e7291  attempt to fix  2025-01-08 13:54:28 +00:00
Aarushi          f9ae76123a  fix lock  2025-01-08 13:34:12 +00:00
Aarushi          281ae65dcb  Merge branch 'dev' into twitter-integration  2025-01-08 13:29:54 +00:00
Aarushi          bd815fc9d7  udpate poetry lock  2025-01-08 13:24:39 +00:00
Nicholas Tindle  cdaa2ee456  Merge branch 'dev' into feature/twitter-integration  2025-01-08 00:16:00 -06:00
Nicholas Tindle  70dfaf1ef5  fix: lock  2025-01-07 22:59:46 -06:00
Nicholas Tindle  565774f77a  Discard changes to autogpt_platform/frontend/yarn.lock  2025-01-07 22:45:01 -06:00
Nicholas Tindle  4c6dd35310  Merge remote-tracking branch 'origin/dev' into pr/8754  2025-01-07 12:49:31 -06:00
abhi1992002     632a39e877  upadate twitter documentation  2025-01-03 13:02:25 +05:30
abhi1992002     e297eff27c  remove multi select  2025-01-03 12:50:24 +05:30
abhi1992002     57aa6745da  update twitter env variable comment  2025-01-03 12:48:12 +05:30
abhi1992002     fb9d42f466  fix formatting  2025-01-03 12:35:03 +05:30
abhi1992002     eb25e731fc  revert img change  2025-01-03 12:22:16 +05:30
abhi1992002     d75c08e348  adding documentation for twitter block  2025-01-03 11:45:53 +05:30
abhi1992002     428c012a43  write documentation for oneOf  2025-01-02 16:50:19 +05:30
abhi1992002     4fe135e472  add support for oneOf and optional oneOf  2025-01-02 16:24:00 +05:30
abhi1992002     1be3a29dc0  fix yarn lock  2025-01-02 11:01:35 +05:30
abhi1992002     e8ae8ccd6d  fix backend tests  2025-01-02 11:01:35 +05:30
abhi1992002     4aa36fda55  fix expansions in tweet block  2025-01-02 11:01:35 +05:30
abhi1992002     2e19f3e9e2  fix tweet blocks expansions  2025-01-02 11:01:35 +05:30
abhi1992002     a85f671237  1. add optional datetime and multi select support  2025-01-02 11:01:27 +05:30
Nicholas Tindle  6c3a401ceb  fix: linting came back with a vengence  2025-01-02 11:01:08 +05:30
Nicholas Tindle  949139ed7a  fix: credentials issues  2025-01-02 11:01:08 +05:30
Nicholas Tindle  5bdf541bce  fix: only add tweepy (and down bumb related :()  2025-01-02 11:01:08 +05:30
Nicholas Tindle  a1a3f9e179  fix: project problems  2025-01-02 11:01:00 +05:30
Nicholas Tindle  d14045c7b7  Discard changes to autogpt_platform/frontend/yarn.lock  2025-01-02 11:00:30 +05:30
Nicholas Tindle  84c30be37d  fix: formatting  2025-01-02 11:00:30 +05:30
abhi             bc0aab9c73  fix:oauth2  2025-01-02 11:00:30 +05:30
Abhimanyu Yadav  19b6dfd0f7  Update credentials-input.tsx  2025-01-02 11:00:30 +05:30
Abhimanyu Yadav  e85f593dab  Update credentials-provider.tsx  2025-01-02 11:00:30 +05:30
abhi             5200250ffb  fix multiselect  2025-01-02 11:00:30 +05:30
abhi             8eba862723  fix multiselect  2025-01-02 11:00:30 +05:30
abhi             240e030a36  initial changes  2025-01-02 11:00:30 +05:30
abhi1992002     8782caf39a  refactor: Enhance Supabase integration and Twitter OAuth handling  2025-01-02 11:00:30 +05:30
    - Updated `store.py` to improve state token management by adding PKCE support and simplifying code challenge generation.
    - Modified environment variable names in `.env.example` for consistency.
    - Removed unnecessary `is_multi_select` attributes from various Twitter-related input schemas to streamline the code.
    - Cleaned up exception handling in Twitter list management and tweet management blocks by removing redundant error logging.
    - Removed debug print statements from various components to clean up the codebase.
    - Fixed a minor error message in the Twitter OAuth handler for clarity.
Abhimanyu Yadav  779cec003c  Update pyproject.toml  2025-01-02 11:00:30 +05:30
Abhimanyu Yadav  5a48f6cec4  Update credentials-input.tsx  2025-01-02 11:00:30 +05:30
Abhimanyu Yadav  e7056e5642  Update credentials-provider.tsx  2025-01-02 11:00:30 +05:30
abhi1992002     ee0a75027a  fix: test  2025-01-02 11:00:30 +05:30
abhi1992002     1fc5a7beae  fix linting  2025-01-02 11:00:30 +05:30
abhi1992002     c4f77d4074  add twitter credentials with some frontend changes  2025-01-02 11:00:30 +05:30
    # Conflicts:
    #	autogpt_platform/backend/backend/data/model.py
    #	autogpt_platform/backend/pyproject.toml
    #	autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
1354 changed files with 37013 additions and 140611 deletions
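Note: commit 8782caf39a above mentions adding PKCE support and simplifying code-challenge generation in `store.py`. For reference, here is a minimal sketch of the standard S256 code-challenge derivation from RFC 7636; this shows the generic algorithm, not necessarily how `store.py` implements it:

```python
import base64
import hashlib
import secrets


def generate_pkce_pair() -> tuple[str, str]:
    """Return a (code_verifier, code_challenge) pair per RFC 7636, S256 method."""
    # 32 random bytes -> a 43-char URL-safe verifier, within the spec's 43-128 range
    verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=").decode()
    digest = hashlib.sha256(verifier.encode("ascii")).digest()
    challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
    return verifier, challenge
```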

View File

@@ -1,18 +0,0 @@
version = 1
test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]
exclude_patterns = ["classic/**"]
[[analyzers]]
name = "javascript"
[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]
[[analyzers]]
name = "python"
[analyzers.meta]
runtime_version = "3.x.x"

View File

@@ -27,7 +27,7 @@
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/yarn.lock
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config

View File

@@ -10,19 +10,17 @@ updates:
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
@@ -34,19 +32,17 @@ updates:
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
@@ -62,13 +58,13 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
@@ -85,13 +81,14 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
@@ -104,13 +101,14 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
@@ -123,16 +121,40 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Submodules
- package-ecosystem: "gitsubmodule"
directory: "autogpt_platform/supabase"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
commit-message:
prefix: "chore(platform/deps)"
prefix-development: "chore(platform/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docs
- package-ecosystem: "pip"
- package-ecosystem: 'pip'
directory: "docs/"
schedule:
interval: "weekly"
@@ -144,10 +166,10 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"

.github/labeler.yml
View File

@@ -24,9 +24,8 @@ platform/frontend:
platform/backend:
- changed-files:
- all-globs-to-any-file:
- autogpt_platform/backend/**
- '!autogpt_platform/backend/backend/blocks/**'
- any-glob-to-any-file: autogpt_platform/backend/**
- all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
platform/blocks:
- changed-files:
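Note: the labeler hunk above swaps matcher combinators. `any-glob-to-any-file` applies the label if any glob matches any changed file, while `all-globs-to-all-files` requires every changed file to satisfy every glob (here, a negated one). A rough Python illustration of the two modes; this is not the actual actions/labeler implementation, which also understands `!` negation and full `**` globbing:

```python
from fnmatch import fnmatch


def any_glob_to_any_file(globs: list[str], files: list[str]) -> bool:
    # label applies if at least one glob matches at least one changed file
    return any(fnmatch(f, g) for g in globs for f in files)


def all_globs_to_all_files(globs: list[str], files: list[str]) -> bool:
    # label applies only if every glob matches every changed file
    return all(fnmatch(f, g) for g in globs for f in files)
```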

View File

@@ -115,7 +115,6 @@ jobs:
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests/unit tests/integration
env:
CI: true
@@ -125,14 +124,8 @@ jobs:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}

View File

@@ -87,20 +87,13 @@ jobs:
poetry run pytest -vv \
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: agbenchmark,${{ runner.os }}

View File

@@ -139,7 +139,6 @@ jobs:
poetry run pytest -vv \
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
forge
env:
CI: true
@@ -149,14 +148,8 @@ jobs:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: forge,${{ runner.os }}

View File

@@ -1,47 +0,0 @@
name: Claude Code
on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]
jobs:
  claude:
    if: |
      (
        (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
        (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
        (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
        (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
      ) && (
        github.event.comment.author_association == 'OWNER' ||
        github.event.comment.author_association == 'MEMBER' ||
        github.event.comment.author_association == 'COLLABORATOR' ||
        github.event.review.author_association == 'OWNER' ||
        github.event.review.author_association == 'MEMBER' ||
        github.event.review.author_association == 'COLLABORATOR' ||
        github.event.issue.author_association == 'OWNER' ||
        github.event.issue.author_association == 'MEMBER' ||
        github.event.issue.author_association == 'COLLABORATOR'
      )
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@beta
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

View File

@@ -34,7 +34,6 @@ jobs:
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:

View File

@@ -36,7 +36,6 @@ jobs:
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate

View File

@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.11"]
python-version: ["3.10"]
runs-on: ubuntu-latest
services:
@@ -42,31 +42,6 @@ jobs:
REDIS_PASSWORD: testpassword
ports:
- 6379:6379
rabbitmq:
image: rabbitmq:3.12-management
ports:
- 5672:5672
- 15672:15672
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
clamav:
image: clamav/clamav-debian:latest
ports:
- 3310:3310
env:
CLAMAV_NO_FRESHCLAMD: false
CLAMD_CONF_StreamMaxLength: 50M
CLAMD_CONF_MaxFileSize: 100M
CLAMD_CONF_MaxScanSize: 100M
CLAMD_CONF_MaxThreads: 4
CLAMD_CONF_ReadTimeout: 300
options: >-
--health-cmd "clamdscan --version || exit 1"
--health-interval 30s
--health-timeout 10s
--health-retries 5
--health-start-period 180s
steps:
- name: Checkout repository
@@ -83,7 +58,7 @@ jobs:
- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: 1.178.1
version: latest
- id: get_date
name: Get date
@@ -97,35 +72,18 @@ jobs:
- name: Install Poetry (Unix)
run: |
# Extract Poetry version from backend/poetry.lock
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
if [ -n "$BASE_REF" ]; then
BASE_BRANCH=${BASE_REF/refs\/heads\//}
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
else
POETRY_VERSION=$HEAD_POETRY_VERSION
fi
echo "Using Poetry version ${POETRY_VERSION}"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
env:
BASE_REF: ${{ github.base_ref || github.event.merge_group.base_ref }}
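Note: the removed "Install Poetry" logic above compares the Poetry version pinned in the PR branch's lockfile with the one on the base branch and installs whichever is newer (`sort -V | tail -n1`). The same selection expressed in Python, assuming the third-party `packaging` library:

```python
from packaging.version import Version  # pip install packaging


def pick_poetry_version(head: str, base: str | None) -> str:
    """Return the newer of the two lockfile-pinned Poetry versions."""
    return head if base is None else max(head, base, key=Version)


assert pick_poetry_version("1.8.3", "2.0.1") == "2.0.1"
```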
- name: Check poetry.lock
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
if ! git diff --quiet poetry.lock; then
echo "Error: poetry.lock not up to date."
echo
git diff poetry.lock
@@ -148,40 +106,10 @@ jobs:
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
- name: Wait for ClamAV to be ready
run: |
echo "Waiting for ClamAV daemon to start..."
max_attempts=60
attempt=0
until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
sleep 5
attempt=$((attempt+1))
done
if [ $attempt -eq $max_attempts ]; then
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
echo "Checking ClamAV service logs..."
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
fi
echo "ClamAV is ready!"
# Verify ClamAV is responsive
echo "Testing ClamAV connection..."
timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
echo "ClamAV is not responding to PING"
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
}
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
- id: lint
name: Run Linter
@@ -190,22 +118,20 @@ jobs:
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -s -vv
poetry run pytest -s -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
REDIS_HOST: "localhost"
REDIS_PORT: "6379"
REDIS_PASSWORD: "testpassword"
ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=" # DO NOT USE IN PRODUCTION!!
REDIS_HOST: 'localhost'
REDIS_PORT: '6379'
REDIS_PASSWORD: 'testpassword'
env:
CI: true
@@ -213,13 +139,6 @@ jobs:
RUN_ENV: local
PORT: 8080
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# We know these are here, don't report this as a security vulnerability
# This is used as the default credential for the entire system's RabbitMQ instance
# If you want to replace this, you can do so by making our entire system generate
# new credentials for each local user and update the environment variables in
# the backend service, docker composes, and examples
RABBITMQ_DEFAULT_USER: "rabbitmq_user_default"
RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7"
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4
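Note: one of the steps removed in this hunk polls TCP port 3310 with `nc -z` until the ClamAV daemon accepts connections. A Python equivalent of that readiness loop, as a sketch rather than part of the workflow:

```python
import socket
import time


def wait_for_port(host: str, port: int, attempts: int = 60, delay: float = 5.0) -> None:
    """Block until a TCP service accepts connections, or raise after attempts * delay seconds."""
    for _ in range(attempts):
        try:
            with socket.create_connection((host, port), timeout=2):
                return  # service is up
        except OSError:
            time.sleep(delay)
    raise TimeoutError(f"{host}:{port} not ready after {attempts * delay:.0f}s")


wait_for_port("localhost", 3310)  # clamd's default TCP port
```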

View File

@@ -1,198 +0,0 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
permissions:
issues: write
pull-requests: write
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Process deploy command
if (commentBody === '!deploy') {
core.setOutput('should_deploy', 'true');
}
// Process undeploy command
else if (commentBody === '!undeploy') {
core.setOutput('should_undeploy', 'true');
}
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});
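Note: the deleted dispatcher workflow above decides whether a closed PR still has an active deployment by scanning its comments for the most recent `!deploy` and `!undeploy`. The same decision logic as a standalone Python sketch:

```python
def should_undeploy(comment_bodies: list[str]) -> bool:
    """True if the most recent !deploy is not followed by a later !undeploy."""
    last_deploy = last_undeploy = -1
    for i, body in enumerate(comment_bodies):
        if body.strip() == "!deploy":
            last_deploy = i
        elif body.strip() == "!undeploy":
            last_undeploy = i
    return last_deploy != -1 and last_deploy > last_undeploy


assert should_undeploy(["!deploy", "looks good"]) is True
assert should_undeploy(["!deploy", "!undeploy"]) is False
```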

View File

@@ -18,141 +18,27 @@ defaults:
working-directory: autogpt_platform/frontend
jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
lint:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Run lint
run: pnpm lint
type-check:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run tsc check
run: pnpm type-check
chromatic:
runs-on: ubuntu-latest
needs: setup
# Only run on dev branch pushes or PRs targeting dev
if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run Chromatic
uses: chromaui/action@latest
with:
projectToken: chpt_9e7c1a76478c9c8
onlyChanged: true
workingDir: autogpt_platform/frontend
token: ${{ secrets.GITHUB_TOKEN }}
exitOnceUploaded: true
run: |
yarn lint
test:
runs-on: ubuntu-latest
needs: setup
strategy:
fail-fast: false
matrix:
@@ -169,18 +55,15 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
large-packages: false # slow
docker-images: false # limited benefit
large-packages: false # slow
docker-images: false # limited benefit
- name: Copy default supabase .env
run: |
cp ../.env.example ../.env
cp ../supabase/docker/.env.example ../.env
- name: Copy backend .env
run: |
@@ -190,35 +73,25 @@ jobs:
run: |
docker compose -f ../docker-compose.yml up -d
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Setup .env
run: cp .env.example .env
- name: Build frontend
run: pnpm build --turbo
# uses Turbopack, much faster and safe enough for a test pipeline
- name: Setup Builder .env
run: |
cp .env.example .env
- name: Install Browser '${{ matrix.browser }}'
run: pnpm playwright install --with-deps ${{ matrix.browser }}
run: yarn playwright install --with-deps ${{ matrix.browser }}
- name: Run Playwright tests
run: pnpm test:no-build --project=${{ matrix.browser }}
env:
BROWSER_TYPE: ${{ matrix.browser }}
- name: Run tests
run: |
yarn test --project=${{ matrix.browser }}
- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.yml logs
- name: Print Docker Compose logs in debug mode
if: runner.debug
run: |
docker compose -f ../docker-compose.yml logs
- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}
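Note: the removed pnpm cache setup keys the dependency cache on `hashFiles('**/pnpm-lock.yaml')`, so any lockfile change invalidates it. A rough Python analogue of that keying; GitHub's actual `hashFiles` hashes each matched file and then hashes the combined digests, while this sketch simply hashes file contents in sorted order:

```python
import hashlib
from pathlib import Path


def cache_key(os_name: str, pattern: str = "**/pnpm-lock.yaml") -> str:
    """Approximate `${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}`."""
    h = hashlib.sha256()
    for path in sorted(Path(".").glob(pattern)):
        h.update(path.read_bytes())
    return f"{os_name}-pnpm-{h.hexdigest()}"
```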

View File

@@ -16,7 +16,7 @@ jobs:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 170 days. You can _unstale_ it by commenting or
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
@@ -25,7 +25,7 @@ jobs:
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 170
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import sys

if sys.version_info < (3, 11):
    print("Python version 3.11 or higher required")
    sys.exit(1)

import tomllib


def get_package_version(package_name: str, lockfile_path: str) -> str | None:
    """Extract package version from poetry.lock file."""
    try:
        if lockfile_path == "-":
            data = tomllib.load(sys.stdin.buffer)
        else:
            with open(lockfile_path, "rb") as f:
                data = tomllib.load(f)
    except FileNotFoundError:
        print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
        sys.exit(1)
    except tomllib.TOMLDecodeError as e:
        print(f"Error parsing TOML file: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(1)

    # Look for the package in the packages list
    packages = data.get("package", [])
    for package in packages:
        if package.get("name", "").lower() == package_name.lower():
            return package.get("version")
    return None


def main():
    if len(sys.argv) not in (2, 3):
        print(
            "Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
            "        cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
            file=sys.stderr,
        )
        sys.exit(1)

    package_name = sys.argv[1]
    lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"

    version = get_package_version(package_name, lockfile_path)
    if version:
        print(version)
    else:
        print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
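Note: per its own usage text, the deleted script prints the pinned version for a package, e.g. `python get_package_version_from_lockfile.py poetry backend/poetry.lock`, and reads the lockfile from stdin when the path argument is `-`.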

.gitignore
View File

@@ -165,7 +165,7 @@ package-lock.json
# Allow for locally private items
# private
pri*
pri*
# ignore
ig*
.github_access_token
@@ -176,7 +176,3 @@ autogpt_platform/backend/settings.py
*.ign.*
.test-contents
.claude/settings.local.json
api.md
blocks.md

.gitmodules
View File

@@ -1,3 +1,6 @@
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "autogpt_platform/supabase"]
path = autogpt_platform/supabase
url = https://github.com/supabase/supabase.git

View File

@@ -17,7 +17,7 @@ repos:
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
files: ^autogpt_platform/
stages: [pre-push]
stages: [push]
- repo: local
# For proper type checking, all dependencies need to be up-to-date.
@@ -140,7 +140,7 @@ repos:
language: system
- repo: https://github.com/psf/black
rev: 24.10.0
rev: 23.12.1
# Black has sensible defaults, doesn't need package context, and ignores
# everything in .gitignore, so it works fine without any config or arguments.
hooks:
@@ -170,16 +170,6 @@ repos:
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
- repo: local
hooks:
- id: prettier
name: Format (Prettier) - AutoGPT Platform - Frontend
alias: format-platform-frontend
entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
files: ^autogpt_platform/frontend/
types: [file]
language: system
- repo: local
# To have watertight type checking, we check *all* the files in an affected
# project. To trigger on poetry.lock we also reset the file `types` filter.
@@ -233,46 +223,36 @@ repos:
- repo: local
hooks:
- id: tsc
name: Typecheck - AutoGPT Platform - Frontend
entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
files: ^autogpt_platform/frontend/
types: [file]
- id: pytest
name: Run tests - AutoGPT Platform - Backend
alias: pytest-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - repo: local
# hooks:
# - id: pytest
# name: Run tests - AutoGPT Platform - Backend
# alias: pytest-platform-backend
# entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
# files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - AutoGPT (excl. slow tests)
alias: pytest-classic-autogpt
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - AutoGPT (excl. slow tests)
# alias: pytest-classic-autogpt
# entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# # include forge source (since it's a path dependency) but exclude *_test.py files:
# files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - Forge (excl. slow tests)
alias: pytest-classic-forge
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
files: ^classic/forge/(forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Forge (excl. slow tests)
# alias: pytest-classic-forge
# entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
# files: ^classic/forge/(forge/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Benchmark
# alias: pytest-classic-benchmark
# entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
# files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - Benchmark
alias: pytest-classic-benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false

.vscode/launch.json
View File

@@ -32,9 +32,9 @@
"type": "debugpy",
"request": "launch",
"module": "backend.app",
"env": {
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
},
// "env": {
// "ENV": "dev"
// },
"envFile": "${workspaceFolder}/backend/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/autogpt_platform/backend"

View File

@@ -1,53 +0,0 @@
# AutoGPT Platform Contribution Guide
This guide provides context for Codex when updating the **autogpt_platform** folder.
## Directory overview
- `autogpt_platform/backend` FastAPI based backend service.
- `autogpt_platform/autogpt_libs` Shared Python libraries.
- `autogpt_platform/frontend` Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` development stack.
See `docs/content/platform/getting-started.md` for setup instructions.
## Code style
- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks
## Pull requests
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the linear ticket branch structure if given codex/open-1668-resume-dropped-runs

View File

@@ -2,6 +2,9 @@
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].
Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971

View File

@@ -15,38 +15,7 @@
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
### System Requirements
Before proceeding with the installation, ensure your system meets the following requirements:
#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space
#### Software Requirements
- Operating Systems:
- Linux (Ubuntu 20.04 or newer recommended)
- macOS (10.15 or newer)
- Windows 10/11 with WSL2
- Required Software (with minimum versions):
- Docker Engine (20.10.0 or newer)
- Docker Compose (2.0.0 or newer)
- Git (2.30 or newer)
- Node.js (16.x or newer)
- npm (8.x or newer)
- VSCode (1.60 or newer) or any modern code editor
#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections
### Updated Setup Instructions:
We've moved to a fully maintained and regularly updated documentation site.
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603
This tutorial assumes you have Docker, VSCode, git and npm installed.
@@ -179,7 +148,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasnt created an issue for the same topic.
## 🤝 Sister projects
@@ -206,4 +175,4 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
<img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>
</a>

View File

@@ -20,7 +20,6 @@ Instead, please report them via:
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- After patch is released, allow 30 days for users to update before public disclosure (for a total of 120 days max between update time and fix time)
- Share any potential mitigations or workarounds if known
## Supported Versions

View File

@@ -1,123 +0,0 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres
############
# Supavisor -- Database pooler
############
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
############
# API Proxy - Configuration for the Kong Reverse proxy.
############
KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443
############
# API - Configuration for PostgREST.
############
PGRST_DB_SCHEMAS=public,storage,graphql_public
############
# Auth - Configuration for the GoTrue authentication server.
############
## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000
## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false
## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
############
# Studio - Configuration for the Dashboard
############
STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project
STUDIO_PORT=3000
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000
# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true
# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=
############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false
############
# Logs - Configuration for Logflare
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
# Change vector.toml sinks to reflect this change
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER

View File

@@ -1,146 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Repository Overview
AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
## Essential Commands
### Backend Development
```bash
# Install dependencies
cd backend && poetry install
# Run database migrations
poetry run prisma migrate dev
# Start all services (database, redis, rabbitmq, clamav)
docker compose up -d
# Run the backend server
poetry run serve
# Run tests
poetry run test
# Run specific test
poetry run pytest path/to/test_file.py::test_function_name
# Lint and format
# prefer format if you want to just "fix" it and only get the errors that can't be autofixed
poetry run format # Black + isort
poetry run lint # ruff
```
More details can be found in TESTING.md
#### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
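A minimal example of the snapshot pattern described above, assuming a pytest snapshot plugin such as syrupy (the exact fixture name depends on the plugin in use):

```python
def test_api_payload(snapshot):
    payload = {"id": 1, "name": "Alice"}
    # The first run with --snapshot-update records this value; later runs compare against it.
    assert payload == snapshot
```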
### Frontend Development
```bash
# Install dependencies
cd frontend && npm install
# Start development server
npm run dev
# Run E2E tests
npm run test
# Run Storybook for component development
npm run storybook
# Build production
npm run build
# Type checking
npm run type-check
```
## Architecture Overview
### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies
### Frontend Architecture
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration
### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
5. **Virus Scanning**: ClamAV integration for file upload security
### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook
### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents
### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services
### Common Development Tasks
**Adding a new block:**
1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
4. Implement `run` method
5. Register in block registry
6. Generate the block uuid using `uuid.uuid4()`
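A minimal sketch of such a block, following the steps above. The import path, constructor arguments, and `run` signature are illustrative assumptions, not the exact platform API:

```python
from backend.data.block import Block, BlockOutput, BlockSchema  # assumed import path


class EchoBlock(Block):
    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        text: str

    def __init__(self):
        super().__init__(
            id="e4b2c7d0-0000-4000-8000-000000000000",  # generated once via uuid.uuid4()
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks emit (output_name, value) pairs
        yield "text", input_data.text
```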
**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify
**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing
### Security Implementation
**Cache Protection Middleware:**
- Located in `/backend/backend/server/middleware/security.py`
- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow list approach - only explicitly permitted paths can be cached
- Cacheable paths include: static assets (`/static/*`, `/_next/static/*`), health checks, public store pages, documentation
- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies
- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
- Applied to both main API server and external API applications
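A condensed sketch of that allow-list approach using Starlette's `BaseHTTPMiddleware`; the real implementation lives in `/backend/backend/server/middleware/security.py`, and the paths and structure here are illustrative:

```python
from starlette.middleware.base import BaseHTTPMiddleware

CACHEABLE_PATHS = ("/static/", "/_next/static/", "/health")  # illustrative allow list


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    """Deny caching by default; permit it only for explicitly listed path prefixes."""

    async def dispatch(self, request, call_next):
        response = await call_next(request)
        if not request.url.path.startswith(CACHEABLE_PATHS):
            response.headers["Cache-Control"] = (
                "no-store, no-cache, must-revalidate, private"
            )
        return response
```

Registered with `app.add_middleware(CacheProtectionMiddleware)`, this makes "cacheable" an explicit opt-in per path.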

View File

@@ -15,66 +15,53 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
```
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
cd AutoGPT/autogpt_platform
```
2. Run the following command:
```
cp .env.example .env
git submodule update --init --recursive
```
This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
3. Run the following command:
```
cp supabase/docker/.env.example .env
```
This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.
4. Run the following command:
```
docker compose up -d
```
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
4. Navigate to `frontend` within the `autogpt_platform` directory:
5. Navigate to `frontend` within the `autogpt_platform` directory:
```
cd frontend
```
You will need to run your frontend application separately on your local machine.
5. Run the following command:
6. Run the following command:
```
cp .env.example .env.local
```
This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
6. Run the following command:
Enable corepack and install dependencies by running:
7. Run the following command:
```
corepack enable
pnpm i
npm install
npm run dev
```
This command will install the necessary dependencies and start the frontend application in development mode.
If you are using Yarn, you can run the following commands instead:
```
yarn install && yarn dev
```
Generate the API client (this step is required before running the frontend):
```
pnpm generate:api-client
```
Then start the frontend application in development mode:
```
pnpm dev
```
7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
### Docker Compose Commands
@@ -87,52 +74,43 @@ Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
@@ -143,6 +121,7 @@ Here are some common scenarios where you might use multiple Docker Compose comma
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
@@ -170,27 +149,3 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
3. Save the file and run `docker compose up -d` to apply the changes.
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
### API Client Generation
The platform includes scripts for generating and managing the API client:
- `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires backend to be running on port 8006)
- `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
- `pnpm generate:api-all`: Runs both fetch and generate commands in sequence
#### Manual API Client Updates
If you need to update the API client after making changes to the backend API:
1. Ensure the backend services are running:
```
docker compose up -d
```
2. Generate the updated API client:
```
pnpm generate:api-all
```
This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.

View File

@@ -1,3 +1,3 @@
# AutoGPT Libs
This is a new project to store shared functionality across different services in the AutoGPT Platform (e.g. authentication)
This is a new project to store shared functionality across different services in NextGen AutoGPT (e.g. authentication)

View File

@@ -31,5 +31,4 @@ class APIKeyManager:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
return secrets.compare_digest(provided_hash, stored_hash)
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
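Note: this hunk trades `secrets.compare_digest` for a plain `==`. The constant-time variant matters because ordinary string comparison returns at the first mismatching character, which can leak how much of a guessed key is correct through response timing. The timing-safe pattern, for reference:

```python
import hashlib
import secrets


def verify_key(provided_key: str, stored_hash: str) -> bool:
    provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
    # compare_digest runs in time independent of where the strings differ
    return secrets.compare_digest(provided_hash, stored_hash)
```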

View File

@@ -1,13 +1,14 @@
from .config import Settings
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .middleware import auth_middleware
from .models import User
__all__ = [
"Settings",
"parse_jwt_token",
"requires_user",
"requires_admin_user",
"APIKeyValidator",
"auth_middleware",
"User",
]

View File

@@ -1,11 +1,14 @@
import os
from dotenv import load_dotenv
load_dotenv()
class Settings:
def __init__(self):
self.JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "")
self.ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true"
self.JWT_ALGORITHM: str = "HS256"
JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "")
ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true"
JWT_ALGORITHM: str = "HS256"
@property
def is_configured(self) -> bool:
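Note: the two `Settings` variants in this hunk differ in when the environment is read. Attributes assigned in `__init__` are re-evaluated on each construction, while class-level attributes are evaluated once when the module is imported. A small demonstration of the distinction:

```python
import os


class InstanceSettings:
    def __init__(self):
        # read on every construction, so later env changes are picked up
        self.ENABLE_AUTH = os.getenv("ENABLE_AUTH", "false").lower() == "true"


class ClassSettings:
    # read once, when the class body executes at import time
    ENABLE_AUTH = os.getenv("ENABLE_AUTH", "false").lower() == "true"


os.environ["ENABLE_AUTH"] = "true"
print(InstanceSettings().ENABLE_AUTH)  # True
print(ClassSettings.ENABLE_AUTH)       # False, assuming it was unset when the class body ran
```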

View File

@@ -1,6 +1,6 @@
import fastapi
from .config import settings
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
@@ -17,7 +17,7 @@ def requires_admin_user(
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if settings.ENABLE_AUTH:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)

View File

@@ -1,11 +1,7 @@
import inspect
import logging
import secrets
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from fastapi import HTTPException, Request
from fastapi.security import HTTPBearer
from .config import settings
from .jwt_utils import parse_jwt_token
@@ -17,7 +13,7 @@ logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warning("Auth disabled")
logger.warn("Auth disabled")
return {}
security = HTTPBearer()
@@ -33,108 +29,3 @@ async def auth_middleware(request: Request):
except ValueError as e:
raise HTTPException(status_code=401, detail=str(e))
return payload
class APIKeyValidator:
"""
Configurable API key validator that supports custom validation functions
for FastAPI applications.
This class provides a flexible way to implement API key authentication with optional
custom validation logic. It can be used for simple token matching
or more complex validation scenarios like database lookups.
Examples:
Simple token validation:
```python
validator = APIKeyValidator(
header_name="X-API-Key",
expected_token="your-secret-token"
)
@app.get("/protected", dependencies=[Depends(validator.get_dependency())])
def protected_endpoint():
return {"message": "Access granted"}
```
Custom validation with database lookup:
```python
async def validate_with_db(api_key: str):
api_key_obj = await db.get_api_key(api_key)
return api_key_obj if api_key_obj and api_key_obj.is_active else None
validator = APIKeyValidator(
header_name="X-API-Key",
validate_fn=validate_with_db
)
```
Args:
header_name (str): The name of the header containing the API key
expected_token (Optional[str]): The expected API key value for simple token matching
validate_fn (Optional[Callable]): Custom validation function that takes an API key
string and returns a boolean or object. Can be async.
error_status (int): HTTP status code to use for validation errors
error_message (str): Error message to return when validation fails
"""
def __init__(
self,
header_name: str,
expected_token: Optional[str] = None,
validate_fn: Optional[Callable[[str], bool]] = None,
error_status: int = HTTP_401_UNAUTHORIZED,
error_message: str = "Invalid API key",
):
# Create the APIKeyHeader as a class property
self.security_scheme = APIKeyHeader(name=header_name)
self.expected_token = expected_token
self.custom_validate_fn = validate_fn
self.error_status = error_status
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
if not self.expected_token:
raise ValueError(
"Expected Token Required to be set when uisng API Key Validator default validation"
)
return secrets.compare_digest(api_key, self.expected_token)
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)
) -> Any:
if api_key is None:
raise HTTPException(status_code=self.error_status, detail="Missing API key")
# Use custom validation if provided, otherwise use default equality check
validator = self.custom_validate_fn or self.default_validator
result = (
await validator(api_key)
if inspect.iscoroutinefunction(validator)
else validator(api_key)
)
if not result:
raise HTTPException(
status_code=self.error_status, detail=self.error_message
)
# Store validation result in request state if it's not just a boolean
if result is not True:
request.state.api_key = result
return result
def get_dependency(self):
"""
Returns a callable dependency that FastAPI will recognize as a security scheme
"""
async def validate_api_key(
request: Request, api_key: str = Security(self.security_scheme)
) -> Any:
return await self(request, api_key)
# This helps FastAPI recognize it as a security dependency
validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
return validate_api_key

View File

@@ -13,6 +13,7 @@ from typing_extensions import ParamSpec
from .config import SETTINGS
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
P = ParamSpec("P")
T = TypeVar("T")

View File

@@ -8,7 +8,7 @@ from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
from .filters import BelowLevelFilter
from .formatters import AGPTFormatter
from .formatters import AGPTFormatter, StructuredLoggingFormatter
LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
@@ -18,7 +18,7 @@ ERROR_LOG_FILE = "error.log"
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"
DEBUG_LOG_FORMAT = (
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(title)s%(message)s"
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s"
)
@@ -81,26 +81,9 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
"""
config = LoggingConfig()
log_handlers: list[logging.Handler] = []
# Console output handlers
stdout = logging.StreamHandler(stream=sys.stdout)
stdout.setLevel(config.level)
stdout.addFilter(BelowLevelFilter(logging.WARNING))
if config.level == logging.DEBUG:
stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
if config.level == logging.DEBUG:
stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
log_handlers += [stdout, stderr]
# Cloud logging setup
if config.enable_cloud_logging or force_cloud_logging:
import google.cloud.logging
@@ -114,7 +97,28 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
transport=SyncTransport,
)
cloud_handler.setLevel(config.level)
cloud_handler.setFormatter(StructuredLoggingFormatter())
log_handlers.append(cloud_handler)
print("Cloud logging enabled")
else:
# Console output handlers
stdout = logging.StreamHandler(stream=sys.stdout)
stdout.setLevel(config.level)
stdout.addFilter(BelowLevelFilter(logging.WARNING))
if config.level == logging.DEBUG:
stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
if config.level == logging.DEBUG:
stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
log_handlers += [stdout, stderr]
print("Console logging enabled")
# File logging setup
if config.enable_file_logging:
@@ -152,6 +156,7 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
error_log_handler.setLevel(logging.ERROR)
error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True))
log_handlers.append(error_log_handler)
print("File logging enabled")
# Configure the root logger
logging.basicConfig(
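Once `configure_logging()` has run, application modules just use the stdlib logger and the handler routing above applies. A usage sketch (import path assumed from this file's location):

```python
import logging

from autogpt_libs.logging.config import configure_logging  # path assumed

configure_logging()
logger = logging.getLogger(__name__)
logger.info("server starting")   # stdout handler (BelowLevelFilter keeps < WARNING)
logger.error("something broke")  # stderr handler, plus error.log if file logging is on
```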

View File

@@ -1,6 +1,7 @@
import logging
from colorama import Fore, Style
from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler
from .utils import remove_color_codes
@@ -79,3 +80,16 @@ class AGPTFormatter(FancyConsoleFormatter):
return remove_color_codes(super().format(record))
else:
return super().format(record)
class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter):
def __init__(self):
# Set up CloudLoggingFilter to add diagnostic info to the log records
self.cloud_logging_filter = CloudLoggingFilter()
# Init StructuredLogHandler
super().__init__()
def format(self, record: logging.LogRecord) -> str:
self.cloud_logging_filter.filter(record)
return super().format(record)

View File

@@ -2,7 +2,6 @@ import logging
import re
from typing import Any
import uvicorn.config
from colorama import Fore
@@ -26,14 +25,3 @@ def print_attribute(
"color": value_color,
},
)
def generate_uvicorn_config():
"""
Generates a uvicorn logging config that silences uvicorn's default logging and tells it to use the native logging module.
"""
log_config = dict(uvicorn.config.LOGGING_CONFIG)
log_config["loggers"]["uvicorn"] = {"handlers": []}
log_config["loggers"]["uvicorn.error"] = {"handlers": []}
log_config["loggers"]["uvicorn.access"] = {"handlers": []}
return log_config
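A hedged usage sketch for `generate_uvicorn_config`: handing the returned dict to uvicorn empties its default handlers so its records propagate to the root logger configured above (the app string below is a placeholder):

```python
import uvicorn

from autogpt_libs.logging.utils import generate_uvicorn_config  # path assumed

uvicorn.run(
    "backend.server.rest_api:app",  # placeholder app reference
    log_config=generate_uvicorn_config(),
)
```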

View File

@@ -1,59 +1,20 @@
import inspect
import threading
from typing import Awaitable, Callable, ParamSpec, TypeVar, cast, overload
from typing import Callable, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(
func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def _clear():
if hasattr(thread_local, "cache"):
del thread_local.cache
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
if inspect.iscoroutinefunction(func):
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = await cast(Callable[P, Awaitable[R]], func)(
*args, **kwargs
)
return cache[key]
setattr(async_wrapper, "clear_cache", _clear)
return async_wrapper
else:
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
setattr(sync_wrapper, "clear_cache", _clear)
return sync_wrapper
def clear_thread_cache(func: Callable) -> None:
if clear := getattr(func, "clear_cache", None):
clear()
return wrapper
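Either variant of `thread_cached` above is used the same way: results are memoized per thread, keyed on positional and keyword arguments, so each worker thread builds its own value once (the async-aware version additionally exposes `clear_cache` via `clear_thread_cache`). A small usage sketch with made-up names:

```python
import threading

from autogpt_libs.utils.cache import thread_cached  # path assumed

@thread_cached
def get_service_client(name: str) -> str:
    print(f"building client for {name}")  # runs once per (thread, args)
    return f"client:{name}"

get_service_client("db")  # prints, builds, caches
get_service_client("db")  # cache hit -- no print

# A different thread has its own thread-local cache, so it rebuilds:
threading.Thread(target=get_service_client, args=("db",)).start()
```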

View File

@@ -1,15 +1,15 @@
import asyncio
from contextlib import asynccontextmanager
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis.asyncio import Redis as AsyncRedis
from redis.asyncio.lock import Lock as AsyncRedisLock
from redis import Redis
from redis.lock import Lock as RedisLock
class AsyncRedisKeyedMutex:
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
@@ -17,45 +17,41 @@ class AsyncRedisKeyedMutex:
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "AsyncRedis", timeout: int | None = 60):
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "AsyncRedisLock"] = ExpiringDict(
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = asyncio.Lock()
self.locks_lock = Lock()
@asynccontextmanager
async def locked(self, key: Any):
lock = await self.acquire(key)
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if (await lock.locked()) and (await lock.owned()):
await lock.release()
if lock.locked():
lock.release()
async def acquire(self, key: Any) -> "AsyncRedisLock":
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
async with self.locks_lock:
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
await lock.acquire()
lock.acquire()
return lock
async def release(self, key: Any):
if (
(lock := self.locks.get(key))
and (await lock.locked())
and (await lock.owned())
):
await lock.release()
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
async def release_all_locks(self):
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
async with self.locks_lock:
for lock in self.locks.values():
if (await lock.locked()) and (await lock.owned()):
await lock.release()
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
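Usage of the sync variant looks roughly like this (host and key are illustrative); the Redis lock gives cross-process mutual exclusion per key, and `timeout` doubles as an auto-expiry so a crashed holder cannot deadlock others:

```python
from redis import Redis

from autogpt_libs.utils.synchronize import RedisKeyedMutex  # path assumed

mutex = RedisKeyedMutex(Redis(host="localhost", port=6379), timeout=60)

# Only one process at a time may enter this block for the same key.
with mutex.locked(("user-123", "credentials")):
    ...  # critical section for that key
```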

File diff suppressed because it is too large

View File

@@ -7,23 +7,21 @@ readme = "README.md"
packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.12.1"
pydantic = "^2.11.4"
pydantic-settings = "^2.9.1"
google-cloud-logging = "^3.11.3"
pydantic = "^2.10.3"
pydantic-settings = "^2.7.0"
pyjwt = "^2.10.1"
pytest-asyncio = "^0.26.0"
pytest-asyncio = "^0.25.0"
pytest-mock = "^3.14.0"
supabase = "^2.15.1"
launchdarkly-server-sdk = "^9.11.1"
fastapi = "^0.115.12"
uvicorn = "^0.34.3"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.10.0"
[tool.poetry.group.dev.dependencies]
redis = "^5.2.1"
ruff = "^0.12.2"
ruff = "^0.8.6"
[build-system]
requires = ["poetry-core"]

View File

@@ -2,32 +2,19 @@ DB_USER=postgres
DB_PASS=your-super-secret-and-long-postgres-password
DB_NAME=postgres
DB_PORT=5432
DB_HOST=localhost
DB_CONNECTION_LIMIT=12
DB_CONNECT_TIMEOUT=60
DB_POOL_TIMEOUT=300
DB_SCHEMA=platform
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DIRECT_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}?connect_timeout=60&schema=platform"
PRISMA_SCHEMA="postgres/schema.prisma"
# EXECUTOR
NUM_GRAPH_WORKERS=10
BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]
# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='
UNSUBSCRIBE_SECRET_KEY = 'HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio='
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password
ENABLE_CREDIT=false
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=
# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
@@ -35,26 +22,12 @@ BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=
# Email For Postmark so we can send emails
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=
## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
# RabbitMQ credentials -- Used for communication between services
RABBITMQ_HOST=localhost
RABBITMQ_PORT=5672
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
## GCS bucket is required for marketplace and library functionality
MEDIA_GCS_BUCKET_NAME=
## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000
@@ -63,14 +36,7 @@ MEDIA_GCS_BUCKET_NAME=
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000
## Cloudflare Turnstile (CAPTCHA) Configuration
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the backend secret key
TURNSTILE_SECRET_KEY=
## This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
PLATFORM_BASE_URL=https://your-public-url-here
## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
@@ -106,38 +72,20 @@ GOOGLE_CLIENT_SECRET=
TWITTER_CLIENT_ID=
TWITTER_CLIENT_SECRET=
# Linear App
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=
# To obtain Todoist API credentials:
# 1. Create a Todoist account at todoist.com
# 2. Visit the Developer Console: https://developer.todoist.com/appconsole.html
# 3. Click "Create new app"
# 4. Once created, copy your Client ID and Client Secret below
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=
## ===== OPTIONAL API KEYS ===== ##
# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
AIML_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=
LLAMA_API_KEY=
# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app
# Choose "script" for the type
# Fill in the redirect uri as <your_frontend_url>/auth/integrations/oauth_callback, e.g. http://localhost:3000/auth/integrations/oauth_callback
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)"
REDDIT_USERNAME=
REDDIT_PASSWORD=
# Discord
DISCORD_BOT_TOKEN=
@@ -173,40 +121,9 @@ REPLICATE_API_KEY=
# Ideogram
IDEOGRAM_API_KEY=
# Fal
FAL_API_KEY=
# Exa
EXA_API_KEY=
# E2B
E2B_API_KEY=
# Mem0
MEM0_API_KEY=
# Nvidia
NVIDIA_API_KEY=
# Apollo
APOLLO_API_KEY=
# SmartLead
SMARTLEAD_API_KEY=
# ZeroBounce
ZEROBOUNCE_API_KEY=
## ===== OPTIONAL API KEYS END ===== ##
# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false
ENABLE_FILE_LOGGING=false
# Use to manually set the log directory
# LOG_DIR=./logs
# Example Blocks Configuration
# Set to true to enable example blocks in development
# These blocks are disabled by default in production
ENABLE_EXAMPLE_BLOCKS=false

View File

@@ -73,6 +73,7 @@ FROM server_dependencies AS server
COPY autogpt_platform/backend /app/autogpt_platform/backend
RUN poetry install --no-ansi --only-root
ENV DATABASE_URL=""
ENV PORT=8000
CMD ["poetry", "run", "rest"]

View File

@@ -1 +1,75 @@
[Advanced Setup (Dev Branch)](https://dev-docs.agpt.co/platform/advanced_setup/#autogpt_agent_server_advanced_set_up)
# AutoGPT Agent Server Advanced Setup
This guide walks you through a dockerized setup with an external database (Postgres).
## Setup
We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory:
0. Install Poetry
```sh
pip install poetry
```
1. Configure Poetry to use .venv in your project directory
```sh
poetry config virtualenvs.in-project true
```
2. Enter the poetry shell
```sh
poetry shell
```
3. Install dependencies
```sh
poetry install
```
4. Copy .env.example to .env
```sh
cp .env.example .env
```
5. Generate the Prisma client
```sh
poetry run prisma generate
```
> In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
>
> ```sh
> pip uninstall prisma
> ```
>
> Then run the generation again. The path *should* look something like this:
> `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`
6. Run the postgres database from the autogpt_platform folder
```sh
cd autogpt_platform/
docker compose up -d
```
7. Run the migrations (from the backend folder)
```sh
cd ../backend
prisma migrate deploy
```
## Running The Server
### Starting the server directly
Run the following command:
```sh
poetry run app
```

View File

@@ -1 +1,203 @@
[Getting Started (Released)](https://docs.agpt.co/platform/getting-started/#autogpt_agent_server)
# AutoGPT Agent Server
This is an initial project for creating the next generation of agent execution: an AutoGPT agent server.
The agent server will enable the creation of composite multi-agent systems that utilize AutoGPT agents and other non-agent components as primitives.
## Docs
You can access the docs for the [AutoGPT Agent Server here](https://docs.agpt.co/server/setup).
## Setup
We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory:
0. Install Poetry
```sh
pip install poetry
```
1. Configure Poetry to use .venv in your project directory
```sh
poetry config virtualenvs.in-project true
```
2. Enter the poetry shell
```sh
poetry shell
```
3. Install dependencies
```sh
poetry install
```
4. Copy .env.example to .env
```sh
cp .env.example .env
```
5. Generate the Prisma client
```sh
poetry run prisma generate
```
> In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
>
> ```sh
> pip uninstall prisma
> ```
>
> Then run the generation again. The path *should* look something like this:
> `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`
6. Migrate the database. Be careful because this deletes current data in the database.
```sh
docker compose up db -d
poetry run prisma migrate deploy
```
## Running The Server
### Starting the server without Docker
Run the following command to run the database in Docker but the application locally:
```sh
docker compose --profile local up deps --build --detach
poetry run app
```
### Starting the server with Docker
Run the following command to build the dockerfiles:
```sh
docker compose build
```
Run the following command to run the app:
```sh
docker compose up
```
Run the following in another terminal to automatically rebuild when code changes:
```sh
docker compose watch
```
Run the following command to shut down:
```sh
docker compose down
```
If you run into issues with dangling orphans, try:
```sh
docker compose down --volumes --remove-orphans && docker compose up --force-recreate --renew-anon-volumes --remove-orphans
```
## Testing
To run the tests:
```sh
poetry run test
```
## Development
### Formatting & Linting
An auto-formatter and a linter are set up in the project. To run them:
Install:
```sh
poetry install --with dev
```
Format the code:
```sh
poetry run format
```
Lint the code:
```sh
poetry run lint
```
## Project Outline
The current project has the following main modules:
### **blocks**
This module stores all the Agent Blocks, which are reusable components to build a graph that represents the agent's behavior.
### **data**
This module stores the logical model that is persisted in the database.
It abstracts the database operations into functions that can be called by the service layer.
Any code that interacts with Prisma objects or the database should reside in this module.
The main models are:
* `block`: anything related to the block used in the graph
* `execution`: anything related to graph execution
* `graph`: anything related to the graph, node, and its relations
### **execution**
This module stores the business logic of executing the graph.
It currently has the following main modules:
* `manager`: A service that consumes the graph-execution queue and executes the graph. It contains both the queue-consumption and the execution logic.
* `scheduler`: A service that triggers scheduled graph execution based on a cron expression. It pushes an execution request to the manager.
### **server**
This module stores the logic for the server API.
It contains all the logic used for the API that allows the client to create, execute, and monitor the graph and its execution.
This API service interacts with other services like those defined in `manager` and `scheduler`.
### **utils**
This module stores utility functions that are used across the project.
Currently, it has two main modules:
* `process`: A module that contains the logic to spawn a new process.
* `service`: A module that serves as a parent class for all the services in the project.
## Service Communication
Currently, there are only 3 active services:
- AgentServer (the API, defined in `server.py`)
- ExecutionManager (the executor, defined in `manager.py`)
- ExecutionScheduler (the scheduler, defined in `scheduler.py`)
The services run in independent Python processes and communicate through IPC.
A communication layer (`service.py`) is created to decouple the communication library from the implementation.
Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process.
By default the daemons run on the following ports:
* Execution Manager Daemon: 8002
* Execution Scheduler Daemon: 8003
* Rest Server Daemon: 8004
## Adding a New Agent Block
To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information (a minimal sketch follows this list):
* All the block code should live in the `blocks` (`backend.blocks`) module.
* `input_schema`: the schema of the input data, represented by a Pydantic object.
* `output_schema`: the schema of the output data, represented by a Pydantic object.
* `run` method: the main logic of the block.
* `test_input` & `test_output`: the sample input and output data for the block, which will be used to auto-test the block.
* You can mock the functions declared in the block using the `test_mock` field for your unit tests.
* Once you finish creating the block, you can test it by running `poetry run pytest -s test/block/test_block.py`.
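Putting those requirements together, a minimal hypothetical block could look like the sketch below; the UUID, category, and names are placeholders, and the loader additionally requires the class name to end in `Block` and the ID to be a unique 36-character UUID:

```python
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

class EchoBlock(Block):
    """Toy example: yields its input unchanged."""

    class Input(BlockSchema):
        text: str = SchemaField(description="Text to echo back")

    class Output(BlockSchema):
        text: str = SchemaField(description="The same text, unchanged")
        error: str = SchemaField(description="Error message if the block fails")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder; must be unique
            description="Echoes the input text",
            categories={BlockCategory.BASIC},  # category choice illustrative
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
            test_input={"text": "hello"},
            test_output=("text", "hello"),
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "text", input_data.text
```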

View File

@@ -1,237 +0,0 @@
# Backend Testing Guide
This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.
## Table of Contents
- [Overview](#overview)
- [Running Tests](#running-tests)
- [Snapshot Testing](#snapshot-testing)
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
- [Best Practices](#best-practices)
## Overview
The backend uses pytest for testing with the following key libraries:
- `pytest` - Test framework
- `pytest-asyncio` - Async test support
- `pytest-mock` - Mocking support
- `pytest-snapshot` - Snapshot testing for API responses
## Running Tests
### Run all tests
```bash
poetry run test
```
### Run specific test file
```bash
poetry run pytest path/to/test_file.py
```
### Run with verbose output
```bash
poetry run pytest -v
```
### Run with coverage
```bash
poetry run pytest --cov=backend
```
## Snapshot Testing
Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.
### How Snapshot Testing Works
1. First run: Creates snapshot files in `snapshots/` directories
2. Subsequent runs: Compares output against saved snapshots
3. Changes detected: Test fails if output differs from snapshot
### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Snapshot Test Example
```python
import json
from pytest_snapshot.plugin import Snapshot
def test_api_endpoint(snapshot: Snapshot):
response = client.get("/api/endpoint")
# Snapshot the response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"endpoint_response"
)
```
### Best Practices for Snapshots
1. **Use descriptive names**: `"user_list_response"` not `"response1"`
2. **Sort JSON keys**: Ensures consistent snapshots
3. **Format JSON**: Use `indent=2` for readable diffs
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs
Example of excluding dynamic data:
```python
response_data = response.json()
# Remove dynamic fields for snapshot
response_data.pop("created_at", None)
response_data.pop("id", None)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"static_response_data"
)
```
## Writing Tests for API Routes
### Basic Structure
```python
import json
import fastapi
import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)
client = fastapi.testclient.TestClient(app)
def test_endpoint_success(snapshot: Snapshot):
response = client.get("/endpoint")
assert response.status_code == 200
# Test specific fields
data = response.json()
assert data["status"] == "success"
# Snapshot the full response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(data, indent=2, sort_keys=True),
"endpoint_success_response"
)
```
### Testing with Authentication
```python
def override_auth_middleware():
return {"sub": "test-user-id"}
def override_get_user_id():
return "test-user-id"
app.dependency_overrides[auth_middleware] = override_auth_middleware
app.dependency_overrides[get_user_id] = override_get_user_id
```
### Mocking External Services
```python
def test_external_api_call(mocker, snapshot):
# Mock external service
mock_response = {"external": "data"}
mocker.patch(
"backend.services.external_api.call",
return_value=mock_response
)
response = client.post("/api/process")
assert response.status_code == 200
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"process_with_external_response"
)
```
## Best Practices
### 1. Test Organization
- Place tests next to the code: `routes.py` → `routes_test.py`
- Use descriptive test names: `test_create_user_with_invalid_email`
- Group related tests in classes when appropriate
### 2. Test Coverage
- Test happy path and error cases
- Test edge cases (empty data, invalid formats)
- Test authentication and authorization
### 3. Snapshot Testing Guidelines
- Review all snapshot changes carefully
- Don't snapshot sensitive data
- Keep snapshots focused and minimal
- Update snapshots intentionally, not accidentally
### 4. Async Testing
- Use regular `def` for FastAPI TestClient tests
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly
### 5. Fixtures
Create reusable fixtures for common test data:
```python
@pytest.fixture
def sample_user():
return {
"email": "test@example.com",
"name": "Test User"
}
def test_create_user(sample_user, snapshot):
response = client.post("/users", json=sample_user)
# ... test implementation
```
## CI/CD Integration
The GitHub Actions workflow automatically runs tests on:
- Pull requests
- Pushes to main branch
Snapshot tests work in CI by:
1. Committing snapshot files to the repository
2. CI compares against committed snapshots
3. Fails if snapshots don't match
## Troubleshooting
### Snapshot Mismatches
- Review the diff carefully
- If changes are expected: `poetry run pytest --snapshot-update`
- If changes are unexpected: Fix the code causing the difference
### Async Test Issues
- Ensure async functions use `@pytest.mark.asyncio`
- Use `AsyncMock` for mocking async functions
- FastAPI TestClient handles async automatically
### Import Errors
- Check that all dependencies are in `pyproject.toml`
- Run `poetry install` to ensure dependencies are installed
- Verify import paths are correct
## Summary
Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.
Remember: Good tests are as important as good code!

View File

@@ -1,30 +1,22 @@
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced error handling and process lifecycle management.
"""
try:
# Run all processes except the last one in the background.
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground.
# Run the last process in the foreground
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
try:
process.stop()
except Exception as e:
logger.exception(f"[{process.service_name}] unable to stop: {e}")
process.stop()
def main(**kwargs):
@@ -32,16 +24,14 @@ def main(**kwargs):
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, Scheduler
from backend.notifications import NotificationManager
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
Scheduler(),
NotificationManager(),
ExecutionScheduler(),
WebsocketServer(),
AgentServer(),
**kwargs,

View File

@@ -1,112 +1,89 @@
import functools
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
@functools.cache
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
from backend.util.settings import Config
# Check if example blocks should be loaded from settings
config = Config()
load_examples = config.enable_example_blocks
# Dynamically load all modules under backend.blocks
current_dir = Path(__file__).parent
modules = []
for f in current_dir.rglob("*.py"):
if not f.is_file() or f.name == "__init__.py" or f.name.startswith("test_"):
continue
# Skip examples directory if not enabled
relative_path = f.relative_to(current_dir)
if not load_examples and relative_path.parts[0] == "examples":
continue
module_path = str(relative_path)[:-3].replace(os.path.sep, ".")
modules.append(module_path)
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
# Load all Block instances from the available modules
available_blocks: dict[str, type["Block"]] = {}
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in available_blocks:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
available_blocks[block.id] = block_cls
return available_blocks
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
name = block_cls.__name__
if block_cls.__name__.endswith("Base"):
continue
if not block_cls.__name__.endswith("Block"):
raise ValueError(
f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]

View File

@@ -1,8 +1,6 @@
import asyncio
import logging
from typing import Any, Optional
from pydantic import JsonValue
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
@@ -15,9 +13,23 @@ from backend.data.block import (
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json, retry
_logger = logging.getLogger(__name__)
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
@@ -26,31 +38,10 @@ class AgentExecutorBlock(Block):
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
inputs: BlockInput = SchemaField(description="Input data for the graph")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
nodes_input_masks: Optional[dict[str, dict[str, JsonValue]]] = SchemaField(
default=None, hidden=True
)
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("inputs", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
@@ -64,99 +55,37 @@ class AgentExecutorBlock(Block):
categories={BlockCategory.AGENT},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
from backend.executor import utils as execution_utils
graph_exec = await execution_utils.add_graph_execution(
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
inputs=input_data.inputs,
nodes_input_masks=input_data.nodes_input_masks,
use_db_query=False,
data=input_data.data,
)
logger = execution_utils.LogMetadata(
logger=_logger,
user_id=input_data.user_id,
graph_eid=graph_exec.id,
graph_id=input_data.graph_id,
node_eid="*",
node_id="*",
block_name=self.name,
)
try:
async for name, data in self._run(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
logger=logger,
):
yield name, data
except asyncio.CancelledError:
await self._stop(
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
logger=logger,
)
logger.warning(
f"Execution of graph {input_data.graph_id}v{input_data.graph_version} was cancelled."
)
except Exception as e:
await self._stop(
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
logger=logger,
)
logger.error(
f"Execution of graph {input_data.graph_id}v{input_data.graph_version} failed: {e}, execution is stopped."
)
raise
async def _run(
self,
graph_id: str,
graph_version: int,
graph_exec_id: str,
user_id: str,
logger,
) -> BlockOutput:
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
event_bus = execution_utils.get_async_execution_event_bus()
log_id = f"Graph #{graph_id}-V{graph_version}, exec-id: {graph_exec_id}"
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
async for event in event_bus.listen(
user_id=user_id,
graph_id=graph_id,
graph_exec_id=graph_exec_id,
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
if event.status not in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.debug(
f"Execution {log_id} received event {event.event_type} with status {event.status}"
)
continue
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
# If the graph execution is COMPLETED, TERMINATED, or FAILED,
# we can stop listening for further events.
break
logger.debug(
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
@@ -171,29 +100,5 @@ class AgentExecutorBlock(Block):
continue
for output_data in event.output_data.get("output", []):
logger.debug(
f"Execution {log_id} produced {output_name}: {output_data}"
)
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
@retry.func_retry
async def _stop(
self,
graph_exec_id: str,
user_id: str,
logger,
) -> None:
from backend.executor import utils as execution_utils
log_id = f"Graph exec-id: {graph_exec_id}"
logger.info(f"Stopping execution of {log_id}")
try:
await execution_utils.stop_graph_execution(
graph_exec_id=graph_exec_id,
user_id=user_id,
use_db_query=False,
)
logger.info(f"Execution {log_id} stopped successfully.")
except Exception as e:
logger.error(f"Failed to stop execution {log_id}: {e}")

View File

@@ -1,8 +1,8 @@
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.data.block import Block, BlockCategory, BlockSchema
@@ -165,15 +165,15 @@ class AIImageGeneratorBlock(Block):
},
)
async def _run_client(
def _run_client(
self, credentials: APIKeyCredentials, model_name: str, input_params: dict
):
try:
# Initialize Replicate client
client = ReplicateClient(api_token=credentials.api_key.get_secret_value())
client = replicate.Client(api_token=credentials.api_key.get_secret_value())
# Run the model with input parameters
output = await client.async_run(model_name, input=input_params, wait=False)
output = client.run(model_name, input=input_params, wait=False)
# Process output
if isinstance(output, list) and len(output) > 0:
@@ -195,7 +195,7 @@ class AIImageGeneratorBlock(Block):
except Exception as e:
raise RuntimeError(f"Unexpected error during model execution: {e}")
async def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
try:
# Handle style-based prompt modification for models without native style support
modified_prompt = input_data.prompt
@@ -213,7 +213,7 @@ class AIImageGeneratorBlock(Block):
"steps": 40,
"cfg_scale": 7.0,
}
output = await self._run_client(
output = self._run_client(
credentials,
"stability-ai/stable-diffusion-3.5-medium",
input_params,
@@ -231,7 +231,7 @@ class AIImageGeneratorBlock(Block):
"output_format": "jpg", # Set to jpg for Flux models
"output_quality": 90,
}
output = await self._run_client(
output = self._run_client(
credentials, "black-forest-labs/flux-1.1-pro", input_params
)
return output
@@ -246,7 +246,7 @@ class AIImageGeneratorBlock(Block):
"output_format": "jpg",
"output_quality": 90,
}
output = await self._run_client(
output = self._run_client(
credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
)
return output
@@ -257,7 +257,7 @@ class AIImageGeneratorBlock(Block):
"size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
"style": input_data.style.value,
}
output = await self._run_client(
output = self._run_client(
credentials, "recraft-ai/recraft-v3", input_params
)
return output
@@ -296,9 +296,9 @@ class AIImageGeneratorBlock(Block):
style_text = style_map.get(style, "")
return f"{style_text} of" if style_text else ""
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
try:
url = await self.generate_image(input_data, credentials)
url = self.generate_image(input_data, credentials)
if url:
yield "image_url", url
else:

View File

@@ -1,10 +1,10 @@
import asyncio
import logging
import time
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
@@ -142,7 +142,7 @@ class AIMusicGeneratorBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
max_retries = 3
@@ -154,7 +154,7 @@ class AIMusicGeneratorBlock(Block):
logger.debug(
f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
)
result = await self.run_model(
result = self.run_model(
api_key=credentials.api_key,
music_gen_model_version=input_data.music_gen_model_version,
prompt=input_data.prompt,
@@ -176,13 +176,13 @@ class AIMusicGeneratorBlock(Block):
last_error = f"Unexpected error: {str(e)}"
logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
if attempt < max_retries - 1:
await asyncio.sleep(retry_delay)
time.sleep(retry_delay)
continue
# If we've exhausted all retries, yield the error
yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"
async def run_model(
def run_model(
self,
api_key: SecretStr,
music_gen_model_version: MusicGenModelVersion,
@@ -196,10 +196,10 @@ class AIMusicGeneratorBlock(Block):
normalization_strategy: NormalizationStrategy,
):
# Initialize Replicate client with the API key
client = ReplicateClient(api_token=api_key.get_secret_value())
client = replicate.Client(api_token=api_key.get_secret_value())
# Run the model with parameters
output = await client.async_run(
output = client.run(
"meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
input={
"prompt": prompt,

View File

@@ -1,4 +1,3 @@
import asyncio
import logging
import time
from enum import Enum
@@ -14,7 +13,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -53,7 +52,6 @@ class AudioTrack(str, Enum):
REFRESHER = ("Refresher",)
TOURIST = ("Tourist",)
TWIN_TYCHES = ("Twin Tyches",)
DONT_STOP_ME_ABSTRACT_FUTURE_BASS = ("Dont Stop Me Abstract Future Bass",)
@property
def audio_url(self):
@@ -79,7 +77,6 @@ class AudioTrack(str, Enum):
AudioTrack.REFRESHER: "https://cdn.tfrv.xyz/audio/refresher.mp3",
AudioTrack.TOURIST: "https://cdn.tfrv.xyz/audio/tourist.mp3",
AudioTrack.TWIN_TYCHES: "https://cdn.tfrv.xyz/audio/twin-tynches.mp3",
AudioTrack.DONT_STOP_ME_ABSTRACT_FUTURE_BASS: "https://cdn.revid.ai/audio/_dont-stop-me-abstract-future-bass.mp3",
}
return audio_urls[self]
@@ -107,7 +104,6 @@ class GenerationPreset(str, Enum):
MOVIE = ("Movie",)
STYLIZED_ILLUSTRATION = ("Stylized Illustration",)
MANGA = ("Manga",)
DEFAULT = ("DEFAULT",)
class Voice(str, Enum):
@@ -117,7 +113,6 @@ class Voice(str, Enum):
JESSICA = "Jessica"
CHARLOTTE = "Charlotte"
CALLUM = "Callum"
EVA = "Eva"
@property
def voice_id(self):
@@ -128,7 +123,6 @@ class Voice(str, Enum):
Voice.JESSICA: "cgSgspJ2msm6clMCkdW9",
Voice.CHARLOTTE: "XB0fDUnXU5powFXDhCwa",
Voice.CALLUM: "N2lVS1w4EtoT3dr4eOWO",
Voice.EVA: "FGY2WhTYpPnrIDTdsKH5",
}
return voice_id_map[self]
@@ -146,8 +140,6 @@ logger = logging.getLogger(__name__)
class AIShortformVideoCreatorBlock(Block):
"""Creates a shortform texttovideo clip using stock or AI imagery."""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REVID], Literal["api_key"]
@@ -191,58 +183,6 @@ class AIShortformVideoCreatorBlock(Block):
video_url: str = SchemaField(description="The URL of the created video")
error: str = SchemaField(description="Error message if the request failed")
async def create_webhook(self) -> tuple[str, str]:
"""Create a new webhook URL for receiving notifications."""
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
"""Create a video using the Revid API."""
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
"""Check the status of a video creation job."""
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
return response.json()
async def wait_for_video(
self,
api_key: SecretStr,
pid: str,
max_wait_time: int = 1000,
) -> str:
"""Wait for video creation to complete and return the video URL."""
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def __init__(self):
super().__init__(
id="361697fb-0c4f-4feb-aed3-8320c88c771b",
@@ -261,41 +201,91 @@ class AIShortformVideoCreatorBlock(Block):
"voice": Voice.LILY,
"video_style": VisualMediaType.STOCK_VIDEOS,
},
test_output=("video_url", "https://example.com/video.mp4"),
test_output=(
"video_url",
"https://example.com/video.mp4",
),
test_mock={
"create_webhook": lambda *args, **kwargs: (
"create_webhook": lambda: (
"test_uuid",
"https://webhook.site/test_uuid",
),
"create_video": lambda *args, **kwargs: {"pid": "test_pid"},
"check_video_status": lambda *args, **kwargs: {
"status": "ready",
"videoUrl": "https://example.com/video.mp4",
},
"wait_for_video": lambda *args, **kwargs: "https://example.com/video.mp4",
"create_video": lambda api_key, payload: {"pid": "test_pid"},
"wait_for_video": lambda api_key, pid, webhook_token, max_wait_time=1000: "https://example.com/video.mp4",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(
def create_webhook(self):
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = requests.post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
def create_video(self, api_key: SecretStr, payload: dict) -> dict:
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = requests.post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status_code}, Content: {response.text}"
)
return response.json()
def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = requests.get(url, headers=headers)
return response.json()
def wait_for_video(
self,
api_key: SecretStr,
pid: str,
webhook_token: str,
max_wait_time: int = 1000,
) -> str:
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
time.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
# Create a new Webhook.site URL
webhook_token, webhook_url = await self.create_webhook()
webhook_token, webhook_url = self.create_webhook()
logger.debug(f"Webhook URL: {webhook_url}")
audio_url = input_data.background_music.audio_url
payload = {
"frameRate": input_data.frame_rate,
"resolution": input_data.resolution,
"frameDurationMultiplier": 18,
"webhook": None,
"webhook": webhook_url,
"creationParams": {
"mediaType": input_data.video_style,
"captionPresetName": "Wrap 1",
"selectedVoice": input_data.voice.voice_id,
"hasEnhancedGeneration": True,
"generationPreset": input_data.generation_preset.name,
"selectedAudio": input_data.background_music.value,
"selectedAudio": input_data.background_music,
"origin": "/create",
"inputText": input_data.script,
"flowType": "text-to-video",
@@ -311,12 +301,12 @@ class AIShortformVideoCreatorBlock(Block):
"selectedStoryStyle": {"value": "custom", "label": "Custom"},
"hasToGenerateVideos": input_data.video_style
!= VisualMediaType.STOCK_VIDEOS,
"audioUrl": input_data.background_music.audio_url,
"audioUrl": audio_url,
},
}
logger.debug("Creating video...")
response = await self.create_video(credentials.api_key, payload)
response = self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
@@ -328,370 +318,6 @@ class AIShortformVideoCreatorBlock(Block):
logger.debug(
f"Video created with project ID: {pid}. Waiting for completion..."
)
video_url = await self.wait_for_video(credentials.api_key, pid)
video_url = self.wait_for_video(credentials.api_key, pid, webhook_token)
logger.debug(f"Video ready: {video_url}")
yield "video_url", video_url
class AIAdMakerVideoCreatorBlock(Block):
"""Generates a 30second vertical AI advert using optional usersupplied imagery."""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REVID], Literal["api_key"]
] = CredentialsField(
description="Credentials for Revid.ai API access.",
)
script: str = SchemaField(
description="Short advertising copy. Line breaks create new scenes.",
placeholder="Introducing Foobar [show product photo] the gadget that does it all.",
)
ratio: str = SchemaField(description="Aspect ratio", default="9 / 16")
target_duration: int = SchemaField(
description="Desired length of the ad in seconds.", default=30
)
voice: Voice = SchemaField(
description="Narration voice", default=Voice.EVA, placeholder=Voice.EVA
)
background_music: AudioTrack = SchemaField(
description="Background track",
default=AudioTrack.DONT_STOP_ME_ABSTRACT_FUTURE_BASS,
)
input_media_urls: list[str] = SchemaField(
description="List of image URLs to feature in the advert.", default=[]
)
use_only_provided_media: bool = SchemaField(
description="Restrict visuals to supplied images only.", default=True
)
class Output(BlockSchema):
video_url: str = SchemaField(description="URL of the finished advert")
error: str = SchemaField(description="Error message on failure")
async def create_webhook(self) -> tuple[str, str]:
"""Create a new webhook URL for receiving notifications."""
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
"""Create a video using the Revid API."""
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
"""Check the status of a video creation job."""
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
return response.json()
async def wait_for_video(
self,
api_key: SecretStr,
pid: str,
max_wait_time: int = 1000,
) -> str:
"""Wait for video creation to complete and return the video URL."""
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def __init__(self):
super().__init__(
id="58bd2a19-115d-4fd1-8ca4-13b9e37fa6a0",
description="Creates an AIgenerated 30second advert (text + images)",
categories={BlockCategory.MARKETING, BlockCategory.AI},
input_schema=AIAdMakerVideoCreatorBlock.Input,
output_schema=AIAdMakerVideoCreatorBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"script": "Test product launch!",
"input_media_urls": [
"https://cdn.revid.ai/uploads/1747076315114-image.png",
],
},
test_output=("video_url", "https://example.com/ad.mp4"),
test_mock={
"create_webhook": lambda *args, **kwargs: (
"test_uuid",
"https://webhook.site/test_uuid",
),
"create_video": lambda *args, **kwargs: {"pid": "test_pid"},
"check_video_status": lambda *args, **kwargs: {
"status": "ready",
"videoUrl": "https://example.com/ad.mp4",
},
"wait_for_video": lambda *args, **kwargs: "https://example.com/ad.mp4",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
webhook_token, webhook_url = await self.create_webhook()
payload = {
"webhook": webhook_url,
"creationParams": {
"targetDuration": input_data.target_duration,
"ratio": input_data.ratio,
"mediaType": "aiVideo",
"inputText": input_data.script,
"flowType": "text-to-video",
"slug": "ai-ad-generator",
"slugNew": "",
"isCopiedFrom": False,
"hasToGenerateVoice": True,
"hasToTranscript": False,
"hasToSearchMedia": True,
"hasAvatar": False,
"hasWebsiteRecorder": False,
"hasTextSmallAtBottom": False,
"selectedAudio": input_data.background_music.value,
"selectedVoice": input_data.voice.voice_id,
"selectedAvatar": "https://cdn.revid.ai/avatars/young-woman.mp4",
"selectedAvatarType": "video/mp4",
"websiteToRecord": "",
"hasToGenerateCover": True,
"nbGenerations": 1,
"disableCaptions": False,
"mediaMultiplier": "medium",
"characters": [],
"captionPresetName": "Revid",
"sourceType": "contentScraping",
"selectedStoryStyle": {"value": "custom", "label": "General"},
"generationPreset": "DEFAULT",
"hasToGenerateMusic": False,
"isOptimizedForChinese": False,
"generationUserPrompt": "",
"enableNsfwFilter": False,
"addStickers": False,
"typeMovingImageAnim": "dynamic",
"hasToGenerateSoundEffects": False,
"forceModelType": "gpt-image-1",
"selectedCharacters": [],
"lang": "",
"voiceSpeed": 1,
"disableAudio": False,
"disableVoice": False,
"useOnlyProvidedMedia": input_data.use_only_provided_media,
"imageGenerationModel": "ultra",
"videoGenerationModel": "pro",
"hasEnhancedGeneration": True,
"hasEnhancedGenerationPro": True,
"inputMedias": [
{"url": url, "title": "", "type": "image"}
for url in input_data.input_media_urls
],
"hasToGenerateVideos": True,
"audioUrl": input_data.background_music.audio_url,
"watermark": None,
},
}
response = await self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
raise RuntimeError("Failed to create video: No project ID returned")
video_url = await self.wait_for_video(credentials.api_key, pid)
yield "video_url", video_url
class AIScreenshotToVideoAdBlock(Block):
"""Creates an advert where the supplied screenshot is narrated by an AI avatar."""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REVID], Literal["api_key"]
] = CredentialsField(description="Revid.ai API key")
script: str = SchemaField(
description="Narration that will accompany the screenshot.",
placeholder="Check out these amazing stats!",
)
screenshot_url: str = SchemaField(
description="Screenshot or image URL to showcase."
)
ratio: str = SchemaField(default="9 / 16")
target_duration: int = SchemaField(default=30)
voice: Voice = SchemaField(default=Voice.EVA)
background_music: AudioTrack = SchemaField(
default=AudioTrack.DONT_STOP_ME_ABSTRACT_FUTURE_BASS
)
class Output(BlockSchema):
video_url: str = SchemaField(description="Rendered video URL")
error: str = SchemaField(description="Error, if encountered")
async def create_webhook(self) -> tuple[str, str]:
"""Create a new webhook URL for receiving notifications."""
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
"""Create a video using the Revid API."""
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
"""Check the status of a video creation job."""
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
return response.json()
async def wait_for_video(
self,
api_key: SecretStr,
pid: str,
max_wait_time: int = 1000,
) -> str:
"""Wait for video creation to complete and return the video URL."""
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def __init__(self):
super().__init__(
id="0f3e4635-e810-43d9-9e81-49e6f4e83b7c",
description="Turns a screenshot into an engaging, avatarnarrated video advert.",
categories={BlockCategory.AI, BlockCategory.MARKETING},
input_schema=AIScreenshotToVideoAdBlock.Input,
output_schema=AIScreenshotToVideoAdBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"script": "Amazing numbers!",
"screenshot_url": "https://cdn.revid.ai/uploads/1747080376028-image.png",
},
test_output=("video_url", "https://example.com/screenshot.mp4"),
test_mock={
"create_webhook": lambda *args, **kwargs: (
"test_uuid",
"https://webhook.site/test_uuid",
),
"create_video": lambda *args, **kwargs: {"pid": "test_pid"},
"check_video_status": lambda *args, **kwargs: {
"status": "ready",
"videoUrl": "https://example.com/screenshot.mp4",
},
"wait_for_video": lambda *args, **kwargs: "https://example.com/screenshot.mp4",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
webhook_token, webhook_url = await self.create_webhook()
payload = {
"webhook": webhook_url,
"creationParams": {
"targetDuration": input_data.target_duration,
"ratio": input_data.ratio,
"mediaType": "aiVideo",
"hasAvatar": True,
"removeAvatarBackground": True,
"inputText": input_data.script,
"flowType": "text-to-video",
"slug": "ai-ad-generator",
"slugNew": "screenshot-to-video-ad",
"isCopiedFrom": "ai-ad-generator",
"hasToGenerateVoice": True,
"hasToTranscript": False,
"hasToSearchMedia": True,
"hasWebsiteRecorder": False,
"hasTextSmallAtBottom": False,
"selectedAudio": input_data.background_music.value,
"selectedVoice": input_data.voice.voice_id,
"selectedAvatar": "https://cdn.revid.ai/avatars/young-woman.mp4",
"selectedAvatarType": "video/mp4",
"websiteToRecord": "",
"hasToGenerateCover": True,
"nbGenerations": 1,
"disableCaptions": False,
"mediaMultiplier": "medium",
"characters": [],
"captionPresetName": "Revid",
"sourceType": "contentScraping",
"selectedStoryStyle": {"value": "custom", "label": "General"},
"generationPreset": "DEFAULT",
"hasToGenerateMusic": False,
"isOptimizedForChinese": False,
"generationUserPrompt": "",
"enableNsfwFilter": False,
"addStickers": False,
"typeMovingImageAnim": "dynamic",
"hasToGenerateSoundEffects": False,
"forceModelType": "gpt-image-1",
"selectedCharacters": [],
"lang": "",
"voiceSpeed": 1,
"disableAudio": False,
"disableVoice": False,
"useOnlyProvidedMedia": True,
"imageGenerationModel": "ultra",
"videoGenerationModel": "ultra",
"hasEnhancedGeneration": True,
"hasEnhancedGenerationPro": True,
"inputMedias": [
{"url": input_data.screenshot_url, "title": "", "type": "image"}
],
"hasToGenerateVideos": True,
"audioUrl": input_data.background_music.audio_url,
"watermark": None,
},
}
response = await self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
raise RuntimeError("Failed to create video: No project ID returned")
video_url = await self.wait_for_video(credentials.api_key, pid)
yield "video_url", video_url

View File

@@ -1,84 +0,0 @@
"""
Airtable integration for AutoGPT Platform.
This integration provides comprehensive access to the Airtable Web API,
including:
- Webhook triggers and management
- Record CRUD operations
- Attachment uploads
- Schema and table management
- Metadata operations
"""
# Attachments
from .attachments import AirtableUploadAttachmentBlock
# Metadata
from .metadata import (
AirtableGetViewBlock,
AirtableListBasesBlock,
AirtableListViewsBlock,
)
# Record Operations
from .records import (
AirtableCreateRecordsBlock,
AirtableDeleteRecordsBlock,
AirtableGetRecordBlock,
AirtableListRecordsBlock,
AirtableUpdateRecordsBlock,
AirtableUpsertRecordsBlock,
)
# Schema & Table Management
from .schema import (
AirtableAddFieldBlock,
AirtableCreateTableBlock,
AirtableDeleteFieldBlock,
AirtableDeleteTableBlock,
AirtableListSchemaBlock,
AirtableUpdateFieldBlock,
AirtableUpdateTableBlock,
)
# Webhook Triggers
from .triggers import AirtableWebhookTriggerBlock
# Webhook Management
from .webhooks import (
AirtableCreateWebhookBlock,
AirtableDeleteWebhookBlock,
AirtableFetchWebhookPayloadsBlock,
AirtableRefreshWebhookBlock,
)
__all__ = [
# Webhook Triggers
"AirtableWebhookTriggerBlock",
# Webhook Management
"AirtableCreateWebhookBlock",
"AirtableDeleteWebhookBlock",
"AirtableFetchWebhookPayloadsBlock",
"AirtableRefreshWebhookBlock",
# Record Operations
"AirtableCreateRecordsBlock",
"AirtableDeleteRecordsBlock",
"AirtableGetRecordBlock",
"AirtableListRecordsBlock",
"AirtableUpdateRecordsBlock",
"AirtableUpsertRecordsBlock",
# Attachments
"AirtableUploadAttachmentBlock",
# Schema & Table Management
"AirtableAddFieldBlock",
"AirtableCreateTableBlock",
"AirtableDeleteFieldBlock",
"AirtableDeleteTableBlock",
"AirtableListSchemaBlock",
"AirtableUpdateFieldBlock",
"AirtableUpdateTableBlock",
# Metadata
"AirtableGetViewBlock",
"AirtableListBasesBlock",
"AirtableListViewsBlock",
]

View File

@@ -1,16 +0,0 @@
"""
Shared configuration for all Airtable blocks using the SDK pattern.
"""
from backend.sdk import BlockCostType, ProviderBuilder
from ._webhook import AirtableWebhookManager
# Configure the Airtable provider with API key authentication
airtable = (
ProviderBuilder("airtable")
.with_api_key("AIRTABLE_API_KEY", "Airtable Personal Access Token")
.with_webhook_manager(AirtableWebhookManager)
.with_base_cost(1, BlockCostType.RUN)
.build()
)
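Every Airtable block module below consumes this provider object the same way, through airtable.credentials_field; the pattern in miniature (the block class here is purely illustrative):

from backend.sdk import Block, BlockSchema, CredentialsMetaInput

from ._config import airtable


class ExampleAirtableBlock(Block):  # illustrative, not a real block
    class Input(BlockSchema):
        credentials: CredentialsMetaInput = airtable.credentials_field(
            description="Airtable API credentials"
        )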

View File

@@ -1,125 +0,0 @@
"""
Webhook management for Airtable blocks.
"""
import hashlib
import hmac
from enum import Enum
from typing import Tuple
from backend.sdk import (
APIKeyCredentials,
BaseWebhooksManager,
Credentials,
ProviderName,
Requests,
Webhook,
)
class AirtableWebhookManager(BaseWebhooksManager):
"""Webhook manager for Airtable API."""
PROVIDER_NAME = ProviderName("airtable")
class WebhookType(str, Enum):
TABLE_CHANGE = "table_change"
@classmethod
async def validate_payload(cls, webhook: Webhook, request) -> Tuple[dict, str]:
"""Validate incoming webhook payload and signature."""
payload = await request.json()
# Verify webhook signature using HMAC-SHA256
if webhook.secret:
mac_secret = webhook.config.get("mac_secret")
if mac_secret:
# Get the raw body for signature verification
body = await request.body()
# Calculate expected signature
expected_mac = hmac.new(
mac_secret.encode(), body, hashlib.sha256
).hexdigest()
# Get signature from headers
signature = request.headers.get("X-Airtable-Content-MAC")
if signature and not hmac.compare_digest(signature, expected_mac):
raise ValueError("Invalid webhook signature")
# Airtable sends the cursor in the payload
event_type = "notification"
return payload, event_type
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> Tuple[str, dict]:
"""Register webhook with Airtable API."""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("Airtable webhooks require API key credentials")
api_key = credentials.api_key.get_secret_value()
# Parse resource to get base_id and table_id/name
# Resource format: "{base_id}/{table_id_or_name}"
parts = resource.split("/", 1)
if len(parts) != 2:
raise ValueError("Resource must be in format: {base_id}/{table_id_or_name}")
base_id, table_id_or_name = parts
# Prepare webhook specification
specification = {
"filters": {
"dataTypes": events or ["tableData", "tableFields", "tableMetadata"]
}
}
# If specific table is provided, add to specification
if table_id_or_name and table_id_or_name != "*":
specification["filters"]["recordChangeScope"] = [table_id_or_name]
# Create webhook
response = await Requests().post(
f"https://api.airtable.com/v0/bases/{base_id}/webhooks",
headers={"Authorization": f"Bearer {api_key}"},
json={"notificationUrl": ingress_url, "specification": specification},
)
webhook_data = response.json()
webhook_id = webhook_data["id"]
mac_secret = webhook_data.get("macSecretBase64")
return webhook_id, {
"base_id": base_id,
"table_id_or_name": table_id_or_name,
"events": events,
"mac_secret": mac_secret,
"cursor": 1, # Start from cursor 1
"expiration_time": webhook_data.get("expirationTime"),
}
async def _deregister_webhook(
self, webhook: Webhook, credentials: Credentials
) -> None:
"""Deregister webhook from Airtable API."""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("Airtable webhooks require API key credentials")
api_key = credentials.api_key.get_secret_value()
base_id = webhook.config.get("base_id")
if not base_id:
raise ValueError("Missing base_id in webhook metadata")
await Requests().delete(
f"https://api.airtable.com/v0/bases/{base_id}/webhooks/{webhook.provider_webhook_id}",
headers={"Authorization": f"Bearer {api_key}"},
)
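validate_payload above verifies the X-Airtable-Content-MAC header by recomputing an HMAC-SHA256 hex digest over the raw request body. The same check in isolation (verify_airtable_mac is an illustrative name; mac_secret is the value returned at webhook registration):

import hashlib
import hmac


def verify_airtable_mac(mac_secret: str, body: bytes, signature: str) -> bool:
    """Recompute the digest and compare in constant time, as validate_payload does."""
    expected = hmac.new(mac_secret.encode(), body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(signature, expected)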

View File

@@ -1,98 +0,0 @@
"""
Airtable attachment blocks.
"""
import base64
from typing import Union
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import airtable
class AirtableUploadAttachmentBlock(Block):
"""
Uploads a file to Airtable for use as an attachment.
Files can be uploaded directly (up to 5MB) or via URL.
The returned attachment ID can be used when creating or updating records.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
filename: str = SchemaField(description="Name of the file")
file: Union[bytes, str] = SchemaField(
description="File content (binary data or base64 string)"
)
content_type: str = SchemaField(
description="MIME type of the file", default="application/octet-stream"
)
class Output(BlockSchema):
attachment: dict = SchemaField(
description="Attachment object with id, url, size, and type"
)
attachment_id: str = SchemaField(description="ID of the uploaded attachment")
url: str = SchemaField(description="URL of the uploaded attachment")
size: int = SchemaField(description="Size of the file in bytes")
def __init__(self):
super().__init__(
id="962e801b-5a6f-4c56-a929-83e816343a41",
description="Upload a file to Airtable for use as an attachment",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Convert file to base64 if it's bytes
if isinstance(input_data.file, bytes):
file_data = base64.b64encode(input_data.file).decode("utf-8")
else:
# Assume it's already base64 encoded
file_data = input_data.file
# Check file size (5MB limit)
file_bytes = base64.b64decode(file_data)
if len(file_bytes) > 5 * 1024 * 1024:
raise ValueError(
"File size exceeds 5MB limit. Use URL upload for larger files."
)
# Upload the attachment
response = await Requests().post(
f"https://api.airtable.com/v0/bases/{input_data.base_id}/attachments/upload",
headers={
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
},
json={
"content": file_data,
"filename": input_data.filename,
"type": input_data.content_type,
},
)
attachment_data = response.json()
yield "attachment", attachment_data
yield "attachment_id", attachment_data.get("id", "")
yield "url", attachment_data.get("url", "")
yield "size", attachment_data.get("size", 0)

View File

@@ -1,145 +0,0 @@
"""
Airtable metadata blocks for bases and views.
"""
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import airtable
class AirtableListBasesBlock(Block):
"""
Lists all Airtable bases accessible by the API token.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
class Output(BlockSchema):
bases: list[dict] = SchemaField(
description="Array of base objects with id and name"
)
def __init__(self):
super().__init__(
id="613f9907-bef8-468a-be6d-2dd7a53f96e7",
description="List all accessible Airtable bases",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# List bases
response = await Requests().get(
"https://api.airtable.com/v0/meta/bases",
headers={"Authorization": f"Bearer {api_key}"},
)
data = response.json()
yield "bases", data.get("bases", [])
class AirtableListViewsBlock(Block):
"""
Lists all views in an Airtable base with their associated tables.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
class Output(BlockSchema):
views: list[dict] = SchemaField(
description="Array of view objects with tableId"
)
def __init__(self):
super().__init__(
id="3878cf82-d384-40c2-aace-097042233f6a",
description="List all views in an Airtable base",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Get base schema which includes views
response = await Requests().get(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables",
headers={"Authorization": f"Bearer {api_key}"},
)
data = response.json()
# Extract all views from all tables
all_views = []
for table in data.get("tables", []):
table_id = table.get("id")
for view in table.get("views", []):
view_with_table = {**view, "tableId": table_id}
all_views.append(view_with_table)
yield "views", all_views
class AirtableGetViewBlock(Block):
"""
Gets detailed information about a specific view in an Airtable base.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
view_id: str = SchemaField(description="The view ID to retrieve")
class Output(BlockSchema):
view: dict = SchemaField(description="Full view object with configuration")
def __init__(self):
super().__init__(
id="ad0dd9f3-b3f4-446b-8142-e81a566797c4",
description="Get details of a specific Airtable view",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Get specific view
response = await Requests().get(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/views/{input_data.view_id}",
headers={"Authorization": f"Bearer {api_key}"},
)
view_data = response.json()
yield "view", view_data

View File

@@ -1,395 +0,0 @@
"""
Airtable record operation blocks.
"""
from typing import Optional
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import airtable
class AirtableListRecordsBlock(Block):
"""
Lists records from an Airtable table with optional filtering, sorting, and pagination.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id_or_name: str = SchemaField(description="Table ID or name", default="")
filter_formula: str = SchemaField(
description="Airtable formula to filter records", default=""
)
view: str = SchemaField(description="View ID or name to use", default="")
sort: list[dict] = SchemaField(
description="Sort configuration (array of {field, direction})", default=[]
)
max_records: int = SchemaField(
description="Maximum number of records to return", default=100
)
page_size: int = SchemaField(
description="Number of records per page (max 100)", default=100
)
offset: str = SchemaField(
description="Pagination offset from previous request", default=""
)
return_fields: list[str] = SchemaField(
description="Specific fields to return (comma-separated)", default=[]
)
class Output(BlockSchema):
records: list[dict] = SchemaField(description="Array of record objects")
offset: Optional[str] = SchemaField(
description="Offset for next page (null if no more records)", default=None
)
def __init__(self):
super().__init__(
id="588a9fde-5733-4da7-b03c-35f5671e960f",
description="List records from an Airtable table",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {}
if input_data.filter_formula:
params["filterByFormula"] = input_data.filter_formula
if input_data.view:
params["view"] = input_data.view
if input_data.sort:
for i, sort_config in enumerate(input_data.sort):
params[f"sort[{i}][field]"] = sort_config.get("field", "")
params[f"sort[{i}][direction]"] = sort_config.get("direction", "asc")
if input_data.max_records:
params["maxRecords"] = input_data.max_records
if input_data.page_size:
params["pageSize"] = min(input_data.page_size, 100)
if input_data.offset:
params["offset"] = input_data.offset
if input_data.return_fields:
for i, field in enumerate(input_data.return_fields):
params[f"fields[{i}]"] = field
# Make request
response = await Requests().get(
f"https://api.airtable.com/v0/{input_data.base_id}/{input_data.table_id_or_name}",
headers={"Authorization": f"Bearer {api_key}"},
params=params,
)
data = response.json()
yield "records", data.get("records", [])
yield "offset", data.get("offset", None)
class AirtableGetRecordBlock(Block):
"""
Retrieves a single record from an Airtable table by its ID.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id_or_name: str = SchemaField(description="Table ID or name", default="")
record_id: str = SchemaField(description="The record ID to retrieve")
return_fields: list[str] = SchemaField(
description="Specific fields to return", default=[]
)
class Output(BlockSchema):
record: dict = SchemaField(description="The record object")
def __init__(self):
super().__init__(
id="c29c5cbf-0aff-40f9-bbb5-f26061792d2b",
description="Get a single record from Airtable",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {}
if input_data.return_fields:
for i, field in enumerate(input_data.return_fields):
params[f"fields[{i}]"] = field
# Make request
response = await Requests().get(
f"https://api.airtable.com/v0/{input_data.base_id}/{input_data.table_id_or_name}/{input_data.record_id}",
headers={"Authorization": f"Bearer {api_key}"},
params=params,
)
record = response.json()
yield "record", record
class AirtableCreateRecordsBlock(Block):
"""
Creates one or more records in an Airtable table.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id_or_name: str = SchemaField(description="Table ID or name", default="")
records: list[dict] = SchemaField(
description="Array of records to create (each with 'fields' object)"
)
typecast: bool = SchemaField(
description="Automatically convert string values to appropriate types",
default=False,
)
return_fields: list[str] = SchemaField(
description="Specific fields to return in created records", default=[]
)
class Output(BlockSchema):
records: list[dict] = SchemaField(description="Array of created record objects")
def __init__(self):
super().__init__(
id="42527e98-47b6-44ce-ac0e-86b4883721d3",
description="Create records in an Airtable table",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body = {"records": input_data.records, "typecast": input_data.typecast}
# Build query parameters for return fields
params = {}
if input_data.return_fields:
for i, field in enumerate(input_data.return_fields):
params[f"fields[{i}]"] = field
# Make request
response = await Requests().post(
f"https://api.airtable.com/v0/{input_data.base_id}/{input_data.table_id_or_name}",
headers={"Authorization": f"Bearer {api_key}"},
json=body,
params=params,
)
data = response.json()
yield "records", data.get("records", [])
class AirtableUpdateRecordsBlock(Block):
"""
Updates one or more existing records in an Airtable table.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id_or_name: str = SchemaField(description="Table ID or name", default="")
records: list[dict] = SchemaField(
description="Array of records to update (each with 'id' and 'fields')"
)
typecast: bool = SchemaField(
description="Automatically convert string values to appropriate types",
default=False,
)
return_fields: list[str] = SchemaField(
description="Specific fields to return in updated records", default=[]
)
class Output(BlockSchema):
records: list[dict] = SchemaField(description="Array of updated record objects")
def __init__(self):
super().__init__(
id="6e7d2590-ac2b-4b5d-b08c-fc039cd77e1f",
description="Update records in an Airtable table",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body = {"records": input_data.records, "typecast": input_data.typecast}
# Build query parameters for return fields
params = {}
if input_data.return_fields:
for i, field in enumerate(input_data.return_fields):
params[f"fields[{i}]"] = field
# Make request
response = await Requests().patch(
f"https://api.airtable.com/v0/{input_data.base_id}/{input_data.table_id_or_name}",
headers={"Authorization": f"Bearer {api_key}"},
json=body,
params=params,
)
data = response.json()
yield "records", data.get("records", [])
class AirtableUpsertRecordsBlock(Block):
"""
Creates or updates records in an Airtable table based on a merge field.
If a record with the same value in the merge field exists, it will be updated.
Otherwise, a new record will be created.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id_or_name: str = SchemaField(description="Table ID or name", default="")
records: list[dict] = SchemaField(
description="Array of records to upsert (each with 'fields' object)"
)
merge_field: str = SchemaField(
description="Field to use for matching existing records"
)
typecast: bool = SchemaField(
description="Automatically convert string values to appropriate types",
default=False,
)
return_fields: list[str] = SchemaField(
description="Specific fields to return in upserted records", default=[]
)
class Output(BlockSchema):
records: list[dict] = SchemaField(
description="Array of created/updated record objects"
)
def __init__(self):
super().__init__(
id="99f78a9d-3418-429f-a6fb-9d2166638e99",
description="Create or update records based on a merge field",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body = {
"performUpsert": {"fieldsToMergeOn": [input_data.merge_field]},
"records": input_data.records,
"typecast": input_data.typecast,
}
# Build query parameters for return fields
params = {}
if input_data.return_fields:
for i, field in enumerate(input_data.return_fields):
params[f"fields[{i}]"] = field
# Make request
response = await Requests().post(
f"https://api.airtable.com/v0/{input_data.base_id}/{input_data.table_id_or_name}",
headers={"Authorization": f"Bearer {api_key}"},
json=body,
params=params,
)
data = response.json()
yield "records", data.get("records", [])
class AirtableDeleteRecordsBlock(Block):
"""
Deletes one or more records from an Airtable table.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id_or_name: str = SchemaField(description="Table ID or name", default="")
record_ids: list[str] = SchemaField(description="Array of record IDs to delete")
class Output(BlockSchema):
records: list[dict] = SchemaField(description="Array of deletion results")
def __init__(self):
super().__init__(
id="93e22b8b-3642-4477-aefb-1c0929a4a3a6",
description="Delete records from an Airtable table",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {}
for i, record_id in enumerate(input_data.record_ids):
params[f"records[{i}]"] = record_id
# Make request
response = await Requests().delete(
f"https://api.airtable.com/v0/{input_data.base_id}/{input_data.table_id_or_name}",
headers={"Authorization": f"Bearer {api_key}"},
params=params,
)
data = response.json()
yield "records", data.get("records", [])

View File

@@ -1,328 +0,0 @@
"""
Airtable schema and table management blocks.
"""
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import airtable
class AirtableListSchemaBlock(Block):
"""
Retrieves the complete schema of an Airtable base, including all tables,
fields, and views.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
class Output(BlockSchema):
base_schema: dict = SchemaField(
description="Complete base schema with tables, fields, and views"
)
tables: list[dict] = SchemaField(description="Array of table objects")
def __init__(self):
super().__init__(
id="64291d3c-99b5-47b7-a976-6d94293cdb2d",
description="Get the complete schema of an Airtable base",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Get base schema
response = await Requests().get(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables",
headers={"Authorization": f"Bearer {api_key}"},
)
data = response.json()
yield "base_schema", data
yield "tables", data.get("tables", [])
class AirtableCreateTableBlock(Block):
"""
Creates a new table in an Airtable base with specified fields and views.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_definition: dict = SchemaField(
description="Table definition with name, description, fields, and views",
default={
"name": "New Table",
"fields": [{"name": "Name", "type": "singleLineText"}],
},
)
class Output(BlockSchema):
table: dict = SchemaField(description="Created table object")
table_id: str = SchemaField(description="ID of the created table")
def __init__(self):
super().__init__(
id="fcc20ced-d817-42ea-9b40-c35e7bf34b4f",
description="Create a new table in an Airtable base",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Create table
response = await Requests().post(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables",
headers={"Authorization": f"Bearer {api_key}"},
json=input_data.table_definition,
)
table_data = response.json()
yield "table", table_data
yield "table_id", table_data.get("id", "")
class AirtableUpdateTableBlock(Block):
"""
Updates an existing table's properties such as name or description.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id: str = SchemaField(description="The table ID to update")
patch: dict = SchemaField(
description="Properties to update (name, description)", default={}
)
class Output(BlockSchema):
table: dict = SchemaField(description="Updated table object")
def __init__(self):
super().__init__(
id="34077c5f-f962-49f2-9ec6-97c67077013a",
description="Update table properties",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Update table
response = await Requests().patch(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables/{input_data.table_id}",
headers={"Authorization": f"Bearer {api_key}"},
json=input_data.patch,
)
table_data = response.json()
yield "table", table_data
class AirtableDeleteTableBlock(Block):
"""
Deletes a table from an Airtable base.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id: str = SchemaField(description="The table ID to delete")
class Output(BlockSchema):
deleted: bool = SchemaField(
description="Confirmation that the table was deleted"
)
def __init__(self):
super().__init__(
id="6b96c196-d0ad-4fb2-981f-7a330549bc22",
description="Delete a table from an Airtable base",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Delete table
response = await Requests().delete(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables/{input_data.table_id}",
headers={"Authorization": f"Bearer {api_key}"},
)
deleted = response.status in [200, 204]
yield "deleted", deleted
class AirtableAddFieldBlock(Block):
"""
Adds a new field (column) to an existing Airtable table.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id: str = SchemaField(description="The table ID to add field to")
field_definition: dict = SchemaField(
description="Field definition with name, type, and options",
default={"name": "New Field", "type": "singleLineText"},
)
class Output(BlockSchema):
field: dict = SchemaField(description="Created field object")
field_id: str = SchemaField(description="ID of the created field")
def __init__(self):
super().__init__(
id="6c98a32f-dbf9-45d8-a2a8-5e97e8326351",
description="Add a new field to an Airtable table",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Add field
response = await Requests().post(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables/{input_data.table_id}/fields",
headers={"Authorization": f"Bearer {api_key}"},
json=input_data.field_definition,
)
field_data = response.json()
yield "field", field_data
yield "field_id", field_data.get("id", "")
class AirtableUpdateFieldBlock(Block):
"""
Updates an existing field's properties in an Airtable table.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id: str = SchemaField(description="The table ID containing the field")
field_id: str = SchemaField(description="The field ID to update")
patch: dict = SchemaField(description="Field properties to update", default={})
class Output(BlockSchema):
field: dict = SchemaField(description="Updated field object")
def __init__(self):
super().__init__(
id="f46ac716-3b18-4da1-92e4-34ca9a464d48",
description="Update field properties in an Airtable table",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Update field
response = await Requests().patch(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables/{input_data.table_id}/fields/{input_data.field_id}",
headers={"Authorization": f"Bearer {api_key}"},
json=input_data.patch,
)
field_data = response.json()
yield "field", field_data
class AirtableDeleteFieldBlock(Block):
"""
Deletes a field from an Airtable table.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID", default="")
table_id: str = SchemaField(description="The table ID containing the field")
field_id: str = SchemaField(description="The field ID to delete")
class Output(BlockSchema):
deleted: bool = SchemaField(
description="Confirmation that the field was deleted"
)
def __init__(self):
super().__init__(
id="ca6ebacb-be8b-4c54-80a3-1fb519ad51c6",
description="Delete a field from an Airtable table",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Delete field
response = await Requests().delete(
f"https://api.airtable.com/v0/meta/bases/{input_data.base_id}/tables/{input_data.table_id}/fields/{input_data.field_id}",
headers={"Authorization": f"Bearer {api_key}"},
)
deleted = response.status in [200, 204]
yield "deleted", deleted

View File

@@ -1,149 +0,0 @@
"""
Airtable webhook trigger blocks.
"""
from backend.sdk import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockType,
BlockWebhookConfig,
CredentialsMetaInput,
ProviderName,
SchemaField,
)
from ._config import airtable
class AirtableWebhookTriggerBlock(Block):
"""
Starts a flow whenever Airtable pings your webhook URL.
If auto-fetch is enabled, it automatically fetches the full payloads
after receiving the notification.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
webhook_url: str = SchemaField(
description="URL to receive webhooks (auto-generated)",
default="",
hidden=True,
)
base_id: str = SchemaField(
description="The Airtable base ID to monitor",
default="",
)
table_id_or_name: str = SchemaField(
description="Table ID or name to monitor (leave empty for all tables)",
default="",
)
event_types: list[str] = SchemaField(
description="Event types to listen for",
default=["tableData", "tableFields", "tableMetadata"],
)
auto_fetch: bool = SchemaField(
description="Automatically fetch full payloads after notification",
default=True,
)
payload: dict = SchemaField(
description="Webhook payload data",
default={},
hidden=True,
)
class Output(BlockSchema):
ping: dict = SchemaField(description="Raw webhook notification body")
headers: dict = SchemaField(description="Webhook request headers")
verified: bool = SchemaField(
description="Whether the webhook signature was verified"
)
# Fields populated when auto_fetch is True
payloads: list[dict] = SchemaField(
description="Array of change payloads (when auto-fetch is enabled)",
default=[],
)
next_cursor: int = SchemaField(
description="Next cursor for pagination (when auto-fetch is enabled)",
default=0,
)
might_have_more: bool = SchemaField(
description="Whether there might be more payloads (when auto-fetch is enabled)",
default=False,
)
def __init__(self):
super().__init__(
id="d0180ce6-ccb9-48c7-8256-b39e93e62801",
description="Starts a flow whenever Airtable pings your webhook URL",
categories={BlockCategory.INPUT},
input_schema=self.Input,
output_schema=self.Output,
block_type=BlockType.WEBHOOK,
webhook_config=BlockWebhookConfig(
provider=ProviderName("airtable"),
webhook_type="table_change",
# event_filter_input="event_types",
resource_format="{base_id}/{table_id_or_name}",
),
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
payload = input_data.payload
# Extract headers from the webhook request (passed through kwargs)
headers = kwargs.get("webhook_headers", {})
# Check if signature was verified (handled by webhook manager)
verified = True # Webhook manager raises error if verification fails
# Output basic webhook data
yield "ping", payload
yield "headers", headers
yield "verified", verified
# If auto-fetch is enabled and we have a cursor, fetch the full payloads
if input_data.auto_fetch and payload.get("base", {}).get("id"):
base_id = payload["base"]["id"]
webhook_id = payload.get("webhook", {}).get("id", "")
cursor = payload.get("cursor", 1)
if webhook_id and cursor:
# Get credentials from kwargs
credentials = kwargs.get("credentials")
if credentials:
# Fetch payloads using the Airtable API
api_key = credentials.api_key.get_secret_value()
from backend.sdk import Requests
response = await Requests().get(
f"https://api.airtable.com/v0/bases/{base_id}/webhooks/{webhook_id}/payloads",
headers={"Authorization": f"Bearer {api_key}"},
params={"cursor": cursor},
)
if response.status == 200:
data = response.json()
yield "payloads", data.get("payloads", [])
yield "next_cursor", data.get("cursor", cursor)
yield "might_have_more", data.get("mightHaveMore", False)
else:
# On error, still output empty payloads
yield "payloads", []
yield "next_cursor", cursor
yield "might_have_more", False
else:
# No credentials, can't fetch
yield "payloads", []
yield "next_cursor", cursor
yield "might_have_more", False
else:
# Auto-fetch disabled or missing data
yield "payloads", []
yield "next_cursor", 0
yield "might_have_more", False

View File

@@ -1,229 +0,0 @@
"""
Airtable webhook management blocks.
"""
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import airtable
class AirtableFetchWebhookPayloadsBlock(Block):
"""
Fetches accumulated event payloads for a webhook.
Use this to pull the full change details after receiving a webhook notification,
or run on a schedule to poll for changes.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID")
webhook_id: str = SchemaField(
description="The webhook ID to fetch payloads for"
)
cursor: int = SchemaField(
description="Cursor position (0 = all payloads)", default=0
)
class Output(BlockSchema):
payloads: list[dict] = SchemaField(description="Array of webhook payloads")
next_cursor: int = SchemaField(description="Next cursor for pagination")
might_have_more: bool = SchemaField(
description="Whether there might be more payloads"
)
def __init__(self):
super().__init__(
id="7172db38-e338-4561-836f-9fa282c99949",
description="Fetch webhook payloads from Airtable",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Fetch payloads from Airtable
params = {}
if input_data.cursor > 0:
params["cursor"] = input_data.cursor
response = await Requests().get(
f"https://api.airtable.com/v0/bases/{input_data.base_id}/webhooks/{input_data.webhook_id}/payloads",
headers={"Authorization": f"Bearer {api_key}"},
params=params,
)
data = response.json()
yield "payloads", data.get("payloads", [])
yield "next_cursor", data.get("cursor", input_data.cursor)
yield "might_have_more", data.get("mightHaveMore", False)
class AirtableRefreshWebhookBlock(Block):
"""
Refreshes a webhook to extend its expiration by another 7 days.
Webhooks expire after 7 days of inactivity. Use this block in a daily
cron job to keep long-lived webhooks active.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID")
webhook_id: str = SchemaField(description="The webhook ID to refresh")
class Output(BlockSchema):
expiration_time: str = SchemaField(
description="New expiration time (ISO format)"
)
webhook: dict = SchemaField(description="Full webhook object")
def __init__(self):
super().__init__(
id="5e82d957-02b8-47eb-8974-7bdaf8caff78",
description="Refresh a webhook to extend its expiration",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Refresh the webhook
response = await Requests().post(
f"https://api.airtable.com/v0/bases/{input_data.base_id}/webhooks/{input_data.webhook_id}/refresh",
headers={"Authorization": f"Bearer {api_key}"},
)
webhook_data = response.json()
yield "expiration_time", webhook_data.get("expirationTime", "")
yield "webhook", webhook_data
class AirtableCreateWebhookBlock(Block):
"""
Creates a new webhook for monitoring changes in an Airtable base.
The webhook will send notifications to the specified URL when changes occur.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID to monitor")
notification_url: str = SchemaField(
description="URL to receive webhook notifications"
)
specification: dict = SchemaField(
description="Webhook specification (filters, options)",
default={
"filters": {"dataTypes": ["tableData", "tableFields", "tableMetadata"]}
},
)
class Output(BlockSchema):
webhook: dict = SchemaField(description="Created webhook object")
webhook_id: str = SchemaField(description="ID of the created webhook")
mac_secret: str = SchemaField(
description="MAC secret for signature verification"
)
expiration_time: str = SchemaField(description="Webhook expiration time")
def __init__(self):
super().__init__(
id="b9f1f4ec-f4d1-4fbd-ab0b-b219c0e4da9a",
description="Create a new Airtable webhook",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Create the webhook
response = await Requests().post(
f"https://api.airtable.com/v0/bases/{input_data.base_id}/webhooks",
headers={"Authorization": f"Bearer {api_key}"},
json={
"notificationUrl": input_data.notification_url,
"specification": input_data.specification,
},
)
webhook_data = response.json()
yield "webhook", webhook_data
yield "webhook_id", webhook_data.get("id", "")
yield "mac_secret", webhook_data.get("macSecretBase64", "")
yield "expiration_time", webhook_data.get("expirationTime", "")
class AirtableDeleteWebhookBlock(Block):
"""
Deletes a webhook from an Airtable base.
This will stop all notifications from the webhook.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = airtable.credentials_field(
description="Airtable API credentials"
)
base_id: str = SchemaField(description="The Airtable base ID")
webhook_id: str = SchemaField(description="The webhook ID to delete")
class Output(BlockSchema):
deleted: bool = SchemaField(
description="Whether the webhook was successfully deleted"
)
def __init__(self):
super().__init__(
id="e4ded448-1515-4fe2-b93e-3e4db527df83",
description="Delete an Airtable webhook",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Delete the webhook
response = await Requests().delete(
f"https://api.airtable.com/v0/bases/{input_data.base_id}/webhooks/{input_data.webhook_id}",
headers={"Authorization": f"Bearer {api_key}"},
)
# Check if deletion was successful
deleted = response.status in [200, 204]
yield "deleted", deleted

View File

@@ -1,131 +0,0 @@
import logging
from typing import List
from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
Contact,
EnrichPersonRequest,
Organization,
SearchOrganizationsRequest,
SearchOrganizationsResponse,
SearchPeopleRequest,
SearchPeopleResponse,
)
from backend.util.request import Requests
logger = logging.getLogger(__name__)
class ApolloClient:
"""Client for the Apollo API"""
API_URL = "https://api.apollo.io/api/v1"
def __init__(self, credentials: ApolloCredentials):
self.credentials = credentials
self.requests = Requests()
def _get_headers(self) -> dict[str, str]:
return {"x-api-key": self.credentials.api_key.get_secret_value()}
async def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
"""Search for people in Apollo"""
response = await self.requests.post(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
if parsed_response.pagination.total_entries == 0:
return []
people = parsed_response.people
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(people) < query.max_results
):
while (
len(people) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.people) > 0
):
query.page += 1
response = await self.requests.post(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
people.extend(parsed_response.people[: query.max_results - len(people)])
logger.info(f"Found {len(people)} people")
return people[: query.max_results] if query.max_results else people
async def search_organizations(
self, query: SearchOrganizationsRequest
) -> List[Organization]:
"""Search for organizations in Apollo"""
response = await self.requests.post(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
if parsed_response.pagination.total_entries == 0:
return []
organizations = parsed_response.organizations
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(organizations) < query.max_results
):
while (
len(organizations) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.organizations) > 0
):
query.page += 1
response = await self.requests.post(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
organizations.extend(
parsed_response.organizations[
: query.max_results - len(organizations)
]
)
logger.info(f"Found {len(organizations)} organizations")
return (
organizations[: query.max_results] if query.max_results else organizations
)
async def enrich_person(self, query: EnrichPersonRequest) -> Contact:
"""Enrich a person's data including email & phone reveal"""
response = await self.requests.post(
f"{self.API_URL}/people/match",
headers=self._get_headers(),
json=query.model_dump(),
params={
"reveal_personal_emails": "true",
},
)
data = response.json()
if "person" not in data:
raise ValueError(f"Person not found or enrichment failed: {data}")
contact = Contact(**data["person"])
contact.email = contact.email or "-"
return contact
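# --- Illustrative usage sketch (not part of the original module) ---
# Assumes an async caller and a platform-injected ApolloCredentials instance;
# the query values below are placeholders.
async def _demo_search_people(credentials: ApolloCredentials) -> None:
    client = ApolloClient(credentials)
    query = SearchPeopleRequest(person_titles=["marketing manager"], max_results=10)
    for person in await client.search_people(query):
        print(person.name, person.title)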

View File

@@ -1,607 +0,0 @@
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel as OriginalBaseModel
from pydantic import ConfigDict
from backend.data.model import SchemaField
class BaseModel(OriginalBaseModel):
def model_dump(self, *args, exclude: set[str] | None = None, **kwargs):
if exclude is None:
# a set literal is needed here; set("credentials") would build a set of single characters
exclude = {"credentials"}
else:
exclude.add("credentials")
kwargs.setdefault("exclude_none", True)
kwargs.setdefault("exclude_unset", True)
kwargs.setdefault("exclude_defaults", True)
return super().model_dump(*args, exclude=exclude, **kwargs)
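# Behavior check for the override above (illustrative, expected shape only):
# credentials are always excluded and unset/None/default fields are dropped,
# so a freshly built request (SearchPeopleRequest is defined further down in
# this module) serializes to a compact payload, e.g.
#   SearchPeopleRequest(q_keywords="fintech").model_dump()
#   -> {"q_keywords": "fintech"}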
class PrimaryPhone(BaseModel):
"""A primary phone in Apollo"""
number: Optional[str] = ""
source: Optional[str] = ""
sanitized_number: Optional[str] = ""
class SenorityLevels(str, Enum):
"""Seniority levels in Apollo"""
OWNER = "owner"
FOUNDER = "founder"
C_SUITE = "c_suite"
PARTNER = "partner"
VP = "vp"
HEAD = "head"
DIRECTOR = "director"
MANAGER = "manager"
SENIOR = "senior"
ENTRY = "entry"
INTERN = "intern"
class ContactEmailStatuses(str, Enum):
"""Contact email statuses in Apollo"""
VERIFIED = "verified"
UNVERIFIED = "unverified"
LIKELY_TO_ENGAGE = "likely_to_engage"
UNAVAILABLE = "unavailable"
class RuleConfigStatus(BaseModel):
"""A rule config status in Apollo"""
_id: Optional[str] = ""
created_at: Optional[str] = ""
rule_action_config_id: Optional[str] = ""
rule_config_id: Optional[str] = ""
status_cd: Optional[str] = ""
updated_at: Optional[str] = ""
id: Optional[str] = ""
key: Optional[str] = ""
class ContactCampaignStatus(BaseModel):
"""A contact campaign status in Apollo"""
id: Optional[str] = ""
emailer_campaign_id: Optional[str] = ""
send_email_from_user_id: Optional[str] = ""
inactive_reason: Optional[str] = ""
status: Optional[str] = ""
added_at: Optional[str] = ""
added_by_user_id: Optional[str] = ""
finished_at: Optional[str] = ""
paused_at: Optional[str] = ""
auto_unpause_at: Optional[str] = ""
send_email_from_email_address: Optional[str] = ""
send_email_from_email_account_id: Optional[str] = ""
manually_set_unpause: Optional[str] = ""
failure_reason: Optional[str] = ""
current_step_id: Optional[str] = ""
in_response_to_emailer_message_id: Optional[str] = ""
cc_emails: Optional[str] = ""
bcc_emails: Optional[str] = ""
to_emails: Optional[str] = ""
class Account(BaseModel):
"""An account in Apollo"""
id: Optional[str] = ""
name: Optional[str] = ""
website_url: Optional[str] = ""
blog_url: Optional[str] = ""
angellist_url: Optional[str] = ""
linkedin_url: Optional[str] = ""
twitter_url: Optional[str] = ""
facebook_url: Optional[str] = ""
primary_phone: Optional[PrimaryPhone] = PrimaryPhone()
languages: Optional[list[str]] = []
alexa_ranking: Optional[int] = 0
phone: Optional[str] = ""
linkedin_uid: Optional[str] = ""
founded_year: Optional[int] = 0
publicly_traded_symbol: Optional[str] = ""
publicly_traded_exchange: Optional[str] = ""
logo_url: Optional[str] = ""
chrunchbase_url: Optional[str] = ""
primary_domain: Optional[str] = ""
domain: Optional[str] = ""
team_id: Optional[str] = ""
organization_id: Optional[str] = ""
account_stage_id: Optional[str] = ""
source: Optional[str] = ""
original_source: Optional[str] = ""
creator_id: Optional[str] = ""
owner_id: Optional[str] = ""
created_at: Optional[str] = ""
phone_status: Optional[str] = ""
hubspot_id: Optional[str] = ""
salesforce_id: Optional[str] = ""
crm_owner_id: Optional[str] = ""
parent_account_id: Optional[str] = ""
sanitized_phone: Optional[str] = ""
# no listed type on the API docs
account_playbook_statues: Optional[list[Any]] = []
account_rule_config_statuses: Optional[list[RuleConfigStatus]] = []
existence_level: Optional[str] = ""
label_ids: Optional[list[str]] = []
typed_custom_fields: Optional[Any] = {}
custom_field_errors: Optional[Any] = {}
modality: Optional[str] = ""
source_display_name: Optional[str] = ""
salesforce_record_id: Optional[str] = ""
crm_record_url: Optional[str] = ""
class ContactEmail(BaseModel):
"""A contact email in Apollo"""
email: Optional[str] = ""
email_md5: Optional[str] = ""
email_sha256: Optional[str] = ""
email_status: Optional[str] = ""
email_source: Optional[str] = ""
extrapolated_email_confidence: Optional[str] = ""
position: Optional[int] = 0
email_from_customer: Optional[str] = ""
free_domain: Optional[bool] = True
class EmploymentHistory(BaseModel):
"""An employment history in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
_id: Optional[str] = ""
created_at: Optional[str] = ""
current: Optional[bool] = False
degree: Optional[str] = ""
description: Optional[str] = ""
emails: Optional[str] = ""
end_date: Optional[str] = ""
grade_level: Optional[str] = ""
kind: Optional[str] = ""
major: Optional[str] = ""
organization_id: Optional[str] = ""
organization_name: Optional[str] = ""
raw_address: Optional[str] = ""
start_date: Optional[str] = ""
title: Optional[str] = ""
updated_at: Optional[str] = ""
id: Optional[str] = ""
key: Optional[str] = ""
class Breadcrumb(BaseModel):
"""A breadcrumb in Apollo"""
label: Optional[str] = ""
signal_field_name: Optional[str] = ""
value: str | list | None = ""
display_name: Optional[str] = ""
class TypedCustomField(BaseModel):
"""A typed custom field in Apollo"""
id: Optional[str] = ""
value: Optional[str] = ""
class Pagination(BaseModel):
"""Pagination in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
page: int = 0
per_page: int = 0
total_entries: int = 0
total_pages: int = 0
class DialerFlags(BaseModel):
"""A dialer flags in Apollo"""
country_name: Optional[str] = ""
country_enabled: Optional[bool] = True
high_risk_calling_enabled: Optional[bool] = True
potential_high_risk_number: Optional[bool] = True
class PhoneNumber(BaseModel):
"""A phone number in Apollo"""
raw_number: Optional[str] = ""
sanitized_number: Optional[str] = ""
type: Optional[str] = ""
position: Optional[int] = 0
status: Optional[str] = ""
dnc_status: Optional[str] = ""
dnc_other_info: Optional[str] = ""
dailer_flags: Optional[DialerFlags] = DialerFlags(
country_name="",
country_enabled=True,
high_risk_calling_enabled=True,
potential_high_risk_number=True,
)
class Organization(BaseModel):
"""An organization in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
id: Optional[str] = ""
name: Optional[str] = ""
website_url: Optional[str] = ""
blog_url: Optional[str] = ""
angellist_url: Optional[str] = ""
linkedin_url: Optional[str] = ""
twitter_url: Optional[str] = ""
facebook_url: Optional[str] = ""
primary_phone: Optional[PrimaryPhone] = PrimaryPhone()
languages: Optional[list[str]] = []
alexa_ranking: Optional[int] = 0
phone: Optional[str] = ""
linkedin_uid: Optional[str] = ""
founded_year: Optional[int] = 0
publicly_traded_symbol: Optional[str] = ""
publicly_traded_exchange: Optional[str] = ""
logo_url: Optional[str] = ""
chrunchbase_url: Optional[str] = ""
primary_domain: Optional[str] = ""
sanitized_phone: Optional[str] = ""
owned_by_organization_id: Optional[str] = ""
intent_strength: Optional[str] = ""
show_intent: Optional[bool] = True
has_intent_signal_account: Optional[bool] = True
intent_signal_account: Optional[str] = ""
class Contact(BaseModel):
"""A contact in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
contact_roles: Optional[list[Any]] = []
id: Optional[str] = ""
first_name: Optional[str] = ""
last_name: Optional[str] = ""
name: Optional[str] = ""
linkedin_url: Optional[str] = ""
title: Optional[str] = ""
contact_stage_id: Optional[str] = ""
owner_id: Optional[str] = ""
creator_id: Optional[str] = ""
person_id: Optional[str] = ""
email_needs_tickling: Optional[bool] = True
organization_name: Optional[str] = ""
source: Optional[str] = ""
original_source: Optional[str] = ""
organization_id: Optional[str] = ""
headline: Optional[str] = ""
photo_url: Optional[str] = ""
present_raw_address: Optional[str] = ""
linkededin_uid: Optional[str] = ""
extrapolated_email_confidence: Optional[float] = 0.0
salesforce_id: Optional[str] = ""
salesforce_lead_id: Optional[str] = ""
salesforce_contact_id: Optional[str] = ""
saleforce_account_id: Optional[str] = ""
crm_owner_id: Optional[str] = ""
created_at: Optional[str] = ""
emailer_campaign_ids: Optional[list[str]] = []
direct_dial_status: Optional[str] = ""
direct_dial_enrichment_failed_at: Optional[str] = ""
email_status: Optional[str] = ""
email_source: Optional[str] = ""
account_id: Optional[str] = ""
last_activity_date: Optional[str] = ""
hubspot_vid: Optional[str] = ""
hubspot_company_id: Optional[str] = ""
crm_id: Optional[str] = ""
sanitized_phone: Optional[str] = ""
merged_crm_ids: Optional[str] = ""
updated_at: Optional[str] = ""
queued_for_crm_push: Optional[bool] = True
suggested_from_rule_engine_config_id: Optional[str] = ""
email_unsubscribed: Optional[str] = ""
label_ids: Optional[list[Any]] = []
has_pending_email_arcgate_request: Optional[bool] = True
has_email_arcgate_request: Optional[bool] = True
existence_level: Optional[str] = ""
email: Optional[str] = ""
email_from_customer: Optional[str] = ""
typed_custom_fields: Optional[list[TypedCustomField]] = []
custom_field_errors: Optional[Any] = {}
salesforce_record_id: Optional[str] = ""
crm_record_url: Optional[str] = ""
email_status_unavailable_reason: Optional[str] = ""
email_true_status: Optional[str] = ""
updated_email_true_status: Optional[bool] = True
contact_rule_config_statuses: Optional[list[RuleConfigStatus]] = []
source_display_name: Optional[str] = ""
twitter_url: Optional[str] = ""
contact_campaign_statuses: Optional[list[ContactCampaignStatus]] = []
state: Optional[str] = ""
city: Optional[str] = ""
country: Optional[str] = ""
account: Optional[Account] = Account()
contact_emails: Optional[list[ContactEmail]] = []
organization: Optional[Organization] = Organization()
employment_history: Optional[list[EmploymentHistory]] = []
time_zone: Optional[str] = ""
intent_strength: Optional[str] = ""
show_intent: Optional[bool] = True
phone_numbers: Optional[list[PhoneNumber]] = []
account_phone_note: Optional[str] = ""
free_domain: Optional[bool] = True
is_likely_to_engage: Optional[bool] = True
email_domain_catchall: Optional[bool] = True
contact_job_change_event: Optional[str] = ""
class SearchOrganizationsRequest(BaseModel):
"""Request for Apollo's search organizations API"""
organization_num_employees_range: Optional[list[int]] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default=[0, 1000000],
)
organization_locations: Optional[list[str]] = SchemaField(
description="""The location of the company headquarters. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
To exclude companies based on location, use the organization_not_locations parameter.
""",
default_factory=list,
)
organizations_not_locations: Optional[list[str]] = SchemaField(
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
default_factory=list,
)
q_organization_keyword_tags: Optional[list[str]] = SchemaField(
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
default_factory=list,
)
q_organization_name: Optional[str] = SchemaField(
description="""Filter search results to include a specific company name.
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
default="",
)
organization_ids: Optional[list[str]] = SchemaField(
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, identify the values for organization_id when you call this endpoint.""",
default_factory=list,
)
max_results: Optional[int] = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
page: int = SchemaField(
description="""The page number of the Apollo data that you want to retrieve.
Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.
default=1,
)
per_page: int = SchemaField(
description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.
Use the page parameter to search the different pages of data.""",
default=100,
)
class SearchOrganizationsResponse(BaseModel):
"""Response from Apollo's search organizations API"""
breadcrumbs: Optional[list[Breadcrumb]] = []
partial_results_only: Optional[bool] = True
has_join: Optional[bool] = True
disable_eu_prospecting: Optional[bool] = True
partial_results_limit: Optional[int] = 0
pagination: Pagination = Pagination(
page=0, per_page=0, total_entries=0, total_pages=0
)
# no listed type on the API docs
accounts: list[Any] = []
organizations: list[Organization] = []
models_ids: list[str] = []
num_fetch_result: Optional[str] = ""
derived_params: Optional[str] = ""
class SearchPeopleRequest(BaseModel):
"""Request for Apollo's search people API"""
person_titles: Optional[list[str]] = SchemaField(
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
""",
default_factory=list,
placeholder="marketing manager",
)
person_locations: Optional[list[str]] = SchemaField(
description="""The location where people live. You can search across cities, US states, and countries.
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
default_factory=list,
)
person_seniorities: Optional[list[SenorityLevels]] = SchemaField(
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
default_factory=list,
)
organization_locations: Optional[list[str]] = SchemaField(
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
To find people based on their personal location, use the person_locations parameter.""",
default_factory=list,
)
q_organization_domains: Optional[list[str]] = SchemaField(
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
You can add multiple domains to search across companies.
Examples: apollo.io and microsoft.com""",
default_factory=list,
)
contact_email_statuses: Optional[list[ContactEmailStatuses]] = SchemaField(
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
default_factory=list,
)
organization_ids: Optional[list[str]] = SchemaField(
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
default_factory=list,
)
organization_num_employees_range: Optional[list[int]] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default_factory=list,
)
q_keywords: Optional[str] = SchemaField(
description="""A string of words over which we want to filter the results""",
default="",
)
page: int = SchemaField(
description="""The page number of the Apollo data that you want to retrieve.
Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.
default=1,
)
per_page: int = SchemaField(
description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.
Use the page parameter to search the different pages of data.""",
default=100,
)
max_results: Optional[int] = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
class SearchPeopleResponse(BaseModel):
"""Response from Apollo's search people API"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
breadcrumbs: Optional[list[Breadcrumb]] = []
partial_results_only: Optional[bool] = True
has_join: Optional[bool] = True
disable_eu_prospecting: Optional[bool] = True
partial_results_limit: Optional[int] = 0
pagination: Pagination = Pagination(
page=0, per_page=0, total_entries=0, total_pages=0
)
contacts: list[Contact] = []
people: list[Contact] = []
model_ids: list[str] = []
num_fetch_result: Optional[str] = ""
derived_params: Optional[str] = ""
class EnrichPersonRequest(BaseModel):
"""Request for Apollo's person enrichment API"""
person_id: Optional[str] = SchemaField(
description="Apollo person ID to enrich (most accurate method)",
default="",
)
first_name: Optional[str] = SchemaField(
description="First name of the person to enrich",
default="",
)
last_name: Optional[str] = SchemaField(
description="Last name of the person to enrich",
default="",
)
name: Optional[str] = SchemaField(
description="Full name of the person to enrich",
default="",
)
email: Optional[str] = SchemaField(
description="Email address of the person to enrich",
default="",
)
domain: Optional[str] = SchemaField(
description="Company domain of the person to enrich",
default="",
)
company: Optional[str] = SchemaField(
description="Company name of the person to enrich",
default="",
)
linkedin_url: Optional[str] = SchemaField(
description="LinkedIn URL of the person to enrich",
default="",
)
organization_id: Optional[str] = SchemaField(
description="Apollo organization ID of the person's company",
default="",
)
title: Optional[str] = SchemaField(
description="Job title of the person to enrich",
default="",
)
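# Illustrative request shapes (values are made up): the enrichment endpoint
# matches on whichever identifiers are supplied, person_id being the most
# precise.
#   EnrichPersonRequest(person_id="5f3a...")                    # by Apollo ID
#   EnrichPersonRequest(name="Jane Doe", domain="example.com")  # fuzzy match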

View File

@@ -1,217 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ApolloCredentials,
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
Organization,
PrimaryPhone,
SearchOrganizationsRequest,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, SchemaField
class SearchOrganizationsBlock(Block):
"""Search for organizations in Apollo"""
class Input(BlockSchema):
organization_num_employees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default=[0, 1000000],
)
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
To exclude companies based on location, use the organization_not_locations parameter.
""",
default_factory=list,
)
organizations_not_locations: list[str] = SchemaField(
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
default_factory=list,
)
q_organization_keyword_tags: list[str] = SchemaField(
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
default_factory=list,
)
q_organization_name: str = SchemaField(
description="""Filter search results to include a specific company name.
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
default="",
advanced=False,
)
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, identify the values for organization_id when you call this endpoint.""",
default_factory=list,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
credentials: ApolloCredentialsInput = CredentialsField(
description="Apollo credentials",
)
class Output(BlockSchema):
organizations: list[Organization] = SchemaField(
description="List of organizations found",
default_factory=list,
)
organization: Organization = SchemaField(
description="Each found organization, one at a time",
)
error: str = SchemaField(
description="Error message if the search failed",
default="",
)
def __init__(self):
super().__init__(
id="3d71270d-599e-4148-9b95-71b35d2f44f0",
description="Search for organizations in Apollo",
categories={BlockCategory.SEARCH},
input_schema=SearchOrganizationsBlock.Input,
output_schema=SearchOrganizationsBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={"query": "Google", "credentials": TEST_CREDENTIALS_INPUT},
test_output=[
(
"organization",
Organization(
id="1",
name="Google",
website_url="https://google.com",
blog_url="https://google.com/blog",
angellist_url="https://angel.co/google",
linkedin_url="https://linkedin.com/company/google",
twitter_url="https://twitter.com/google",
facebook_url="https://facebook.com/google",
primary_phone=PrimaryPhone(
source="google",
number="1234567890",
sanitized_number="1234567890",
),
languages=["en"],
alexa_ranking=1000,
phone="1234567890",
linkedin_uid="1234567890",
founded_year=2000,
publicly_traded_symbol="GOOGL",
publicly_traded_exchange="NASDAQ",
logo_url="https://google.com/logo.png",
chrunchbase_url="https://chrunchbase.com/google",
primary_domain="google.com",
sanitized_phone="1234567890",
owned_by_organization_id="1",
intent_strength="strong",
show_intent=True,
has_intent_signal_account=True,
intent_signal_account="1",
),
),
(
"organizations",
[
Organization(
id="1",
name="Google",
website_url="https://google.com",
blog_url="https://google.com/blog",
angellist_url="https://angel.co/google",
linkedin_url="https://linkedin.com/company/google",
twitter_url="https://twitter.com/google",
facebook_url="https://facebook.com/google",
primary_phone=PrimaryPhone(
source="google",
number="1234567890",
sanitized_number="1234567890",
),
languages=["en"],
alexa_ranking=1000,
phone="1234567890",
linkedin_uid="1234567890",
founded_year=2000,
publicly_traded_symbol="GOOGL",
publicly_traded_exchange="NASDAQ",
logo_url="https://google.com/logo.png",
chrunchbase_url="https://chrunchbase.com/google",
primary_domain="google.com",
sanitized_phone="1234567890",
owned_by_organization_id="1",
intent_strength="strong",
show_intent=True,
has_intent_signal_account=True,
intent_signal_account="1",
),
],
),
],
test_mock={
"search_organizations": lambda *args, **kwargs: [
Organization(
id="1",
name="Google",
website_url="https://google.com",
blog_url="https://google.com/blog",
angellist_url="https://angel.co/google",
linkedin_url="https://linkedin.com/company/google",
twitter_url="https://twitter.com/google",
facebook_url="https://facebook.com/google",
primary_phone=PrimaryPhone(
source="google",
number="1234567890",
sanitized_number="1234567890",
),
languages=["en"],
alexa_ranking=1000,
phone="1234567890",
linkedin_uid="1234567890",
founded_year=2000,
publicly_traded_symbol="GOOGL",
publicly_traded_exchange="NASDAQ",
logo_url="https://google.com/logo.png",
chrunchbase_url="https://chrunchbase.com/google",
primary_domain="google.com",
sanitized_phone="1234567890",
owned_by_organization_id="1",
intent_strength="strong",
show_intent=True,
has_intent_signal_account=True,
intent_signal_account="1",
)
]
},
)
@staticmethod
async def search_organizations(
query: SearchOrganizationsRequest, credentials: ApolloCredentials
) -> list[Organization]:
client = ApolloClient(credentials)
return await client.search_organizations(query)
async def run(
self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
) -> BlockOutput:
query = SearchOrganizationsRequest(**input_data.model_dump())
organizations = await self.search_organizations(query, credentials)
for organization in organizations:
yield "organization", organization
yield "organizations", organizations

View File

@@ -1,363 +0,0 @@
import asyncio
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ApolloCredentials,
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
Contact,
ContactEmailStatuses,
EnrichPersonRequest,
SearchPeopleRequest,
SenorityLevels,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, SchemaField
class SearchPeopleBlock(Block):
"""Search for people in Apollo"""
class Input(BlockSchema):
person_titles: list[str] = SchemaField(
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
""",
default_factory=list,
advanced=False,
)
person_locations: list[str] = SchemaField(
description="""The location where people live. You can search across cities, US states, and countries.
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
default_factory=list,
advanced=False,
)
person_seniorities: list[SenorityLevels] = SchemaField(
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
default_factory=list,
advanced=False,
)
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
To find people based on their personal location, use the person_locations parameter.""",
default_factory=list,
advanced=False,
)
q_organization_domains: list[str] = SchemaField(
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
You can add multiple domains to search across companies.
Examples: apollo.io and microsoft.com""",
default_factory=list,
advanced=False,
)
contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
default_factory=list,
advanced=False,
)
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
default_factory=list,
advanced=False,
)
organization_num_employees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default_factory=list,
advanced=False,
)
q_keywords: str = SchemaField(
description="""A string of words over which we want to filter the results""",
default="",
advanced=False,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 25. Limited to 500 to prevent overspending.""",
default=25,
ge=1,
le=500,
advanced=True,
)
enrich_info: bool = SchemaField(
description="""Whether to enrich contacts with detailed information including real email addresses. This will double the search cost.""",
default=False,
advanced=True,
)
credentials: ApolloCredentialsInput = CredentialsField(
description="Apollo credentials",
)
class Output(BlockSchema):
people: list[Contact] = SchemaField(
description="List of people found",
default_factory=list,
)
error: str = SchemaField(
description="Error message if the search failed",
default="",
)
def __init__(self):
super().__init__(
id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6",
description="Search for people in Apollo",
categories={BlockCategory.SEARCH},
input_schema=SearchPeopleBlock.Input,
output_schema=SearchPeopleBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={"credentials": TEST_CREDENTIALS_INPUT},
test_output=[
(
"people",
[
Contact(
contact_roles=[],
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
linkedin_url="https://www.linkedin.com/in/johndoe",
title="Software Engineer",
organization_name="Google",
organization_id="123456",
contact_stage_id="1",
owner_id="1",
creator_id="1",
person_id="1",
email_needs_tickling=True,
source="apollo",
original_source="apollo",
headline="Software Engineer",
photo_url="https://www.linkedin.com/in/johndoe",
present_raw_address="123 Main St, Anytown, USA",
linkededin_uid="123456",
extrapolated_email_confidence=0.8,
salesforce_id="123456",
salesforce_lead_id="123456",
salesforce_contact_id="123456",
saleforce_account_id="123456",
crm_owner_id="123456",
created_at="2021-01-01",
emailer_campaign_ids=[],
direct_dial_status="active",
direct_dial_enrichment_failed_at="2021-01-01",
email_status="active",
email_source="apollo",
account_id="123456",
last_activity_date="2021-01-01",
hubspot_vid="123456",
hubspot_company_id="123456",
crm_id="123456",
sanitized_phone="123456",
merged_crm_ids="123456",
updated_at="2021-01-01",
queued_for_crm_push=True,
suggested_from_rule_engine_config_id="123456",
email_unsubscribed=None,
label_ids=[],
has_pending_email_arcgate_request=True,
has_email_arcgate_request=True,
existence_level=None,
email=None,
email_from_customer=None,
typed_custom_fields=[],
custom_field_errors=None,
salesforce_record_id=None,
crm_record_url=None,
email_status_unavailable_reason=None,
email_true_status=None,
updated_email_true_status=True,
contact_rule_config_statuses=[],
source_display_name=None,
twitter_url=None,
contact_campaign_statuses=[],
state=None,
city=None,
country=None,
account=None,
contact_emails=[],
organization=None,
employment_history=[],
time_zone=None,
intent_strength=None,
show_intent=True,
phone_numbers=[],
account_phone_note=None,
free_domain=True,
is_likely_to_engage=True,
email_domain_catchall=True,
contact_job_change_event=None,
),
],
),
],
test_mock={
"search_people": lambda query, credentials: [
Contact(
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
linkedin_url="https://www.linkedin.com/in/johndoe",
title="Software Engineer",
organization_name="Google",
organization_id="123456",
contact_stage_id="1",
owner_id="1",
creator_id="1",
person_id="1",
email_needs_tickling=True,
source="apollo",
original_source="apollo",
headline="Software Engineer",
photo_url="https://www.linkedin.com/in/johndoe",
present_raw_address="123 Main St, Anytown, USA",
linkededin_uid="123456",
extrapolated_email_confidence=0.8,
salesforce_id="123456",
salesforce_lead_id="123456",
salesforce_contact_id="123456",
saleforce_account_id="123456",
crm_owner_id="123456",
created_at="2021-01-01",
emailer_campaign_ids=[],
direct_dial_status="active",
direct_dial_enrichment_failed_at="2021-01-01",
email_status="active",
email_source="apollo",
account_id="123456",
last_activity_date="2021-01-01",
hubspot_vid="123456",
hubspot_company_id="123456",
crm_id="123456",
sanitized_phone="123456",
merged_crm_ids="123456",
updated_at="2021-01-01",
queued_for_crm_push=True,
suggested_from_rule_engine_config_id="123456",
email_unsubscribed=None,
label_ids=[],
has_pending_email_arcgate_request=True,
has_email_arcgate_request=True,
existence_level=None,
email=None,
email_from_customer=None,
typed_custom_fields=[],
custom_field_errors=None,
salesforce_record_id=None,
crm_record_url=None,
email_status_unavailable_reason=None,
email_true_status=None,
updated_email_true_status=True,
contact_rule_config_statuses=[],
source_display_name=None,
twitter_url=None,
contact_campaign_statuses=[],
state=None,
city=None,
country=None,
account=None,
contact_emails=[],
organization=None,
employment_history=[],
time_zone=None,
intent_strength=None,
show_intent=True,
phone_numbers=[],
account_phone_note=None,
free_domain=True,
is_likely_to_engage=True,
email_domain_catchall=True,
contact_job_change_event=None,
),
]
},
)
@staticmethod
async def search_people(
query: SearchPeopleRequest, credentials: ApolloCredentials
) -> list[Contact]:
client = ApolloClient(credentials)
return await client.search_people(query)
@staticmethod
async def enrich_person(
query: EnrichPersonRequest, credentials: ApolloCredentials
) -> Contact:
client = ApolloClient(credentials)
return await client.enrich_person(query)
@staticmethod
def merge_contact_data(original: Contact, enriched: Contact) -> Contact:
"""
Merge contact data from original search with enriched data.
Enriched data complements original data, only filling in missing values.
"""
merged_data = original.model_dump()
enriched_data = enriched.model_dump()
# Only fill in fields that are missing, empty, or zero in the original
for key, enriched_value in enriched_data.items():
    # Skip if enriched value is None, empty string, or empty list
    if enriched_value is None or enriched_value == "" or enriched_value == []:
        continue
    # Update only if the original value is falsy (None, "", [], or 0),
    # so enriched data complements rather than replaces search results
    if not merged_data.get(key):
        merged_data[key] = enriched_value
return Contact(**merged_data)
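# Merge semantics at a glance (illustrative values):
#   original = Contact(id="1", name="John Doe", email="")
#   enriched = Contact(id="1", email="john@example.com", title="Engineer")
#   merged = SearchPeopleBlock.merge_contact_data(original, enriched)
#   merged.email -> "john@example.com"  (was empty, so filled)
#   merged.name  -> "John Doe"          (already set, so kept)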
async def run(
self,
input_data: Input,
*,
credentials: ApolloCredentials,
**kwargs,
) -> BlockOutput:
query = SearchPeopleRequest(**input_data.model_dump())
people = await self.search_people(query, credentials)
# Enrich with detailed info if requested
if input_data.enrich_info:
async def enrich_or_fallback(person: Contact):
try:
enrich_query = EnrichPersonRequest(person_id=person.id)
enriched_person = await self.enrich_person(
enrich_query, credentials
)
# Merge enriched data with original data, complementing instead of replacing
return self.merge_contact_data(person, enriched_person)
except Exception:
return person # If enrichment fails, use original person data
people = await asyncio.gather(
*(enrich_or_fallback(person) for person in people)
)
yield "people", people

View File

@@ -1,138 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ApolloCredentials,
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import Contact, EnrichPersonRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, SchemaField
class GetPersonDetailBlock(Block):
"""Get detailed person data with Apollo API, including email reveal"""
class Input(BlockSchema):
person_id: str = SchemaField(
description="Apollo person ID to enrich (most accurate method)",
default="",
advanced=False,
)
first_name: str = SchemaField(
description="First name of the person to enrich",
default="",
advanced=False,
)
last_name: str = SchemaField(
description="Last name of the person to enrich",
default="",
advanced=False,
)
name: str = SchemaField(
description="Full name of the person to enrich (alternative to first_name + last_name)",
default="",
advanced=False,
)
email: str = SchemaField(
description="Known email address of the person (helps with matching)",
default="",
advanced=False,
)
domain: str = SchemaField(
description="Company domain of the person (e.g., 'google.com')",
default="",
advanced=False,
)
company: str = SchemaField(
description="Company name of the person",
default="",
advanced=False,
)
linkedin_url: str = SchemaField(
description="LinkedIn URL of the person",
default="",
advanced=False,
)
organization_id: str = SchemaField(
description="Apollo organization ID of the person's company",
default="",
advanced=True,
)
title: str = SchemaField(
description="Job title of the person to enrich",
default="",
advanced=True,
)
credentials: ApolloCredentialsInput = CredentialsField(
description="Apollo credentials",
)
class Output(BlockSchema):
contact: Contact = SchemaField(
description="Enriched contact information",
)
error: str = SchemaField(
description="Error message if enrichment failed",
default="",
)
def __init__(self):
super().__init__(
id="3b18d46c-3db6-42ae-a228-0ba441bdd176",
description="Get detailed person data with Apollo API, including email reveal",
categories={BlockCategory.SEARCH},
input_schema=GetPersonDetailBlock.Input,
output_schema=GetPersonDetailBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"first_name": "John",
"last_name": "Doe",
"company": "Google",
},
test_output=[
(
"contact",
Contact(
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
email="john.doe@gmail.com",
title="Software Engineer",
organization_name="Google",
linkedin_url="https://www.linkedin.com/in/johndoe",
),
),
],
test_mock={
"enrich_person": lambda query, credentials: Contact(
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
email="john.doe@gmail.com",
title="Software Engineer",
organization_name="Google",
linkedin_url="https://www.linkedin.com/in/johndoe",
)
},
)
@staticmethod
async def enrich_person(
query: EnrichPersonRequest, credentials: ApolloCredentials
) -> Contact:
client = ApolloClient(credentials)
return await client.enrich_person(query)
async def run(
self,
input_data: Input,
*,
credentials: ApolloCredentials,
**kwargs,
) -> BlockOutput:
query = EnrichPersonRequest(**input_data.model_dump())
yield "contact", await self.enrich_person(query, credentials)

View File

@@ -1,66 +0,0 @@
"""
Meeting BaaS integration for AutoGPT Platform.
This integration provides comprehensive access to the Meeting BaaS API,
including:
- Bot management for meeting recordings
- Calendar integration (Google/Microsoft)
- Event management and scheduling
- Webhook triggers for real-time events
"""
# Bot (Recording) Blocks
from .bots import (
BaasBotDeleteRecordingBlock,
BaasBotFetchMeetingDataBlock,
BaasBotFetchScreenshotsBlock,
BaasBotJoinMeetingBlock,
BaasBotLeaveMeetingBlock,
BaasBotRetranscribeBlock,
)
# Calendar Blocks
from .calendars import (
BaasCalendarConnectBlock,
BaasCalendarDeleteBlock,
BaasCalendarListAllBlock,
BaasCalendarResyncAllBlock,
BaasCalendarUpdateCredsBlock,
)
# Event Blocks
from .events import (
BaasEventGetDetailsBlock,
BaasEventListBlock,
BaasEventPatchBotBlock,
BaasEventScheduleBotBlock,
BaasEventUnscheduleBotBlock,
)
# Webhook Triggers
from .triggers import BaasOnCalendarEventBlock, BaasOnMeetingEventBlock
__all__ = [
# Bot (Recording) Blocks
"BaasBotJoinMeetingBlock",
"BaasBotLeaveMeetingBlock",
"BaasBotFetchMeetingDataBlock",
"BaasBotFetchScreenshotsBlock",
"BaasBotDeleteRecordingBlock",
"BaasBotRetranscribeBlock",
# Calendar Blocks
"BaasCalendarConnectBlock",
"BaasCalendarListAllBlock",
"BaasCalendarUpdateCredsBlock",
"BaasCalendarDeleteBlock",
"BaasCalendarResyncAllBlock",
# Event Blocks
"BaasEventListBlock",
"BaasEventGetDetailsBlock",
"BaasEventScheduleBotBlock",
"BaasEventUnscheduleBotBlock",
"BaasEventPatchBotBlock",
# Webhook Triggers
"BaasOnMeetingEventBlock",
"BaasOnCalendarEventBlock",
]

View File

@@ -1,16 +0,0 @@
"""
Shared configuration for all Meeting BaaS blocks using the SDK pattern.
"""
from backend.sdk import BlockCostType, ProviderBuilder
from ._webhook import BaasWebhookManager
# Configure the Meeting BaaS provider with API key authentication
baas = (
ProviderBuilder("baas")
.with_api_key("MEETING_BAAS_API_KEY", "Meeting BaaS API Key")
.with_webhook_manager(BaasWebhookManager)
.with_base_cost(5, BlockCostType.RUN) # Higher cost for meeting recording service
.build()
)
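# The built provider is then consumed by block schemas (as the bot and calendar
# blocks below do); a minimal sketch, with BlockSchema and CredentialsMetaInput
# coming from backend.sdk:
#   class Input(BlockSchema):
#       credentials: CredentialsMetaInput = baas.credentials_field(
#           description="Meeting BaaS API credentials"
#       )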

View File

@@ -1,83 +0,0 @@
"""
Webhook management for Meeting BaaS blocks.
"""
from enum import Enum
from typing import Tuple
from backend.sdk import (
APIKeyCredentials,
BaseWebhooksManager,
Credentials,
ProviderName,
Webhook,
)
class BaasWebhookManager(BaseWebhooksManager):
"""Webhook manager for Meeting BaaS API."""
PROVIDER_NAME = ProviderName("baas")
class WebhookType(str, Enum):
MEETING_EVENT = "meeting_event"
CALENDAR_EVENT = "calendar_event"
@classmethod
async def validate_payload(cls, webhook: Webhook, request) -> Tuple[dict, str]:
"""Validate incoming webhook payload."""
payload = await request.json()
# Verify API key in header
api_key_header = request.headers.get("x-meeting-baas-api-key")
if webhook.secret and api_key_header != webhook.secret:
raise ValueError("Invalid webhook API key")
# Extract event type from payload
event_type = payload.get("event", "unknown")
return payload, event_type
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> Tuple[str, dict]:
"""
Register webhook with Meeting BaaS.
Note: Meeting BaaS doesn't have a webhook registration API.
Webhooks are configured per-bot or as account defaults.
This returns a synthetic webhook ID.
"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("Meeting BaaS webhooks require API key credentials")
# Generate a synthetic webhook ID since BaaS doesn't provide one
import uuid
webhook_id = str(uuid.uuid4())
return webhook_id, {
"webhook_type": webhook_type,
"resource": resource,
"events": events,
"ingress_url": ingress_url,
"api_key": credentials.api_key.get_secret_value(),
}
async def _deregister_webhook(
self, webhook: Webhook, credentials: Credentials
) -> None:
"""
Deregister webhook from Meeting BaaS.
Note: Meeting BaaS doesn't have a webhook deregistration API.
Webhooks are removed by updating bot/calendar configurations.
"""
# No-op since BaaS doesn't have webhook deregistration
pass
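# Expected inbound delivery shape, inferred from validate_payload above
# (illustrative): the "event" key selects the trigger type, and the API key
# header must match the stored webhook secret.
#   POST <ingress_url>
#   x-meeting-baas-api-key: <webhook.secret>
#   {"event": "meeting_event", ...}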

View File

@@ -1,367 +0,0 @@
"""
Meeting BaaS bot (recording) blocks.
"""
from typing import Optional
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import baas
class BaasBotJoinMeetingBlock(Block):
"""
Deploy a bot immediately or at a scheduled start_time to join and record a meeting.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
meeting_url: str = SchemaField(
description="The URL of the meeting the bot should join"
)
bot_name: str = SchemaField(
description="Display name for the bot in the meeting"
)
bot_image: str = SchemaField(
description="URL to an image for the bot's avatar (16:9 ratio recommended)",
default="",
)
entry_message: str = SchemaField(
description="Chat message the bot will post upon entry", default=""
)
reserved: bool = SchemaField(
description="Use a reserved bot slot (joins 4 min before meeting)",
default=False,
)
start_time: Optional[int] = SchemaField(
description="Unix timestamp (ms) when bot should join", default=None
)
speech_to_text: dict = SchemaField(
description="Speech-to-text configuration", default={"provider": "Gladia"}
)
webhook_url: str = SchemaField(
description="URL to receive webhook events for this bot", default=""
)
timeouts: dict = SchemaField(
description="Automatic leave timeouts configuration", default={}
)
extra: dict = SchemaField(
description="Custom metadata to attach to the bot", default={}
)
class Output(BlockSchema):
bot_id: str = SchemaField(description="UUID of the deployed bot")
join_response: dict = SchemaField(
description="Full response from join operation"
)
def __init__(self):
super().__init__(
id="7f8e9d0c-1b2a-3c4d-5e6f-7a8b9c0d1e2f",
description="Deploy a bot to join and record a meeting",
categories={BlockCategory.COMMUNICATION},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body = {
"meeting_url": input_data.meeting_url,
"bot_name": input_data.bot_name,
"reserved": input_data.reserved,
"speech_to_text": input_data.speech_to_text,
}
# Add optional fields
if input_data.bot_image:
body["bot_image"] = input_data.bot_image
if input_data.entry_message:
body["entry_message"] = input_data.entry_message
if input_data.start_time is not None:
body["start_time"] = input_data.start_time
if input_data.webhook_url:
body["webhook_url"] = input_data.webhook_url
if input_data.timeouts:
body["automatic_leave"] = input_data.timeouts
if input_data.extra:
body["extra"] = input_data.extra
# Join meeting
response = await Requests().post(
"https://api.meetingbaas.com/bots",
headers={"x-meeting-baas-api-key": api_key},
json=body,
)
data = response.json()
yield "bot_id", data.get("bot_id", "")
yield "join_response", data
class BaasBotLeaveMeetingBlock(Block):
"""
Force the bot to exit the call.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
bot_id: str = SchemaField(description="UUID of the bot to remove from meeting")
class Output(BlockSchema):
left: bool = SchemaField(description="Whether the bot successfully left")
def __init__(self):
super().__init__(
id="8a9b0c1d-2e3f-4a5b-6c7d-8e9f0a1b2c3d",
description="Remove a bot from an ongoing meeting",
categories={BlockCategory.COMMUNICATION},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Leave meeting
response = await Requests().delete(
f"https://api.meetingbaas.com/bots/{input_data.bot_id}",
headers={"x-meeting-baas-api-key": api_key},
)
# Check if successful
left = response.status in [200, 204]
yield "left", left
class BaasBotFetchMeetingDataBlock(Block):
"""
Pull MP4 URL, transcript & metadata for a completed meeting.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
bot_id: str = SchemaField(description="UUID of the bot whose data to fetch")
include_transcripts: bool = SchemaField(
description="Include transcript data in response", default=True
)
class Output(BlockSchema):
mp4_url: str = SchemaField(
description="URL to download the meeting recording (time-limited)"
)
transcript: list = SchemaField(description="Meeting transcript data")
metadata: dict = SchemaField(description="Meeting metadata and bot information")
def __init__(self):
super().__init__(
id="9b0c1d2e-3f4a-5b6c-7d8e-9f0a1b2c3d4e",
description="Retrieve recorded meeting data",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {
"bot_id": input_data.bot_id,
"include_transcripts": str(input_data.include_transcripts).lower(),
}
# Fetch meeting data
response = await Requests().get(
"https://api.meetingbaas.com/bots/meeting_data",
headers={"x-meeting-baas-api-key": api_key},
params=params,
)
data = response.json()
yield "mp4_url", data.get("mp4", "")
yield "transcript", data.get("bot_data", {}).get("transcripts", [])
yield "metadata", data.get("bot_data", {}).get("bot", {})
class BaasBotFetchScreenshotsBlock(Block):
"""
List screenshots captured during the call.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
bot_id: str = SchemaField(
description="UUID of the bot whose screenshots to fetch"
)
class Output(BlockSchema):
screenshots: list[dict] = SchemaField(
description="Array of screenshot objects with date and url"
)
def __init__(self):
super().__init__(
id="0c1d2e3f-4a5b-6c7d-8e9f-0a1b2c3d4e5f",
description="Retrieve screenshots captured during a meeting",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Fetch screenshots
response = await Requests().get(
f"https://api.meetingbaas.com/bots/{input_data.bot_id}/screenshots",
headers={"x-meeting-baas-api-key": api_key},
)
screenshots = response.json()
yield "screenshots", screenshots
class BaasBotDeleteRecordingBlock(Block):
"""
Purge MP4 + transcript data for privacy or storage management.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
bot_id: str = SchemaField(description="UUID of the bot whose data to delete")
class Output(BlockSchema):
deleted: bool = SchemaField(
description="Whether the data was successfully deleted"
)
def __init__(self):
super().__init__(
id="1d2e3f4a-5b6c-7d8e-9f0a-1b2c3d4e5f6a",
description="Permanently delete a meeting's recorded data",
categories={BlockCategory.DATA},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Delete recording data
response = await Requests().post(
f"https://api.meetingbaas.com/bots/{input_data.bot_id}/delete_data",
headers={"x-meeting-baas-api-key": api_key},
)
# Check if successful
deleted = response.status == 200
yield "deleted", deleted
class BaasBotRetranscribeBlock(Block):
"""
Re-run STT on past audio with a different provider or settings.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
bot_id: str = SchemaField(
description="UUID of the bot whose audio to retranscribe"
)
provider: str = SchemaField(
description="Speech-to-text provider to use (e.g., Gladia, Deepgram)"
)
webhook_url: str = SchemaField(
description="URL to receive transcription complete event", default=""
)
custom_options: dict = SchemaField(
description="Provider-specific options", default={}
)
class Output(BlockSchema):
job_id: Optional[str] = SchemaField(
description="Transcription job ID if available"
)
accepted: bool = SchemaField(
description="Whether the retranscription request was accepted"
)
def __init__(self):
super().__init__(
id="2e3f4a5b-6c7d-8e9f-0a1b-2c3d4e5f6a7b",
description="Re-run transcription on a meeting's audio",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body = {"bot_uuid": input_data.bot_id, "provider": input_data.provider}
if input_data.webhook_url:
body["webhook_url"] = input_data.webhook_url
if input_data.custom_options:
body.update(input_data.custom_options)
# Start retranscription
response = await Requests().post(
"https://api.meetingbaas.com/bots/retranscribe",
headers={"x-meeting-baas-api-key": api_key},
json=body,
)
# Check if accepted
accepted = response.status in [200, 202]
job_id = None
if accepted and response.status == 200:
data = response.json()
job_id = data.get("job_id")
yield "job_id", job_id
yield "accepted", accepted


@@ -1,265 +0,0 @@
"""
Meeting BaaS calendar blocks.
"""
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import baas
class BaasCalendarConnectBlock(Block):
"""
One-time integration of a Google or Microsoft calendar.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
oauth_client_id: str = SchemaField(description="OAuth client ID from provider")
oauth_client_secret: str = SchemaField(description="OAuth client secret")
oauth_refresh_token: str = SchemaField(
description="OAuth refresh token with calendar access"
)
platform: str = SchemaField(
description="Calendar platform (Google or Microsoft)"
)
calendar_email_or_id: str = SchemaField(
description="Specific calendar email/ID to connect", default=""
)
class Output(BlockSchema):
calendar_id: str = SchemaField(description="UUID of the connected calendar")
calendar_obj: dict = SchemaField(description="Full calendar object")
def __init__(self):
super().__init__(
id="3f4a5b6c-7d8e-9f0a-1b2c-3d4e5f6a7b8c",
description="Connect a Google or Microsoft calendar for integration",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body = {
"oauth_client_id": input_data.oauth_client_id,
"oauth_client_secret": input_data.oauth_client_secret,
"oauth_refresh_token": input_data.oauth_refresh_token,
"platform": input_data.platform,
}
if input_data.calendar_email_or_id:
body["calendar_email"] = input_data.calendar_email_or_id
# Connect calendar
response = await Requests().post(
"https://api.meetingbaas.com/calendars",
headers={"x-meeting-baas-api-key": api_key},
json=body,
)
calendar = response.json()
yield "calendar_id", calendar.get("uuid", "")
yield "calendar_obj", calendar
class BaasCalendarListAllBlock(Block):
"""
Enumerate connected calendars.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
class Output(BlockSchema):
calendars: list[dict] = SchemaField(
description="Array of connected calendar objects"
)
def __init__(self):
super().__init__(
id="4a5b6c7d-8e9f-0a1b-2c3d-4e5f6a7b8c9d",
description="List all integrated calendars",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# List calendars
response = await Requests().get(
"https://api.meetingbaas.com/calendars",
headers={"x-meeting-baas-api-key": api_key},
)
calendars = response.json()
yield "calendars", calendars
class BaasCalendarUpdateCredsBlock(Block):
"""
Refresh OAuth or switch provider for an existing calendar.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
calendar_id: str = SchemaField(description="UUID of the calendar to update")
oauth_client_id: str = SchemaField(
description="New OAuth client ID", default=""
)
oauth_client_secret: str = SchemaField(
description="New OAuth client secret", default=""
)
oauth_refresh_token: str = SchemaField(
description="New OAuth refresh token", default=""
)
platform: str = SchemaField(description="New platform if switching", default="")
class Output(BlockSchema):
calendar_obj: dict = SchemaField(description="Updated calendar object")
def __init__(self):
super().__init__(
id="5b6c7d8e-9f0a-1b2c-3d4e-5f6a7b8c9d0e",
description="Update calendar credentials or platform",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body with only provided fields
body = {}
if input_data.oauth_client_id:
body["oauth_client_id"] = input_data.oauth_client_id
if input_data.oauth_client_secret:
body["oauth_client_secret"] = input_data.oauth_client_secret
if input_data.oauth_refresh_token:
body["oauth_refresh_token"] = input_data.oauth_refresh_token
if input_data.platform:
body["platform"] = input_data.platform
# Update calendar
response = await Requests().patch(
f"https://api.meetingbaas.com/calendars/{input_data.calendar_id}",
headers={"x-meeting-baas-api-key": api_key},
json=body,
)
calendar = response.json()
yield "calendar_obj", calendar
class BaasCalendarDeleteBlock(Block):
"""
Disconnect calendar & unschedule future bots.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
calendar_id: str = SchemaField(description="UUID of the calendar to delete")
class Output(BlockSchema):
deleted: bool = SchemaField(
description="Whether the calendar was successfully deleted"
)
def __init__(self):
super().__init__(
id="6c7d8e9f-0a1b-2c3d-4e5f-6a7b8c9d0e1f",
description="Remove a calendar integration",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Delete calendar
response = await Requests().delete(
f"https://api.meetingbaas.com/calendars/{input_data.calendar_id}",
headers={"x-meeting-baas-api-key": api_key},
)
deleted = response.status in [200, 204]
yield "deleted", deleted
class BaasCalendarResyncAllBlock(Block):
"""
Force full sync now (maintenance).
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
class Output(BlockSchema):
synced_ids: list[str] = SchemaField(
description="Calendar UUIDs that synced successfully"
)
errors: list[list] = SchemaField(
description="Array of [calendar_id, error_message] tuples"
)
def __init__(self):
super().__init__(
id="7d8e9f0a-1b2c-3d4e-5f6a-7b8c9d0e1f2a",
description="Force immediate re-sync of all connected calendars",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Resync all calendars
response = await Requests().post(
"https://api.meetingbaas.com/internal/calendar/resync_all",
headers={"x-meeting-baas-api-key": api_key},
)
data = response.json()
yield "synced_ids", data.get("synced_calendars", [])
yield "errors", data.get("errors", [])


@@ -1,276 +0,0 @@
"""
Meeting BaaS calendar event blocks.
"""
from typing import Union
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import baas
class BaasEventListBlock(Block):
"""
Get events for a calendar & date range.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
calendar_id: str = SchemaField(
description="UUID of the calendar to list events from"
)
start_date_gte: str = SchemaField(
description="ISO date string for start date (greater than or equal)",
default="",
)
start_date_lte: str = SchemaField(
description="ISO date string for start date (less than or equal)",
default="",
)
cursor: str = SchemaField(
description="Pagination cursor from previous request", default=""
)
class Output(BlockSchema):
events: list[dict] = SchemaField(description="Array of calendar events")
next_cursor: str = SchemaField(description="Cursor for next page of results")
def __init__(self):
super().__init__(
id="8e9f0a1b-2c3d-4e5f-6a7b-8c9d0e1f2a3b",
description="List calendar events with optional date filtering",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {"calendar_id": input_data.calendar_id}
if input_data.start_date_gte:
params["start_date_gte"] = input_data.start_date_gte
if input_data.start_date_lte:
params["start_date_lte"] = input_data.start_date_lte
if input_data.cursor:
params["cursor"] = input_data.cursor
# List events
response = await Requests().get(
"https://api.meetingbaas.com/calendar_events",
headers={"x-meeting-baas-api-key": api_key},
params=params,
)
data = response.json()
yield "events", data.get("events", [])
yield "next_cursor", data.get("next", "")
class BaasEventGetDetailsBlock(Block):
"""
Fetch full object for one event.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
event_id: str = SchemaField(description="UUID of the event to retrieve")
class Output(BlockSchema):
event: dict = SchemaField(description="Full event object with all details")
def __init__(self):
super().__init__(
id="9f0a1b2c-3d4e-5f6a-7b8c-9d0e1f2a3b4c",
description="Get detailed information for a specific calendar event",
categories={BlockCategory.SEARCH},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Get event details
response = await Requests().get(
f"https://api.meetingbaas.com/calendar_events/{input_data.event_id}",
headers={"x-meeting-baas-api-key": api_key},
)
event = response.json()
yield "event", event
class BaasEventScheduleBotBlock(Block):
"""
Attach bot config to the event for automatic recording.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
event_id: str = SchemaField(description="UUID of the event to schedule bot for")
all_occurrences: bool = SchemaField(
description="Apply to all occurrences of recurring event", default=False
)
bot_config: dict = SchemaField(
description="Bot configuration (same as Bot → Join Meeting)"
)
class Output(BlockSchema):
events: Union[dict, list[dict]] = SchemaField(
description="Updated event(s) with bot scheduled"
)
def __init__(self):
super().__init__(
id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
description="Schedule a recording bot for a calendar event",
categories={BlockCategory.COMMUNICATION},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {"all_occurrences": str(input_data.all_occurrences).lower()}
# Schedule bot
response = await Requests().post(
f"https://api.meetingbaas.com/calendar_events/{input_data.event_id}/bot",
headers={"x-meeting-baas-api-key": api_key},
params=params,
json=input_data.bot_config,
)
events = response.json()
yield "events", events
class BaasEventUnscheduleBotBlock(Block):
"""
Remove bot from event/series.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
event_id: str = SchemaField(
description="UUID of the event to unschedule bot from"
)
all_occurrences: bool = SchemaField(
description="Apply to all occurrences of recurring event", default=False
)
class Output(BlockSchema):
events: Union[dict, list[dict]] = SchemaField(
description="Updated event(s) with bot removed"
)
def __init__(self):
super().__init__(
id="1b2c3d4e-5f6a-7b8c-9d0e-1f2a3b4c5d6e",
description="Cancel a scheduled recording for an event",
categories={BlockCategory.COMMUNICATION},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {"all_occurrences": str(input_data.all_occurrences).lower()}
# Unschedule bot
response = await Requests().delete(
f"https://api.meetingbaas.com/calendar_events/{input_data.event_id}/bot",
headers={"x-meeting-baas-api-key": api_key},
params=params,
)
events = response.json()
yield "events", events
class BaasEventPatchBotBlock(Block):
"""
Modify an already-scheduled bot configuration.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
event_id: str = SchemaField(description="UUID of the event with scheduled bot")
all_occurrences: bool = SchemaField(
description="Apply to all occurrences of recurring event", default=False
)
bot_patch: dict = SchemaField(description="Bot configuration fields to update")
class Output(BlockSchema):
events: Union[dict, list[dict]] = SchemaField(
description="Updated event(s) with modified bot config"
)
def __init__(self):
super().__init__(
id="2c3d4e5f-6a7b-8c9d-0e1f-2a3b4c5d6e7f",
description="Update configuration of a scheduled bot",
categories={BlockCategory.COMMUNICATION},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {}
if input_data.all_occurrences is not None:
params["all_occurrences"] = str(input_data.all_occurrences).lower()
# Patch bot
response = await Requests().patch(
f"https://api.meetingbaas.com/calendar_events/{input_data.event_id}/bot",
headers={"x-meeting-baas-api-key": api_key},
params=params,
json=input_data.bot_patch,
)
events = response.json()
yield "events", events


@@ -1,185 +0,0 @@
"""
Meeting BaaS webhook trigger blocks.
"""
from pydantic import BaseModel
from backend.sdk import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockType,
BlockWebhookConfig,
CredentialsMetaInput,
ProviderName,
SchemaField,
)
from ._config import baas
class BaasOnMeetingEventBlock(Block):
"""
Trigger when Meeting BaaS sends meeting-related events:
bot.status_change, complete, failed, transcription_complete
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
webhook_url: str = SchemaField(
description="URL to receive webhooks (auto-generated)",
default="",
hidden=True,
)
class EventsFilter(BaseModel):
"""Meeting event types to subscribe to"""
bot_status_change: bool = SchemaField(
description="Bot status changes", default=True
)
complete: bool = SchemaField(description="Meeting completed", default=True)
failed: bool = SchemaField(description="Meeting failed", default=True)
transcription_complete: bool = SchemaField(
description="Transcription completed", default=True
)
events: EventsFilter = SchemaField(
title="Events", description="The events to subscribe to"
)
payload: dict = SchemaField(
description="Webhook payload data",
default={},
hidden=True,
)
class Output(BlockSchema):
event_type: str = SchemaField(description="Type of event received")
data: dict = SchemaField(description="Event data payload")
def __init__(self):
super().__init__(
id="3d4e5f6a-7b8c-9d0e-1f2a-3b4c5d6e7f8a",
description="Receive meeting events from Meeting BaaS webhooks",
categories={BlockCategory.INPUT},
input_schema=self.Input,
output_schema=self.Output,
block_type=BlockType.WEBHOOK,
webhook_config=BlockWebhookConfig(
provider=ProviderName("baas"),
webhook_type="meeting_event",
event_filter_input="events",
resource_format="meeting",
),
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
payload = input_data.payload
# Extract event type and data
event_type = payload.get("event", "unknown")
data = payload.get("data", {})
# Map event types to filter fields
event_filter_map = {
"bot.status_change": input_data.events.bot_status_change,
"complete": input_data.events.complete,
"failed": input_data.events.failed,
"transcription_complete": input_data.events.transcription_complete,
}
# Filter events if needed
if not event_filter_map.get(event_type, False):
return # Skip unwanted events
yield "event_type", event_type
yield "data", data
class BaasOnCalendarEventBlock(Block):
"""
Trigger when Meeting BaaS sends calendar-related events:
event.added, event.updated, event.deleted, calendar.synced
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = baas.credentials_field(
description="Meeting BaaS API credentials"
)
webhook_url: str = SchemaField(
description="URL to receive webhooks (auto-generated)",
default="",
hidden=True,
)
class EventsFilter(BaseModel):
"""Calendar event types to subscribe to"""
event_added: bool = SchemaField(
description="Calendar event added", default=True
)
event_updated: bool = SchemaField(
description="Calendar event updated", default=True
)
event_deleted: bool = SchemaField(
description="Calendar event deleted", default=True
)
calendar_synced: bool = SchemaField(
description="Calendar synced", default=True
)
events: EventsFilter = SchemaField(
title="Events", description="The events to subscribe to"
)
payload: dict = SchemaField(
description="Webhook payload data",
default={},
hidden=True,
)
class Output(BlockSchema):
event_type: str = SchemaField(description="Type of event received")
data: dict = SchemaField(description="Event data payload")
def __init__(self):
super().__init__(
id="4e5f6a7b-8c9d-0e1f-2a3b-4c5d6e7f8a9b",
description="Receive calendar events from Meeting BaaS webhooks",
categories={BlockCategory.INPUT},
input_schema=self.Input,
output_schema=self.Output,
block_type=BlockType.WEBHOOK,
webhook_config=BlockWebhookConfig(
provider=ProviderName("baas"),
webhook_type="calendar_event",
event_filter_input="events",
resource_format="calendar",
),
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
payload = input_data.payload
# Extract event type and data
event_type = payload.get("event", "unknown")
data = payload.get("data", {})
# Map event types to filter fields
event_filter_map = {
"event.added": input_data.events.event_added,
"event.updated": input_data.events.event_updated,
"event.deleted": input_data.events.event_deleted,
"calendar.synced": input_data.events.calendar_synced,
}
# Filter events if needed
if not event_filter_map.get(event_type, False):
return # Skip unwanted events
yield "event_type", event_type
yield "data", data


@@ -1,51 +1,11 @@
import enum
from typing import Any
from typing import Any, List
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.util.file import store_media_file
from backend.util.type import MediaFileType, convert
from backend.util.mock import MockObject
from backend.util.text import TextFormatter
class FileStoreBlock(Block):
class Input(BlockSchema):
file_in: MediaFileType = SchemaField(
description="The file to store in the temporary directory, it can be a URL, data URI, or local path."
)
base_64: bool = SchemaField(
description="Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks).",
default=False,
advanced=True,
title="Produce Base64 Output",
)
class Output(BlockSchema):
file_out: MediaFileType = SchemaField(
description="The relative path to the stored file in the temporary directory."
)
def __init__(self):
super().__init__(
id="cbb50872-625b-42f0-8203-a2ae78242d8a",
description="Stores the input file in the temporary directory.",
categories={BlockCategory.BASIC, BlockCategory.MULTIMEDIA},
input_schema=FileStoreBlock.Input,
output_schema=FileStoreBlock.Output,
static_output=True,
)
async def run(
self,
input_data: Input,
*,
graph_exec_id: str,
**kwargs,
) -> BlockOutput:
yield "file_out", await store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.file_in,
return_content=input_data.base_64,
)
formatter = TextFormatter()
class StoreValueBlock(Block):
@@ -87,16 +47,15 @@ class StoreValueBlock(Block):
static_output=True,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.data or input_data.input
class PrintToConsoleBlock(Block):
class Input(BlockSchema):
text: Any = SchemaField(description="The data to print to the console.")
text: str = SchemaField(description="The text to print to the console.")
class Output(BlockSchema):
output: Any = SchemaField(description="The data printed to the console.")
status: str = SchemaField(description="The status of the print operation.")
def __init__(self):
@@ -107,15 +66,407 @@ class PrintToConsoleBlock(Block):
input_schema=PrintToConsoleBlock.Input,
output_schema=PrintToConsoleBlock.Output,
test_input={"text": "Hello, World!"},
test_output=("status", "printed"),
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
print(">>>>> Print: ", input_data.text)
yield "status", "printed"
class FindInDictionaryBlock(Block):
class Input(BlockSchema):
input: Any = SchemaField(description="Dictionary to lookup from")
key: str | int = SchemaField(description="Key to lookup in the dictionary")
class Output(BlockSchema):
output: Any = SchemaField(description="Value found for the given key")
missing: Any = SchemaField(
description="Value of the input that missing the key"
)
def __init__(self):
super().__init__(
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
description="Lookup the given key in the input dictionary/object/list and return the value.",
input_schema=FindInDictionaryBlock.Input,
output_schema=FindInDictionaryBlock.Output,
test_input=[
{"input": {"apple": 1, "banana": 2, "cherry": 3}, "key": "banana"},
{"input": {"x": 10, "y": 20, "z": 30}, "key": "w"},
{"input": [1, 2, 3], "key": 1},
{"input": [1, 2, 3], "key": 3},
{"input": MockObject(value="!!", key="key"), "key": "key"},
{"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"},
],
test_output=[
("output", "Hello, World!"),
("status", "printed"),
("output", 2),
("missing", {"x": 10, "y": 20, "z": 30}),
("output", 2),
("missing", [1, 2, 3]),
("output", "key"),
("output", ["v1", "v3"]),
],
categories={BlockCategory.BASIC},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
obj = input_data.input
key = input_data.key
if isinstance(obj, dict) and key in obj:
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, str):
if len(obj) == 0:
yield "output", []
elif isinstance(obj[0], dict) and key in obj[0]:
yield "output", [item[key] for item in obj if key in item]
else:
yield "output", [getattr(val, key) for val in obj if hasattr(val, key)]
elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key):
yield "output", getattr(obj, key)
else:
yield "missing", input_data.input
class AgentInputBlock(Block):
"""
This block is used to provide input to the graph.
It takes in a value, name, description, a list of placeholder values, and a bool to limit selection to those values.
It outputs the value passed as input.
"""
class Input(BlockSchema):
name: str = SchemaField(description="The name of the input.")
value: Any = SchemaField(
description="The value to be passed as input.",
default=None,
)
title: str | None = SchemaField(
description="The title of the input.", default=None, advanced=True
)
description: str | None = SchemaField(
description="The description of the input.",
default=None,
advanced=True,
)
placeholder_values: List[Any] = SchemaField(
description="The placeholder values to be passed as input.",
default=[],
advanced=True,
)
limit_to_placeholder_values: bool = SchemaField(
description="Whether to limit the selection to placeholder values.",
default=False,
advanced=True,
)
advanced: bool = SchemaField(
description="Whether to show the input in the advanced section, if the field is not required.",
default=False,
advanced=True,
)
secret: bool = SchemaField(
description="Whether the input should be treated as a secret.",
default=False,
advanced=True,
)
class Output(BlockSchema):
result: Any = SchemaField(description="The value passed as input.")
def __init__(self):
super().__init__(
id="c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
description="This block is used to provide input to the graph.",
input_schema=AgentInputBlock.Input,
output_schema=AgentInputBlock.Output,
test_input=[
{
"value": "Hello, World!",
"name": "input_1",
"description": "This is a test input.",
"placeholder_values": [],
"limit_to_placeholder_values": False,
},
{
"value": "Hello, World!",
"name": "input_2",
"description": "This is a test input.",
"placeholder_values": ["Hello, World!"],
"limit_to_placeholder_values": True,
},
],
test_output=[
("result", "Hello, World!"),
("result", "Hello, World!"),
],
categories={BlockCategory.INPUT, BlockCategory.BASIC},
block_type=BlockType.INPUT,
static_output=True,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "result", input_data.value
class AgentOutputBlock(Block):
"""
Records the output of the graph for users to see.
Behavior:
If `format` is provided and the `value` is of a type that can be formatted,
the block attempts to format the `value` using the `format` string.
If formatting fails or no `format` is provided, the raw `value` is output.
"""
class Input(BlockSchema):
value: Any = SchemaField(
description="The value to be recorded as output.",
default=None,
advanced=False,
)
name: str = SchemaField(description="The name of the output.")
title: str | None = SchemaField(
description="The title of the output.",
default=None,
advanced=True,
)
description: str | None = SchemaField(
description="The description of the output.",
default=None,
advanced=True,
)
format: str = SchemaField(
description="The format string to be used to format the recorded_value.",
default="",
advanced=True,
)
advanced: bool = SchemaField(
description="Whether to treat the output as advanced.",
default=False,
advanced=True,
)
secret: bool = SchemaField(
description="Whether the output should be treated as a secret.",
default=False,
advanced=True,
)
class Output(BlockSchema):
output: Any = SchemaField(description="The value recorded as output.")
def __init__(self):
super().__init__(
id="363ae599-353e-4804-937e-b2ee3cef3da4",
description="Stores the output of the graph for users to see.",
input_schema=AgentOutputBlock.Input,
output_schema=AgentOutputBlock.Output,
test_input=[
{
"value": "Hello, World!",
"name": "output_1",
"description": "This is a test output.",
"format": "{{ output_1 }}!!",
},
{
"value": "42",
"name": "output_2",
"description": "This is another test output.",
"format": "{{ output_2 }}",
},
{
"value": MockObject(value="!!", key="key"),
"name": "output_3",
"description": "This is a test output with a mock object.",
"format": "{{ output_3 }}",
},
],
test_output=[
("output", "Hello, World!!!"),
("output", "42"),
("output", MockObject(value="!!", key="key")),
],
categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
block_type=BlockType.OUTPUT,
static_output=True,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
"""
Attempts to format the value using the `format` string if provided.
If formatting fails or no format string is given, outputs the original value.
"""
if input_data.format:
try:
yield "output", formatter.format_string(
input_data.format, {input_data.name: input_data.value}
)
except Exception as e:
yield "output", f"Error: {e}, {input_data.value}"
else:
yield "output", input_data.value
class AddToDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
default={},
description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
)
key: str = SchemaField(
default="",
description="The key for the new entry.",
placeholder="new_key",
advanced=False,
)
value: Any = SchemaField(
default=None,
description="The value for the new entry.",
placeholder="new_value",
advanced=False,
)
entries: dict[Any, Any] = SchemaField(
default={},
description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
advanced=True,
)
class Output(BlockSchema):
updated_dictionary: dict = SchemaField(
description="The dictionary with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="31d1064e-7446-4693-a7d4-65e5ca1180d1",
description="Adds a new key-value pair to a dictionary. If no dictionary is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToDictionaryBlock.Input,
output_schema=AddToDictionaryBlock.Output,
test_input=[
{
"dictionary": {"existing_key": "existing_value"},
"key": "new_key",
"value": "new_value",
},
{"key": "first_key", "value": "first_value"},
{
"dictionary": {"existing_key": "existing_value"},
"entries": {"new_key": "new_value", "first_key": "first_value"},
},
],
test_output=[
(
"updated_dictionary",
{"existing_key": "existing_value", "new_key": "new_value"},
),
("updated_dictionary", {"first_key": "first_value"}),
(
"updated_dictionary",
{
"existing_key": "existing_value",
"new_key": "new_value",
"first_key": "first_value",
},
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.text
yield "status", "printed"
def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
if input_data.value is not None and input_data.key:
updated_dict[input_data.key] = input_data.value
for key, value in input_data.entries.items():
updated_dict[key] = value
yield "updated_dictionary", updated_dict
class AddToListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(
default=[],
advanced=False,
description="The list to add the entry to. If not provided, a new list will be created.",
)
entry: Any = SchemaField(
description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
advanced=False,
default=None,
)
entries: List[Any] = SchemaField(
default=[],
description="The entries to add to the list. This is the batch version of the `entry` field.",
advanced=True,
)
position: int | None = SchemaField(
default=None,
description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
)
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(
description="The list with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="aeb08fc1-2fc1-4141-bc8e-f758f183a822",
description="Adds a new entry to a list. The entry can be of any type. If no list is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToListBlock.Input,
output_schema=AddToListBlock.Output,
test_input=[
{
"list": [1, "string", {"existing_key": "existing_value"}],
"entry": {"new_key": "new_value"},
"position": 1,
},
{"entry": "first_entry"},
{"list": ["a", "b", "c"], "entry": "d"},
{
"entry": "e",
"entries": ["f", "g"],
"list": ["a", "b"],
"position": 1,
},
],
test_output=[
(
"updated_list",
[
1,
{"new_key": "new_value"},
"string",
{"existing_key": "existing_value"},
],
),
("updated_list", ["first_entry"]),
("updated_list", ["a", "b", "c", "d"]),
("updated_list", ["a", "f", "g", "e", "b"]),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
entries_added = input_data.entries.copy()
if input_data.entry:
entries_added.append(input_data.entry)
updated_list = input_data.list.copy()
if (pos := input_data.position) is not None:
updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
else:
updated_list += entries_added
yield "updated_list", updated_list
class NoteBlock(Block):
@@ -139,78 +490,103 @@ class NoteBlock(Block):
block_type=BlockType.NOTE,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.text
class TypeOptions(enum.Enum):
STRING = "string"
NUMBER = "number"
BOOLEAN = "boolean"
LIST = "list"
DICTIONARY = "dictionary"
class UniversalTypeConverterBlock(Block):
class CreateDictionaryBlock(Block):
class Input(BlockSchema):
value: Any = SchemaField(
description="The value to convert to a universal type."
values: dict[str, Any] = SchemaField(
description="Key-value pairs to create the dictionary with",
placeholder="e.g., {'name': 'Alice', 'age': 25}",
)
type: TypeOptions = SchemaField(description="The type to convert the value to.")
class Output(BlockSchema):
value: Any = SchemaField(description="The converted value.")
error: str = SchemaField(description="Error message if conversion failed.")
dictionary: dict[str, Any] = SchemaField(
description="The created dictionary containing the specified key-value pairs"
)
error: str = SchemaField(
description="Error message if dictionary creation failed"
)
def __init__(self):
super().__init__(
id="95d1b990-ce13-4d88-9737-ba5c2070c97b",
description="This block is used to convert a value to a universal type.",
categories={BlockCategory.BASIC},
input_schema=UniversalTypeConverterBlock.Input,
output_schema=UniversalTypeConverterBlock.Output,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
converted_value = convert(
input_data.value,
id="b924ddf4-de4f-4b56-9a85-358930dcbc91",
description="Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.",
categories={BlockCategory.DATA},
input_schema=CreateDictionaryBlock.Input,
output_schema=CreateDictionaryBlock.Output,
test_input=[
{
TypeOptions.STRING: str,
TypeOptions.NUMBER: float,
TypeOptions.BOOLEAN: bool,
TypeOptions.LIST: list,
TypeOptions.DICTIONARY: dict,
}[input_data.type],
)
yield "value", converted_value
"values": {"name": "Alice", "age": 25, "city": "New York"},
},
{
"values": {"numbers": [1, 2, 3], "active": True, "score": 95.5},
},
],
test_output=[
(
"dictionary",
{"name": "Alice", "age": 25, "city": "New York"},
),
(
"dictionary",
{"numbers": [1, 2, 3], "active": True, "score": 95.5},
),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "dictionary", input_data.values
except Exception as e:
yield "error", f"Failed to convert value: {str(e)}"
yield "error", f"Failed to create dictionary: {str(e)}"
class ReverseListOrderBlock(Block):
"""
A block which takes in a list and returns it in the opposite order.
"""
class CreateListBlock(Block):
class Input(BlockSchema):
input_list: list[Any] = SchemaField(description="The list to reverse")
values: List[Any] = SchemaField(
description="A list of values to be combined into a new list.",
placeholder="e.g., ['Alice', 25, True]",
)
class Output(BlockSchema):
reversed_list: list[Any] = SchemaField(description="The list in reversed order")
list: List[Any] = SchemaField(
description="The created list containing the specified values."
)
error: str = SchemaField(description="Error message if list creation failed.")
def __init__(self):
super().__init__(
id="422cb708-3109-4277-bfe3-bc2ae5812777",
description="Reverses the order of elements in a list",
categories={BlockCategory.BASIC},
input_schema=ReverseListOrderBlock.Input,
output_schema=ReverseListOrderBlock.Output,
test_input={"input_list": [1, 2, 3, 4, 5]},
test_output=[("reversed_list", [5, 4, 3, 2, 1])],
id="a912d5c7-6e00-4542-b2a9-8034136930e4",
description="Creates a list with the specified values. Use this when you know all the values you want to add upfront.",
categories={BlockCategory.DATA},
input_schema=CreateListBlock.Input,
output_schema=CreateListBlock.Output,
test_input=[
{
"values": ["Alice", 25, True],
},
{
"values": [1, 2, 3, "four", {"key": "value"}],
},
],
test_output=[
(
"list",
["Alice", 25, True],
),
(
"list",
[1, 2, 3, "four", {"key": "value"}],
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
reversed_list = list(input_data.input_list)
reversed_list.reverse()
yield "reversed_list", reversed_list
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "list", input_data.values
except Exception as e:
yield "error", f"Failed to create list: {str(e)}"


@@ -38,7 +38,7 @@ class BlockInstallationBlock(Block):
disabled=True,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
code = input_data.code
if search := re.search(r"class (\w+)\(Block\):", code):
@@ -64,7 +64,7 @@ class BlockInstallationBlock(Block):
from backend.util.test import execute_block_test
await execute_block_test(block)
execute_block_test(block)
yield "success", "Block installed successfully."
except Exception as e:
os.remove(file_path)


@@ -3,7 +3,6 @@ from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.type import convert
class ComparisonOperator(Enum):
@@ -71,7 +70,7 @@ class ConditionBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
operator = input_data.operator
value1 = input_data.value1
@@ -108,99 +107,3 @@ class ConditionBlock(Block):
yield "yes_output", yes_value
else:
yield "no_output", no_value
class IfInputMatchesBlock(Block):
class Input(BlockSchema):
input: Any = SchemaField(
description="The input to match against",
placeholder="For example: 10 or 'hello' or True",
)
value: Any = SchemaField(
description="The value to output if the input matches",
placeholder="For example: 'Greater' or 20 or False",
)
yes_value: Any = SchemaField(
description="The value to output if the input matches",
placeholder="For example: 'Greater' or 20 or False",
default=None,
)
no_value: Any = SchemaField(
description="The value to output if the input does not match",
placeholder="For example: 'Greater' or 20 or False",
default=None,
)
class Output(BlockSchema):
result: bool = SchemaField(
description="The result of the condition evaluation (True or False)"
)
yes_output: Any = SchemaField(
description="The output value if the condition is true"
)
no_output: Any = SchemaField(
description="The output value if the condition is false"
)
def __init__(self):
super().__init__(
id="6dbbc4b3-ca6c-42b6-b508-da52d23e13f2",
input_schema=IfInputMatchesBlock.Input,
output_schema=IfInputMatchesBlock.Output,
description="Handles conditional logic based on comparison operators",
categories={BlockCategory.LOGIC},
test_input=[
{
"input": 10,
"value": 10,
"yes_value": "Greater",
"no_value": "Not greater",
},
{
"input": 10,
"value": 20,
"yes_value": "Greater",
"no_value": "Not greater",
},
{
"input": 10,
"value": "None",
"yes_value": "Yes",
"no_value": "No",
},
],
test_output=[
("result", True),
("yes_output", "Greater"),
("result", False),
("no_output", "Not greater"),
("result", False),
("no_output", "No"),
# ("result", True),
# ("yes_output", "Yes"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# If input_data.value does not match input_data.input, convert value to the input's type
if (
input_data.input != input_data.value
and input_data.input is not input_data.value
):
try:
# Only attempt conversion if input is not None and value is not None
if input_data.input is not None and input_data.value is not None:
input_type = type(input_data.input)
# Avoid converting if input_type is Any or object
if input_type not in (Any, object):
input_data.value = convert(input_data.value, input_type)
except Exception:
pass # If conversion fails, just leave value as is
if input_data.input == input_data.value:
yield "result", True
yield "yes_output", input_data.yes_value
else:
yield "result", False
yield "no_output", input_data.no_value


@@ -1,7 +1,7 @@
from enum import Enum
from typing import Literal
from e2b_code_interpreter import AsyncSandbox
from e2b_code_interpreter import Sandbox
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -55,7 +55,7 @@ class CodeExecutionBlock(Block):
"These commands are executed with `sh`, in the foreground."
),
placeholder="pip install cowsay",
default_factory=list,
default=[],
advanced=False,
)
@@ -123,7 +123,7 @@ class CodeExecutionBlock(Block):
},
)
async def execute_code(
def execute_code(
self,
code: str,
language: ProgrammingLanguage,
@@ -135,21 +135,21 @@ class CodeExecutionBlock(Block):
try:
sandbox = None
if template_id:
sandbox = await AsyncSandbox.create(
sandbox = Sandbox(
template=template_id, api_key=api_key, timeout=timeout
)
else:
sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
sandbox = Sandbox(api_key=api_key, timeout=timeout)
if not sandbox:
raise Exception("Sandbox not created")
# Running setup commands
for cmd in setup_commands:
await sandbox.commands.run(cmd)
sandbox.commands.run(cmd)
# Executing the code
execution = await sandbox.run_code(
execution = sandbox.run_code(
code,
language=language.value,
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
@@ -167,11 +167,11 @@ class CodeExecutionBlock(Block):
except Exception as e:
raise e
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
response, stdout_logs, stderr_logs = await self.execute_code(
response, stdout_logs, stderr_logs = self.execute_code(
input_data.code,
input_data.language,
input_data.setup_commands,
@@ -188,270 +188,3 @@ class CodeExecutionBlock(Block):
yield "stderr_logs", stderr_logs
except Exception as e:
yield "error", str(e)
class InstantiationBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.E2B], Literal["api_key"]
] = CredentialsField(
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
)
# TODO: Option to run command in background
setup_commands: list[str] = SchemaField(
description=(
"Shell commands to set up the sandbox before running the code. "
"You can use `curl` or `git` to install your desired Debian based "
"package manager. `pip` and `npm` are pre-installed.\n\n"
"These commands are executed with `sh`, in the foreground."
),
placeholder="pip install cowsay",
default_factory=list,
advanced=False,
)
setup_code: str = SchemaField(
description="Code to execute in the sandbox",
placeholder="print('Hello, World!')",
default="",
advanced=False,
)
language: ProgrammingLanguage = SchemaField(
description="Programming language to execute",
default=ProgrammingLanguage.PYTHON,
advanced=False,
)
timeout: int = SchemaField(
description="Execution timeout in seconds", default=300
)
template_id: str = SchemaField(
description=(
"You can use an E2B sandbox template by entering its ID here. "
"Check out the E2B docs for more details: "
"[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)"
),
default="",
advanced=True,
)
class Output(BlockSchema):
sandbox_id: str = SchemaField(description="ID of the sandbox instance")
response: str = SchemaField(description="Response from code execution")
stdout_logs: str = SchemaField(
description="Standard output logs from execution"
)
stderr_logs: str = SchemaField(description="Standard error logs from execution")
error: str = SchemaField(description="Error message if execution failed")
def __init__(self):
super().__init__(
id="ff0861c9-1726-4aec-9e5b-bf53f3622112",
description="Instantiate an isolated sandbox environment with internet access where to execute code in.",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=InstantiationBlock.Input,
output_schema=InstantiationBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"setup_code": "print('Hello World')",
"language": ProgrammingLanguage.PYTHON.value,
"setup_commands": [],
"timeout": 300,
"template_id": "",
},
test_output=[
("sandbox_id", str),
("response", "Hello World"),
("stdout_logs", "Hello World\n"),
],
test_mock={
"execute_code": lambda setup_code, language, setup_commands, timeout, api_key, template_id: (
"sandbox_id",
"Hello World",
"Hello World\n",
"",
),
},
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
sandbox_id, response, stdout_logs, stderr_logs = await self.execute_code(
input_data.setup_code,
input_data.language,
input_data.setup_commands,
input_data.timeout,
credentials.api_key.get_secret_value(),
input_data.template_id,
)
if sandbox_id:
yield "sandbox_id", sandbox_id
else:
yield "error", "Sandbox ID not found"
if response:
yield "response", response
if stdout_logs:
yield "stdout_logs", stdout_logs
if stderr_logs:
yield "stderr_logs", stderr_logs
except Exception as e:
yield "error", str(e)
async def execute_code(
self,
code: str,
language: ProgrammingLanguage,
setup_commands: list[str],
timeout: int,
api_key: str,
template_id: str,
):
try:
sandbox = None
if template_id:
sandbox = await AsyncSandbox.create(
template=template_id, api_key=api_key, timeout=timeout
)
else:
sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
if not sandbox:
raise Exception("Sandbox not created")
# Running setup commands
for cmd in setup_commands:
await sandbox.commands.run(cmd)
# Executing the code
execution = await sandbox.run_code(
code,
language=language.value,
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
)
if execution.error:
raise Exception(execution.error)
response = execution.text
stdout_logs = "".join(execution.logs.stdout)
stderr_logs = "".join(execution.logs.stderr)
return sandbox.sandbox_id, response, stdout_logs, stderr_logs
except Exception as e:
raise e
class StepExecutionBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.E2B], Literal["api_key"]
] = CredentialsField(
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
)
sandbox_id: str = SchemaField(
description="ID of the sandbox instance to execute the code in",
advanced=False,
)
step_code: str = SchemaField(
description="Code to execute in the sandbox",
placeholder="print('Hello, World!')",
default="",
advanced=False,
)
language: ProgrammingLanguage = SchemaField(
description="Programming language to execute",
default=ProgrammingLanguage.PYTHON,
advanced=False,
)
class Output(BlockSchema):
response: str = SchemaField(description="Response from code execution")
stdout_logs: str = SchemaField(
description="Standard output logs from execution"
)
stderr_logs: str = SchemaField(description="Standard error logs from execution")
error: str = SchemaField(description="Error message if execution failed")
def __init__(self):
super().__init__(
id="82b59b8e-ea10-4d57-9161-8b169b0adba6",
description="Execute code in a previously instantiated sandbox environment.",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=StepExecutionBlock.Input,
output_schema=StepExecutionBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"sandbox_id": "sandbox_id",
"step_code": "print('Hello World')",
"language": ProgrammingLanguage.PYTHON.value,
},
test_output=[
("response", "Hello World"),
("stdout_logs", "Hello World\n"),
],
test_mock={
"execute_step_code": lambda sandbox_id, step_code, language, api_key: (
"Hello World",
"Hello World\n",
"",
),
},
)
async def execute_step_code(
self,
sandbox_id: str,
code: str,
language: ProgrammingLanguage,
api_key: str,
):
try:
sandbox = await AsyncSandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
if not sandbox:
raise Exception("Sandbox not found")
# Executing the code
execution = await sandbox.run_code(code, language=language.value)
if execution.error:
raise Exception(execution.error)
response = execution.text
stdout_logs = "".join(execution.logs.stdout)
stderr_logs = "".join(execution.logs.stderr)
return response, stdout_logs, stderr_logs
except Exception as e:
raise e
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
response, stdout_logs, stderr_logs = await self.execute_step_code(
input_data.sandbox_id,
input_data.step_code,
input_data.language,
credentials.api_key.get_secret_value(),
)
if response:
yield "response", response
if stdout_logs:
yield "stdout_logs", stdout_logs
if stderr_logs:
yield "stderr_logs", stderr_logs
except Exception as e:
yield "error", str(e)


@@ -49,7 +49,7 @@ class CodeExtractionBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
# List of supported programming languages with mapped aliases
language_aliases = {
"html": ["html", "htm"],


@@ -8,7 +8,6 @@ from backend.data.block import (
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.compass import CompassWebhookType
@@ -43,7 +42,7 @@ class CompassAITriggerBlock(Block):
input_schema=CompassAITriggerBlock.Input,
output_schema=CompassAITriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.COMPASS,
provider="compass",
webhook_type=CompassWebhookType.TRANSCRIPTION,
),
test_input=[
@@ -56,5 +55,5 @@ class CompassAITriggerBlock(Block):
# ],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription


@@ -30,7 +30,7 @@ class WordCharacterCountBlock(Block):
test_output=[("word_count", 4), ("character_count", 19)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
text = input_data.text
word_count = len(text.split())


@@ -34,7 +34,7 @@ class ReadCsvBlock(Block):
)
skip_columns: list[str] = SchemaField(
description="The columns to skip from the start of the row",
default_factory=list,
default=[],
)
class Output(BlockSchema):
@@ -69,7 +69,7 @@ class ReadCsvBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO


@@ -1,683 +0,0 @@
from typing import Any, List
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import loads
from backend.util.mock import MockObject
from backend.util.prompt import estimate_token_count_str
# =============================================================================
# Dictionary Manipulation Blocks
# =============================================================================
class CreateDictionaryBlock(Block):
class Input(BlockSchema):
values: dict[str, Any] = SchemaField(
description="Key-value pairs to create the dictionary with",
placeholder="e.g., {'name': 'Alice', 'age': 25}",
)
class Output(BlockSchema):
dictionary: dict[str, Any] = SchemaField(
description="The created dictionary containing the specified key-value pairs"
)
error: str = SchemaField(
description="Error message if dictionary creation failed"
)
def __init__(self):
super().__init__(
id="b924ddf4-de4f-4b56-9a85-358930dcbc91",
description="Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.",
categories={BlockCategory.DATA},
input_schema=CreateDictionaryBlock.Input,
output_schema=CreateDictionaryBlock.Output,
test_input=[
{
"values": {"name": "Alice", "age": 25, "city": "New York"},
},
{
"values": {"numbers": [1, 2, 3], "active": True, "score": 95.5},
},
],
test_output=[
(
"dictionary",
{"name": "Alice", "age": 25, "city": "New York"},
),
(
"dictionary",
{"numbers": [1, 2, 3], "active": True, "score": 95.5},
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "dictionary", input_data.values
except Exception as e:
yield "error", f"Failed to create dictionary: {str(e)}"
class AddToDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
default_factory=dict,
description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
)
key: str = SchemaField(
default="",
description="The key for the new entry.",
placeholder="new_key",
advanced=False,
)
value: Any = SchemaField(
default=None,
description="The value for the new entry.",
placeholder="new_value",
advanced=False,
)
entries: dict[Any, Any] = SchemaField(
default_factory=dict,
description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
advanced=True,
)
class Output(BlockSchema):
updated_dictionary: dict = SchemaField(
description="The dictionary with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="31d1064e-7446-4693-a7d4-65e5ca1180d1",
description="Adds a new key-value pair to a dictionary. If no dictionary is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToDictionaryBlock.Input,
output_schema=AddToDictionaryBlock.Output,
test_input=[
{
"dictionary": {"existing_key": "existing_value"},
"key": "new_key",
"value": "new_value",
},
{"key": "first_key", "value": "first_value"},
{
"dictionary": {"existing_key": "existing_value"},
"entries": {"new_key": "new_value", "first_key": "first_value"},
},
],
test_output=[
(
"updated_dictionary",
{"existing_key": "existing_value", "new_key": "new_value"},
),
("updated_dictionary", {"first_key": "first_value"}),
(
"updated_dictionary",
{
"existing_key": "existing_value",
"new_key": "new_value",
"first_key": "first_value",
},
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
if input_data.value is not None and input_data.key:
updated_dict[input_data.key] = input_data.value
for key, value in input_data.entries.items():
updated_dict[key] = value
yield "updated_dictionary", updated_dict
class FindInDictionaryBlock(Block):
class Input(BlockSchema):
input: Any = SchemaField(description="Dictionary to lookup from")
key: str | int = SchemaField(description="Key to lookup in the dictionary")
class Output(BlockSchema):
output: Any = SchemaField(description="Value found for the given key")
missing: Any = SchemaField(
description="Value of the input that missing the key"
)
def __init__(self):
super().__init__(
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
description="Lookup the given key in the input dictionary/object/list and return the value.",
input_schema=FindInDictionaryBlock.Input,
output_schema=FindInDictionaryBlock.Output,
test_input=[
{"input": {"apple": 1, "banana": 2, "cherry": 3}, "key": "banana"},
{"input": {"x": 10, "y": 20, "z": 30}, "key": "w"},
{"input": [1, 2, 3], "key": 1},
{"input": [1, 2, 3], "key": 3},
{"input": MockObject(value="!!", key="key"), "key": "key"},
{"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"},
],
test_output=[
("output", 2),
("missing", {"x": 10, "y": 20, "z": 30}),
("output", 2),
("missing", [1, 2, 3]),
("output", "key"),
("output", ["v1", "v3"]),
],
categories={BlockCategory.BASIC},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
obj = input_data.input
key = input_data.key
if isinstance(obj, str):
obj = loads(obj)
if isinstance(obj, dict) and key in obj:
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, str):
if len(obj) == 0:
yield "output", []
elif isinstance(obj[0], dict) and key in obj[0]:
yield "output", [item[key] for item in obj if key in item]
else:
yield "output", [getattr(val, key) for val in obj if hasattr(val, key)]
elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key):
yield "output", getattr(obj, key)
else:
yield "missing", input_data.input
class RemoveFromDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
description="The dictionary to modify."
)
key: str | int = SchemaField(description="Key to remove from the dictionary.")
return_value: bool = SchemaField(
default=False, description="Whether to return the removed value."
)
class Output(BlockSchema):
updated_dictionary: dict[Any, Any] = SchemaField(
description="The dictionary after removal."
)
removed_value: Any = SchemaField(description="The removed value if requested.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="46afe2ea-c613-43f8-95ff-6692c3ef6876",
description="Removes a key-value pair from a dictionary.",
categories={BlockCategory.BASIC},
input_schema=RemoveFromDictionaryBlock.Input,
output_schema=RemoveFromDictionaryBlock.Output,
test_input=[
{
"dictionary": {"a": 1, "b": 2, "c": 3},
"key": "b",
"return_value": True,
},
{"dictionary": {"x": "hello", "y": "world"}, "key": "x"},
],
test_output=[
("updated_dictionary", {"a": 1, "c": 3}),
("removed_value", 2),
("updated_dictionary", {"y": "world"}),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
try:
removed_value = updated_dict.pop(input_data.key)
yield "updated_dictionary", updated_dict
if input_data.return_value:
yield "removed_value", removed_value
except KeyError:
yield "error", f"Key '{input_data.key}' not found in dictionary"
class ReplaceDictionaryValueBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
description="The dictionary to modify."
)
key: str | int = SchemaField(description="Key to replace the value for.")
value: Any = SchemaField(description="The new value for the given key.")
class Output(BlockSchema):
updated_dictionary: dict[Any, Any] = SchemaField(
description="The dictionary after replacement."
)
old_value: Any = SchemaField(description="The value that was replaced.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="27e31876-18b6-44f3-ab97-f6226d8b3889",
description="Replaces the value for a specified key in a dictionary.",
categories={BlockCategory.BASIC},
input_schema=ReplaceDictionaryValueBlock.Input,
output_schema=ReplaceDictionaryValueBlock.Output,
test_input=[
{"dictionary": {"a": 1, "b": 2, "c": 3}, "key": "b", "value": 99},
{
"dictionary": {"x": "hello", "y": "world"},
"key": "y",
"value": "universe",
},
],
test_output=[
("updated_dictionary", {"a": 1, "b": 99, "c": 3}),
("old_value", 2),
("updated_dictionary", {"x": "hello", "y": "universe"}),
("old_value", "world"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
try:
old_value = updated_dict[input_data.key]
updated_dict[input_data.key] = input_data.value
yield "updated_dictionary", updated_dict
yield "old_value", old_value
except KeyError:
yield "error", f"Key '{input_data.key}' not found in dictionary"
class DictionaryIsEmptyBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(description="The dictionary to check.")
class Output(BlockSchema):
is_empty: bool = SchemaField(description="True if the dictionary is empty.")
def __init__(self):
super().__init__(
id="a3cf3f64-6bb9-4cc6-9900-608a0b3359b0",
description="Checks if a dictionary is empty.",
categories={BlockCategory.BASIC},
input_schema=DictionaryIsEmptyBlock.Input,
output_schema=DictionaryIsEmptyBlock.Output,
test_input=[{"dictionary": {}}, {"dictionary": {"a": 1}}],
test_output=[("is_empty", True), ("is_empty", False)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "is_empty", len(input_data.dictionary) == 0
# =============================================================================
# List Manipulation Blocks
# =============================================================================
class CreateListBlock(Block):
class Input(BlockSchema):
values: List[Any] = SchemaField(
description="A list of values to be combined into a new list.",
placeholder="e.g., ['Alice', 25, True]",
)
max_size: int | None = SchemaField(
default=None,
description="Maximum size of the list. If provided, the list will be yielded in chunks of this size.",
advanced=True,
)
max_tokens: int | None = SchemaField(
default=None,
description="Maximum tokens for the list. If provided, the list will be yielded in chunks that fit within this token limit.",
advanced=True,
)
class Output(BlockSchema):
list: List[Any] = SchemaField(
description="The created list containing the specified values."
)
error: str = SchemaField(description="Error message if list creation failed.")
def __init__(self):
super().__init__(
id="a912d5c7-6e00-4542-b2a9-8034136930e4",
description="Creates a list with the specified values. Use this when you know all the values you want to add upfront. This block can also yield the list in batches based on a maximum size or token limit.",
categories={BlockCategory.DATA},
input_schema=CreateListBlock.Input,
output_schema=CreateListBlock.Output,
test_input=[
{
"values": ["Alice", 25, True],
},
{
"values": [1, 2, 3, "four", {"key": "value"}],
},
],
test_output=[
(
"list",
["Alice", 25, True],
),
(
"list",
[1, 2, 3, "four", {"key": "value"}],
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
chunk = []
cur_tokens, max_tokens = 0, input_data.max_tokens
cur_size, max_size = 0, input_data.max_size
for value in input_data.values:
if max_tokens:
tokens = estimate_token_count_str(value)
else:
tokens = 0
# Check if adding this value would exceed either limit
if (max_tokens and (cur_tokens + tokens > max_tokens)) or (
max_size and (cur_size + 1 > max_size)
):
yield "list", chunk
chunk = [value]
cur_size, cur_tokens = 1, tokens
else:
chunk.append(value)
cur_size, cur_tokens = cur_size + 1, cur_tokens + tokens
# Yield final chunk if any
if chunk or not input_data.values:
yield "list", chunk
class AddToListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(
default_factory=list,
advanced=False,
description="The list to add the entry to. If not provided, a new list will be created.",
)
entry: Any = SchemaField(
description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
advanced=False,
default=None,
)
entries: List[Any] = SchemaField(
default_factory=lambda: list(),
description="The entries to add to the list. This is the batch version of the `entry` field.",
advanced=True,
)
position: int | None = SchemaField(
default=None,
description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
)
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(
description="The list with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="aeb08fc1-2fc1-4141-bc8e-f758f183a822",
description="Adds a new entry to a list. The entry can be of any type. If no list is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToListBlock.Input,
output_schema=AddToListBlock.Output,
test_input=[
{
"list": [1, "string", {"existing_key": "existing_value"}],
"entry": {"new_key": "new_value"},
"position": 1,
},
{"entry": "first_entry"},
{"list": ["a", "b", "c"], "entry": "d"},
{
"entry": "e",
"entries": ["f", "g"],
"list": ["a", "b"],
"position": 1,
},
],
test_output=[
(
"updated_list",
[
1,
{"new_key": "new_value"},
"string",
{"existing_key": "existing_value"},
],
),
("updated_list", ["first_entry"]),
("updated_list", ["a", "b", "c", "d"]),
("updated_list", ["a", "f", "g", "e", "b"]),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
entries_added = input_data.entries.copy()
if input_data.entry:
entries_added.append(input_data.entry)
updated_list = input_data.list.copy()
if (pos := input_data.position) is not None:
updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
else:
updated_list += entries_added
yield "updated_list", updated_list
class FindInListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to search in.")
value: Any = SchemaField(description="The value to search for.")
class Output(BlockSchema):
index: int = SchemaField(description="The index of the value in the list.")
found: bool = SchemaField(
description="Whether the value was found in the list."
)
not_found_value: Any = SchemaField(
description="The value that was not found in the list."
)
def __init__(self):
super().__init__(
id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
description="Finds the index of the value in the list.",
categories={BlockCategory.BASIC},
input_schema=FindInListBlock.Input,
output_schema=FindInListBlock.Output,
test_input=[
{"list": [1, 2, 3, 4, 5], "value": 3},
{"list": [1, 2, 3, 4, 5], "value": 6},
],
test_output=[
("index", 2),
("found", True),
("found", False),
("not_found_value", 6),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
yield "index", input_data.list.index(input_data.value)
yield "found", True
except ValueError:
yield "found", False
yield "not_found_value", input_data.value
class GetListItemBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to get the item from.")
index: int = SchemaField(
description="The 0-based index of the item (supports negative indices)."
)
class Output(BlockSchema):
item: Any = SchemaField(description="The item at the specified index.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="262ca24c-1025-43cf-a578-534e23234e97",
description="Returns the element at the given index.",
categories={BlockCategory.BASIC},
input_schema=GetListItemBlock.Input,
output_schema=GetListItemBlock.Output,
test_input=[
{"list": [1, 2, 3], "index": 1},
{"list": [1, 2, 3], "index": -1},
],
test_output=[
("item", 2),
("item", 3),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
yield "item", input_data.list[input_data.index]
except IndexError:
yield "error", "Index out of range"
class RemoveFromListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to modify.")
value: Any = SchemaField(
default=None, description="Value to remove from the list."
)
index: int | None = SchemaField(
default=None,
description="Index of the item to pop (supports negative indices).",
)
return_item: bool = SchemaField(
default=False, description="Whether to return the removed item."
)
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(description="The list after removal.")
removed_item: Any = SchemaField(description="The removed item if requested.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="d93c5a93-ac7e-41c1-ae5c-ef67e6e9b826",
description="Removes an item from a list by value or index.",
categories={BlockCategory.BASIC},
input_schema=RemoveFromListBlock.Input,
output_schema=RemoveFromListBlock.Output,
test_input=[
{"list": [1, 2, 3], "index": 1, "return_item": True},
{"list": ["a", "b", "c"], "value": "b"},
],
test_output=[
("updated_list", [1, 3]),
("removed_item", 2),
("updated_list", ["a", "c"]),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
lst = input_data.list.copy()
removed = None
try:
if input_data.index is not None:
removed = lst.pop(input_data.index)
elif input_data.value is not None:
lst.remove(input_data.value)
removed = input_data.value
else:
raise ValueError("No index or value provided for removal")
except (IndexError, ValueError):
yield "error", "Index or value not found"
return
yield "updated_list", lst
if input_data.return_item:
yield "removed_item", removed
class ReplaceListItemBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to modify.")
index: int = SchemaField(
description="Index of the item to replace (supports negative indices)."
)
value: Any = SchemaField(description="The new value for the given index.")
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(description="The list after replacement.")
old_item: Any = SchemaField(description="The item that was replaced.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="fbf62922-bea1-4a3d-8bac-23587f810b38",
description="Replaces an item at the specified index.",
categories={BlockCategory.BASIC},
input_schema=ReplaceListItemBlock.Input,
output_schema=ReplaceListItemBlock.Output,
test_input=[
{"list": [1, 2, 3], "index": 1, "value": 99},
{"list": ["a", "b"], "index": -1, "value": "c"},
],
test_output=[
("updated_list", [1, 99, 3]),
("old_item", 2),
("updated_list", ["a", "c"]),
("old_item", "b"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
lst = input_data.list.copy()
try:
old = lst[input_data.index]
lst[input_data.index] = input_data.value
except IndexError:
yield "error", "Index out of range"
return
yield "updated_list", lst
yield "old_item", old
class ListIsEmptyBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to check.")
class Output(BlockSchema):
is_empty: bool = SchemaField(description="True if the list is empty.")
def __init__(self):
super().__init__(
id="896ed73b-27d0-41be-813c-c1c1dc856c03",
description="Checks if a list is empty.",
categories={BlockCategory.BASIC},
input_schema=ListIsEmptyBlock.Input,
output_schema=ListIsEmptyBlock.Output,
test_input=[{"list": []}, {"list": [1]}],
test_output=[("is_empty", True), ("is_empty", False)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "is_empty", len(input_data.list) == 0

View File

@@ -34,6 +34,6 @@ This is a "quoted" string.""",
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
decoded_text = codecs.decode(input_data.text, "unicode_escape")
yield "decoded_text", decoded_text

View File

@@ -1,3 +1,4 @@
import asyncio
from typing import Literal
import aiohttp
@@ -73,11 +74,7 @@ class ReadDiscordMessagesBlock(Block):
("username", "test_user"),
],
test_mock={
"run_bot": lambda token: {
"output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
"channel_name": "general",
"username": "test_user",
}
"run_bot": lambda token: asyncio.Future() # Create a Future object for mocking
},
)
@@ -109,24 +106,37 @@ class ReadDiscordMessagesBlock(Block):
if attachment.filename.endswith((".txt", ".py")):
async with aiohttp.ClientSession() as session:
async with session.get(attachment.url) as response:
file_content = response.text()
file_content = await response.text()
self.output_data += f"\n\nFile from user: {attachment.filename}\nContent: {file_content}"
await client.close()
await client.start(token.get_secret_value())
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
async for output_name, output_value in self.__run(input_data, credentials):
yield output_name, output_value
while True:
for output_name, output_value in self.__run(input_data, credentials):
yield output_name, output_value
break
async def __run(
self, input_data: Input, credentials: APIKeyCredentials
) -> BlockOutput:
def __run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
try:
result = await self.run_bot(credentials.api_key)
loop = asyncio.get_event_loop()
future = self.run_bot(credentials.api_key)
# If it's a Future (mock), set the result
if isinstance(future, asyncio.Future):
future.set_result(
{
"output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
"channel_name": "general",
"username": "test_user",
}
)
result = loop.run_until_complete(future)
# For testing purposes, use the mocked result
if isinstance(result, dict):
@@ -180,7 +190,7 @@ class SendDiscordMessageBlock(Block):
},
test_output=[("status", "Message sent")],
test_mock={
"send_message": lambda token, channel_name, message_content: "Message sent"
"send_message": lambda token, channel_name, message_content: asyncio.Future()
},
test_credentials=TEST_CREDENTIALS,
)
@@ -212,16 +222,23 @@ class SendDiscordMessageBlock(Block):
"""Splits a message into chunks not exceeding the Discord limit."""
return [message[i : i + limit] for i in range(0, len(message), limit)]
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self.send_message(
loop = asyncio.get_event_loop()
future = self.send_message(
credentials.api_key.get_secret_value(),
input_data.channel_name,
input_data.message_content,
)
# If it's a Future (mock), set the result
if isinstance(future, asyncio.Future):
future.set_result("Message sent")
result = loop.run_until_complete(future)
# For testing purposes, use the mocked result
if isinstance(result, str):
self.output_data = result

View File

@@ -1,48 +0,0 @@
"""
ElevenLabs integration blocks for AutoGPT Platform.
"""
# Speech generation blocks
from .speech import (
ElevenLabsGenerateSpeechBlock,
ElevenLabsGenerateSpeechWithTimestampsBlock,
)
# Speech-to-text blocks
from .transcription import (
ElevenLabsTranscribeAudioAsyncBlock,
ElevenLabsTranscribeAudioSyncBlock,
)
# Webhook trigger blocks
from .triggers import ElevenLabsWebhookTriggerBlock
# Utility blocks
from .utility import ElevenLabsGetUsageStatsBlock, ElevenLabsListModelsBlock
# Voice management blocks
from .voices import (
ElevenLabsCreateVoiceCloneBlock,
ElevenLabsDeleteVoiceBlock,
ElevenLabsGetVoiceDetailsBlock,
ElevenLabsListVoicesBlock,
)
__all__ = [
# Voice management
"ElevenLabsListVoicesBlock",
"ElevenLabsGetVoiceDetailsBlock",
"ElevenLabsCreateVoiceCloneBlock",
"ElevenLabsDeleteVoiceBlock",
# Speech generation
"ElevenLabsGenerateSpeechBlock",
"ElevenLabsGenerateSpeechWithTimestampsBlock",
# Speech-to-text
"ElevenLabsTranscribeAudioSyncBlock",
"ElevenLabsTranscribeAudioAsyncBlock",
# Utility
"ElevenLabsListModelsBlock",
"ElevenLabsGetUsageStatsBlock",
# Webhook triggers
"ElevenLabsWebhookTriggerBlock",
]

View File

@@ -1,16 +0,0 @@
"""
Shared configuration for all ElevenLabs blocks using the SDK pattern.
"""
from backend.sdk import BlockCostType, ProviderBuilder
from ._webhook import ElevenLabsWebhookManager
# Configure the ElevenLabs provider with API key authentication
elevenlabs = (
ProviderBuilder("elevenlabs")
.with_api_key("ELEVENLABS_API_KEY", "ElevenLabs API Key")
.with_webhook_manager(ElevenLabsWebhookManager)
.with_base_cost(2, BlockCostType.RUN) # Base cost for API calls
.build()
)

View File

@@ -1,82 +0,0 @@
"""
ElevenLabs webhook manager for handling webhook events.
"""
import hashlib
import hmac
from typing import Tuple
from backend.data.model import Credentials
from backend.sdk import BaseWebhooksManager, ProviderName, Webhook
class ElevenLabsWebhookManager(BaseWebhooksManager):
"""Manages ElevenLabs webhook events."""
PROVIDER_NAME = ProviderName("elevenlabs")
@classmethod
async def validate_payload(cls, webhook: Webhook, request) -> Tuple[dict, str]:
"""
Validate incoming webhook payload and signature.
ElevenLabs supports HMAC authentication for webhooks.
"""
payload = await request.json()
# Verify webhook signature if configured
if webhook.secret:
webhook_secret = webhook.config.get("webhook_secret")
if webhook_secret:
# Get the raw body for signature verification
body = await request.body()
# Calculate expected signature
expected_signature = hmac.new(
webhook_secret.encode(), body, hashlib.sha256
).hexdigest()
# Get signature from headers
signature = request.headers.get("x-elevenlabs-signature")
if signature and not hmac.compare_digest(signature, expected_signature):
raise ValueError("Invalid webhook signature")
# Extract event type from payload
event_type = payload.get("type", "unknown")
return payload, event_type
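# For testing the validator above, a minimal sketch of producing a matching
# signature: HMAC-SHA256 over the raw request body, hex-encoded, sent in the
# "x-elevenlabs-signature" header (header name taken from the code above; the
# payload shape here is illustrative only).
import hashlib
import hmac
import json

def sign_body(webhook_secret: str, body: bytes) -> str:
    return hmac.new(webhook_secret.encode(), body, hashlib.sha256).hexdigest()

body = json.dumps({"type": "speech_to_text_completed", "data": {}}).encode()
headers = {"x-elevenlabs-signature": sign_body("my-webhook-secret", body)}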
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""
Register a webhook with ElevenLabs.
Note: ElevenLabs webhook registration is done through their dashboard,
not via API. This is a placeholder implementation.
"""
# ElevenLabs requires manual webhook setup through dashboard
# Return empty webhook ID and config with instructions
config = {
"manual_setup_required": True,
"webhook_secret": secret,
"instructions": "Please configure webhook URL in ElevenLabs dashboard",
}
return "", config
async def _deregister_webhook(
self, webhook: Webhook, credentials: Credentials
) -> None:
"""
Deregister a webhook with ElevenLabs.
Note: ElevenLabs webhook removal is done through their dashboard.
"""
# ElevenLabs requires manual webhook removal through dashboard
pass

View File

@@ -1,179 +0,0 @@
"""
ElevenLabs speech generation (text-to-speech) blocks.
"""
from typing import Optional
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import elevenlabs
class ElevenLabsGenerateSpeechBlock(Block):
"""
Turn text into audio (binary).
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
voice_id: str = SchemaField(description="ID of the voice to use")
text: str = SchemaField(description="Text to convert to speech")
model_id: str = SchemaField(
description="Model ID to use for generation",
default="eleven_multilingual_v2",
)
output_format: str = SchemaField(
description="Audio format (e.g., mp3_44100_128)",
default="mp3_44100_128",
)
voice_settings: Optional[dict] = SchemaField(
description="Override voice settings (stability, similarity_boost, etc.)",
default=None,
)
language_code: Optional[str] = SchemaField(
description="Language code to enforce output language", default=None
)
seed: Optional[int] = SchemaField(
description="Seed for reproducible output", default=None
)
class Output(BlockSchema):
audio: str = SchemaField(description="Base64-encoded audio data")
def __init__(self):
super().__init__(
id="c5d6e7f8-a9b0-c1d2-e3f4-a5b6c7d8e9f0",
description="Generate speech audio from text using a specified voice",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
import base64
api_key = credentials.api_key.get_secret_value()
# Build request body
body: dict[str, str | int | dict] = {
"text": input_data.text,
"model_id": input_data.model_id,
}
# Add optional fields
if input_data.voice_settings:
body["voice_settings"] = input_data.voice_settings
if input_data.language_code:
body["language_code"] = input_data.language_code
if input_data.seed is not None:
body["seed"] = input_data.seed
# Generate speech
response = await Requests().post(
f"https://api.elevenlabs.io/v1/text-to-speech/{input_data.voice_id}",
headers={
"xi-api-key": api_key,
"Content-Type": "application/json",
},
json=body,
params={"output_format": input_data.output_format},
)
# Get audio data and encode to base64
audio_data = response.content
audio_base64 = base64.b64encode(audio_data).decode("utf-8")
yield "audio", audio_base64
class ElevenLabsGenerateSpeechWithTimestampsBlock(Block):
"""
Text to audio AND per-character timing data.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
voice_id: str = SchemaField(description="ID of the voice to use")
text: str = SchemaField(description="Text to convert to speech")
model_id: str = SchemaField(
description="Model ID to use for generation",
default="eleven_multilingual_v2",
)
output_format: str = SchemaField(
description="Audio format (e.g., mp3_44100_128)",
default="mp3_44100_128",
)
voice_settings: Optional[dict] = SchemaField(
description="Override voice settings (stability, similarity_boost, etc.)",
default=None,
)
language_code: Optional[str] = SchemaField(
description="Language code to enforce output language", default=None
)
class Output(BlockSchema):
audio_base64: str = SchemaField(description="Base64-encoded audio data")
alignment: dict = SchemaField(
description="Character-level timing alignment data"
)
normalized_alignment: dict = SchemaField(
description="Normalized text alignment data"
)
def __init__(self):
super().__init__(
id="d6e7f8a9-b0c1-d2e3-f4a5-b6c7d8e9f0a1",
description="Generate speech with character-level timestamp information",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build request body
body: dict[str, str | dict] = {
"text": input_data.text,
"model_id": input_data.model_id,
}
# Add optional fields
if input_data.voice_settings:
body["voice_settings"] = input_data.voice_settings
if input_data.language_code:
body["language_code"] = input_data.language_code
# Generate speech with timestamps
response = await Requests().post(
f"https://api.elevenlabs.io/v1/text-to-speech/{input_data.voice_id}/with-timestamps",
headers={
"xi-api-key": api_key,
"Content-Type": "application/json",
},
json=body,
params={"output_format": input_data.output_format},
)
data = response.json()
yield "audio_base64", data.get("audio_base64", "")
yield "alignment", data.get("alignment", {})
yield "normalized_alignment", data.get("normalized_alignment", {})

View File

@@ -1,232 +0,0 @@
"""
ElevenLabs speech-to-text (transcription) blocks.
"""
from typing import Optional
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import elevenlabs
class ElevenLabsTranscribeAudioSyncBlock(Block):
"""
Synchronously convert audio to text (+ word timestamps, diarization).
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
model_id: str = SchemaField(
description="Model ID for transcription", default="scribe_v1"
)
file: Optional[str] = SchemaField(
description="Base64-encoded audio file", default=None
)
cloud_storage_url: Optional[str] = SchemaField(
description="URL to audio file in cloud storage", default=None
)
language_code: Optional[str] = SchemaField(
description="Language code (ISO 639-1 or -3) to improve accuracy",
default=None,
)
diarize: bool = SchemaField(
description="Enable speaker diarization", default=False
)
num_speakers: Optional[int] = SchemaField(
description="Expected number of speakers (max 32)", default=None
)
timestamps_granularity: str = SchemaField(
description="Timestamp detail level: word, character, or none",
default="word",
)
tag_audio_events: bool = SchemaField(
description="Tag non-speech sounds (laughter, noise)", default=True
)
class Output(BlockSchema):
text: str = SchemaField(description="Full transcribed text")
words: list[dict] = SchemaField(
description="Array with word timing and speaker info"
)
language_code: str = SchemaField(description="Detected language code")
language_probability: float = SchemaField(
description="Confidence in language detection"
)
def __init__(self):
super().__init__(
id="e7f8a9b0-c1d2-e3f4-a5b6-c7d8e9f0a1b2",
description="Transcribe audio to text with timing and speaker information",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
import base64
from io import BytesIO
api_key = credentials.api_key.get_secret_value()
# Validate input - must have either file or URL
if not input_data.file and not input_data.cloud_storage_url:
raise ValueError("Either 'file' or 'cloud_storage_url' must be provided")
if input_data.file and input_data.cloud_storage_url:
raise ValueError(
"Only one of 'file' or 'cloud_storage_url' should be provided"
)
# Build form data
form_data = {
"model_id": input_data.model_id,
"diarize": str(input_data.diarize).lower(),
"timestamps_granularity": input_data.timestamps_granularity,
"tag_audio_events": str(input_data.tag_audio_events).lower(),
}
if input_data.language_code:
form_data["language_code"] = input_data.language_code
if input_data.num_speakers is not None:
form_data["num_speakers"] = str(input_data.num_speakers)
# Handle file or URL
files = None
if input_data.file:
# Decode base64 file
file_data = base64.b64decode(input_data.file)
files = [("file", ("audio.wav", BytesIO(file_data), "audio/wav"))]
elif input_data.cloud_storage_url:
form_data["cloud_storage_url"] = input_data.cloud_storage_url
# Transcribe audio
response = await Requests().post(
"https://api.elevenlabs.io/v1/speech-to-text",
headers={"xi-api-key": api_key},
data=form_data,
files=files,
)
data = response.json()
yield "text", data.get("text", "")
yield "words", data.get("words", [])
yield "language_code", data.get("language_code", "")
yield "language_probability", data.get("language_probability", 0.0)
class ElevenLabsTranscribeAudioAsyncBlock(Block):
"""
Kick off transcription that returns quickly; result arrives via webhook.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
model_id: str = SchemaField(
description="Model ID for transcription", default="scribe_v1"
)
file: Optional[str] = SchemaField(
description="Base64-encoded audio file", default=None
)
cloud_storage_url: Optional[str] = SchemaField(
description="URL to audio file in cloud storage", default=None
)
language_code: Optional[str] = SchemaField(
description="Language code (ISO 639-1 or -3) to improve accuracy",
default=None,
)
diarize: bool = SchemaField(
description="Enable speaker diarization", default=False
)
num_speakers: Optional[int] = SchemaField(
description="Expected number of speakers (max 32)", default=None
)
timestamps_granularity: str = SchemaField(
description="Timestamp detail level: word, character, or none",
default="word",
)
webhook_url: str = SchemaField(
description="URL to receive transcription result",
default="",
)
class Output(BlockSchema):
tracking_id: str = SchemaField(description="ID to track the transcription job")
def __init__(self):
super().__init__(
id="f8a9b0c1-d2e3-f4a5-b6c7-d8e9f0a1b2c3",
description="Start async transcription with webhook callback",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
import base64
import uuid
from io import BytesIO
api_key = credentials.api_key.get_secret_value()
# Validate input
if not input_data.file and not input_data.cloud_storage_url:
raise ValueError("Either 'file' or 'cloud_storage_url' must be provided")
if input_data.file and input_data.cloud_storage_url:
raise ValueError(
"Only one of 'file' or 'cloud_storage_url' should be provided"
)
# Build form data
form_data = {
"model_id": input_data.model_id,
"diarize": str(input_data.diarize).lower(),
"timestamps_granularity": input_data.timestamps_granularity,
"webhook": "true", # Enable async mode
}
if input_data.language_code:
form_data["language_code"] = input_data.language_code
if input_data.num_speakers is not None:
form_data["num_speakers"] = str(input_data.num_speakers)
if input_data.webhook_url:
form_data["webhook_url"] = input_data.webhook_url
# Handle file or URL
files = None
if input_data.file:
# Decode base64 file
file_data = base64.b64decode(input_data.file)
files = [("file", ("audio.wav", BytesIO(file_data), "audio/wav"))]
elif input_data.cloud_storage_url:
form_data["cloud_storage_url"] = input_data.cloud_storage_url
# Start async transcription
response = await Requests().post(
"https://api.elevenlabs.io/v1/speech-to-text",
headers={"xi-api-key": api_key},
data=form_data,
files=files,
)
# Generate tracking ID (API might return one)
data = response.json()
tracking_id = data.get("tracking_id", str(uuid.uuid4()))
yield "tracking_id", tracking_id

View File

@@ -1,160 +0,0 @@
"""
ElevenLabs webhook trigger blocks.
"""
from pydantic import BaseModel
from backend.sdk import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockType,
BlockWebhookConfig,
CredentialsMetaInput,
ProviderName,
SchemaField,
)
from ._config import elevenlabs
class ElevenLabsWebhookTriggerBlock(Block):
"""
Starts a flow when ElevenLabs POSTs an event (STT finished, voice removal, etc.).
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
webhook_url: str = SchemaField(
description="URL to receive webhooks (auto-generated)",
default="",
hidden=True,
)
class EventsFilter(BaseModel):
"""ElevenLabs event types to subscribe to"""
speech_to_text_completed: bool = SchemaField(
description="Speech-to-text transcription completed", default=True
)
post_call_transcription: bool = SchemaField(
description="Conversational AI call transcription completed",
default=True,
)
voice_removal_notice: bool = SchemaField(
description="Voice scheduled for removal", default=True
)
voice_removed: bool = SchemaField(
description="Voice has been removed", default=True
)
voice_removal_notice_withdrawn: bool = SchemaField(
description="Voice removal cancelled", default=True
)
events: EventsFilter = SchemaField(
title="Events", description="The events to subscribe to"
)
# Webhook payload - populated by the system
payload: dict = SchemaField(
description="Webhook payload data",
default={},
hidden=True,
)
class Output(BlockSchema):
type: str = SchemaField(description="Event type")
event_timestamp: int = SchemaField(description="Unix timestamp of the event")
data: dict = SchemaField(description="Event-specific data payload")
def __init__(self):
super().__init__(
id="c1d2e3f4-a5b6-c7d8-e9f0-a1b2c3d4e5f6",
description="Receive webhook events from ElevenLabs",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
block_type=BlockType.WEBHOOK,
webhook_config=BlockWebhookConfig(
provider=ProviderName("elevenlabs"),
webhook_type="notification",
event_filter_input="events",
resource_format="",
),
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# Extract webhook data
payload = input_data.payload
# Extract event type
event_type = payload.get("type", "unknown")
# Map event types to filter fields
event_filter_map = {
"speech_to_text_completed": input_data.events.speech_to_text_completed,
"post_call_transcription": input_data.events.post_call_transcription,
"voice_removal_notice": input_data.events.voice_removal_notice,
"voice_removed": input_data.events.voice_removed,
"voice_removal_notice_withdrawn": input_data.events.voice_removal_notice_withdrawn,
}
# Check if this event type is enabled
if not event_filter_map.get(event_type, False):
# Skip this event
return
# Extract common fields
yield "type", event_type
yield "event_timestamp", payload.get("event_timestamp", 0)
# Extract event-specific data
data = payload.get("data", {})
# Process based on event type
if event_type == "speech_to_text_completed":
# STT transcription completed
processed_data = {
"transcription_id": data.get("transcription_id"),
"text": data.get("text"),
"words": data.get("words", []),
"language_code": data.get("language_code"),
"language_probability": data.get("language_probability"),
}
elif event_type == "post_call_transcription":
# Conversational AI call transcription
processed_data = {
"agent_id": data.get("agent_id"),
"conversation_id": data.get("conversation_id"),
"transcript": data.get("transcript"),
"metadata": data.get("metadata", {}),
}
elif event_type == "voice_removal_notice":
# Voice scheduled for removal
processed_data = {
"voice_id": data.get("voice_id"),
"voice_name": data.get("voice_name"),
"removal_date": data.get("removal_date"),
"reason": data.get("reason"),
}
elif event_type == "voice_removal_notice_withdrawn":
# Voice removal cancelled
processed_data = {
"voice_id": data.get("voice_id"),
"voice_name": data.get("voice_name"),
}
elif event_type == "voice_removed":
# Voice has been removed
processed_data = {
"voice_id": data.get("voice_id"),
"voice_name": data.get("voice_name"),
"removed_at": data.get("removed_at"),
}
else:
# Unknown event type, pass through raw data
processed_data = data
yield "data", processed_data

View File

@@ -1,116 +0,0 @@
"""
ElevenLabs utility blocks for models and usage stats.
"""
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import elevenlabs
class ElevenLabsListModelsBlock(Block):
"""
Get all available model IDs & capabilities.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
class Output(BlockSchema):
models: list[dict] = SchemaField(
description="Array of model objects with capabilities"
)
def __init__(self):
super().__init__(
id="a9b0c1d2-e3f4-a5b6-c7d8-e9f0a1b2c3d4",
description="List all available voice models and their capabilities",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Fetch models
response = await Requests().get(
"https://api.elevenlabs.io/v1/models",
headers={"xi-api-key": api_key},
)
models = response.json()
yield "models", models
class ElevenLabsGetUsageStatsBlock(Block):
"""
Character / credit usage for billing dashboards.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
start_unix: int = SchemaField(
description="Start timestamp in Unix epoch seconds"
)
end_unix: int = SchemaField(description="End timestamp in Unix epoch seconds")
aggregation_interval: str = SchemaField(
description="Aggregation interval: daily or monthly",
default="daily",
)
class Output(BlockSchema):
usage: list[dict] = SchemaField(description="Array of usage data per interval")
total_character_count: int = SchemaField(
description="Total characters used in period"
)
total_requests: int = SchemaField(description="Total API requests in period")
def __init__(self):
super().__init__(
id="b0c1d2e3-f4a5-b6c7-d8e9-f0a1b2c3d4e5",
description="Get character and credit usage statistics",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params = {
"start_unix": input_data.start_unix,
"end_unix": input_data.end_unix,
"aggregation_interval": input_data.aggregation_interval,
}
# Fetch usage stats
response = await Requests().get(
"https://api.elevenlabs.io/v1/usage/character-stats",
headers={"xi-api-key": api_key},
params=params,
)
data = response.json()
yield "usage", data.get("usage", [])
yield "total_character_count", data.get("total_character_count", 0)
yield "total_requests", data.get("total_requests", 0)

View File

@@ -1,249 +0,0 @@
"""
ElevenLabs voice management blocks.
"""
from typing import Optional
from backend.sdk import (
APIKeyCredentials,
Block,
BlockCategory,
BlockOutput,
BlockSchema,
CredentialsMetaInput,
Requests,
SchemaField,
)
from ._config import elevenlabs
class ElevenLabsListVoicesBlock(Block):
"""
Fetch all voices the account can use (for pick-lists, UI menus, etc.).
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
search: str = SchemaField(
description="Search term to filter voices", default=""
)
voice_type: Optional[str] = SchemaField(
description="Filter by voice type: premade, cloned, or professional",
default=None,
)
page_size: int = SchemaField(
description="Number of voices per page (max 100)", default=10
)
next_page_token: str = SchemaField(
description="Token for fetching next page", default=""
)
class Output(BlockSchema):
voices: list[dict] = SchemaField(
description="Array of voice objects with id, name, category, etc."
)
next_page_token: Optional[str] = SchemaField(
description="Token for fetching next page, null if no more pages"
)
def __init__(self):
super().__init__(
id="e1a2b3c4-d5e6-f7a8-b9c0-d1e2f3a4b5c6",
description="List all available voices with filtering and pagination",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Build query parameters
params: dict[str, str | int] = {"page_size": input_data.page_size}
if input_data.search:
params["search"] = input_data.search
if input_data.voice_type:
params["voice_type"] = input_data.voice_type
if input_data.next_page_token:
params["next_page_token"] = input_data.next_page_token
# Fetch voices
response = await Requests().get(
"https://api.elevenlabs.io/v2/voices",
headers={"xi-api-key": api_key},
params=params,
)
data = response.json()
yield "voices", data.get("voices", [])
yield "next_page_token", data.get("next_page_token")
class ElevenLabsGetVoiceDetailsBlock(Block):
"""
Retrieve metadata/settings for a single voice.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
voice_id: str = SchemaField(description="The ID of the voice to retrieve")
class Output(BlockSchema):
voice: dict = SchemaField(
description="Voice object with name, labels, settings, etc."
)
def __init__(self):
super().__init__(
id="f2a3b4c5-d6e7-f8a9-b0c1-d2e3f4a5b6c7",
description="Get detailed information about a specific voice",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Fetch voice details
response = await Requests().get(
f"https://api.elevenlabs.io/v1/voices/{input_data.voice_id}",
headers={"xi-api-key": api_key},
)
voice = response.json()
yield "voice", voice
class ElevenLabsCreateVoiceCloneBlock(Block):
"""
Upload sample clips to create a custom (IVC) voice.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
name: str = SchemaField(description="Name for the new voice")
files: list[str] = SchemaField(
description="Base64-encoded audio files (1-10 files, max 25MB each)"
)
description: str = SchemaField(
description="Description of the voice", default=""
)
labels: dict = SchemaField(
description="Metadata labels (e.g., accent, age)", default={}
)
remove_background_noise: bool = SchemaField(
description="Whether to remove background noise from samples", default=False
)
class Output(BlockSchema):
voice_id: str = SchemaField(description="ID of the newly created voice")
requires_verification: bool = SchemaField(
description="Whether the voice requires verification"
)
def __init__(self):
super().__init__(
id="a3b4c5d6-e7f8-a9b0-c1d2-e3f4a5b6c7d8",
description="Create a new voice clone from audio samples",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
import base64
import json
from io import BytesIO
api_key = credentials.api_key.get_secret_value()
# Prepare multipart form data
form_data = {
"name": input_data.name,
}
if input_data.description:
form_data["description"] = input_data.description
if input_data.labels:
form_data["labels"] = json.dumps(input_data.labels)
if input_data.remove_background_noise:
form_data["remove_background_noise"] = "true"
# Prepare files
files = []
for i, file_b64 in enumerate(input_data.files):
file_data = base64.b64decode(file_b64)
files.append(
("files", (f"sample_{i}.mp3", BytesIO(file_data), "audio/mpeg"))
)
# Create voice
response = await Requests().post(
"https://api.elevenlabs.io/v1/voices/add",
headers={"xi-api-key": api_key},
data=form_data,
files=files,
)
result = response.json()
yield "voice_id", result.get("voice_id", "")
yield "requires_verification", result.get("requires_verification", False)
class ElevenLabsDeleteVoiceBlock(Block):
"""
Permanently remove a custom voice.
"""
class Input(BlockSchema):
credentials: CredentialsMetaInput = elevenlabs.credentials_field(
description="ElevenLabs API credentials"
)
voice_id: str = SchemaField(description="The ID of the voice to delete")
class Output(BlockSchema):
status: str = SchemaField(description="Deletion status (ok or error)")
def __init__(self):
super().__init__(
id="b4c5d6e7-f8a9-b0c1-d2e3-f4a5b6c7d8e9",
description="Delete a custom voice from your account",
categories={BlockCategory.AI},
input_schema=self.Input,
output_schema=self.Output,
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
api_key = credentials.api_key.get_secret_value()
# Delete voice
response = await Requests().delete(
f"https://api.elevenlabs.io/v1/voices/{input_data.voice_id}",
headers={"xi-api-key": api_key},
)
# Check if successful
if response.status in [200, 204]:
yield "status", "ok"
else:
yield "status", "error"

View File

@@ -1,53 +1,22 @@
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
SchemaField,
UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName
TEST_CREDENTIALS = UserPasswordCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="smtp",
username=SecretStr("mock-smtp-username"),
password=SecretStr("mock-smtp-password"),
title="Mock SMTP credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
SMTPCredentials = UserPasswordCredentials
SMTPCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.SMTP],
Literal["user_password"],
]
from backend.data.model import BlockSecret, SchemaField, SecretField
def SMTPCredentialsField() -> SMTPCredentialsInput:
return CredentialsField(
description="The SMTP integration requires a username and password.",
)
class SMTPConfig(BaseModel):
class EmailCredentials(BaseModel):
smtp_server: str = SchemaField(
default="smtp.example.com", description="SMTP server address"
default="smtp.gmail.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
smtp_username: BlockSecret = SecretField(key="smtp_username")
smtp_password: BlockSecret = SecretField(key="smtp_password")
model_config = ConfigDict(title="SMTP Config")
model_config = ConfigDict(title="Email Credentials")
class SendEmailBlock(Block):
@@ -61,11 +30,10 @@ class SendEmailBlock(Block):
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
config: SMTPConfig = SchemaField(
description="SMTP Config",
default=SMTPConfig(),
creds: EmailCredentials = SchemaField(
description="SMTP credentials",
default=EmailCredentials(),
)
credentials: SMTPCredentialsInput = SMTPCredentialsField()
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
@@ -75,6 +43,7 @@ class SendEmailBlock(Block):
def __init__(self):
super().__init__(
disabled=True,
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
@@ -84,29 +53,25 @@ class SendEmailBlock(Block):
"to_email": "recipient@example.com",
"subject": "Test Email",
"body": "This is a test email.",
"config": {
"creds": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
"smtp_username": "your-email@gmail.com",
"smtp_password": "your-gmail-password",
},
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
config: SMTPConfig,
to_email: str,
subject: str,
body: str,
credentials: SMTPCredentials,
creds: EmailCredentials, to_email: str, subject: str, body: str
) -> str:
smtp_server = config.smtp_server
smtp_port = config.smtp_port
smtp_username = credentials.username.get_secret_value()
smtp_password = credentials.password.get_secret_value()
smtp_server = creds.smtp_server
smtp_port = creds.smtp_port
smtp_username = creds.smtp_username.get_secret_value()
smtp_password = creds.smtp_password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
@@ -121,13 +86,10 @@ class SendEmailBlock(Block):
return "Email sent successfully"
async def run(
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "status", self.send_email(
config=input_data.config,
to_email=input_data.to_email,
subject=input_data.subject,
body=input_data.body,
credentials=credentials,
input_data.creds,
input_data.to_email,
input_data.subject,
input_data.body,
)
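# For context, a self-contained sketch of the smtplib send path this block wraps.
# Illustrative only: the hunk above elides the block's actual connection code,
# and details such as STARTTLS handling are assumptions here.
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def send_plain_email(server: str, port: int, username: str, password: str,
                     to_email: str, subject: str, body: str) -> str:
    msg = MIMEMultipart()
    msg["From"] = username
    msg["To"] = to_email
    msg["Subject"] = subject
    msg.attach(MIMEText(body, "plain"))
    with smtplib.SMTP(server, port) as smtp:
        smtp.starttls()  # assumed; some servers use implicit TLS or none
        smtp.login(username, password)
        smtp.send_message(msg)
    return "Email sent successfully"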

View File

@@ -5,19 +5,20 @@ from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
ApolloCredentials = APIKeyCredentials
ApolloCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.APOLLO],
ExaCredentials = APIKeyCredentials
ExaCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.EXA],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="apollo",
api_key=SecretStr("mock-apollo-api-key"),
title="Mock Apollo API key",
provider="exa",
api_key=SecretStr("mock-exa-api-key"),
title="Mock Exa API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
@@ -26,10 +27,6 @@ TEST_CREDENTIALS_INPUT = {
}
def ApolloCredentialsField() -> ApolloCredentialsInput:
"""
Creates an Apollo credentials input on a block.
"""
return CredentialsField(
description="The Apollo integration can be used with an API Key.",
)
def ExaCredentialsField() -> ExaCredentialsInput:
"""Creates an Exa credentials input on a block."""
return CredentialsField(description="The Exa integration requires an API Key.")

View File

@@ -1,16 +0,0 @@
"""
Shared configuration for all Exa blocks using the new SDK pattern.
"""
from backend.sdk import BlockCostType, ProviderBuilder
from ._webhook import ExaWebhookManager
# Configure the Exa provider once for all blocks
exa = (
ProviderBuilder("exa")
.with_api_key("EXA_API_KEY", "Exa API Key")
.with_webhook_manager(ExaWebhookManager)
.with_base_cost(1, BlockCostType.RUN)
.build()
)

View File

@@ -1,134 +0,0 @@
"""
Exa Webhook Manager implementation.
"""
import hashlib
import hmac
from enum import Enum
from backend.sdk import (
APIKeyCredentials,
BaseWebhooksManager,
Credentials,
ProviderName,
Requests,
Webhook,
)
class ExaWebhookType(str, Enum):
"""Available webhook types for Exa."""
WEBSET = "webset"
class ExaEventType(str, Enum):
"""Available event types for Exa webhooks."""
WEBSET_CREATED = "webset.created"
WEBSET_DELETED = "webset.deleted"
WEBSET_PAUSED = "webset.paused"
WEBSET_IDLE = "webset.idle"
WEBSET_SEARCH_CREATED = "webset.search.created"
WEBSET_SEARCH_CANCELED = "webset.search.canceled"
WEBSET_SEARCH_COMPLETED = "webset.search.completed"
WEBSET_SEARCH_UPDATED = "webset.search.updated"
IMPORT_CREATED = "import.created"
IMPORT_COMPLETED = "import.completed"
IMPORT_PROCESSING = "import.processing"
WEBSET_ITEM_CREATED = "webset.item.created"
WEBSET_ITEM_ENRICHED = "webset.item.enriched"
WEBSET_EXPORT_CREATED = "webset.export.created"
WEBSET_EXPORT_COMPLETED = "webset.export.completed"
class ExaWebhookManager(BaseWebhooksManager):
"""Webhook manager for Exa API."""
PROVIDER_NAME = ProviderName("exa")
class WebhookType(str, Enum):
WEBSET = "webset"
@classmethod
async def validate_payload(cls, webhook: Webhook, request) -> tuple[dict, str]:
"""Validate incoming webhook payload and signature."""
payload = await request.json()
# Get event type from payload
event_type = payload.get("eventType", "unknown")
# Verify webhook signature if secret is available
if webhook.secret:
signature = request.headers.get("X-Exa-Signature")
if signature:
# Compute expected signature
body = await request.body()
expected_signature = hmac.new(
webhook.secret.encode(), body, hashlib.sha256
).hexdigest()
# Compare signatures
if not hmac.compare_digest(signature, expected_signature):
raise ValueError("Invalid webhook signature")
return payload, event_type
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register webhook with Exa API."""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("Exa webhooks require API key credentials")
api_key = credentials.api_key.get_secret_value()
# Create webhook via Exa API
response = await Requests().post(
"https://api.exa.ai/v0/webhooks",
headers={"x-api-key": api_key},
json={
"url": ingress_url,
"events": events,
"metadata": {
"resource": resource,
"webhook_type": webhook_type,
},
},
)
if not response.ok:
error_data = response.json()
raise Exception(f"Failed to create Exa webhook: {error_data}")
webhook_data = response.json()
# Store the secret returned by Exa
return webhook_data["id"], {
"events": events,
"resource": resource,
"exa_secret": webhook_data.get("secret"),
}
async def _deregister_webhook(
self, webhook: Webhook, credentials: Credentials
) -> None:
"""Deregister webhook from Exa API."""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("Exa webhooks require API key credentials")
api_key = credentials.api_key.get_secret_value()
# Delete webhook via Exa API
response = await Requests().delete(
f"https://api.exa.ai/v0/webhooks/{webhook.provider_webhook_id}",
headers={"x-api-key": api_key},
)
if not response.ok and response.status != 404:
error_data = response.json()
raise Exception(f"Failed to delete Exa webhook: {error_data}")

View File

@@ -1,190 +0,0 @@
API Reference
Answer
Get an LLM answer to a question informed by Exa search results. Fully compatible with OpenAI's chat completions endpoint (docs here). /answer performs an Exa search and uses an LLM to generate either:
A direct answer for specific queries (e.g. "What is the capital of France?" would return "Paris"), or
A detailed summary with citations for open-ended queries (e.g. "What is the state of AI in healthcare?" would return a summary with citations to relevant sources).
The response includes both the generated answer and the sources used to create it. The endpoint also supports streaming (stream=true), which returns tokens as they are generated.
POST /answer
Authorizations
x-api-key (string, header, required): API key can be provided either via the x-api-key header or the Authorization header with the Bearer scheme.
Body (application/json)
query (string, required): The question or query to answer. Minimum length: 1. Example: "What is the latest valuation of SpaceX?"
stream (boolean, default: false): If true, the response is returned as a server-sent events (SSE) stream.
text (boolean, default: false): If true, the response includes full text content in the search results.
model (enum<string>, default: exa): The search model to use for the answer. exa passes a single query to the search model, while exa-pro also passes two expanded queries. Available options: exa, exa-pro.
Response
200
application/json
OK
answer
string
The generated answer based on search results.
Example:
"$350 billion."
citations
object[]
Search results used to generate the answer.
costDollars
object
Python
# pip install exa-py
from exa_py import Exa

exa = Exa('YOUR_EXA_API_KEY')

result = exa.answer(
    "What is the latest valuation of SpaceX?",
    text=True
)
print(result)
200
{
  "answer": "$350 billion.",
  "citations": [
    {
      "id": "https://www.theguardian.com/science/2024/dec/11/spacex-valued-at-350bn-as-company-agrees-to-buy-shares-from-employees",
      "url": "https://www.theguardian.com/science/2024/dec/11/spacex-valued-at-350bn-as-company-agrees-to-buy-shares-from-employees",
      "title": "SpaceX valued at $350bn as company agrees to buy shares from ...",
      "author": "Dan Milmon",
      "publishedDate": "2023-11-16T01:36:32.547Z",
      "text": "SpaceX valued at $350bn as company agrees to buy shares from ...",
      "image": "https://i.guim.co.uk/img/media/7cfee7e84b24b73c97a079c402642a333ad31e77/0_380_6176_3706/master/6176.jpg?width=1200&height=630&quality=85&auto=format&fit=crop&overlay-align=bottom%2Cleft&overlay-width=100p&overlay-base64=L2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc&enable=upscale&s=71ebb2fbf458c185229d02d380c01530",
      "favicon": "https://assets.guim.co.uk/static/frontend/icons/homescreen/apple-touch-icon.svg"
    }
  ],
  "costDollars": {
    "total": 0.005,
    "breakDown": [
      {
        "search": 0.005,
        "contents": 0,
        "breakdown": {
          "keywordSearch": 0,
          "neuralSearch": 0.005,
          "contentText": 0,
          "contentHighlight": 0,
          "contentSummary": 0
        }
      }
    ],
    "perRequestPrices": {
      "neuralSearch_1_25_results": 0.005,
      "neuralSearch_26_100_results": 0.025,
      "neuralSearch_100_plus_results": 1,
      "keywordSearch_1_100_results": 0.0025,
      "keywordSearch_100_plus_results": 3
    },
    "perPagePrices": {
      "contentText": 0.001,
      "contentHighlight": 0.001,
      "contentSummary": 0.001
    }
  }
}
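The streaming mode mentioned above can be exercised with plain HTTP. A sketch, assuming the stream arrives as standard server-sent events with "data: " lines (the exact event framing is not documented on this page):

import requests  # pip install requests

resp = requests.post(
    "https://api.exa.ai/answer",
    headers={"x-api-key": "YOUR_EXA_API_KEY"},
    json={"query": "What is the latest valuation of SpaceX?", "stream": True},
    stream=True,
)
resp.raise_for_status()
for line in resp.iter_lines():
    # SSE payloads are prefixed with "data: "; print each chunk as it arrives.
    if line.startswith(b"data: "):
        print(line[len(b"data: "):].decode())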


@@ -1,124 +0,0 @@
from backend.sdk import (
    APIKeyCredentials,
    BaseModel,
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchema,
    CredentialsMetaInput,
    Requests,
    SchemaField,
)

from ._config import exa


class CostBreakdown(BaseModel):
    keywordSearch: float
    neuralSearch: float
    contentText: float
    contentHighlight: float
    contentSummary: float


class SearchBreakdown(BaseModel):
    search: float
    contents: float
    breakdown: CostBreakdown


class PerRequestPrices(BaseModel):
    neuralSearch_1_25_results: float
    neuralSearch_26_100_results: float
    neuralSearch_100_plus_results: float
    keywordSearch_1_100_results: float
    keywordSearch_100_plus_results: float


class PerPagePrices(BaseModel):
    contentText: float
    contentHighlight: float
    contentSummary: float


class CostDollars(BaseModel):
    total: float
    breakDown: list[SearchBreakdown]
    perRequestPrices: PerRequestPrices
    perPagePrices: PerPagePrices


class ExaAnswerBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput = exa.credentials_field(
            description="The Exa integration requires an API Key."
        )
        query: str = SchemaField(
            description="The question or query to answer",
            placeholder="What is the latest valuation of SpaceX?",
        )
        text: bool = SchemaField(
            default=False,
            description="If true, the response includes full text content in the search results",
            advanced=True,
        )
        model: str = SchemaField(
            default="exa",
            description="The search model to use (exa or exa-pro)",
            placeholder="exa",
            advanced=True,
        )

    class Output(BlockSchema):
        answer: str = SchemaField(
            description="The generated answer based on search results"
        )
        citations: list[dict] = SchemaField(
            description="Search results used to generate the answer",
            default_factory=list,
        )
        cost_dollars: CostDollars = SchemaField(
            description="Cost breakdown of the request"
        )
        error: str = SchemaField(
            description="Error message if the request failed", default=""
        )

    def __init__(self):
        super().__init__(
            id="b79ca4cc-9d5e-47d1-9d4f-e3a2d7f28df5",
            description="Get an LLM answer to a question informed by Exa search results",
            categories={BlockCategory.SEARCH, BlockCategory.AI},
            input_schema=ExaAnswerBlock.Input,
            output_schema=ExaAnswerBlock.Output,
        )

    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        url = "https://api.exa.ai/answer"
        headers = {
            "Content-Type": "application/json",
            "x-api-key": credentials.api_key.get_secret_value(),
        }

        # Build the payload
        payload = {
            "query": input_data.query,
            "text": input_data.text,
            "model": input_data.model,
        }

        try:
            response = await Requests().post(url, headers=headers, json=payload)
            data = response.json()
            yield "answer", data.get("answer", "")
            yield "citations", data.get("citations", [])
            yield "cost_dollars", data.get("costDollars", {})
        except Exception as e:
            yield "error", str(e)
            yield "answer", ""
            yield "citations", []
            yield "cost_dollars", {}


@@ -1,38 +1,55 @@
-from backend.sdk import (
-    APIKeyCredentials,
-    Block,
-    BlockCategory,
-    BlockOutput,
-    BlockSchema,
-    CredentialsMetaInput,
-    Requests,
-    SchemaField,
-)
+from typing import List, Optional
-from ._config import exa
-from .helpers import ContentSettings
+from pydantic import BaseModel
+from backend.blocks.exa._auth import (
+    ExaCredentials,
+    ExaCredentialsField,
+    ExaCredentialsInput,
+)
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
+from backend.util.request import requests
+class ContentRetrievalSettings(BaseModel):
+    text: Optional[dict] = SchemaField(
+        description="Text content settings",
+        default={"maxCharacters": 1000, "includeHtmlTags": False},
+        advanced=True,
+    )
+    highlights: Optional[dict] = SchemaField(
+        description="Highlight settings",
+        default={
+            "numSentences": 3,
+            "highlightsPerUrl": 3,
+            "query": "",
+        },
+        advanced=True,
+    )
+    summary: Optional[dict] = SchemaField(
+        description="Summary settings",
+        default={"query": ""},
+        advanced=True,
+    )
 class ExaContentsBlock(Block):
     class Input(BlockSchema):
-        credentials: CredentialsMetaInput = exa.credentials_field(
-            description="The Exa integration requires an API Key."
+        credentials: ExaCredentialsInput = ExaCredentialsField()
+        ids: List[str] = SchemaField(
+            description="Array of document IDs obtained from searches",
         )
-        ids: list[str] = SchemaField(
-            description="Array of document IDs obtained from searches"
-        )
-        contents: ContentSettings = SchemaField(
+        contents: ContentRetrievalSettings = SchemaField(
             description="Content retrieval settings",
-            default=ContentSettings(),
+            default=ContentRetrievalSettings(),
             advanced=True,
         )
     class Output(BlockSchema):
         results: list = SchemaField(
-            description="List of document contents", default_factory=list
-        )
-        error: str = SchemaField(
-            description="Error message if the request failed", default=""
+            description="List of document contents",
+            default=[],
         )
     def __init__(self):
@@ -44,8 +61,8 @@ class ExaContentsBlock(Block):
             output_schema=ExaContentsBlock.Output,
         )
-    async def run(
-        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    def run(
+        self, input_data: Input, *, credentials: ExaCredentials, **kwargs
     ) -> BlockOutput:
         url = "https://api.exa.ai/contents"
         headers = {
@@ -53,7 +70,6 @@ class ExaContentsBlock(Block):
             "x-api-key": credentials.api_key.get_secret_value(),
         }
-        # Convert ContentSettings to API format
         payload = {
             "ids": input_data.ids,
             "text": input_data.contents.text,
@@ -62,8 +78,10 @@ class ExaContentsBlock(Block):
         }
         try:
-            response = await Requests().post(url, headers=headers, json=payload)
+            response = requests.post(url, headers=headers, json=payload)
+            response.raise_for_status()
             data = response.json()
             yield "results", data.get("results", [])
         except Exception as e:
             yield "error", str(e)
             yield "results", []


@@ -1,6 +1,8 @@
 from typing import Optional
-from backend.sdk import BaseModel, SchemaField
+from pydantic import BaseModel
+from backend.data.model import SchemaField
 class TextSettings(BaseModel):
@@ -40,90 +42,13 @@ class SummarySettings(BaseModel):
 class ContentSettings(BaseModel):
     text: TextSettings = SchemaField(
         default=TextSettings(),
         description="Text content settings",
     )
     highlights: HighlightSettings = SchemaField(
         default=HighlightSettings(),
         description="Highlight settings",
     )
     summary: SummarySettings = SchemaField(
         default=SummarySettings(),
-    )
-# Websets Models
-class WebsetEntitySettings(BaseModel):
-    type: Optional[str] = SchemaField(
-        default=None,
-        description="Entity type (e.g., 'company', 'person')",
-        placeholder="company",
-    )
-class WebsetCriterion(BaseModel):
-    description: str = SchemaField(
-        description="Description of the criterion",
-        placeholder="Must be based in the US",
-    )
-    success_rate: Optional[int] = SchemaField(
-        default=None,
-        description="Success rate percentage",
-        ge=0,
-        le=100,
-    )
-class WebsetSearchConfig(BaseModel):
-    query: str = SchemaField(
-        description="Search query",
-        placeholder="Marketing agencies based in the US",
-    )
-    count: int = SchemaField(
-        default=10,
-        description="Number of results to return",
-        ge=1,
-        le=100,
-    )
-    entity: Optional[WebsetEntitySettings] = SchemaField(
-        default=None,
-        description="Entity settings for the search",
-    )
-    criteria: Optional[list[WebsetCriterion]] = SchemaField(
-        default=None,
-        description="Search criteria",
-    )
-    behavior: Optional[str] = SchemaField(
-        default="override",
-        description="Behavior when updating results ('override' or 'append')",
-        placeholder="override",
-    )
-class EnrichmentOption(BaseModel):
-    label: str = SchemaField(
-        description="Label for the enrichment option",
-        placeholder="Option 1",
-    )
-class WebsetEnrichmentConfig(BaseModel):
-    title: str = SchemaField(
-        description="Title of the enrichment",
-        placeholder="Company Details",
-    )
-    description: str = SchemaField(
-        description="Description of what this enrichment does",
-        placeholder="Extract company information",
-    )
-    format: str = SchemaField(
-        default="text",
-        description="Format of the enrichment result",
-        placeholder="text",
-    )
-    instructions: Optional[str] = SchemaField(
-        default=None,
-        description="Instructions for the enrichment",
-        placeholder="Extract key company metrics",
-    )
-    options: Optional[list[EnrichmentOption]] = SchemaField(
-        default=None,
-        description="Options for the enrichment",
+        description="Summary settings",
     )

Some files were not shown because too many files have changed in this diff.