Compare commits

..

40 Commits

Author SHA1 Message Date
Aarushi
c51b8e7291 attempt to fix 2025-01-08 13:54:28 +00:00
Aarushi
f9ae76123a fix lock 2025-01-08 13:34:12 +00:00
Aarushi
281ae65dcb Merge branch 'dev' into twitter-integration 2025-01-08 13:29:54 +00:00
Aarushi
bd815fc9d7 udpate poetry lock 2025-01-08 13:24:39 +00:00
Nicholas Tindle
cdaa2ee456 Merge branch 'dev' into feature/twitter-integration 2025-01-08 00:16:00 -06:00
Nicholas Tindle
70dfaf1ef5 fix: lock 2025-01-07 22:59:46 -06:00
Nicholas Tindle
565774f77a Discard changes to autogpt_platform/frontend/yarn.lock 2025-01-07 22:45:01 -06:00
Nicholas Tindle
4c6dd35310 Merge remote-tracking branch 'origin/dev' into pr/8754 2025-01-07 12:49:31 -06:00
abhi1992002
632a39e877 upadate twitter documentation 2025-01-03 13:02:25 +05:30
abhi1992002
e297eff27c remove multi select 2025-01-03 12:50:24 +05:30
abhi1992002
57aa6745da update twitter env variable comment 2025-01-03 12:48:12 +05:30
abhi1992002
fb9d42f466 fix formatting 2025-01-03 12:35:03 +05:30
abhi1992002
eb25e731fc revert img change 2025-01-03 12:22:16 +05:30
abhi1992002
d75c08e348 adding documentation for twitter block 2025-01-03 11:45:53 +05:30
abhi1992002
428c012a43 write documentation for oneOf 2025-01-02 16:50:19 +05:30
abhi1992002
4fe135e472 add support for oneOf and optional oneOf 2025-01-02 16:24:00 +05:30
abhi1992002
1be3a29dc0 fix yarn lock 2025-01-02 11:01:35 +05:30
abhi1992002
e8ae8ccd6d fix backend tests 2025-01-02 11:01:35 +05:30
abhi1992002
4aa36fda55 fix expansions in tweet block 2025-01-02 11:01:35 +05:30
abhi1992002
2e19f3e9e2 fix tweet blocks expansions 2025-01-02 11:01:35 +05:30
abhi1992002
a85f671237 1. add optional datetime and multi select support 2025-01-02 11:01:27 +05:30
Nicholas Tindle
6c3a401ceb fix: linting came back with a vengence 2025-01-02 11:01:08 +05:30
Nicholas Tindle
949139ed7a fix: credentials issues 2025-01-02 11:01:08 +05:30
Nicholas Tindle
5bdf541bce fix: only add tweepy (and down bumb related :() 2025-01-02 11:01:08 +05:30
Nicholas Tindle
a1a3f9e179 fix: project problems 2025-01-02 11:01:00 +05:30
Nicholas Tindle
d14045c7b7 Discard changes to autogpt_platform/frontend/yarn.lock 2025-01-02 11:00:30 +05:30
Nicholas Tindle
84c30be37d fix: formatting 2025-01-02 11:00:30 +05:30
abhi
bc0aab9c73 fix:oauth2 2025-01-02 11:00:30 +05:30
Abhimanyu Yadav
19b6dfd0f7 Update credentials-input.tsx 2025-01-02 11:00:30 +05:30
Abhimanyu Yadav
e85f593dab Update credentials-provider.tsx 2025-01-02 11:00:30 +05:30
abhi
5200250ffb fix multiselect 2025-01-02 11:00:30 +05:30
abhi
8eba862723 fix multiselect 2025-01-02 11:00:30 +05:30
abhi
240e030a36 initial changes 2025-01-02 11:00:30 +05:30
abhi1992002
8782caf39a refactor: Enhance Supabase integration and Twitter OAuth handling
- Updated `store.py` to improve state token management by adding PKCE support and simplifying code challenge generation.
- Modified environment variable names in `.env.example` for consistency.
- Removed unnecessary `is_multi_select` attributes from various Twitter-related input schemas to streamline the code.
- Cleaned up exception handling in Twitter list management and tweet management blocks by removing redundant error logging.
- Removed debug print statements from various components to clean up the codebase.
- Fixed a minor error message in the Twitter OAuth handler for clarity.
2025-01-02 11:00:30 +05:30
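For context on the PKCE support mentioned in the commit above: RFC 7636 pairs a random code verifier with an S256 code challenge derived from it. A minimal illustrative sketch of that derivation (not the repo's actual `store.py` code):

```python
import base64
import hashlib
import secrets

def generate_pkce_pair() -> tuple[str, str]:
    # Random verifier; token_urlsafe(43) yields ~58 chars, within RFC 7636's 43-128 range
    code_verifier = secrets.token_urlsafe(43)
    # S256 challenge: base64url(SHA-256(verifier)) with the '=' padding stripped
    digest = hashlib.sha256(code_verifier.encode("ascii")).digest()
    code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return code_verifier, code_challenge
```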
Abhimanyu Yadav
779cec003c Update pyproject.toml 2025-01-02 11:00:30 +05:30
Abhimanyu Yadav
5a48f6cec4 Update credentials-input.tsx 2025-01-02 11:00:30 +05:30
Abhimanyu Yadav
e7056e5642 Update credentials-provider.tsx 2025-01-02 11:00:30 +05:30
abhi1992002
ee0a75027a fix: test 2025-01-02 11:00:30 +05:30
abhi1992002
1fc5a7beae fix linting 2025-01-02 11:00:30 +05:30
abhi1992002
c4f77d4074 add twitter credentials with some frontend changes
# Conflicts:
#	autogpt_platform/backend/backend/data/model.py
#	autogpt_platform/backend/pyproject.toml
#	autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
2025-01-02 11:00:30 +05:30
855 changed files with 31160 additions and 77395 deletions

View File

@@ -1,18 +0,0 @@
version = 1
test_patterns = ["**/*.spec.ts","**/*_test.py","**/*_tests.py","**/test_*.py"]
exclude_patterns = ["classic/**"]
[[analyzers]]
name = "javascript"
[analyzers.meta]
plugins = ["react"]
environment = ["nodejs"]
[[analyzers]]
name = "python"
[analyzers.meta]
runtime_version = "3.x.x"

View File

@@ -27,7 +27,7 @@
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/yarn.lock
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config

View File

@@ -10,19 +10,17 @@ updates:
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
@@ -34,19 +32,17 @@ updates:
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
@@ -62,13 +58,13 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
@@ -85,13 +81,14 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
@@ -104,13 +101,14 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
@@ -123,16 +121,40 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Submodules
- package-ecosystem: "gitsubmodule"
directory: "autogpt_platform/supabase"
schedule:
interval: "weekly"
open-pull-requests-limit: 1
target-branch: "dev"
commit-message:
prefix: "chore(platform/deps)"
prefix-development: "chore(platform/deps-dev)"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
# Docs
- package-ecosystem: "pip"
- package-ecosystem: 'pip'
directory: "docs/"
schedule:
interval: "weekly"
@@ -144,10 +166,10 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"

.github/labeler.yml
View File

@@ -24,9 +24,8 @@ platform/frontend:
platform/backend:
- changed-files:
- all-globs-to-any-file:
- autogpt_platform/backend/**
- '!autogpt_platform/backend/backend/blocks/**'
- any-glob-to-any-file: autogpt_platform/backend/**
- all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
platform/blocks:
- changed-files:

View File

@@ -115,7 +115,6 @@ jobs:
poetry run pytest -vv \
--cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests/unit tests/integration
env:
CI: true
@@ -125,14 +124,8 @@ jobs:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: autogpt-agent,${{ runner.os }}

View File

@@ -87,20 +87,13 @@ jobs:
poetry run pytest -vv \
--cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
tests
env:
CI: true
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: agbenchmark,${{ runner.os }}

View File

@@ -139,7 +139,6 @@ jobs:
poetry run pytest -vv \
--cov=forge --cov-branch --cov-report term-missing --cov-report xml \
--durations=10 \
--junitxml=junit.xml -o junit_family=legacy \
forge
env:
CI: true
@@ -149,14 +148,8 @@ jobs:
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
- name: Upload test results to Codecov
if: ${{ !cancelled() }} # Run even if tests fail
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
flags: forge,${{ runner.os }}

View File

@@ -1,47 +0,0 @@
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
jobs:
claude:
if: |
(
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
) && (
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR' ||
github.event.review.author_association == 'OWNER' ||
github.event.review.author_association == 'MEMBER' ||
github.event.review.author_association == 'COLLABORATOR' ||
github.event.issue.author_association == 'OWNER' ||
github.event.issue.author_association == 'MEMBER' ||
github.event.issue.author_association == 'COLLABORATOR'
)
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@beta
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

View File

@@ -34,7 +34,6 @@ jobs:
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:

View File

@@ -36,7 +36,6 @@ jobs:
python -m prisma migrate deploy
env:
DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}
DIRECT_URL: ${{ secrets.BACKEND_DATABASE_URL }}
trigger:
needs: migrate

View File

@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.11"]
python-version: ["3.10"]
runs-on: ubuntu-latest
services:
@@ -42,14 +42,6 @@ jobs:
REDIS_PASSWORD: testpassword
ports:
- 6379:6379
rabbitmq:
image: rabbitmq:3.12-management
ports:
- 5672:5672
- 15672:15672
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
steps:
- name: Checkout repository
@@ -66,7 +58,7 @@ jobs:
- name: Setup Supabase
uses: supabase/setup-cli@v1
with:
version: 1.178.1
version: latest
- id: get_date
name: Get date
@@ -80,35 +72,18 @@ jobs:
- name: Install Poetry (Unix)
run: |
# Extract Poetry version from backend/poetry.lock
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
if [ -n "$BASE_REF" ]; then
BASE_BRANCH=${BASE_REF/refs\/heads\//}
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
else
POETRY_VERSION=$HEAD_POETRY_VERSION
fi
echo "Using Poetry version ${POETRY_VERSION}"
# Install Poetry
curl -sSL https://install.python-poetry.org | POETRY_VERSION=$POETRY_VERSION python3 -
curl -sSL https://install.python-poetry.org | python3 -
if [ "${{ runner.os }}" = "macOS" ]; then
PATH="$HOME/.local/bin:$PATH"
echo "$HOME/.local/bin" >> $GITHUB_PATH
fi
env:
BASE_REF: ${{ github.base_ref || github.event.merge_group.base_ref }}
- name: Check poetry.lock
run: |
poetry lock
if ! git diff --quiet --ignore-matching-lines="^# " poetry.lock; then
if ! git diff --quiet poetry.lock; then
echo "Error: poetry.lock not up to date."
echo
git diff poetry.lock
@@ -135,7 +110,6 @@ jobs:
run: poetry run prisma migrate dev --name updates
env:
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
- id: lint
name: Run Linter
@@ -152,13 +126,12 @@ jobs:
env:
LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
DIRECT_URL: ${{ steps.supabase.outputs.DB_URL }}
SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
REDIS_HOST: "localhost"
REDIS_PORT: "6379"
REDIS_PASSWORD: "testpassword"
REDIS_HOST: 'localhost'
REDIS_PORT: '6379'
REDIS_PASSWORD: 'testpassword'
env:
CI: true
@@ -166,13 +139,6 @@ jobs:
RUN_ENV: local
PORT: 8080
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
# We know these are here, don't report this as a security vulnerability
# This is used as the default credential for the entire system's RabbitMQ instance
# If you want to replace this, you can do so by making our entire system generate
# new credentials for each local user and update the environment variables in
# the backend service, docker composes, and examples
RABBITMQ_DEFAULT_USER: "rabbitmq_user_default"
RABBITMQ_DEFAULT_PASS: "k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7"
# - name: Upload coverage reports to Codecov
# uses: codecov/codecov-action@v4

View File

@@ -1,198 +0,0 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
permissions:
issues: write
pull-requests: write
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Process deploy command
if (commentBody === '!deploy') {
core.setOutput('should_deploy', 'true');
}
// Process undeploy command
else if (commentBody === '!undeploy') {
core.setOutput('should_undeploy', 'true');
}
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});

View File

@@ -29,34 +29,13 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Run lint
run: pnpm lint
type-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Run tsc check
run: pnpm type-check
run: |
yarn lint
test:
runs-on: ubuntu-latest
@@ -76,18 +55,15 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
large-packages: false # slow
docker-images: false # limited benefit
large-packages: false # slow
docker-images: false # limited benefit
- name: Copy default supabase .env
run: |
cp ../.env.example ../.env
cp ../supabase/docker/.env.example ../.env
- name: Copy backend .env
run: |
@@ -98,24 +74,24 @@ jobs:
docker compose -f ../docker-compose.yml up -d
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Setup .env
run: cp .env.example .env
- name: Build frontend
run: pnpm build --turbo
# uses Turbopack, much faster and safe enough for a test pipeline
- name: Setup Builder .env
run: |
cp .env.example .env
- name: Install Browser '${{ matrix.browser }}'
run: pnpm playwright install --with-deps ${{ matrix.browser }}
run: yarn playwright install --with-deps ${{ matrix.browser }}
- name: Run Playwright tests
run: pnpm test:no-build --project=${{ matrix.browser }}
- name: Run tests
run: |
yarn test --project=${{ matrix.browser }}
- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.yml logs
- name: Print Docker Compose logs in debug mode
if: runner.debug
run: |
docker compose -f ../docker-compose.yml logs
- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}

View File

@@ -16,7 +16,7 @@ jobs:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 170 days. You can _unstale_ it by commenting or
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
@@ -25,7 +25,7 @@ jobs:
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 170
days-before-stale: 50
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import sys
if sys.version_info < (3, 11):
print("Python version 3.11 or higher required")
sys.exit(1)
import tomllib
def get_package_version(package_name: str, lockfile_path: str) -> str | None:
"""Extract package version from poetry.lock file."""
try:
if lockfile_path == "-":
data = tomllib.load(sys.stdin.buffer)
else:
with open(lockfile_path, "rb") as f:
data = tomllib.load(f)
except FileNotFoundError:
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
print(f"Error parsing TOML file: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error reading file: {e}", file=sys.stderr)
sys.exit(1)
# Look for the package in the packages list
packages = data.get("package", [])
for package in packages:
if package.get("name", "").lower() == package_name.lower():
return package.get("version")
return None
def main():
if len(sys.argv) not in (2, 3):
print(
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
file=sys.stderr,
)
sys.exit(1)
package_name = sys.argv[1]
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"
version = get_package_version(package_name, lockfile_path)
if version:
print(version)
else:
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

.gitignore
View File

@@ -176,4 +176,3 @@ autogpt_platform/backend/settings.py
*.ign.*
.test-contents
.claude/settings.local.json

.gitmodules
View File

@@ -1,3 +1,6 @@
[submodule "classic/forge/tests/vcr_cassettes"]
path = classic/forge/tests/vcr_cassettes
url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
[submodule "autogpt_platform/supabase"]
path = autogpt_platform/supabase
url = https://github.com/supabase/supabase.git

View File

@@ -17,7 +17,7 @@ repos:
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
files: ^autogpt_platform/
stages: [pre-push]
stages: [push]
- repo: local
# For proper type checking, all dependencies need to be up-to-date.
@@ -140,7 +140,7 @@ repos:
language: system
- repo: https://github.com/psf/black
rev: 24.10.0
rev: 23.12.1
# Black has sensible defaults, doesn't need package context, and ignores
# everything in .gitignore, so it works fine without any config or arguments.
hooks:
@@ -170,16 +170,6 @@ repos:
files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
args: [--config=classic/benchmark/.flake8]
- repo: local
hooks:
- id: prettier
name: Format (Prettier) - AutoGPT Platform - Frontend
alias: format-platform-frontend
entry: bash -c 'cd autogpt_platform/frontend && npx prettier --write $(echo "$@" | sed "s|autogpt_platform/frontend/||g")' --
files: ^autogpt_platform/frontend/
types: [file]
language: system
- repo: local
# To have watertight type checking, we check *all* the files in an affected
# project. To trigger on poetry.lock we also reset the file `types` filter.
@@ -233,46 +223,36 @@ repos:
- repo: local
hooks:
- id: tsc
name: Typecheck - AutoGPT Platform - Frontend
entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
files: ^autogpt_platform/frontend/
types: [file]
- id: pytest
name: Run tests - AutoGPT Platform - Backend
alias: pytest-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - repo: local
# hooks:
# - id: pytest
# name: Run tests - AutoGPT Platform - Backend
# alias: pytest-platform-backend
# entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
# files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - AutoGPT (excl. slow tests)
alias: pytest-classic-autogpt
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - AutoGPT (excl. slow tests)
# alias: pytest-classic-autogpt
# entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# # include forge source (since it's a path dependency) but exclude *_test.py files:
# files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - Forge (excl. slow tests)
alias: pytest-classic-forge
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
files: ^classic/forge/(forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Forge (excl. slow tests)
# alias: pytest-classic-forge
# entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
# files: ^classic/forge/(forge/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Benchmark
# alias: pytest-classic-benchmark
# entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
# files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - Benchmark
alias: pytest-classic-benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false

.vscode/launch.json
View File

@@ -32,9 +32,9 @@
"type": "debugpy",
"request": "launch",
"module": "backend.app",
"env": {
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
},
// "env": {
// "ENV": "dev"
// },
"envFile": "${workspaceFolder}/backend/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/autogpt_platform/backend"

View File

@@ -1,53 +0,0 @@
# AutoGPT Platform Contribution Guide
This guide provides context for Codex when updating the **autogpt_platform** folder.
## Directory overview
- `autogpt_platform/backend` FastAPI based backend service.
- `autogpt_platform/autogpt_libs` Shared Python libraries.
- `autogpt_platform/frontend` Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` development stack.
See `docs/content/platform/getting-started.md` for setup instructions.
## Code style
- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks
## Pull requests
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the linear ticket branch structure if given codex/open-1668-resume-dropped-runs

View File

@@ -2,6 +2,9 @@
If you are reading this, you are probably looking for the full **[contribution guide]**,
which is part of our [wiki].
Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
<!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
[contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
[wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
[roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971

View File

@@ -15,38 +15,7 @@
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
### System Requirements
Before proceeding with the installation, ensure your system meets the following requirements:
#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space
#### Software Requirements
- Operating Systems:
- Linux (Ubuntu 20.04 or newer recommended)
- macOS (10.15 or newer)
- Windows 10/11 with WSL2
- Required Software (with minimum versions):
- Docker Engine (20.10.0 or newer)
- Docker Compose (2.0.0 or newer)
- Git (2.30 or newer)
- Node.js (16.x or newer)
- npm (8.x or newer)
- VSCode (1.60 or newer) or any modern code editor
#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections
### Updated Setup Instructions:
We've moved to a fully maintained and regularly updated documentation site.
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
https://github.com/user-attachments/assets/d04273a5-b36a-4a37-818e-f631ce72d603
This tutorial assumes you have Docker, VSCode, git and npm installed.
@@ -179,7 +148,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasnt created an issue for the same topic.
## 🤝 Sister projects

View File

@@ -20,7 +20,6 @@ Instead, please report them via:
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- After patch is released, allow 30 days for users to update before public disclosure (for a total of 120 days max between update time and fix time)
- Share any potential mitigations or workarounds if known
## Supported Versions

View File

@@ -1,123 +0,0 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min
############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres
############
# Supavisor -- Database pooler
############
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id
############
# API Proxy - Configuration for the Kong Reverse proxy.
############
KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443
############
# API - Configuration for PostgREST.
############
PGRST_DB_SCHEMAS=public,storage,graphql_public
############
# Auth - Configuration for the GoTrue authentication server.
############
## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000
## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false
## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
############
# Studio - Configuration for the Dashboard
############
STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project
STUDIO_PORT=3000
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000
# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true
# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=
############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false
############
# Logs - Configuration for Logflare
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
# Change vector.toml sinks to reflect this change
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER

View File

@@ -1,132 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Repository Overview
AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
## Essential Commands
### Backend Development
```bash
# Install dependencies
cd backend && poetry install
# Run database migrations
poetry run prisma migrate dev
# Start all services (database, redis, rabbitmq)
docker compose up -d
# Run the backend server
poetry run serve
# Run tests
poetry run test
# Run specific test
poetry run pytest path/to/test_file.py::test_function_name
# Lint and format
poetry run format # Black + isort
poetry run lint # ruff
```
More details can be found in TESTING.md
#### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
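A minimal example of the snapshot workflow described above, assuming the syrupy pytest plugin (which is what provides a `snapshot` fixture and the `--snapshot-update` flag):

```python
# test_greeting.py — assumes syrupy is installed; `snapshot` is its pytest fixture
def test_greeting(snapshot):
    result = {"message": "hello", "status": 200}
    # The first run with --snapshot-update records this value;
    # subsequent runs compare against the stored snapshot.
    assert result == snapshot
```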
### Frontend Development
```bash
# Install dependencies
cd frontend && npm install
# Start development server
npm run dev
# Run E2E tests
npm run test
# Run Storybook for component development
npm run storybook
# Build production
npm run build
# Type checking
npm run type-check
```
## Architecture Overview
### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
### Frontend Architecture
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration
### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook
### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents
### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services
### Common Development Tasks
**Adding a new block:**
1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
4. Implement `run` method
5. Register in block registry
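A rough sketch of those five steps; the actual `Block` base class, schema types, import paths, and registration mechanism live in the backend package and may differ from these assumed names:

```python
# Hypothetical illustration only — the import path and signatures below are
# assumptions, not the repo's verified API.
from backend.data.block import Block, BlockOutput, BlockSchema

class GreetBlock(Block):
    class Input(BlockSchema):
        name: str

    class Output(BlockSchema):
        greeting: str

    def run(self, input_data: Input) -> BlockOutput:
        # Blocks emit (output_name, value) pairs consumed by downstream nodes
        yield "greeting", f"Hello, {input_data.name}!"
```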
**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify
**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing

View File

@@ -15,60 +15,53 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
```
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
cd AutoGPT/autogpt_platform
```
2. Run the following command:
```
cp .env.example .env
git submodule update --init --recursive
```
This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
3. Run the following command:
```
cp supabase/docker/.env.example .env
```
This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.
4. Run the following command:
```
docker compose up -d
```
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
4. Navigate to `frontend` within the `autogpt_platform` directory:
5. Navigate to `frontend` within the `autogpt_platform` directory:
```
cd frontend
```
You will need to run your frontend application separately on your local machine.
5. Run the following command:
6. Run the following command:
```
cp .env.example .env.local
```
This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
6. Run the following command:
Enable corepack and install dependencies by running:
7. Run the following command:
```
corepack enable
pnpm i
npm install
npm run dev
```
This command will install the necessary dependencies and start the frontend application in development mode.
If you are using Yarn, you can run the following commands instead:
```
yarn install && yarn dev
```
Then start the frontend application in development mode:
```
pnpm dev
```
7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
### Docker Compose Commands
@@ -81,52 +74,43 @@ Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
@@ -137,6 +121,7 @@ Here are some common scenarios where you might use multiple Docker Compose comma
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:

View File

@@ -1,3 +1,3 @@
# AutoGPT Libs
This is a new project to store shared functionality across different services in the AutoGPT Platform (e.g. authentication)
This is a new project to store shared functionality across different services in NextGen AutoGPT (e.g. authentication)

View File

@@ -31,5 +31,4 @@ class APIKeyManager:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
return secrets.compare_digest(provided_hash, stored_hash)
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
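The hunk above shows the difference between a plain `==` hash comparison and a constant-time one; `secrets.compare_digest` avoids leaking, via response timing, how much of a guessed key matches. For reference, the timing-safe form:

```python
import hashlib
import secrets

def verify_api_key(provided_key: str, stored_hash: str) -> bool:
    provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
    # compare_digest runs in time independent of where the strings first differ
    return secrets.compare_digest(provided_hash, stored_hash)
```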

View File

@@ -1,13 +1,14 @@
from .config import Settings
from .depends import requires_admin_user, requires_user
from .jwt_utils import parse_jwt_token
from .middleware import APIKeyValidator, auth_middleware
from .middleware import auth_middleware
from .models import User
__all__ = [
"Settings",
"parse_jwt_token",
"requires_user",
"requires_admin_user",
"APIKeyValidator",
"auth_middleware",
"User",
]

View File

@@ -1,11 +1,14 @@
import os
from dotenv import load_dotenv
load_dotenv()
class Settings:
def __init__(self):
self.JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "")
self.ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true"
self.JWT_ALGORITHM: str = "HS256"
JWT_SECRET_KEY: str = os.getenv("SUPABASE_JWT_SECRET", "")
ENABLE_AUTH: bool = os.getenv("ENABLE_AUTH", "false").lower() == "true"
JWT_ALGORITHM: str = "HS256"
@property
def is_configured(self) -> bool:

View File

@@ -1,6 +1,6 @@
import fastapi
from .config import settings
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
@@ -17,7 +17,7 @@ def requires_admin_user(
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if settings.ENABLE_AUTH:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)

View File

@@ -1,11 +1,7 @@
import inspect
import logging
import secrets
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
from fastapi.security import APIKeyHeader, HTTPBearer
from starlette.status import HTTP_401_UNAUTHORIZED
from fastapi import HTTPException, Request
from fastapi.security import HTTPBearer
from .config import settings
from .jwt_utils import parse_jwt_token
@@ -17,7 +13,7 @@ logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warning("Auth disabled")
logger.warn("Auth disabled")
return {}
security = HTTPBearer()
@@ -33,108 +29,3 @@ async def auth_middleware(request: Request):
except ValueError as e:
raise HTTPException(status_code=401, detail=str(e))
return payload
class APIKeyValidator:
"""
Configurable API key validator that supports custom validation functions
for FastAPI applications.
This class provides a flexible way to implement API key authentication with optional
custom validation logic. It can be used for simple token matching
or more complex validation scenarios like database lookups.
Examples:
Simple token validation:
```python
validator = APIKeyValidator(
header_name="X-API-Key",
expected_token="your-secret-token"
)
@app.get("/protected", dependencies=[Depends(validator.get_dependency())])
def protected_endpoint():
return {"message": "Access granted"}
```
Custom validation with database lookup:
```python
async def validate_with_db(api_key: str):
api_key_obj = await db.get_api_key(api_key)
return api_key_obj if api_key_obj and api_key_obj.is_active else None
validator = APIKeyValidator(
header_name="X-API-Key",
validate_fn=validate_with_db
)
```
Args:
header_name (str): The name of the header containing the API key
expected_token (Optional[str]): The expected API key value for simple token matching
validate_fn (Optional[Callable]): Custom validation function that takes an API key
string and returns a boolean or object. Can be async.
error_status (int): HTTP status code to use for validation errors
error_message (str): Error message to return when validation fails
"""
def __init__(
self,
header_name: str,
expected_token: Optional[str] = None,
validate_fn: Optional[Callable[[str], bool]] = None,
error_status: int = HTTP_401_UNAUTHORIZED,
error_message: str = "Invalid API key",
):
# Create the APIKeyHeader as a class property
self.security_scheme = APIKeyHeader(name=header_name)
self.expected_token = expected_token
self.custom_validate_fn = validate_fn
self.error_status = error_status
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
if not self.expected_token:
raise ValueError(
"Expected Token Required to be set when uisng API Key Validator default validation"
)
return secrets.compare_digest(api_key, self.expected_token)
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)
) -> Any:
if api_key is None:
raise HTTPException(status_code=self.error_status, detail="Missing API key")
# Use custom validation if provided, otherwise use default equality check
validator = self.custom_validate_fn or self.default_validator
result = (
await validator(api_key)
if inspect.iscoroutinefunction(validator)
else validator(api_key)
)
if not result:
raise HTTPException(
status_code=self.error_status, detail=self.error_message
)
# Store validation result in request state if it's not just a boolean
if result is not True:
request.state.api_key = result
return result
def get_dependency(self):
"""
Returns a callable dependency that FastAPI will recognize as a security scheme
"""
async def validate_api_key(
request: Request, api_key: str = Security(self.security_scheme)
) -> Any:
return await self(request, api_key)
# This helps FastAPI recognize it as a security dependency
validate_api_key.__name__ = f"validate_{self.security_scheme.model.name}"
return validate_api_key

View File

@@ -13,6 +13,7 @@ from typing_extensions import ParamSpec
from .config import SETTINGS
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
P = ParamSpec("P")
T = TypeVar("T")

View File

@@ -8,7 +8,7 @@ from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
from .filters import BelowLevelFilter
from .formatters import AGPTFormatter
from .formatters import AGPTFormatter, StructuredLoggingFormatter
LOG_DIR = Path(__file__).parent.parent.parent.parent / "logs"
LOG_FILE = "activity.log"
@@ -18,7 +18,7 @@ ERROR_LOG_FILE = "error.log"
SIMPLE_LOG_FORMAT = "%(asctime)s %(levelname)s %(title)s%(message)s"
DEBUG_LOG_FORMAT = (
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(title)s%(message)s"
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d" " %(title)s%(message)s"
)
@@ -81,26 +81,9 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
"""
config = LoggingConfig()
log_handlers: list[logging.Handler] = []
# Console output handlers
stdout = logging.StreamHandler(stream=sys.stdout)
stdout.setLevel(config.level)
stdout.addFilter(BelowLevelFilter(logging.WARNING))
if config.level == logging.DEBUG:
stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
if config.level == logging.DEBUG:
stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
log_handlers += [stdout, stderr]
# Cloud logging setup
if config.enable_cloud_logging or force_cloud_logging:
import google.cloud.logging
@@ -114,7 +97,28 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
transport=SyncTransport,
)
cloud_handler.setLevel(config.level)
cloud_handler.setFormatter(StructuredLoggingFormatter())
log_handlers.append(cloud_handler)
print("Cloud logging enabled")
else:
# Console output handlers
stdout = logging.StreamHandler(stream=sys.stdout)
stdout.setLevel(config.level)
stdout.addFilter(BelowLevelFilter(logging.WARNING))
if config.level == logging.DEBUG:
stdout.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stdout.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
stderr = logging.StreamHandler()
stderr.setLevel(logging.WARNING)
if config.level == logging.DEBUG:
stderr.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT))
else:
stderr.setFormatter(AGPTFormatter(SIMPLE_LOG_FORMAT))
log_handlers += [stdout, stderr]
print("Console logging enabled")
# File logging setup
if config.enable_file_logging:
@@ -152,6 +156,7 @@ def configure_logging(force_cloud_logging: bool = False) -> None:
error_log_handler.setLevel(logging.ERROR)
error_log_handler.setFormatter(AGPTFormatter(DEBUG_LOG_FORMAT, no_color=True))
log_handlers.append(error_log_handler)
print("File logging enabled")
# Configure the root logger
logging.basicConfig(

View File

@@ -1,6 +1,7 @@
import logging
from colorama import Fore, Style
from google.cloud.logging_v2.handlers import CloudLoggingFilter, StructuredLogHandler
from .utils import remove_color_codes
@@ -79,3 +80,16 @@ class AGPTFormatter(FancyConsoleFormatter):
return remove_color_codes(super().format(record))
else:
return super().format(record)
class StructuredLoggingFormatter(StructuredLogHandler, logging.Formatter):
def __init__(self):
# Set up CloudLoggingFilter to add diagnostic info to the log records
self.cloud_logging_filter = CloudLoggingFilter()
# Init StructuredLogHandler
super().__init__()
def format(self, record: logging.LogRecord) -> str:
self.cloud_logging_filter.filter(record)
return super().format(record)

View File

@@ -2,7 +2,6 @@ import logging
import re
from typing import Any
import uvicorn.config
from colorama import Fore
@@ -26,14 +25,3 @@ def print_attribute(
"color": value_color,
},
)
def generate_uvicorn_config():
"""
Generates a uvicorn logging config that silences uvicorn's default logging and tells it to use the native logging module.
"""
log_config = dict(uvicorn.config.LOGGING_CONFIG)
log_config["loggers"]["uvicorn"] = {"handlers": []}
log_config["loggers"]["uvicorn.error"] = {"handlers": []}
log_config["loggers"]["uvicorn.access"] = {"handlers": []}
return log_config

View File

@@ -1,59 +1,20 @@
import inspect
import threading
from typing import Awaitable, Callable, ParamSpec, TypeVar, cast, overload
from typing import Callable, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(
func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
thread_local = threading.local()
def _clear():
if hasattr(thread_local, "cache"):
del thread_local.cache
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
if inspect.iscoroutinefunction(func):
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = await cast(Callable[P, Awaitable[R]], func)(
*args, **kwargs
)
return cache[key]
setattr(async_wrapper, "clear_cache", _clear)
return async_wrapper
else:
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
setattr(sync_wrapper, "clear_cache", _clear)
return sync_wrapper
def clear_thread_cache(func: Callable) -> None:
if clear := getattr(func, "clear_cache", None):
clear()
return wrapper
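
A small usage sketch of the decorator above (illustrative names): results are memoized per thread, keyed on the call arguments, and `clear_thread_cache` from the newer variant resets a function's cache.

```python
# Illustrative only: cache an expensive pure function per thread.
@thread_cached
def expensive_lookup(key: str) -> str:
    print(f"computing {key}")  # runs once per thread for each distinct key
    return key.upper()

assert expensive_lookup("a") == "A"  # computed
assert expensive_lookup("a") == "A"  # served from the thread-local cache

clear_thread_cache(expensive_lookup)  # wipes the cache for this function
```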

View File

@@ -1,15 +1,15 @@
import asyncio
from contextlib import asynccontextmanager
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis.asyncio import Redis as AsyncRedis
from redis.asyncio.lock import Lock as AsyncRedisLock
from redis import Redis
from redis.lock import Lock as RedisLock
class AsyncRedisKeyedMutex:
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
@@ -17,45 +17,41 @@ class AsyncRedisKeyedMutex:
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "AsyncRedis", timeout: int | None = 60):
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "AsyncRedisLock"] = ExpiringDict(
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = asyncio.Lock()
self.locks_lock = Lock()
@asynccontextmanager
async def locked(self, key: Any):
lock = await self.acquire(key)
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if (await lock.locked()) and (await lock.owned()):
await lock.release()
if lock.locked():
lock.release()
async def acquire(self, key: Any) -> "AsyncRedisLock":
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
async with self.locks_lock:
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
await lock.acquire()
lock.acquire()
return lock
async def release(self, key: Any):
if (
(lock := self.locks.get(key))
and (await lock.locked())
and (await lock.owned())
):
await lock.release()
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
async def release_all_locks(self):
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
async with self.locks_lock:
for lock in self.locks.values():
if (await lock.locked()) and (await lock.owned()):
await lock.release()
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
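
For reference, a usage sketch of the synchronous `RedisKeyedMutex` variant in this diff, assuming a local Redis instance (connection details are placeholders):

```python
# Illustrative only: serialize access to a per-key critical section.
from redis import Redis

mutex = RedisKeyedMutex(Redis(host="localhost", port=6379), timeout=60)

with mutex.locked("user:123"):
    # Only one holder of the "user:123" Redis lock runs this at a time.
    ...
```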

File diff suppressed because it is too large

View File

@@ -7,23 +7,21 @@ readme = "README.md"
packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.12.1"
pydantic = "^2.11.4"
pydantic-settings = "^2.9.1"
google-cloud-logging = "^3.11.3"
pydantic = "^2.10.3"
pydantic-settings = "^2.7.0"
pyjwt = "^2.10.1"
pytest-asyncio = "^0.26.0"
pytest-asyncio = "^0.25.0"
pytest-mock = "^3.14.0"
supabase = "^2.15.1"
launchdarkly-server-sdk = "^9.11.1"
fastapi = "^0.115.12"
uvicorn = "^0.34.3"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.10.0"
[tool.poetry.group.dev.dependencies]
redis = "^5.2.1"
ruff = "^0.11.10"
ruff = "^0.8.6"
[build-system]
requires = ["poetry-core"]

View File

@@ -2,32 +2,19 @@ DB_USER=postgres
DB_PASS=your-super-secret-and-long-postgres-password
DB_NAME=postgres
DB_PORT=5432
DB_HOST=localhost
DB_CONNECTION_LIMIT=12
DB_CONNECT_TIMEOUT=60
DB_POOL_TIMEOUT=300
DB_SCHEMA=platform
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DIRECT_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}?connect_timeout=60&schema=platform"
PRISMA_SCHEMA="postgres/schema.prisma"
# EXECUTOR
NUM_GRAPH_WORKERS=10
BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]
# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='
UNSUBSCRIBE_SECRET_KEY = 'HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio='
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password
ENABLE_CREDIT=false
STRIPE_API_KEY=
STRIPE_WEBHOOK_SECRET=
# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
@@ -35,26 +22,12 @@ BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=
# Email For Postmark so we can send emails
POSTMARK_SERVER_API_TOKEN=
POSTMARK_SENDER_EMAIL=invalid@invalid.com
POSTMARK_WEBHOOK_TOKEN=
## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
# RabbitMQ credentials -- Used for communication between services
RABBITMQ_HOST=localhost
RABBITMQ_PORT=5672
RABBITMQ_DEFAULT_USER=rabbitmq_user_default
RABBITMQ_DEFAULT_PASS=k0VMxyIJF9S35f3x2uaw5IWAl6Y536O7
## GCS bucket is required for marketplace and library functionality
MEDIA_GCS_BUCKET_NAME=
## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000
@@ -63,14 +36,7 @@ MEDIA_GCS_BUCKET_NAME=
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000
## Cloudflare Turnstile (CAPTCHA) Configuration
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the backend secret key
TURNSTILE_SECRET_KEY=
## This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
PLATFORM_BASE_URL=https://your-public-url-here
## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
@@ -106,38 +72,20 @@ GOOGLE_CLIENT_SECRET=
TWITTER_CLIENT_ID=
TWITTER_CLIENT_SECRET=
# Linear App
# Make a new workspace for your OAuth APP -- trust me
# https://linear.app/settings/api/applications/new
# Callback URL: http://localhost:3000/auth/integrations/oauth_callback
LINEAR_CLIENT_ID=
LINEAR_CLIENT_SECRET=
# To obtain Todoist API credentials:
# 1. Create a Todoist account at todoist.com
# 2. Visit the Developer Console: https://developer.todoist.com/appconsole.html
# 3. Click "Create new app"
# 4. Once created, copy your Client ID and Client Secret below
TODOIST_CLIENT_ID=
TODOIST_CLIENT_SECRET=
## ===== OPTIONAL API KEYS ===== ##
# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
AIML_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=
LLAMA_API_KEY=
# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app
# Choose "script" for the type
# Fill in the redirect uri as <your_frontend_url>/auth/integrations/oauth_callback, e.g. http://localhost:3000/auth/integrations/oauth_callback
REDDIT_CLIENT_ID=
REDDIT_CLIENT_SECRET=
REDDIT_USER_AGENT="AutoGPT:1.0 (by /u/autogpt)"
REDDIT_USERNAME=
REDDIT_PASSWORD=
# Discord
DISCORD_BOT_TOKEN=
@@ -173,32 +121,6 @@ REPLICATE_API_KEY=
# Ideogram
IDEOGRAM_API_KEY=
# Fal
FAL_API_KEY=
# Exa
EXA_API_KEY=
# E2B
E2B_API_KEY=
# Mem0
MEM0_API_KEY=
# Nvidia
NVIDIA_API_KEY=
# Apollo
APOLLO_API_KEY=
# SmartLead
SMARTLEAD_API_KEY=
# ZeroBounce
ZEROBOUNCE_API_KEY=
## ===== OPTIONAL API KEYS END ===== ##
# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false

View File

@@ -73,6 +73,7 @@ FROM server_dependencies AS server
COPY autogpt_platform/backend /app/autogpt_platform/backend
RUN poetry install --no-ansi --only-root
ENV DATABASE_URL=""
ENV PORT=8000
CMD ["poetry", "run", "rest"]

View File

@@ -1 +1,75 @@
[Advanced Setup (Dev Branch)](https://dev-docs.agpt.co/platform/advanced_setup/#autogpt_agent_server_advanced_set_up)
# AutoGPT Agent Server Advanced set up
This guide walks you through a dockerized setup with an external DB (Postgres).
## Setup
We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory:
0. Install Poetry
```sh
pip install poetry
```
1. Configure Poetry to use .venv in your project directory
```sh
poetry config virtualenvs.in-project true
```
2. Enter the poetry shell
```sh
poetry shell
```
3. Install dependencies
```sh
poetry install
```
4. Copy .env.example to .env
```sh
cp .env.example .env
```
5. Generate the Prisma client
```sh
poetry run prisma generate
```
> In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
>
> ```sh
> pip uninstall prisma
> ```
>
> Then run the generation again. The path *should* look something like this:
> `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`
6. Run the postgres database from the autogpt_platform folder
```sh
cd autogpt_platform/
docker compose up -d
```
7. Run the migrations (from the backend folder)
```sh
cd ../backend
prisma migrate deploy
```
## Running The Server
### Starting the server directly
Run the following command:
```sh
poetry run app
```

View File

@@ -1 +1,203 @@
[Getting Started (Released)](https://docs.agpt.co/platform/getting-started/#autogpt_agent_server)
# AutoGPT Agent Server
This is an initial project for the next generation of agent execution: an AutoGPT agent server.
The agent server enables the creation of composite multi-agent systems that use AutoGPT agents and other non-agent components as their primitives.
## Docs
You can access the docs for the [AutoGPT Agent Server here](https://docs.agpt.co/server/setup).
## Setup
We use Poetry to manage the dependencies. To set up the project, follow these steps inside this directory:
0. Install Poetry
```sh
pip install poetry
```
1. Configure Poetry to use .venv in your project directory
```sh
poetry config virtualenvs.in-project true
```
2. Enter the poetry shell
```sh
poetry shell
```
3. Install dependencies
```sh
poetry install
```
4. Copy .env.example to .env
```sh
cp .env.example .env
```
5. Generate the Prisma client
```sh
poetry run prisma generate
```
> In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
>
> ```sh
> pip uninstall prisma
> ```
>
> Then run the generation again. The path *should* look something like this:
> `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`
6. Migrate the database. Be careful because this deletes current data in the database.
```sh
docker compose up db -d
poetry run prisma migrate deploy
```
## Running The Server
### Starting the server without Docker
Run the following command to run the database in Docker and the application locally:
```sh
docker compose --profile local up deps --build --detach
poetry run app
```
### Starting the server with Docker
Run the following command to build the Docker images:
```sh
docker compose build
```
Run the following command to run the app:
```sh
docker compose up
```
In another terminal, run the following to rebuild automatically when code changes:
```sh
docker compose watch
```
Run the following command to shut down:
```sh
docker compose down
```
If you run into issues with dangling orphan containers, try:
```sh
docker compose down --volumes --remove-orphans && docker-compose up --force-recreate --renew-anon-volumes --remove-orphans
```
## Testing
To run the tests:
```sh
poetry run test
```
## Development
### Formatting & Linting
An auto-formatter and a linter are set up in the project. To run them:
Install the dev dependencies:
```sh
poetry install --with dev
```
Format the code:
```sh
poetry run format
```
Lint the code:
```sh
poetry run lint
```
## Project Outline
The current project has the following main modules:
### **blocks**
This module stores all the Agent Blocks, which are reusable components to build a graph that represents the agent's behavior.
### **data**
This module stores the logical model that is persisted in the database.
It abstracts the database operations into functions that can be called by the service layer.
Any code that interacts with Prisma objects or the database should reside in this module.
The main models are:
* `block`: anything related to the block used in the graph
* `execution`: anything related to graph execution
* `graph`: anything related to the graph, node, and its relations
### **execution**
This module stores the business logic of executing the graph.
It currently has the following main modules:
* `manager`: A service that consumes the queue of graph executions and executes the graph. It contains both the queue-consumption and the graph-execution logic.
* `scheduler`: A service that triggers scheduled graph execution based on a cron expression. It pushes an execution request to the manager.
### **server**
This module stores the logic for the server API.
It contains all the logic used for the API that allows the client to create, execute, and monitor the graph and its execution.
This API service interacts with other services like those defined in `manager` and `scheduler`.
### **utils**
This module stores utility functions that are used across the project.
Currently, it has two main modules:
* `process`: A module that contains the logic to spawn a new process.
* `service`: A module that serves as a parent class for all the services in the project.
## Service Communication
Currently, there are only 3 active services:
- AgentServer (the API, defined in `server.py`)
- ExecutionManager (the executor, defined in `manager.py`)
- ExecutionScheduler (the scheduler, defined in `scheduler.py`)
The services run in independent Python processes and communicate through IPC.
A communication layer (`service.py`) is created to decouple the communication library from the implementation.
Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process, as sketched after the port list below.
By default the daemons run on the following ports:
- Execution Manager Daemon: 8002
- Execution Scheduler Daemon: 8003
- Rest Server Daemon: 8004
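As a rough sketch of that pattern: `get_service_client` appears elsewhere in this compare, while `AppService` and the service below are assumed for illustration.

```python
# Illustrative sketch of the @expose / service-client pattern, not a real service.
from backend.util.service import AppService, expose, get_service_client  # AppService assumed

class PingService(AppService):  # hypothetical service
    @expose
    def ping(self, payload: str) -> str:
        return f"pong: {payload}"

# From another process, calls are routed transparently over the Pyro5 IPC:
ping_client = get_service_client(PingService)
print(ping_client.ping("hello"))
```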
## Adding a New Agent Block
To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information:
* All the block code should live in the `blocks` (`backend.blocks`) module.
* `input_schema`: the schema of the input data, represented by a Pydantic object.
* `output_schema`: the schema of the output data, represented by a Pydantic object.
* `run` method: the main logic of the block.
* `test_input` & `test_output`: the sample input and output data for the block, which will be used to auto-test the block.
* You can mock the functions declared in the block using the `test_mock` field for your unit tests.
* Once you finish creating the block, you can test it by running `poetry run pytest -s test/block/test_block.py`.
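
For orientation, here is a minimal illustrative block following those rules; the UUID, category, and constructor fields mirror conventions visible elsewhere in this compare and should be treated as placeholders, not a real registered block.

```python
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

class EchoBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(description="Text to echo back")

    class Output(BlockSchema):
        text: str = SchemaField(description="The same text, unchanged")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # replace with a unique UUID
            description="Echoes its input",
            categories={BlockCategory.BASIC},  # category is illustrative
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
            test_input={"text": "hello"},
            test_output=("text", "hello"),
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "text", input_data.text
```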

View File

@@ -1,237 +0,0 @@
# Backend Testing Guide
This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.
## Table of Contents
- [Overview](#overview)
- [Running Tests](#running-tests)
- [Snapshot Testing](#snapshot-testing)
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
- [Best Practices](#best-practices)
## Overview
The backend uses pytest for testing with the following key libraries:
- `pytest` - Test framework
- `pytest-asyncio` - Async test support
- `pytest-mock` - Mocking support
- `pytest-snapshot` - Snapshot testing for API responses
## Running Tests
### Run all tests
```bash
poetry run test
```
### Run specific test file
```bash
poetry run pytest path/to/test_file.py
```
### Run with verbose output
```bash
poetry run pytest -v
```
### Run with coverage
```bash
poetry run pytest --cov=backend
```
## Snapshot Testing
Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.
### How Snapshot Testing Works
1. First run: Creates snapshot files in `snapshots/` directories
2. Subsequent runs: Compares output against saved snapshots
3. Changes detected: Test fails if output differs from snapshot
### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Snapshot Test Example
```python
import json
from pytest_snapshot.plugin import Snapshot
def test_api_endpoint(snapshot: Snapshot):
response = client.get("/api/endpoint")
# Snapshot the response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"endpoint_response"
)
```
### Best Practices for Snapshots
1. **Use descriptive names**: `"user_list_response"` not `"response1"`
2. **Sort JSON keys**: Ensures consistent snapshots
3. **Format JSON**: Use `indent=2` for readable diffs
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs
Example of excluding dynamic data:
```python
response_data = response.json()
# Remove dynamic fields for snapshot
response_data.pop("created_at", None)
response_data.pop("id", None)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"static_response_data"
)
```
## Writing Tests for API Routes
### Basic Structure
```python
import json
import fastapi
import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)
client = fastapi.testclient.TestClient(app)
def test_endpoint_success(snapshot: Snapshot):
response = client.get("/endpoint")
assert response.status_code == 200
# Test specific fields
data = response.json()
assert data["status"] == "success"
# Snapshot the full response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(data, indent=2, sort_keys=True),
"endpoint_success_response"
)
```
### Testing with Authentication
```python
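# `auth_middleware` and `get_user_id` are the dependencies the router uses;
# import them from the module where your app defines them before overriding.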
def override_auth_middleware():
return {"sub": "test-user-id"}
def override_get_user_id():
return "test-user-id"
app.dependency_overrides[auth_middleware] = override_auth_middleware
app.dependency_overrides[get_user_id] = override_get_user_id
```
### Mocking External Services
```python
def test_external_api_call(mocker, snapshot):
# Mock external service
mock_response = {"external": "data"}
mocker.patch(
"backend.services.external_api.call",
return_value=mock_response
)
response = client.post("/api/process")
assert response.status_code == 200
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"process_with_external_response"
)
```
## Best Practices
### 1. Test Organization
- Place tests next to the code: `routes.py` → `routes_test.py`
- Use descriptive test names: `test_create_user_with_invalid_email`
- Group related tests in classes when appropriate
### 2. Test Coverage
- Test happy path and error cases
- Test edge cases (empty data, invalid formats)
- Test authentication and authorization
### 3. Snapshot Testing Guidelines
- Review all snapshot changes carefully
- Don't snapshot sensitive data
- Keep snapshots focused and minimal
- Update snapshots intentionally, not accidentally
### 4. Async Testing
- Use regular `def` for FastAPI TestClient tests
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly
### 5. Fixtures
Create reusable fixtures for common test data:
```python
@pytest.fixture
def sample_user():
return {
"email": "test@example.com",
"name": "Test User"
}
def test_create_user(sample_user, snapshot):
response = client.post("/users", json=sample_user)
# ... test implementation
```
## CI/CD Integration
The GitHub Actions workflow automatically runs tests on:
- Pull requests
- Pushes to main branch
Snapshot tests work in CI by:
1. Committing snapshot files to the repository
2. Comparing CI output against the committed snapshots
3. Failing the build if snapshots don't match
## Troubleshooting
### Snapshot Mismatches
- Review the diff carefully
- If changes are expected: `poetry run pytest --snapshot-update`
- If changes are unexpected: Fix the code causing the difference
### Async Test Issues
- Ensure async functions use `@pytest.mark.asyncio`
- Use `AsyncMock` for mocking async functions
- FastAPI TestClient handles async automatically
### Import Errors
- Check that all dependencies are in `pyproject.toml`
- Run `poetry install` to ensure dependencies are installed
- Verify import paths are correct
## Summary
Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.
Remember: Good tests are as important as good code!

View File

@@ -1,30 +1,22 @@
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
logger = logging.getLogger(__name__)
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
Includes enhanced error handling and process lifecycle management.
"""
try:
# Run all processes except the last one in the background.
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground.
# Run the last process in the foreground
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
try:
process.stop()
except Exception as e:
logger.exception(f"[{process.service_name}] unable to stop: {e}")
process.stop()
def main(**kwargs):
@@ -32,16 +24,14 @@ def main(**kwargs):
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, Scheduler
from backend.notifications import NotificationManager
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
Scheduler(),
NotificationManager(),
ExecutionScheduler(),
WebsocketServer(),
AgentServer(),
**kwargs,

View File

@@ -1,99 +1,89 @@
import functools
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
@functools.cache
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
# Load all Block instances from the available modules
available_blocks: dict[str, type["Block"]] = {}
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in available_blocks:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
available_blocks[block.id] = block_cls
return available_blocks
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
name = block_cls.__name__
if block_cls.__name__.endswith("Base"):
continue
if not block_cls.__name__.endswith("Block"):
raise ValueError(
f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
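
For reference, a quick sketch of consuming the cached loader from the newer variant in this diff:

```python
# Illustrative only: enumerate registered blocks via the cached loader.
from backend.blocks import load_all_blocks

blocks = load_all_blocks()  # maps block ID -> Block subclass
print(f"{len(blocks)} blocks registered")
```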

View File

@@ -1,6 +1,6 @@
import asyncio
import logging
from typing import Any, Optional
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
@@ -12,43 +12,36 @@ from backend.data.block import (
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import CredentialsMetaInput, SchemaField
from backend.util import json
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
inputs: BlockInput = SchemaField(description="Input data for the graph")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
node_credentials_input_map: Optional[
dict[str, dict[str, CredentialsMetaInput]]
] = SchemaField(default=None, hidden=True)
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("inputs", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
@@ -62,83 +55,37 @@ class AgentExecutorBlock(Block):
categories={BlockCategory.AGENT},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
from backend.executor import utils as execution_utils
graph_exec = await execution_utils.add_graph_execution(
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
inputs=input_data.inputs,
node_credentials_input_map=input_data.node_credentials_input_map,
use_db_query=False,
data=input_data.data,
)
try:
async for name, data in self._run(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
):
yield name, data
except asyncio.CancelledError:
logger.warning(
f"Execution of graph {input_data.graph_id} version {input_data.graph_version} was cancelled."
)
await execution_utils.stop_graph_execution(
graph_exec.id, use_db_query=False
)
except Exception as e:
logger.error(
f"Execution of graph {input_data.graph_id} version {input_data.graph_version} failed: {e}, stopping execution."
)
await execution_utils.stop_graph_execution(
graph_exec.id, use_db_query=False
)
raise
async def _run(
self,
graph_id: str,
graph_version: int,
graph_exec_id: str,
user_id: str,
) -> BlockOutput:
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
event_bus = execution_utils.get_async_execution_event_bus()
log_id = f"Graph #{graph_id}-V{graph_version}, exec-id: {graph_exec_id}"
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
async for event in event_bus.listen(
user_id=user_id,
graph_id=graph_id,
graph_exec_id=graph_exec_id,
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
if event.status not in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.debug(
f"Execution {log_id} received event {event.event_type} with status {event.status}"
)
continue
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
# If the graph execution is COMPLETED, TERMINATED, or FAILED,
# we can stop listening for further events.
break
logger.debug(
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
@@ -153,7 +100,5 @@ class AgentExecutorBlock(Block):
continue
for output_data in event.output_data.get("output", []):
logger.debug(
f"Execution {log_id} produced {output_name}: {output_data}"
)
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data

View File

@@ -1,8 +1,8 @@
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.data.block import Block, BlockCategory, BlockSchema
@@ -165,15 +165,15 @@ class AIImageGeneratorBlock(Block):
},
)
async def _run_client(
def _run_client(
self, credentials: APIKeyCredentials, model_name: str, input_params: dict
):
try:
# Initialize Replicate client
client = ReplicateClient(api_token=credentials.api_key.get_secret_value())
client = replicate.Client(api_token=credentials.api_key.get_secret_value())
# Run the model with input parameters
output = await client.async_run(model_name, input=input_params, wait=False)
output = client.run(model_name, input=input_params, wait=False)
# Process output
if isinstance(output, list) and len(output) > 0:
@@ -195,7 +195,7 @@ class AIImageGeneratorBlock(Block):
except Exception as e:
raise RuntimeError(f"Unexpected error during model execution: {e}")
async def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
try:
# Handle style-based prompt modification for models without native style support
modified_prompt = input_data.prompt
@@ -213,7 +213,7 @@ class AIImageGeneratorBlock(Block):
"steps": 40,
"cfg_scale": 7.0,
}
output = await self._run_client(
output = self._run_client(
credentials,
"stability-ai/stable-diffusion-3.5-medium",
input_params,
@@ -231,7 +231,7 @@ class AIImageGeneratorBlock(Block):
"output_format": "jpg", # Set to jpg for Flux models
"output_quality": 90,
}
output = await self._run_client(
output = self._run_client(
credentials, "black-forest-labs/flux-1.1-pro", input_params
)
return output
@@ -246,7 +246,7 @@ class AIImageGeneratorBlock(Block):
"output_format": "jpg",
"output_quality": 90,
}
output = await self._run_client(
output = self._run_client(
credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
)
return output
@@ -257,7 +257,7 @@ class AIImageGeneratorBlock(Block):
"size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
"style": input_data.style.value,
}
output = await self._run_client(
output = self._run_client(
credentials, "recraft-ai/recraft-v3", input_params
)
return output
@@ -296,9 +296,9 @@ class AIImageGeneratorBlock(Block):
style_text = style_map.get(style, "")
return f"{style_text} of" if style_text else ""
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
try:
url = await self.generate_image(input_data, credentials)
url = self.generate_image(input_data, credentials)
if url:
yield "image_url", url
else:

View File

@@ -1,10 +1,10 @@
import asyncio
import logging
import time
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
@@ -142,7 +142,7 @@ class AIMusicGeneratorBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
max_retries = 3
@@ -154,7 +154,7 @@ class AIMusicGeneratorBlock(Block):
logger.debug(
f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
)
result = await self.run_model(
result = self.run_model(
api_key=credentials.api_key,
music_gen_model_version=input_data.music_gen_model_version,
prompt=input_data.prompt,
@@ -176,13 +176,13 @@ class AIMusicGeneratorBlock(Block):
last_error = f"Unexpected error: {str(e)}"
logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
if attempt < max_retries - 1:
await asyncio.sleep(retry_delay)
time.sleep(retry_delay)
continue
# If we've exhausted all retries, yield the error
yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"
async def run_model(
def run_model(
self,
api_key: SecretStr,
music_gen_model_version: MusicGenModelVersion,
@@ -196,10 +196,10 @@ class AIMusicGeneratorBlock(Block):
normalization_strategy: NormalizationStrategy,
):
# Initialize Replicate client with the API key
client = ReplicateClient(api_token=api_key.get_secret_value())
client = replicate.Client(api_token=api_key.get_secret_value())
# Run the model with parameters
output = await client.async_run(
output = client.run(
"meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
input={
"prompt": prompt,

View File

@@ -1,4 +1,3 @@
import asyncio
import logging
import time
from enum import Enum
@@ -14,7 +13,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -217,29 +216,29 @@ class AIShortformVideoCreatorBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def create_webhook(self):
def create_webhook(self):
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
response = requests.post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
def create_video(self, api_key: SecretStr, payload: dict) -> dict:
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
response = requests.post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
f"API Response Status Code: {response.status_code}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
response = requests.get(url, headers=headers)
return response.json()
async def wait_for_video(
def wait_for_video(
self,
api_key: SecretStr,
pid: str,
@@ -248,7 +247,7 @@ class AIShortformVideoCreatorBlock(Block):
) -> str:
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
status = self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
@@ -261,16 +260,16 @@ class AIShortformVideoCreatorBlock(Block):
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
time.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
# Create a new Webhook.site URL
webhook_token, webhook_url = await self.create_webhook()
webhook_token, webhook_url = self.create_webhook()
logger.debug(f"Webhook URL: {webhook_url}")
audio_url = input_data.background_music.audio_url
@@ -307,7 +306,7 @@ class AIShortformVideoCreatorBlock(Block):
}
logger.debug("Creating video...")
response = await self.create_video(credentials.api_key, payload)
response = self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
@@ -319,8 +318,6 @@ class AIShortformVideoCreatorBlock(Block):
logger.debug(
f"Video created with project ID: {pid}. Waiting for completion..."
)
video_url = await self.wait_for_video(
credentials.api_key, pid, webhook_token
)
video_url = self.wait_for_video(credentials.api_key, pid, webhook_token)
logger.debug(f"Video ready: {video_url}")
yield "video_url", video_url

View File

@@ -1,112 +0,0 @@
import logging
from typing import List
from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
Contact,
Organization,
SearchOrganizationsRequest,
SearchOrganizationsResponse,
SearchPeopleRequest,
SearchPeopleResponse,
)
from backend.util.request import Requests
logger = logging.getLogger(name=__name__)
class ApolloClient:
"""Client for the Apollo API"""
API_URL = "https://api.apollo.io/api/v1"
def __init__(self, credentials: ApolloCredentials):
self.credentials = credentials
self.requests = Requests()
def _get_headers(self) -> dict[str, str]:
return {"x-api-key": self.credentials.api_key.get_secret_value()}
async def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
"""Search for people in Apollo"""
response = await self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
if parsed_response.pagination.total_entries == 0:
return []
people = parsed_response.people
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(people) < query.max_results
):
while (
len(people) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.people) > 0
):
query.page += 1
response = await self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
people.extend(parsed_response.people[: query.max_results - len(people)])
logger.info(f"Found {len(people)} people")
return people[: query.max_results] if query.max_results else people
async def search_organizations(
self, query: SearchOrganizationsRequest
) -> List[Organization]:
"""Search for organizations in Apollo"""
response = await self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
if parsed_response.pagination.total_entries == 0:
return []
organizations = parsed_response.organizations
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(organizations) < query.max_results
):
while (
len(organizations) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.organizations) > 0
):
query.page += 1
response = await self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
organizations.extend(
parsed_response.organizations[
: query.max_results - len(organizations)
]
)
logger.info(f"Found {len(organizations)} organizations")
return (
organizations[: query.max_results] if query.max_results else organizations
)

View File

@@ -1,35 +0,0 @@
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
ApolloCredentials = APIKeyCredentials
ApolloCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.APOLLO],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="apollo",
api_key=SecretStr("mock-apollo-api-key"),
title="Mock Apollo API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
def ApolloCredentialsField() -> ApolloCredentialsInput:
"""
Creates an Apollo credentials input on a block.
"""
return CredentialsField(
description="The Apollo integration can be used with an API Key.",
)

View File

@@ -1,548 +0,0 @@
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel, ConfigDict
from backend.data.model import SchemaField
class PrimaryPhone(BaseModel):
"""A primary phone in Apollo"""
number: str
source: str
sanitized_number: str
class SenorityLevels(str, Enum):
"""Seniority levels in Apollo"""
OWNER = "owner"
FOUNDER = "founder"
C_SUITE = "c_suite"
PARTNER = "partner"
VP = "vp"
HEAD = "head"
DIRECTOR = "director"
MANAGER = "manager"
SENIOR = "senior"
ENTRY = "entry"
INTERN = "intern"
class ContactEmailStatuses(str, Enum):
"""Contact email statuses in Apollo"""
VERIFIED = "verified"
UNVERIFIED = "unverified"
LIKELY_TO_ENGAGE = "likely_to_engage"
UNAVAILABLE = "unavailable"
class RuleConfigStatus(BaseModel):
"""A rule config status in Apollo"""
_id: str
created_at: str
rule_action_config_id: str
rule_config_id: str
status_cd: str
updated_at: str
id: str
key: str
class ContactCampaignStatus(BaseModel):
"""A contact campaign status in Apollo"""
id: str
emailer_campaign_id: str
send_email_from_user_id: str
inactive_reason: str
status: str
added_at: str
added_by_user_id: str
finished_at: str
paused_at: str
auto_unpause_at: str
send_email_from_email_address: str
send_email_from_email_account_id: str
manually_set_unpause: str
failure_reason: str
current_step_id: str
in_response_to_emailer_message_id: str
cc_emails: str
bcc_emails: str
to_emails: str
class Account(BaseModel):
"""An account in Apollo"""
id: str
name: str
website_url: str
blog_url: str
angellist_url: str
linkedin_url: str
twitter_url: str
facebook_url: str
primary_phone: PrimaryPhone
languages: list[str]
alexa_ranking: int
phone: str
linkedin_uid: str
founded_year: int
publicly_traded_symbol: str
publicly_traded_exchange: str
logo_url: str
chrunchbase_url: str
primary_domain: str
domain: str
team_id: str
organization_id: str
account_stage_id: str
source: str
original_source: str
creator_id: str
owner_id: str
created_at: str
phone_status: str
hubspot_id: str
salesforce_id: str
crm_owner_id: str
parent_account_id: str
sanitized_phone: str
# no listed type on the API docs
account_playbook_statues: list[Any]
account_rule_config_statuses: list[RuleConfigStatus]
existence_level: str
label_ids: list[str]
typed_custom_fields: Any
custom_field_errors: Any
modality: str
source_display_name: str
salesforce_record_id: str
crm_record_url: str
class ContactEmail(BaseModel):
"""A contact email in Apollo"""
email: str = ""
email_md5: str = ""
email_sha256: str = ""
email_status: str = ""
email_source: str = ""
extrapolated_email_confidence: str = ""
position: int = 0
email_from_customer: str = ""
free_domain: bool = True
class EmploymentHistory(BaseModel):
"""An employment history in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
_id: Optional[str] = None
created_at: Optional[str] = None
current: Optional[bool] = None
degree: Optional[str] = None
description: Optional[str] = None
emails: Optional[str] = None
end_date: Optional[str] = None
grade_level: Optional[str] = None
kind: Optional[str] = None
major: Optional[str] = None
organization_id: Optional[str] = None
organization_name: Optional[str] = None
raw_address: Optional[str] = None
start_date: Optional[str] = None
title: Optional[str] = None
updated_at: Optional[str] = None
id: Optional[str] = None
key: Optional[str] = None
class Breadcrumb(BaseModel):
"""A breadcrumb in Apollo"""
label: Optional[str] = "N/A"
signal_field_name: Optional[str] = "N/A"
value: str | list | None = "N/A"
display_name: Optional[str] = "N/A"
class TypedCustomField(BaseModel):
"""A typed custom field in Apollo"""
id: Optional[str] = "N/A"
value: Optional[str] = "N/A"
class Pagination(BaseModel):
"""Pagination in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
page: int = 0
per_page: int = 0
total_entries: int = 0
total_pages: int = 0
class DialerFlags(BaseModel):
"""A dialer flags in Apollo"""
country_name: str
country_enabled: bool
high_risk_calling_enabled: bool
potential_high_risk_number: bool
class PhoneNumber(BaseModel):
"""A phone number in Apollo"""
raw_number: str = ""
sanitized_number: str = ""
type: str = ""
position: int = 0
status: str = ""
dnc_status: str = ""
dnc_other_info: str = ""
dailer_flags: DialerFlags = DialerFlags(
country_name="",
country_enabled=True,
high_risk_calling_enabled=True,
potential_high_risk_number=True,
)
class Organization(BaseModel):
"""An organization in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
id: Optional[str] = "N/A"
name: Optional[str] = "N/A"
website_url: Optional[str] = "N/A"
blog_url: Optional[str] = "N/A"
angellist_url: Optional[str] = "N/A"
linkedin_url: Optional[str] = "N/A"
twitter_url: Optional[str] = "N/A"
facebook_url: Optional[str] = "N/A"
primary_phone: Optional[PrimaryPhone] = PrimaryPhone(
number="N/A", source="N/A", sanitized_number="N/A"
)
languages: list[str] = []
alexa_ranking: Optional[int] = 0
phone: Optional[str] = "N/A"
linkedin_uid: Optional[str] = "N/A"
founded_year: Optional[int] = 0
publicly_traded_symbol: Optional[str] = "N/A"
publicly_traded_exchange: Optional[str] = "N/A"
logo_url: Optional[str] = "N/A"
chrunchbase_url: Optional[str] = "N/A"
primary_domain: Optional[str] = "N/A"
sanitized_phone: Optional[str] = "N/A"
owned_by_organization_id: Optional[str] = "N/A"
intent_strength: Optional[str] = "N/A"
show_intent: bool = True
has_intent_signal_account: Optional[bool] = True
intent_signal_account: Optional[str] = "N/A"
class Contact(BaseModel):
"""A contact in Apollo"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
contact_roles: list[Any] = []
id: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
name: Optional[str] = None
linkedin_url: Optional[str] = None
title: Optional[str] = None
contact_stage_id: Optional[str] = None
owner_id: Optional[str] = None
creator_id: Optional[str] = None
person_id: Optional[str] = None
email_needs_tickling: bool = True
organization_name: Optional[str] = None
source: Optional[str] = None
original_source: Optional[str] = None
organization_id: Optional[str] = None
headline: Optional[str] = None
photo_url: Optional[str] = None
present_raw_address: Optional[str] = None
linkededin_uid: Optional[str] = None
extrapolated_email_confidence: Optional[float] = None
salesforce_id: Optional[str] = None
salesforce_lead_id: Optional[str] = None
salesforce_contact_id: Optional[str] = None
saleforce_account_id: Optional[str] = None
crm_owner_id: Optional[str] = None
created_at: Optional[str] = None
emailer_campaign_ids: list[str] = []
direct_dial_status: Optional[str] = None
direct_dial_enrichment_failed_at: Optional[str] = None
email_status: Optional[str] = None
email_source: Optional[str] = None
account_id: Optional[str] = None
last_activity_date: Optional[str] = None
hubspot_vid: Optional[str] = None
hubspot_company_id: Optional[str] = None
crm_id: Optional[str] = None
sanitized_phone: Optional[str] = None
merged_crm_ids: Optional[str] = None
updated_at: Optional[str] = None
queued_for_crm_push: bool = True
suggested_from_rule_engine_config_id: Optional[str] = None
email_unsubscribed: Optional[str] = None
label_ids: list[Any] = []
has_pending_email_arcgate_request: bool = True
has_email_arcgate_request: bool = True
existence_level: Optional[str] = None
email: Optional[str] = None
email_from_customer: Optional[str] = None
typed_custom_fields: list[TypedCustomField] = []
custom_field_errors: Any = None
salesforce_record_id: Optional[str] = None
crm_record_url: Optional[str] = None
email_status_unavailable_reason: Optional[str] = None
email_true_status: Optional[str] = None
updated_email_true_status: bool = True
contact_rule_config_statuses: list[RuleConfigStatus] = []
source_display_name: Optional[str] = None
twitter_url: Optional[str] = None
contact_campaign_statuses: list[ContactCampaignStatus] = []
state: Optional[str] = None
city: Optional[str] = None
country: Optional[str] = None
account: Optional[Account] = None
contact_emails: list[ContactEmail] = []
organization: Optional[Organization] = None
employment_history: list[EmploymentHistory] = []
time_zone: Optional[str] = None
intent_strength: Optional[str] = None
show_intent: bool = True
phone_numbers: list[PhoneNumber] = []
account_phone_note: Optional[str] = None
free_domain: bool = True
is_likely_to_engage: bool = True
email_domain_catchall: bool = True
contact_job_change_event: Optional[str] = None
class SearchOrganizationsRequest(BaseModel):
"""Request for Apollo's search organizations API"""
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default=[0, 1000000],
)
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
To exclude companies based on location, use the organization_not_locations parameter.
""",
default_factory=list,
)
organizations_not_locations: list[str] = SchemaField(
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
default_factory=list,
)
q_organization_keyword_tags: list[str] = SchemaField(
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry."""
)
q_organization_name: str = SchemaField(
description="""Filter search results to include a specific company name.
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible."""
)
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, identify the values for organization_id when you call this endpoint.""",
default_factory=list,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
page: int = SchemaField(
description="""The page number of the Apollo data that you want to retrieve.
Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.""",
default=1,
)
per_page: int = SchemaField(
description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.
Use the page parameter to search the different pages of data.""",
default=100,
)
class SearchOrganizationsResponse(BaseModel):
"""Response from Apollo's search organizations API"""
breadcrumbs: list[Breadcrumb] = []
partial_results_only: bool = True
has_join: bool = True
disable_eu_prospecting: bool = True
partial_results_limit: int = 0
pagination: Pagination = Pagination(
page=0, per_page=0, total_entries=0, total_pages=0
)
# no listed type on the API docs
accounts: list[Any] = []
organizations: list[Organization] = []
models_ids: list[str] = []
num_fetch_result: Optional[str] = "N/A"
derived_params: Optional[str] = "N/A"
class SearchPeopleRequest(BaseModel):
"""Request for Apollo's search people API"""
person_titles: list[str] = SchemaField(
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
""",
default_factory=list,
placeholder="marketing manager",
)
person_locations: list[str] = SchemaField(
description="""The location where people live. You can search across cities, US states, and countries.
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
default_factory=list,
)
person_seniorities: list[SenorityLevels] = SchemaField(
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
default_factory=list,
)
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
To find people based on their personal location, use the person_locations parameter.""",
default_factory=list,
)
q_organization_domains: list[str] = SchemaField(
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
You can add multiple domains to search across companies.
Examples: apollo.io and microsoft.com""",
default_factory=list,
)
contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
default_factory=list,
)
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
default_factory=list,
)
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default_factory=list,
)
q_keywords: str = SchemaField(
description="""A string of words over which we want to filter the results""",
default="",
)
page: int = SchemaField(
description="""The page number of the Apollo data that you want to retrieve.
Use this parameter in combination with the per_page parameter to make search results more navigable and improve the performance of the endpoint.""",
default=1,
)
per_page: int = SchemaField(
description="""The number of search results that should be returned for each page. Limited the number of results per page improves the endpoint's performance.
Use the page parameter to search the different pages of data.""",
default=100,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
class SearchPeopleResponse(BaseModel):
"""Response from Apollo's search people API"""
model_config = ConfigDict(
extra="allow",
arbitrary_types_allowed=True,
from_attributes=True,
populate_by_name=True,
)
breadcrumbs: list[Breadcrumb] = []
partial_results_only: bool = True
has_join: bool = True
disable_eu_prospecting: bool = True
partial_results_limit: int = 0
pagination: Pagination = Pagination(
page=0, per_page=0, total_entries=0, total_pages=0
)
contacts: list[Contact] = []
people: list[Contact] = []
model_ids: list[str] = []
num_fetch_result: Optional[str] = "N/A"
derived_params: Optional[str] = "N/A"
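# A minimal usage sketch for the request models above; the values are
# illustrative, and the import path follows the file's own package
# (backend.blocks.apollo.models). `max_results` caps the total returned,
# while `page`/`per_page` shape each individual API call.
from backend.blocks.apollo.models import SearchPeopleRequest

example_query = SearchPeopleRequest(
    person_titles=["marketing manager"],
    person_locations=["chicago"],
    q_organization_domains=["apollo.io", "microsoft.com"],
    per_page=50,      # results per API call
    page=1,           # first page of results
    max_results=200,  # overall cap across pages
)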

View File

@@ -1,219 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ApolloCredentials,
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
Organization,
PrimaryPhone,
SearchOrganizationsRequest,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchOrganizationsBlock(Block):
"""Search for organizations in Apollo"""
class Input(BlockSchema):
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default=[0, 1000000],
)
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
To exclude companies based on location, use the organization_not_locations parameter.
""",
default_factory=list,
)
organizations_not_locations: list[str] = SchemaField(
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
default_factory=list,
)
q_organization_keyword_tags: list[str] = SchemaField(
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
default_factory=list,
)
q_organization_name: str = SchemaField(
description="""Filter search results to include a specific company name.
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
default="",
advanced=False,
)
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, identify the values for organization_id when you call this endpoint.""",
default_factory=list,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
credentials: ApolloCredentialsInput = SchemaField(
description="Apollo credentials",
)
class Output(BlockSchema):
organizations: list[Organization] = SchemaField(
description="List of organizations found",
default_factory=list,
)
organization: Organization = SchemaField(
description="Each found organization, one at a time",
)
error: str = SchemaField(
description="Error message if the search failed",
default="",
)
def __init__(self):
super().__init__(
id="3d71270d-599e-4148-9b95-71b35d2f44f0",
description="Search for organizations in Apollo",
categories={BlockCategory.SEARCH},
input_schema=SearchOrganizationsBlock.Input,
output_schema=SearchOrganizationsBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={"query": "Google", "credentials": TEST_CREDENTIALS_INPUT},
test_output=[
(
"organization",
Organization(
id="1",
name="Google",
website_url="https://google.com",
blog_url="https://google.com/blog",
angellist_url="https://angel.co/google",
linkedin_url="https://linkedin.com/company/google",
twitter_url="https://twitter.com/google",
facebook_url="https://facebook.com/google",
primary_phone=PrimaryPhone(
source="google",
number="1234567890",
sanitized_number="1234567890",
),
languages=["en"],
alexa_ranking=1000,
phone="1234567890",
linkedin_uid="1234567890",
founded_year=2000,
publicly_traded_symbol="GOOGL",
publicly_traded_exchange="NASDAQ",
logo_url="https://google.com/logo.png",
chrunchbase_url="https://chrunchbase.com/google",
primary_domain="google.com",
sanitized_phone="1234567890",
owned_by_organization_id="1",
intent_strength="strong",
show_intent=True,
has_intent_signal_account=True,
intent_signal_account="1",
),
),
(
"organizations",
[
Organization(
id="1",
name="Google",
website_url="https://google.com",
blog_url="https://google.com/blog",
angellist_url="https://angel.co/google",
linkedin_url="https://linkedin.com/company/google",
twitter_url="https://twitter.com/google",
facebook_url="https://facebook.com/google",
primary_phone=PrimaryPhone(
source="google",
number="1234567890",
sanitized_number="1234567890",
),
languages=["en"],
alexa_ranking=1000,
phone="1234567890",
linkedin_uid="1234567890",
founded_year=2000,
publicly_traded_symbol="GOOGL",
publicly_traded_exchange="NASDAQ",
logo_url="https://google.com/logo.png",
chrunchbase_url="https://chrunchbase.com/google",
primary_domain="google.com",
sanitized_phone="1234567890",
owned_by_organization_id="1",
intent_strength="strong",
show_intent=True,
has_intent_signal_account=True,
intent_signal_account="1",
),
],
),
],
test_mock={
"search_organizations": lambda *args, **kwargs: [
Organization(
id="1",
name="Google",
website_url="https://google.com",
blog_url="https://google.com/blog",
angellist_url="https://angel.co/google",
linkedin_url="https://linkedin.com/company/google",
twitter_url="https://twitter.com/google",
facebook_url="https://facebook.com/google",
primary_phone=PrimaryPhone(
source="google",
number="1234567890",
sanitized_number="1234567890",
),
languages=["en"],
alexa_ranking=1000,
phone="1234567890",
linkedin_uid="1234567890",
founded_year=2000,
publicly_traded_symbol="GOOGL",
publicly_traded_exchange="NASDAQ",
logo_url="https://google.com/logo.png",
chrunchbase_url="https://chrunchbase.com/google",
primary_domain="google.com",
sanitized_phone="1234567890",
owned_by_organization_id="1",
intent_strength="strong",
show_intent=True,
has_intent_signal_account=True,
intent_signal_account="1",
)
]
},
)
@staticmethod
async def search_organizations(
query: SearchOrganizationsRequest, credentials: ApolloCredentials
) -> list[Organization]:
client = ApolloClient(credentials)
return await client.search_organizations(query)
async def run(
self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
) -> BlockOutput:
query = SearchOrganizationsRequest(
**input_data.model_dump(exclude={"credentials"})
)
organizations = await self.search_organizations(query, credentials)
for organization in organizations:
yield "organization", organization
yield "organizations", organizations

View File

@@ -1,394 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ApolloCredentials,
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import (
Contact,
ContactEmailStatuses,
SearchPeopleRequest,
SenorityLevels,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchPeopleBlock(Block):
"""Search for people in Apollo"""
class Input(BlockSchema):
person_titles: list[str] = SchemaField(
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
Use this parameter in combination with the person_seniorities[] parameter to find people based on specific job functions and seniority levels.
""",
default_factory=list,
advanced=False,
)
person_locations: list[str] = SchemaField(
description="""The location where people live. You can search across cities, US states, and countries.
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
default_factory=list,
advanced=False,
)
person_seniorities: list[SenorityLevels] = SchemaField(
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
Searches only return results based on their current job title, so searching for Director-level employees only returns people that currently hold a Director-level title. If someone was previously a Director, but is currently a VP, they would not be included in your search results.
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
default_factory=list,
advanced=False,
)
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
To find people based on their personal location, use the person_locations parameter.""",
default_factory=list,
advanced=False,
)
q_organization_domains: list[str] = SchemaField(
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
You can add multiple domains to search across companies.
Examples: apollo.io and microsoft.com""",
default_factory=list,
advanced=False,
)
contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
default_factory=list,
advanced=False,
)
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
default_factory=list,
advanced=False,
)
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default_factory=list,
advanced=False,
)
q_keywords: str = SchemaField(
description="""A string of words over which we want to filter the results""",
default="",
advanced=False,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=50000,
advanced=True,
)
credentials: ApolloCredentialsInput = SchemaField(
description="Apollo credentials",
)
class Output(BlockSchema):
people: list[Contact] = SchemaField(
description="List of people found",
default_factory=list,
)
person: Contact = SchemaField(
description="Each found person, one at a time",
)
error: str = SchemaField(
description="Error message if the search failed",
default="",
)
def __init__(self):
super().__init__(
id="c2adb3aa-5aae-488d-8a6e-4eb8c23e2ed6",
description="Search for people in Apollo",
categories={BlockCategory.SEARCH},
input_schema=SearchPeopleBlock.Input,
output_schema=SearchPeopleBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={"credentials": TEST_CREDENTIALS_INPUT},
test_output=[
(
"person",
Contact(
contact_roles=[],
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
linkedin_url="https://www.linkedin.com/in/johndoe",
title="Software Engineer",
organization_name="Google",
organization_id="123456",
contact_stage_id="1",
owner_id="1",
creator_id="1",
person_id="1",
email_needs_tickling=True,
source="apollo",
original_source="apollo",
headline="Software Engineer",
photo_url="https://www.linkedin.com/in/johndoe",
present_raw_address="123 Main St, Anytown, USA",
linkededin_uid="123456",
extrapolated_email_confidence=0.8,
salesforce_id="123456",
salesforce_lead_id="123456",
salesforce_contact_id="123456",
saleforce_account_id="123456",
crm_owner_id="123456",
created_at="2021-01-01",
emailer_campaign_ids=[],
direct_dial_status="active",
direct_dial_enrichment_failed_at="2021-01-01",
email_status="active",
email_source="apollo",
account_id="123456",
last_activity_date="2021-01-01",
hubspot_vid="123456",
hubspot_company_id="123456",
crm_id="123456",
sanitized_phone="123456",
merged_crm_ids="123456",
updated_at="2021-01-01",
queued_for_crm_push=True,
suggested_from_rule_engine_config_id="123456",
email_unsubscribed=None,
label_ids=[],
has_pending_email_arcgate_request=True,
has_email_arcgate_request=True,
existence_level=None,
email=None,
email_from_customer=None,
typed_custom_fields=[],
custom_field_errors=None,
salesforce_record_id=None,
crm_record_url=None,
email_status_unavailable_reason=None,
email_true_status=None,
updated_email_true_status=True,
contact_rule_config_statuses=[],
source_display_name=None,
twitter_url=None,
contact_campaign_statuses=[],
state=None,
city=None,
country=None,
account=None,
contact_emails=[],
organization=None,
employment_history=[],
time_zone=None,
intent_strength=None,
show_intent=True,
phone_numbers=[],
account_phone_note=None,
free_domain=True,
is_likely_to_engage=True,
email_domain_catchall=True,
contact_job_change_event=None,
),
),
(
"people",
[
Contact(
contact_roles=[],
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
linkedin_url="https://www.linkedin.com/in/johndoe",
title="Software Engineer",
organization_name="Google",
organization_id="123456",
contact_stage_id="1",
owner_id="1",
creator_id="1",
person_id="1",
email_needs_tickling=True,
source="apollo",
original_source="apollo",
headline="Software Engineer",
photo_url="https://www.linkedin.com/in/johndoe",
present_raw_address="123 Main St, Anytown, USA",
linkededin_uid="123456",
extrapolated_email_confidence=0.8,
salesforce_id="123456",
salesforce_lead_id="123456",
salesforce_contact_id="123456",
saleforce_account_id="123456",
crm_owner_id="123456",
created_at="2021-01-01",
emailer_campaign_ids=[],
direct_dial_status="active",
direct_dial_enrichment_failed_at="2021-01-01",
email_status="active",
email_source="apollo",
account_id="123456",
last_activity_date="2021-01-01",
hubspot_vid="123456",
hubspot_company_id="123456",
crm_id="123456",
sanitized_phone="123456",
merged_crm_ids="123456",
updated_at="2021-01-01",
queued_for_crm_push=True,
suggested_from_rule_engine_config_id="123456",
email_unsubscribed=None,
label_ids=[],
has_pending_email_arcgate_request=True,
has_email_arcgate_request=True,
existence_level=None,
email=None,
email_from_customer=None,
typed_custom_fields=[],
custom_field_errors=None,
salesforce_record_id=None,
crm_record_url=None,
email_status_unavailable_reason=None,
email_true_status=None,
updated_email_true_status=True,
contact_rule_config_statuses=[],
source_display_name=None,
twitter_url=None,
contact_campaign_statuses=[],
state=None,
city=None,
country=None,
account=None,
contact_emails=[],
organization=None,
employment_history=[],
time_zone=None,
intent_strength=None,
show_intent=True,
phone_numbers=[],
account_phone_note=None,
free_domain=True,
is_likely_to_engage=True,
email_domain_catchall=True,
contact_job_change_event=None,
),
],
),
],
test_mock={
"search_people": lambda query, credentials: [
Contact(
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
linkedin_url="https://www.linkedin.com/in/johndoe",
title="Software Engineer",
organization_name="Google",
organization_id="123456",
contact_stage_id="1",
owner_id="1",
creator_id="1",
person_id="1",
email_needs_tickling=True,
source="apollo",
original_source="apollo",
headline="Software Engineer",
photo_url="https://www.linkedin.com/in/johndoe",
present_raw_address="123 Main St, Anytown, USA",
linkededin_uid="123456",
extrapolated_email_confidence=0.8,
salesforce_id="123456",
salesforce_lead_id="123456",
salesforce_contact_id="123456",
saleforce_account_id="123456",
crm_owner_id="123456",
created_at="2021-01-01",
emailer_campaign_ids=[],
direct_dial_status="active",
direct_dial_enrichment_failed_at="2021-01-01",
email_status="active",
email_source="apollo",
account_id="123456",
last_activity_date="2021-01-01",
hubspot_vid="123456",
hubspot_company_id="123456",
crm_id="123456",
sanitized_phone="123456",
merged_crm_ids="123456",
updated_at="2021-01-01",
queued_for_crm_push=True,
suggested_from_rule_engine_config_id="123456",
email_unsubscribed=None,
label_ids=[],
has_pending_email_arcgate_request=True,
has_email_arcgate_request=True,
existence_level=None,
email=None,
email_from_customer=None,
typed_custom_fields=[],
custom_field_errors=None,
salesforce_record_id=None,
crm_record_url=None,
email_status_unavailable_reason=None,
email_true_status=None,
updated_email_true_status=True,
contact_rule_config_statuses=[],
source_display_name=None,
twitter_url=None,
contact_campaign_statuses=[],
state=None,
city=None,
country=None,
account=None,
contact_emails=[],
organization=None,
employment_history=[],
time_zone=None,
intent_strength=None,
show_intent=True,
phone_numbers=[],
account_phone_note=None,
free_domain=True,
is_likely_to_engage=True,
email_domain_catchall=True,
contact_job_change_event=None,
),
]
},
)
@staticmethod
async def search_people(
query: SearchPeopleRequest, credentials: ApolloCredentials
) -> list[Contact]:
client = ApolloClient(credentials)
return await client.search_people(query)
async def run(
self,
input_data: Input,
*,
credentials: ApolloCredentials,
**kwargs,
) -> BlockOutput:
query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
people = await self.search_people(query, credentials)
for person in people:
yield "person", person
yield "people", people

View File

@@ -1,48 +1,11 @@
import enum
from typing import Any, List
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.util import json
from backend.util.file import store_media_file
from backend.util.mock import MockObject
from backend.util.type import MediaFileType, convert
from backend.util.text import TextFormatter
class FileStoreBlock(Block):
class Input(BlockSchema):
file_in: MediaFileType = SchemaField(
description="The file to store in the temporary directory, it can be a URL, data URI, or local path."
)
class Output(BlockSchema):
file_out: MediaFileType = SchemaField(
description="The relative path to the stored file in the temporary directory."
)
def __init__(self):
super().__init__(
id="cbb50872-625b-42f0-8203-a2ae78242d8a",
description="Stores the input file in the temporary directory.",
categories={BlockCategory.BASIC, BlockCategory.MULTIMEDIA},
input_schema=FileStoreBlock.Input,
output_schema=FileStoreBlock.Output,
static_output=True,
)
async def run(
self,
input_data: Input,
*,
graph_exec_id: str,
**kwargs,
) -> BlockOutput:
file_path = await store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.file_in,
return_content=False,
)
yield "file_out", file_path
formatter = TextFormatter()
class StoreValueBlock(Block):
@@ -84,16 +47,15 @@ class StoreValueBlock(Block):
static_output=True,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.data or input_data.input
class PrintToConsoleBlock(Block):
class Input(BlockSchema):
text: Any = SchemaField(description="The data to print to the console.")
text: str = SchemaField(description="The text to print to the console.")
class Output(BlockSchema):
output: Any = SchemaField(description="The data printed to the console.")
status: str = SchemaField(description="The status of the print operation.")
def __init__(self):
@@ -104,14 +66,11 @@ class PrintToConsoleBlock(Block):
input_schema=PrintToConsoleBlock.Input,
output_schema=PrintToConsoleBlock.Output,
test_input={"text": "Hello, World!"},
test_output=[
("output", "Hello, World!"),
("status", "printed"),
],
test_output=("status", "printed"),
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.text
def run(self, input_data: Input, **kwargs) -> BlockOutput:
print(">>>>> Print: ", input_data.text)
yield "status", "printed"
@@ -151,13 +110,10 @@ class FindInDictionaryBlock(Block):
categories={BlockCategory.BASIC},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
obj = input_data.input
key = input_data.key
if isinstance(obj, str):
obj = json.loads(obj)
if isinstance(obj, dict) and key in obj:
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
@@ -175,10 +131,190 @@ class FindInDictionaryBlock(Block):
yield "missing", input_data.input
class AgentInputBlock(Block):
"""
This block is used to provide input to the graph.
It takes in a value, a name, a description, a list of placeholder values, and a flag that limits selection to those values.
It outputs the value passed as input.
"""
class Input(BlockSchema):
name: str = SchemaField(description="The name of the input.")
value: Any = SchemaField(
description="The value to be passed as input.",
default=None,
)
title: str | None = SchemaField(
description="The title of the input.", default=None, advanced=True
)
description: str | None = SchemaField(
description="The description of the input.",
default=None,
advanced=True,
)
placeholder_values: List[Any] = SchemaField(
description="The placeholder values to be passed as input.",
default=[],
advanced=True,
)
limit_to_placeholder_values: bool = SchemaField(
description="Whether to limit the selection to placeholder values.",
default=False,
advanced=True,
)
advanced: bool = SchemaField(
description="Whether to show the input in the advanced section, if the field is not required.",
default=False,
advanced=True,
)
secret: bool = SchemaField(
description="Whether the input should be treated as a secret.",
default=False,
advanced=True,
)
class Output(BlockSchema):
result: Any = SchemaField(description="The value passed as input.")
def __init__(self):
super().__init__(
id="c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
description="This block is used to provide input to the graph.",
input_schema=AgentInputBlock.Input,
output_schema=AgentInputBlock.Output,
test_input=[
{
"value": "Hello, World!",
"name": "input_1",
"description": "This is a test input.",
"placeholder_values": [],
"limit_to_placeholder_values": False,
},
{
"value": "Hello, World!",
"name": "input_2",
"description": "This is a test input.",
"placeholder_values": ["Hello, World!"],
"limit_to_placeholder_values": True,
},
],
test_output=[
("result", "Hello, World!"),
("result", "Hello, World!"),
],
categories={BlockCategory.INPUT, BlockCategory.BASIC},
block_type=BlockType.INPUT,
static_output=True,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "result", input_data.value
class AgentOutputBlock(Block):
"""
Records the output of the graph for users to see.
Behavior:
If `format` is provided and the `value` is of a type that can be formatted,
the block attempts to format the `value` using the `format` string.
If formatting fails or no `format` is provided, the raw `value` is output.
"""
class Input(BlockSchema):
value: Any = SchemaField(
description="The value to be recorded as output.",
default=None,
advanced=False,
)
name: str = SchemaField(description="The name of the output.")
title: str | None = SchemaField(
description="The title of the output.",
default=None,
advanced=True,
)
description: str | None = SchemaField(
description="The description of the output.",
default=None,
advanced=True,
)
format: str = SchemaField(
description="The format string to be used to format the recorded_value.",
default="",
advanced=True,
)
advanced: bool = SchemaField(
description="Whether to treat the output as advanced.",
default=False,
advanced=True,
)
secret: bool = SchemaField(
description="Whether the output should be treated as a secret.",
default=False,
advanced=True,
)
class Output(BlockSchema):
output: Any = SchemaField(description="The value recorded as output.")
def __init__(self):
super().__init__(
id="363ae599-353e-4804-937e-b2ee3cef3da4",
description="Stores the output of the graph for users to see.",
input_schema=AgentOutputBlock.Input,
output_schema=AgentOutputBlock.Output,
test_input=[
{
"value": "Hello, World!",
"name": "output_1",
"description": "This is a test output.",
"format": "{{ output_1 }}!!",
},
{
"value": "42",
"name": "output_2",
"description": "This is another test output.",
"format": "{{ output_2 }}",
},
{
"value": MockObject(value="!!", key="key"),
"name": "output_3",
"description": "This is a test output with a mock object.",
"format": "{{ output_3 }}",
},
],
test_output=[
("output", "Hello, World!!!"),
("output", "42"),
("output", MockObject(value="!!", key="key")),
],
categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
block_type=BlockType.OUTPUT,
static_output=True,
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
"""
Attempts to format the value using the `format` string if provided.
If formatting fails or no `format` is given, outputs the original value.
"""
if input_data.format:
try:
yield "output", formatter.format_string(
input_data.format, {input_data.name: input_data.value}
)
except Exception as e:
yield "output", f"Error: {e}, {input_data.value}"
else:
yield "output", input_data.value
class AddToDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
default_factory=dict,
default={},
description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
)
key: str = SchemaField(
@@ -194,7 +330,7 @@ class AddToDictionaryBlock(Block):
advanced=False,
)
entries: dict[Any, Any] = SchemaField(
default_factory=dict,
default={},
description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
advanced=True,
)
@@ -241,7 +377,7 @@ class AddToDictionaryBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
if input_data.value is not None and input_data.key:
@@ -256,7 +392,7 @@ class AddToDictionaryBlock(Block):
class AddToListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(
default_factory=list,
default=[],
advanced=False,
description="The list to add the entry to. If not provided, a new list will be created.",
)
@@ -266,7 +402,7 @@ class AddToListBlock(Block):
default=None,
)
entries: List[Any] = SchemaField(
default_factory=lambda: list(),
default=[],
description="The entries to add to the list. This is the batch version of the `entry` field.",
advanced=True,
)
@@ -319,7 +455,7 @@ class AddToListBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
entries_added = input_data.entries.copy()
if input_data.entry:
entries_added.append(input_data.entry)
@@ -333,48 +469,6 @@ class AddToListBlock(Block):
yield "updated_list", updated_list
class FindInListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to search in.")
value: Any = SchemaField(description="The value to search for.")
class Output(BlockSchema):
index: int = SchemaField(description="The index of the value in the list.")
found: bool = SchemaField(
description="Whether the value was found in the list."
)
not_found_value: Any = SchemaField(
description="The value that was not found in the list."
)
def __init__(self):
super().__init__(
id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
description="Finds the index of the value in the list.",
categories={BlockCategory.BASIC},
input_schema=FindInListBlock.Input,
output_schema=FindInListBlock.Output,
test_input=[
{"list": [1, 2, 3, 4, 5], "value": 3},
{"list": [1, 2, 3, 4, 5], "value": 6},
],
test_output=[
("index", 2),
("found", True),
("found", False),
("not_found_value", 6),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
yield "index", input_data.list.index(input_data.value)
yield "found", True
except ValueError:
yield "found", False
yield "not_found_value", input_data.value
class NoteBlock(Block):
class Input(BlockSchema):
text: str = SchemaField(description="The text to display in the sticky note.")
@@ -396,7 +490,7 @@ class NoteBlock(Block):
block_type=BlockType.NOTE,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.text
@@ -442,7 +536,7 @@ class CreateDictionaryBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "dictionary", input_data.values
@@ -490,53 +584,9 @@ class CreateListBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "list", input_data.values
except Exception as e:
yield "error", f"Failed to create list: {str(e)}"
class TypeOptions(enum.Enum):
STRING = "string"
NUMBER = "number"
BOOLEAN = "boolean"
LIST = "list"
DICTIONARY = "dictionary"
class UniversalTypeConverterBlock(Block):
class Input(BlockSchema):
value: Any = SchemaField(
description="The value to convert to a universal type."
)
type: TypeOptions = SchemaField(description="The type to convert the value to.")
class Output(BlockSchema):
value: Any = SchemaField(description="The converted value.")
def __init__(self):
super().__init__(
id="95d1b990-ce13-4d88-9737-ba5c2070c97b",
description="This block is used to convert a value to a universal type.",
categories={BlockCategory.BASIC},
input_schema=UniversalTypeConverterBlock.Input,
output_schema=UniversalTypeConverterBlock.Output,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
converted_value = convert(
input_data.value,
{
TypeOptions.STRING: str,
TypeOptions.NUMBER: float,
TypeOptions.BOOLEAN: bool,
TypeOptions.LIST: list,
TypeOptions.DICTIONARY: dict,
}[input_data.type],
)
yield "value", converted_value
except Exception as e:
yield "error", f"Failed to convert value: {str(e)}"

View File

@@ -38,7 +38,7 @@ class BlockInstallationBlock(Block):
disabled=True,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
code = input_data.code
if search := re.search(r"class (\w+)\(Block\):", code):
@@ -64,7 +64,7 @@ class BlockInstallationBlock(Block):
from backend.util.test import execute_block_test
await execute_block_test(block)
execute_block_test(block)
yield "success", "Block installed successfully."
except Exception as e:
os.remove(file_path)

View File

@@ -70,7 +70,7 @@ class ConditionBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
operator = input_data.operator
value1 = input_data.value1
@@ -107,83 +107,3 @@ class ConditionBlock(Block):
yield "yes_output", yes_value
else:
yield "no_output", no_value
class IfInputMatchesBlock(Block):
class Input(BlockSchema):
input: Any = SchemaField(
description="The input to match against",
placeholder="For example: 10 or 'hello' or True",
)
value: Any = SchemaField(
description="The value to output if the input matches",
placeholder="For example: 'Greater' or 20 or False",
)
yes_value: Any = SchemaField(
description="The value to output if the input matches",
placeholder="For example: 'Greater' or 20 or False",
default=None,
)
no_value: Any = SchemaField(
description="The value to output if the input does not match",
placeholder="For example: 'Greater' or 20 or False",
default=None,
)
class Output(BlockSchema):
result: bool = SchemaField(
description="The result of the condition evaluation (True or False)"
)
yes_output: Any = SchemaField(
description="The output value if the condition is true"
)
no_output: Any = SchemaField(
description="The output value if the condition is false"
)
def __init__(self):
super().__init__(
id="6dbbc4b3-ca6c-42b6-b508-da52d23e13f2",
input_schema=IfInputMatchesBlock.Input,
output_schema=IfInputMatchesBlock.Output,
description="Handles conditional logic based on comparison operators",
categories={BlockCategory.LOGIC},
test_input=[
{
"input": 10,
"value": 10,
"yes_value": "Greater",
"no_value": "Not greater",
},
{
"input": 10,
"value": 20,
"yes_value": "Greater",
"no_value": "Not greater",
},
{
"input": 10,
"value": None,
"yes_value": "Yes",
"no_value": "No",
},
],
test_output=[
("result", True),
("yes_output", "Greater"),
("result", False),
("no_output", "Not greater"),
("result", False),
("no_output", "No"),
# ("result", True),
# ("yes_output", "Yes"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
if input_data.input == input_data.value or input_data.input is input_data.value:
yield "result", True
yield "yes_output", input_data.yes_value
else:
yield "result", False
yield "no_output", input_data.no_value

View File

@@ -1,7 +1,7 @@
from enum import Enum
from typing import Literal
from e2b_code_interpreter import AsyncSandbox
from e2b_code_interpreter import Sandbox
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -55,7 +55,7 @@ class CodeExecutionBlock(Block):
"These commands are executed with `sh`, in the foreground."
),
placeholder="pip install cowsay",
default_factory=list,
default=[],
advanced=False,
)
@@ -123,7 +123,7 @@ class CodeExecutionBlock(Block):
},
)
async def execute_code(
def execute_code(
self,
code: str,
language: ProgrammingLanguage,
@@ -135,21 +135,21 @@ class CodeExecutionBlock(Block):
try:
sandbox = None
if template_id:
sandbox = await AsyncSandbox.create(
sandbox = Sandbox(
template=template_id, api_key=api_key, timeout=timeout
)
else:
sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
sandbox = Sandbox(api_key=api_key, timeout=timeout)
if not sandbox:
raise Exception("Sandbox not created")
# Running setup commands
for cmd in setup_commands:
await sandbox.commands.run(cmd)
sandbox.commands.run(cmd)
# Executing the code
execution = await sandbox.run_code(
execution = sandbox.run_code(
code,
language=language.value,
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
@@ -167,11 +167,11 @@ class CodeExecutionBlock(Block):
except Exception as e:
raise e
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
response, stdout_logs, stderr_logs = await self.execute_code(
response, stdout_logs, stderr_logs = self.execute_code(
input_data.code,
input_data.language,
input_data.setup_commands,
@@ -188,270 +188,3 @@ class CodeExecutionBlock(Block):
yield "stderr_logs", stderr_logs
except Exception as e:
yield "error", str(e)
class InstantiationBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.E2B], Literal["api_key"]
] = CredentialsField(
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
)
# TODO: Option to run command in background
setup_commands: list[str] = SchemaField(
description=(
"Shell commands to set up the sandbox before running the code. "
"You can use `curl` or `git` to install your desired Debian based "
"package manager. `pip` and `npm` are pre-installed.\n\n"
"These commands are executed with `sh`, in the foreground."
),
placeholder="pip install cowsay",
default_factory=list,
advanced=False,
)
setup_code: str = SchemaField(
description="Code to execute in the sandbox",
placeholder="print('Hello, World!')",
default="",
advanced=False,
)
language: ProgrammingLanguage = SchemaField(
description="Programming language to execute",
default=ProgrammingLanguage.PYTHON,
advanced=False,
)
timeout: int = SchemaField(
description="Execution timeout in seconds", default=300
)
template_id: str = SchemaField(
description=(
"You can use an E2B sandbox template by entering its ID here. "
"Check out the E2B docs for more details: "
"[E2B - Sandbox template](https://e2b.dev/docs/sandbox-template)"
),
default="",
advanced=True,
)
class Output(BlockSchema):
sandbox_id: str = SchemaField(description="ID of the sandbox instance")
response: str = SchemaField(description="Response from code execution")
stdout_logs: str = SchemaField(
description="Standard output logs from execution"
)
stderr_logs: str = SchemaField(description="Standard error logs from execution")
error: str = SchemaField(description="Error message if execution failed")
def __init__(self):
super().__init__(
id="ff0861c9-1726-4aec-9e5b-bf53f3622112",
description="Instantiate an isolated sandbox environment with internet access where to execute code in.",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=InstantiationBlock.Input,
output_schema=InstantiationBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"setup_code": "print('Hello World')",
"language": ProgrammingLanguage.PYTHON.value,
"setup_commands": [],
"timeout": 300,
"template_id": "",
},
test_output=[
("sandbox_id", str),
("response", "Hello World"),
("stdout_logs", "Hello World\n"),
],
test_mock={
"execute_code": lambda setup_code, language, setup_commands, timeout, api_key, template_id: (
"sandbox_id",
"Hello World",
"Hello World\n",
"",
),
},
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
sandbox_id, response, stdout_logs, stderr_logs = await self.execute_code(
input_data.setup_code,
input_data.language,
input_data.setup_commands,
input_data.timeout,
credentials.api_key.get_secret_value(),
input_data.template_id,
)
if sandbox_id:
yield "sandbox_id", sandbox_id
else:
yield "error", "Sandbox ID not found"
if response:
yield "response", response
if stdout_logs:
yield "stdout_logs", stdout_logs
if stderr_logs:
yield "stderr_logs", stderr_logs
except Exception as e:
yield "error", str(e)
async def execute_code(
self,
code: str,
language: ProgrammingLanguage,
setup_commands: list[str],
timeout: int,
api_key: str,
template_id: str,
):
try:
sandbox = None
if template_id:
sandbox = await AsyncSandbox.create(
template=template_id, api_key=api_key, timeout=timeout
)
else:
sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
if not sandbox:
raise Exception("Sandbox not created")
# Running setup commands
for cmd in setup_commands:
await sandbox.commands.run(cmd)
# Executing the code
execution = await sandbox.run_code(
code,
language=language.value,
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
)
if execution.error:
raise Exception(execution.error)
response = execution.text
stdout_logs = "".join(execution.logs.stdout)
stderr_logs = "".join(execution.logs.stderr)
return sandbox.sandbox_id, response, stdout_logs, stderr_logs
except Exception as e:
raise e
class StepExecutionBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.E2B], Literal["api_key"]
] = CredentialsField(
description="Enter your api key for the E2B Sandbox. You can get it in here - https://e2b.dev/docs",
)
sandbox_id: str = SchemaField(
description="ID of the sandbox instance to execute the code in",
advanced=False,
)
step_code: str = SchemaField(
description="Code to execute in the sandbox",
placeholder="print('Hello, World!')",
default="",
advanced=False,
)
language: ProgrammingLanguage = SchemaField(
description="Programming language to execute",
default=ProgrammingLanguage.PYTHON,
advanced=False,
)
class Output(BlockSchema):
response: str = SchemaField(description="Response from code execution")
stdout_logs: str = SchemaField(
description="Standard output logs from execution"
)
stderr_logs: str = SchemaField(description="Standard error logs from execution")
error: str = SchemaField(description="Error message if execution failed")
def __init__(self):
super().__init__(
id="82b59b8e-ea10-4d57-9161-8b169b0adba6",
description="Execute code in a previously instantiated sandbox environment.",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=StepExecutionBlock.Input,
output_schema=StepExecutionBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"sandbox_id": "sandbox_id",
"step_code": "print('Hello World')",
"language": ProgrammingLanguage.PYTHON.value,
},
test_output=[
("response", "Hello World"),
("stdout_logs", "Hello World\n"),
],
test_mock={
"execute_step_code": lambda sandbox_id, step_code, language, api_key: (
"Hello World",
"Hello World\n",
"",
),
},
)
async def execute_step_code(
self,
sandbox_id: str,
code: str,
language: ProgrammingLanguage,
api_key: str,
):
try:
sandbox = await AsyncSandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
if not sandbox:
raise Exception("Sandbox not found")
# Executing the code
execution = await sandbox.run_code(code, language=language.value)
if execution.error:
raise Exception(execution.error)
response = execution.text
stdout_logs = "".join(execution.logs.stdout)
stderr_logs = "".join(execution.logs.stderr)
return response, stdout_logs, stderr_logs
except Exception as e:
raise e
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
response, stdout_logs, stderr_logs = await self.execute_step_code(
input_data.sandbox_id,
input_data.step_code,
input_data.language,
credentials.api_key.get_secret_value(),
)
if response:
yield "response", response
if stdout_logs:
yield "stdout_logs", stdout_logs
if stderr_logs:
yield "stderr_logs", stderr_logs
except Exception as e:
yield "error", str(e)

View File

@@ -49,7 +49,7 @@ class CodeExtractionBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
# List of supported programming languages with mapped aliases
language_aliases = {
"html": ["html", "htm"],

View File

@@ -8,7 +8,6 @@ from backend.data.block import (
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.compass import CompassWebhookType
@@ -43,7 +42,7 @@ class CompassAITriggerBlock(Block):
input_schema=CompassAITriggerBlock.Input,
output_schema=CompassAITriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.COMPASS,
provider="compass",
webhook_type=CompassWebhookType.TRANSCRIPTION,
),
test_input=[
@@ -56,5 +55,5 @@ class CompassAITriggerBlock(Block):
# ],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription

View File

@@ -30,7 +30,7 @@ class WordCharacterCountBlock(Block):
test_output=[("word_count", 4), ("character_count", 19)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
text = input_data.text
word_count = len(text.split())

View File

@@ -34,7 +34,7 @@ class ReadCsvBlock(Block):
)
skip_columns: list[str] = SchemaField(
description="The columns to skip from the start of the row",
default_factory=list,
default=[],
)
class Output(BlockSchema):
@@ -69,7 +69,7 @@ class ReadCsvBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO

View File

@@ -34,6 +34,6 @@ This is a "quoted" string.""",
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
decoded_text = codecs.decode(input_data.text, "unicode_escape")
yield "decoded_text", decoded_text

View File

@@ -1,3 +1,4 @@
import asyncio
from typing import Literal
import aiohttp
@@ -73,11 +74,7 @@ class ReadDiscordMessagesBlock(Block):
("username", "test_user"),
],
test_mock={
"run_bot": lambda token: {
"output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
"channel_name": "general",
"username": "test_user",
}
"run_bot": lambda token: asyncio.Future() # Create a Future object for mocking
},
)
@@ -109,24 +106,37 @@ class ReadDiscordMessagesBlock(Block):
if attachment.filename.endswith((".txt", ".py")):
async with aiohttp.ClientSession() as session:
async with session.get(attachment.url) as response:
file_content = response.text()
file_content = await response.text()
self.output_data += f"\n\nFile from user: {attachment.filename}\nContent: {file_content}"
await client.close()
await client.start(token.get_secret_value())
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
async for output_name, output_value in self.__run(input_data, credentials):
yield output_name, output_value
while True:
for output_name, output_value in self.__run(input_data, credentials):
yield output_name, output_value
break
async def __run(
self, input_data: Input, credentials: APIKeyCredentials
) -> BlockOutput:
def __run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
try:
result = await self.run_bot(credentials.api_key)
loop = asyncio.get_event_loop()
future = self.run_bot(credentials.api_key)
# If it's a Future (mock), set the result
if isinstance(future, asyncio.Future):
future.set_result(
{
"output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
"channel_name": "general",
"username": "test_user",
}
)
result = loop.run_until_complete(future)
# For testing purposes, use the mocked result
if isinstance(result, dict):
@@ -180,7 +190,7 @@ class SendDiscordMessageBlock(Block):
},
test_output=[("status", "Message sent")],
test_mock={
"send_message": lambda token, channel_name, message_content: "Message sent"
"send_message": lambda token, channel_name, message_content: asyncio.Future()
},
test_credentials=TEST_CREDENTIALS,
)
@@ -212,16 +222,23 @@ class SendDiscordMessageBlock(Block):
"""Splits a message into chunks not exceeding the Discord limit."""
return [message[i : i + limit] for i in range(0, len(message), limit)]
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self.send_message(
loop = asyncio.get_event_loop()
future = self.send_message(
credentials.api_key.get_secret_value(),
input_data.channel_name,
input_data.message_content,
)
# If it's a Future (mock), set the result
if isinstance(future, asyncio.Future):
future.set_result("Message sent")
result = loop.run_until_complete(future)
# For testing purposes, use the mocked result
if isinstance(result, str):
self.output_data = result
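# Worked example of the chunk_message helper shown above (Discord caps
# messages at 2000 characters):
def chunk_message(message: str, limit: int = 2000) -> list[str]:
    return [message[i : i + limit] for i in range(0, len(message), limit)]

assert chunk_message("a" * 4500) == ["a" * 2000, "a" * 2000, "a" * 500]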

View File

@@ -1,53 +1,22 @@
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Literal
from pydantic import BaseModel, ConfigDict, SecretStr
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
SchemaField,
UserPasswordCredentials,
)
from backend.integrations.providers import ProviderName
TEST_CREDENTIALS = UserPasswordCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="smtp",
username=SecretStr("mock-smtp-username"),
password=SecretStr("mock-smtp-password"),
title="Mock SMTP credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
SMTPCredentials = UserPasswordCredentials
SMTPCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.SMTP],
Literal["user_password"],
]
from backend.data.model import BlockSecret, SchemaField, SecretField
def SMTPCredentialsField() -> SMTPCredentialsInput:
return CredentialsField(
description="The SMTP integration requires a username and password.",
)
class SMTPConfig(BaseModel):
class EmailCredentials(BaseModel):
smtp_server: str = SchemaField(
default="smtp.example.com", description="SMTP server address"
default="smtp.gmail.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
smtp_username: BlockSecret = SecretField(key="smtp_username")
smtp_password: BlockSecret = SecretField(key="smtp_password")
model_config = ConfigDict(title="SMTP Config")
model_config = ConfigDict(title="Email Credentials")
class SendEmailBlock(Block):
@@ -61,11 +30,10 @@ class SendEmailBlock(Block):
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
config: SMTPConfig = SchemaField(
description="SMTP Config",
default=SMTPConfig(),
creds: EmailCredentials = SchemaField(
description="SMTP credentials",
default=EmailCredentials(),
)
credentials: SMTPCredentialsInput = SMTPCredentialsField()
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
@@ -75,6 +43,7 @@ class SendEmailBlock(Block):
def __init__(self):
super().__init__(
disabled=True,
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
@@ -84,29 +53,25 @@ class SendEmailBlock(Block):
"to_email": "recipient@example.com",
"subject": "Test Email",
"body": "This is a test email.",
"config": {
"creds": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
"smtp_username": "your-email@gmail.com",
"smtp_password": "your-gmail-password",
},
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
config: SMTPConfig,
to_email: str,
subject: str,
body: str,
credentials: SMTPCredentials,
creds: EmailCredentials, to_email: str, subject: str, body: str
) -> str:
smtp_server = config.smtp_server
smtp_port = config.smtp_port
smtp_username = credentials.username.get_secret_value()
smtp_password = credentials.password.get_secret_value()
smtp_server = creds.smtp_server
smtp_port = creds.smtp_port
smtp_username = creds.smtp_username.get_secret_value()
smtp_password = creds.smtp_password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
@@ -121,13 +86,10 @@ class SendEmailBlock(Block):
return "Email sent successfully"
async def run(
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "status", self.send_email(
config=input_data.config,
to_email=input_data.to_email,
subject=input_data.subject,
body=input_data.body,
credentials=credentials,
input_data.creds,
input_data.to_email,
input_data.subject,
input_data.body,
)
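For reference, the middle of send_email is elided between the hunks above (the diff jumps from building msg to the return), so the transport below is an assumption based on common smtplib usage (STARTTLS on a plain connection); only the message construction is taken from the visible lines:

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def send_email_sketch(server, port, username, password, to_email, subject, body):
    msg = MIMEMultipart()
    msg["From"] = username
    msg["To"] = to_email          # assumed; the To/Subject lines are not in the hunk
    msg["Subject"] = subject
    msg.attach(MIMEText(body, "plain"))
    with smtplib.SMTP(server, port) as smtp:
        smtp.starttls()           # assumed: upgrade to TLS before authenticating
        smtp.login(username, password)
        smtp.sendmail(username, to_email, msg.as_string())
    return "Email sent successfully"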

View File

@@ -1,4 +1,4 @@
from typing import List
from typing import List, Optional
from pydantic import BaseModel
@@ -9,16 +9,16 @@ from backend.blocks.exa._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
text: Optional[dict] = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
highlights: Optional[dict] = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
@@ -27,7 +27,7 @@ class ContentRetrievalSettings(BaseModel):
},
advanced=True,
)
summary: dict = SchemaField(
summary: Optional[dict] = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
@@ -49,9 +49,8 @@ class ExaContentsBlock(Block):
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
default=[],
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
@@ -62,7 +61,7 @@ class ExaContentsBlock(Block):
output_schema=ExaContentsBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
@@ -79,8 +78,10 @@ class ExaContentsBlock(Block):
}
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []

View File

@@ -9,7 +9,7 @@ from backend.blocks.exa._auth import (
from backend.blocks.exa.helpers import ContentSettings
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class ExaSearchBlock(Block):
@@ -38,11 +38,11 @@ class ExaSearchBlock(Block):
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
default=[],
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
default=[],
advanced=True,
)
start_crawl_date: datetime = SchemaField(
@@ -59,12 +59,12 @@ class ExaSearchBlock(Block):
)
include_text: List[str] = SchemaField(
description="Text patterns to include",
default_factory=list,
default=[],
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude",
default_factory=list,
default=[],
advanced=True,
)
contents: ContentSettings = SchemaField(
@@ -76,10 +76,7 @@ class ExaSearchBlock(Block):
class Output(BlockSchema):
results: list = SchemaField(
description="List of search results",
default_factory=list,
)
error: str = SchemaField(
description="Error message if the request failed",
default=[],
)
def __init__(self):
@@ -91,7 +88,7 @@ class ExaSearchBlock(Block):
output_schema=ExaSearchBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/search"
@@ -136,9 +133,11 @@ class ExaSearchBlock(Block):
payload[api_field] = value
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
# Extract just the results array from the response
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []

View File

@@ -8,7 +8,7 @@ from backend.blocks.exa._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
from .helpers import ContentSettings
@@ -26,12 +26,12 @@ class ExaFindSimilarBlock(Block):
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
default=[],
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
default=[],
advanced=True,
)
start_crawl_date: datetime = SchemaField(
@@ -48,12 +48,12 @@ class ExaFindSimilarBlock(Block):
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default_factory=list,
default=[],
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default_factory=list,
default=[],
advanced=True,
)
contents: ContentSettings = SchemaField(
@@ -65,9 +65,8 @@ class ExaFindSimilarBlock(Block):
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
default=[],
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
@@ -78,7 +77,7 @@ class ExaFindSimilarBlock(Block):
output_schema=ExaFindSimilarBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
@@ -120,8 +119,10 @@ class ExaFindSimilarBlock(Block):
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []

View File

@@ -1,8 +1,10 @@
import asyncio
import logging
import time
from enum import Enum
from typing import Any
import httpx
from backend.blocks.fal._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -12,7 +14,6 @@ from backend.blocks.fal._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import ClientResponseError, Requests
logger = logging.getLogger(__name__)
@@ -20,7 +21,6 @@ logger = logging.getLogger(__name__)
class FalModel(str, Enum):
MOCHI = "fal-ai/mochi-v1"
LUMA = "fal-ai/luma-dream-machine"
VEO3 = "fal-ai/veo3"
class AIVideoGeneratorBlock(Block):
@@ -42,7 +42,7 @@ class AIVideoGeneratorBlock(Block):
description="Error message if video generation failed."
)
logs: list[str] = SchemaField(
description="Generation progress logs.",
description="Generation progress logs.", optional=True
)
def __init__(self):
@@ -65,37 +65,35 @@ class AIVideoGeneratorBlock(Block):
)
def _get_headers(self, api_key: str) -> dict[str, str]:
"""Get headers for FAL API Requests."""
"""Get headers for FAL API requests."""
return {
"Authorization": f"Key {api_key}",
"Content-Type": "application/json",
}
async def _submit_request(
def _submit_request(
self, url: str, headers: dict[str, str], data: dict[str, Any]
) -> dict[str, Any]:
"""Submit a request to the FAL API."""
try:
response = await Requests().post(url, headers=headers, json=data)
response = httpx.post(url, headers=headers, json=data)
response.raise_for_status()
return response.json()
except ClientResponseError as e:
except httpx.HTTPError as e:
logger.error(f"FAL API request failed: {str(e)}")
raise RuntimeError(f"Failed to submit request: {str(e)}")
async def _poll_status(
self, status_url: str, headers: dict[str, str]
) -> dict[str, Any]:
def _poll_status(self, status_url: str, headers: dict[str, str]) -> dict[str, Any]:
"""Poll the status endpoint until completion or failure."""
try:
response = await Requests().get(status_url, headers=headers)
response = httpx.get(status_url, headers=headers)
response.raise_for_status()
return response.json()
except ClientResponseError as e:
except httpx.HTTPError as e:
logger.error(f"Failed to get status: {str(e)}")
raise RuntimeError(f"Failed to get status: {str(e)}")
async def generate_video(
self, input_data: Input, credentials: FalCredentials
) -> str:
def generate_video(self, input_data: Input, credentials: FalCredentials) -> str:
"""Generate video using the specified FAL model."""
base_url = "https://queue.fal.run"
api_key = credentials.api_key.get_secret_value()
@@ -104,16 +102,13 @@ class AIVideoGeneratorBlock(Block):
# Submit generation request
submit_url = f"{base_url}/{input_data.model.value}"
submit_data = {"prompt": input_data.prompt}
if input_data.model == FalModel.VEO3:
submit_data["generate_audio"] = True # type: ignore
seen_logs = set()
try:
# Submit request to queue
submit_response = await Requests().post(
submit_url, headers=headers, json=submit_data
)
submit_response = httpx.post(submit_url, headers=headers, json=submit_data)
submit_response.raise_for_status()
request_data = submit_response.json()
# Get request_id and urls from initial response
@@ -124,23 +119,14 @@ class AIVideoGeneratorBlock(Block):
if not all([request_id, status_url, result_url]):
raise ValueError("Missing required data in submission response")
# Ensure status_url is a string
if not isinstance(status_url, str):
raise ValueError("Invalid status URL format")
# Ensure result_url is a string
if not isinstance(result_url, str):
raise ValueError("Invalid result URL format")
# Poll for status with exponential backoff
max_attempts = 30
attempt = 0
base_wait_time = 5
while attempt < max_attempts:
status_response = await Requests().get(
f"{status_url}?logs=1", headers=headers
)
status_response = httpx.get(f"{status_url}?logs=1", headers=headers)
status_response.raise_for_status()
status_data = status_response.json()
# Process new logs only
@@ -163,7 +149,8 @@ class AIVideoGeneratorBlock(Block):
status = status_data.get("status")
if status == "COMPLETED":
# Get the final result
result_response = await Requests().get(result_url, headers=headers)
result_response = httpx.get(result_url, headers=headers)
result_response.raise_for_status()
result_data = result_response.json()
if "video" not in result_data or not isinstance(
@@ -172,8 +159,8 @@ class AIVideoGeneratorBlock(Block):
raise ValueError("Invalid response format - missing video data")
video_url = result_data["video"].get("url")
if not video_url or not isinstance(video_url, str):
raise ValueError("No valid video URL in response")
if not video_url:
raise ValueError("No video URL in response")
return video_url
@@ -193,19 +180,19 @@ class AIVideoGeneratorBlock(Block):
logger.info(f"[FAL Generation] Status: Unknown status: {status}")
wait_time = min(base_wait_time * (2**attempt), 60) # Cap at 60 seconds
await asyncio.sleep(wait_time)
time.sleep(wait_time)
attempt += 1
raise RuntimeError("Maximum polling attempts reached")
except ClientResponseError as e:
except httpx.HTTPError as e:
raise RuntimeError(f"API request failed: {str(e)}")
async def run(
def run(
self, input_data: Input, *, credentials: FalCredentials, **kwargs
) -> BlockOutput:
try:
video_url = await self.generate_video(input_data, credentials)
video_url = self.generate_video(input_data, credentials)
yield "video_url", video_url
except Exception as e:
error_message = str(e)
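Both variants of generate_video above poll with capped exponential backoff (min(base_wait_time * 2**attempt, 60)); only the sleep primitive differs between the async and sync sides. The polling skeleton, extracted as a standalone sketch with an illustrative fetch_status callable:

import time

def poll_with_backoff(fetch_status, max_attempts=30, base_wait=5.0, cap=60.0):
    for attempt in range(max_attempts):
        status = fetch_status()
        if status == "COMPLETED":
            return status
        # Waits 5s, 10s, 20s, 40s, then stays capped at 60s per attempt.
        time.sleep(min(base_wait * (2 ** attempt), cap))
    raise RuntimeError("Maximum polling attempts reached")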

View File

@@ -1,174 +0,0 @@
from enum import Enum
from typing import Literal, Optional
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import MediaFileType
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="replicate",
api_key=SecretStr("mock-replicate-api-key"),
title="Mock Replicate API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class FluxKontextModelName(str, Enum):
PRO = "Flux Kontext Pro"
MAX = "Flux Kontext Max"
@property
def api_name(self) -> str:
return f"black-forest-labs/flux-kontext-{self.name.lower()}"
class AspectRatio(str, Enum):
MATCH_INPUT_IMAGE = "match_input_image"
ASPECT_1_1 = "1:1"
ASPECT_16_9 = "16:9"
ASPECT_9_16 = "9:16"
ASPECT_4_3 = "4:3"
ASPECT_3_4 = "3:4"
ASPECT_3_2 = "3:2"
ASPECT_2_3 = "2:3"
ASPECT_4_5 = "4:5"
ASPECT_5_4 = "5:4"
ASPECT_21_9 = "21:9"
ASPECT_9_21 = "9:21"
ASPECT_2_1 = "2:1"
ASPECT_1_2 = "1:2"
class AIImageEditorBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REPLICATE], Literal["api_key"]
] = CredentialsField(
description="Replicate API key with permissions for Flux Kontext models",
)
prompt: str = SchemaField(
description="Text instruction describing the desired edit",
title="Prompt",
)
input_image: Optional[MediaFileType] = SchemaField(
description="Reference image URI (jpeg, png, gif, webp)",
default=None,
title="Input Image",
)
aspect_ratio: AspectRatio = SchemaField(
description="Aspect ratio of the generated image",
default=AspectRatio.MATCH_INPUT_IMAGE,
title="Aspect Ratio",
advanced=False,
)
seed: Optional[int] = SchemaField(
description="Random seed. Set for reproducible generation",
default=None,
title="Seed",
advanced=True,
)
model: FluxKontextModelName = SchemaField(
description="Model variant to use",
default=FluxKontextModelName.PRO,
title="Model",
)
class Output(BlockSchema):
output_image: MediaFileType = SchemaField(
description="URL of the transformed image"
)
error: str = SchemaField(description="Error message if generation failed")
def __init__(self):
super().__init__(
id="3fd9c73d-4370-4925-a1ff-1b86b99fabfa",
description=(
"Edit images using BlackForest Labs' Flux Kontext models. Provide a prompt "
"and optional reference image to generate a modified image."
),
categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
input_schema=AIImageEditorBlock.Input,
output_schema=AIImageEditorBlock.Output,
test_input={
"prompt": "Add a hat to the cat",
"input_image": "https://example.com/cat.png",
"aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
"seed": None,
"model": FluxKontextModelName.PRO,
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
("output_image", "https://replicate.com/output/edited-image.png"),
],
test_mock={
"run_model": lambda *args, **kwargs: "https://replicate.com/output/edited-image.png",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(
self,
input_data: Input,
*,
credentials: APIKeyCredentials,
**kwargs,
) -> BlockOutput:
result = await self.run_model(
api_key=credentials.api_key,
model_name=input_data.model.api_name,
prompt=input_data.prompt,
input_image=input_data.input_image,
aspect_ratio=input_data.aspect_ratio.value,
seed=input_data.seed,
)
yield "output_image", result
async def run_model(
self,
api_key: SecretStr,
model_name: str,
prompt: str,
input_image: Optional[MediaFileType],
aspect_ratio: str,
seed: Optional[int],
) -> MediaFileType:
client = ReplicateClient(api_token=api_key.get_secret_value())
input_params = {
"prompt": prompt,
"input_image": input_image,
"aspect_ratio": aspect_ratio,
**({"seed": seed} if seed is not None else {}),
}
output: FileOutput | list[FileOutput] = await client.async_run( # type: ignore
model_name,
input=input_params,
wait=False,
)
if isinstance(output, list) and output:
output = output[0]
if isinstance(output, FileOutput):
return MediaFileType(output.url)
if isinstance(output, str):
return MediaFileType(output)
raise ValueError("No output received")
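In the deleted file above, FluxKontextModelName.api_name derives the Replicate model slug by lowercasing the enum member's name, so the mapping works out to:

# PRO -> "black-forest-labs/flux-kontext-pro"
# MAX -> "black-forest-labs/flux-kontext-max"
assert FluxKontextModelName.PRO.api_name == "black-forest-labs/flux-kontext-pro"
assert FluxKontextModelName.MAX.api_name == "black-forest-labs/flux-kontext-max"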

View File

@@ -1,51 +0,0 @@
from backend.data.block import (
Block,
BlockCategory,
BlockManualWebhookConfig,
BlockOutput,
BlockSchema,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks.generic import GenericWebhookType
class GenericWebhookTriggerBlock(Block):
class Input(BlockSchema):
payload: dict = SchemaField(hidden=True, default_factory=dict)
constants: dict = SchemaField(
description="The constants to be set when the block is put on the graph",
default_factory=dict,
)
class Output(BlockSchema):
payload: dict = SchemaField(
description="The complete webhook payload that was received from the generic webhook."
)
constants: dict = SchemaField(
description="The constants to be set when the block is put on the graph"
)
example_payload = {"message": "Hello, World!"}
def __init__(self):
super().__init__(
id="8fa8c167-2002-47ce-aba8-97572fc5d387",
description="This block will output the contents of the generic input for the webhook.",
categories={BlockCategory.INPUT},
input_schema=GenericWebhookTriggerBlock.Input,
output_schema=GenericWebhookTriggerBlock.Output,
webhook_config=BlockManualWebhookConfig(
provider=ProviderName.GENERIC_WEBHOOK,
webhook_type=GenericWebhookType.PLAIN,
),
test_input={"constants": {"key": "value"}, "payload": self.example_payload},
test_output=[
("constants", {"key": "value"}),
("payload", self.example_payload),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "constants", input_data.constants
yield "payload", input_data.payload

View File

@@ -1,30 +1,16 @@
from typing import overload
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import URL, Requests
from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests
@overload
def _convert_to_api_url(url: str) -> str: ...
@overload
def _convert_to_api_url(url: URL) -> URL: ...
def _convert_to_api_url(url: str | URL) -> str | URL:
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
if url_as_str := isinstance(url, str):
url = urlparse(url)
path_parts = url.path.strip("/").split("/")
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
@@ -39,73 +25,17 @@ def _convert_to_api_url(url: str | URL) -> str | URL:
else:
raise ValueError("Invalid GitHub URL format.")
return api_url if url_as_str else urlparse(api_url)
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
return {
"Authorization": credentials.auth_header(),
"Authorization": credentials.bearer(),
"Accept": "application/vnd.github.v3+json",
}
def convert_comment_url_to_api_endpoint(comment_url: str) -> str:
"""
Converts a GitHub comment URL (web interface) to the appropriate API endpoint URL.
Handles:
1. Issue/PR comments: #issuecomment-{id}
2. PR review comments: #discussion_r{id}
Returns the appropriate API endpoint path for the comment.
"""
# First, check if this is already an API URL
parsed_url = urlparse(comment_url)
if parsed_url.hostname == "api.github.com":
return comment_url
# Replace pull with issues for comment endpoints
if "/pull/" in comment_url:
comment_url = comment_url.replace("/pull/", "/issues/")
# Handle issue/PR comments (#issuecomment-xxx)
if "#issuecomment-" in comment_url:
base_url, comment_part = comment_url.split("#issuecomment-")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for issue comments
return (
f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{comment_id}"
)
# Handle PR review comments (#discussion_r)
elif "#discussion_r" in comment_url:
base_url, comment_part = comment_url.split("#discussion_r")
comment_id = comment_part
# Extract repo information from base URL
parsed_url = urlparse(base_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
# Construct API URL for PR review comments
return (
f"https://api.github.com/repos/{owner}/{repo}/pulls/comments/{comment_id}"
)
# If no specific comment identifiers are found, use the general URL conversion
return _convert_to_api_url(comment_url)
def get_api(
credentials: GithubCredentials | GithubFineGrainedAPICredentials,
convert_urls: bool = True,
) -> Requests:
def get_api(credentials: GithubCredentials, convert_urls: bool = True) -> Requests:
return Requests(
trusted_origins=["https://api.github.com", "https://github.com"],
extra_url_validator=_convert_to_api_url if convert_urls else None,
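Usage sketch for convert_comment_url_to_api_endpoint as defined on one side of this diff; the expected outputs follow directly from its two branches:

# Issue/PR conversation comment (#issuecomment-<id>):
convert_comment_url_to_api_endpoint(
    "https://github.com/owner/repo/pull/1#issuecomment-123456789"
)
# -> "https://api.github.com/repos/owner/repo/issues/comments/123456789"

# PR review (diff) comment (#discussion_r<id>):
convert_comment_url_to_api_endpoint(
    "https://github.com/owner/repo/pull/1#discussion_r987654321"
)
# -> "https://api.github.com/repos/owner/repo/pulls/comments/987654321"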

View File

@@ -22,11 +22,6 @@ GithubCredentialsInput = CredentialsMetaInput[
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
GithubFineGrainedAPICredentials = APIKeyCredentials
GithubFineGrainedAPICredentialsInput = CredentialsMetaInput[
Literal[ProviderName.GITHUB], Literal["api_key"]
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
@@ -42,16 +37,6 @@ def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
)
def GithubFineGrainedAPICredentialsField(
scope: str,
) -> GithubFineGrainedAPICredentialsInput:
return CredentialsField(
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
@@ -65,18 +50,3 @@ TEST_CREDENTIALS_INPUT = {
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
TEST_FINE_GRAINED_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_FINE_GRAINED_CREDENTIALS_INPUT = {
"provider": TEST_FINE_GRAINED_CREDENTIALS.provider,
"id": TEST_FINE_GRAINED_CREDENTIALS.id,
"type": TEST_FINE_GRAINED_CREDENTIALS.type,
"title": TEST_FINE_GRAINED_CREDENTIALS.type,
}

View File

@@ -1,360 +0,0 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from ._api import get_api
from ._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
GithubCredentials,
GithubCredentialsField,
GithubCredentialsInput,
)
# queued, in_progress, completed, waiting, requested, pending
class ChecksStatus(Enum):
QUEUED = "queued"
IN_PROGRESS = "in_progress"
COMPLETED = "completed"
WAITING = "waiting"
REQUESTED = "requested"
PENDING = "pending"
class ChecksConclusion(Enum):
SUCCESS = "success"
FAILURE = "failure"
NEUTRAL = "neutral"
CANCELLED = "cancelled"
TIMED_OUT = "timed_out"
ACTION_REQUIRED = "action_required"
SKIPPED = "skipped"
class GithubCreateCheckRunBlock(Block):
"""Block for creating a new check run on a GitHub repository."""
class Input(BlockSchema):
credentials: GithubCredentialsInput = GithubCredentialsField("repo:status")
repo_url: str = SchemaField(
description="URL of the GitHub repository",
placeholder="https://github.com/owner/repo",
)
name: str = SchemaField(
description="The name of the check run (e.g., 'code-coverage')",
)
head_sha: str = SchemaField(
description="The SHA of the commit to check",
)
status: ChecksStatus = SchemaField(
description="Current status of the check run",
default=ChecksStatus.QUEUED,
)
conclusion: Optional[ChecksConclusion] = SchemaField(
description="The final conclusion of the check (required if status is completed)",
default=None,
)
details_url: str = SchemaField(
description="The URL for the full details of the check",
default="",
)
output_title: str = SchemaField(
description="Title of the check run output",
default="",
)
output_summary: str = SchemaField(
description="Summary of the check run output",
default="",
)
output_text: str = SchemaField(
description="Detailed text of the check run output",
default="",
)
class Output(BlockSchema):
class CheckRunResult(BaseModel):
id: int
html_url: str
status: str
check_run: CheckRunResult = SchemaField(
description="Details of the created check run"
)
error: str = SchemaField(
description="Error message if check run creation failed"
)
def __init__(self):
super().__init__(
id="2f45e89a-3b7d-4f22-b89e-6c4f5c7e1234",
description="Creates a new check run for a specific commit in a GitHub repository",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=GithubCreateCheckRunBlock.Input,
output_schema=GithubCreateCheckRunBlock.Output,
test_input={
"repo_url": "https://github.com/owner/repo",
"name": "test-check",
"head_sha": "ce587453ced02b1526dfb4cb910479d431683101",
"status": ChecksStatus.COMPLETED.value,
"conclusion": ChecksConclusion.SUCCESS.value,
"output_title": "Test Results",
"output_summary": "All tests passed",
"credentials": TEST_CREDENTIALS_INPUT,
},
# requires a GitHub App, which is not available via OAuth in our current system
disabled=True,
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"check_run",
{
"id": 4,
"html_url": "https://github.com/owner/repo/runs/4",
"status": "completed",
},
),
],
test_mock={
"create_check_run": lambda *args, **kwargs: {
"id": 4,
"html_url": "https://github.com/owner/repo/runs/4",
"status": "completed",
}
},
)
@staticmethod
async def create_check_run(
credentials: GithubCredentials,
repo_url: str,
name: str,
head_sha: str,
status: ChecksStatus,
conclusion: Optional[ChecksConclusion] = None,
details_url: Optional[str] = None,
output_title: Optional[str] = None,
output_summary: Optional[str] = None,
output_text: Optional[str] = None,
) -> dict:
api = get_api(credentials)
class CheckRunData(BaseModel):
name: str
head_sha: str
status: str
conclusion: Optional[str] = None
details_url: Optional[str] = None
output: Optional[dict[str, str]] = None
data = CheckRunData(
name=name,
head_sha=head_sha,
status=status.value,
)
if conclusion:
data.conclusion = conclusion.value
if details_url:
data.details_url = details_url
if output_title or output_summary or output_text:
output_data = {
"title": output_title or "",
"summary": output_summary or "",
"text": output_text or "",
}
data.output = output_data
check_runs_url = f"{repo_url}/check-runs"
response = await api.post(
check_runs_url, data=data.model_dump_json(exclude_none=True)
)
result = response.json()
return {
"id": result["id"],
"html_url": result["html_url"],
"status": result["status"],
}
async def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
try:
result = await self.create_check_run(
credentials=credentials,
repo_url=input_data.repo_url,
name=input_data.name,
head_sha=input_data.head_sha,
status=input_data.status,
conclusion=input_data.conclusion,
details_url=input_data.details_url,
output_title=input_data.output_title,
output_summary=input_data.output_summary,
output_text=input_data.output_text,
)
yield "check_run", result
except Exception as e:
yield "error", str(e)
class GithubUpdateCheckRunBlock(Block):
"""Block for updating an existing check run on a GitHub repository."""
class Input(BlockSchema):
credentials: GithubCredentialsInput = GithubCredentialsField("repo:status")
repo_url: str = SchemaField(
description="URL of the GitHub repository",
placeholder="https://github.com/owner/repo",
)
check_run_id: int = SchemaField(
description="The ID of the check run to update",
)
status: ChecksStatus = SchemaField(
description="New status of the check run",
)
conclusion: ChecksConclusion = SchemaField(
description="The final conclusion of the check (required if status is completed)",
)
output_title: Optional[str] = SchemaField(
description="New title of the check run output",
default=None,
)
output_summary: Optional[str] = SchemaField(
description="New summary of the check run output",
default=None,
)
output_text: Optional[str] = SchemaField(
description="New detailed text of the check run output",
default=None,
)
class Output(BlockSchema):
class CheckRunResult(BaseModel):
id: int
html_url: str
status: str
conclusion: Optional[str]
check_run: CheckRunResult = SchemaField(
description="Details of the updated check run"
)
error: str = SchemaField(description="Error message if check run update failed")
def __init__(self):
super().__init__(
id="8a23c567-9d01-4e56-b789-0c12d3e45678", # Generated UUID
description="Updates an existing check run in a GitHub repository",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=GithubUpdateCheckRunBlock.Input,
output_schema=GithubUpdateCheckRunBlock.Output,
# requires a GitHub App, which is not available via OAuth in our current system
disabled=True,
test_input={
"repo_url": "https://github.com/owner/repo",
"check_run_id": 4,
"status": ChecksStatus.COMPLETED.value,
"conclusion": ChecksConclusion.SUCCESS.value,
"output_title": "Updated Results",
"output_summary": "All tests passed after retry",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"check_run",
{
"id": 4,
"html_url": "https://github.com/owner/repo/runs/4",
"status": "completed",
"conclusion": "success",
},
),
],
test_mock={
"update_check_run": lambda *args, **kwargs: {
"id": 4,
"html_url": "https://github.com/owner/repo/runs/4",
"status": "completed",
"conclusion": "success",
}
},
)
@staticmethod
async def update_check_run(
credentials: GithubCredentials,
repo_url: str,
check_run_id: int,
status: ChecksStatus,
conclusion: Optional[ChecksConclusion] = None,
output_title: Optional[str] = None,
output_summary: Optional[str] = None,
output_text: Optional[str] = None,
) -> dict:
api = get_api(credentials)
class UpdateCheckRunData(BaseModel):
status: str
conclusion: Optional[str] = None
output: Optional[dict[str, str]] = None
data = UpdateCheckRunData(
status=status.value,
)
if conclusion:
data.conclusion = conclusion.value
if output_title or output_summary or output_text:
output_data = {
"title": output_title or "",
"summary": output_summary or "",
"text": output_text or "",
}
data.output = output_data
check_run_url = f"{repo_url}/check-runs/{check_run_id}"
response = await api.patch(
check_run_url, data=data.model_dump_json(exclude_none=True)
)
result = response.json()
return {
"id": result["id"],
"html_url": result["html_url"],
"status": result["status"],
"conclusion": result.get("conclusion"),
}
async def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
try:
result = await self.update_check_run(
credentials=credentials,
repo_url=input_data.repo_url,
check_run_id=input_data.check_run_id,
status=input_data.status,
conclusion=input_data.conclusion,
output_title=input_data.output_title,
output_summary=input_data.output_summary,
output_text=input_data.output_text,
)
yield "check_run", result
except Exception as e:
yield "error", str(e)

View File

@@ -1,4 +1,3 @@
import logging
from urllib.parse import urlparse
from typing_extensions import TypedDict
@@ -6,7 +5,7 @@ from typing_extensions import TypedDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from ._api import convert_comment_url_to_api_endpoint, get_api
from ._api import get_api
from ._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -15,8 +14,6 @@ from ._auth import (
GithubCredentialsInput,
)
logger = logging.getLogger(__name__)
def is_github_url(url: str) -> bool:
return urlparse(url).netloc == "github.com"
@@ -80,7 +77,7 @@ class GithubCommentBlock(Block):
)
@staticmethod
async def post_comment(
def post_comment(
credentials: GithubCredentials, issue_url: str, body_text: str
) -> tuple[int, str]:
api = get_api(credentials)
@@ -88,18 +85,18 @@ class GithubCommentBlock(Block):
if "pull" in issue_url:
issue_url = issue_url.replace("pull", "issues")
comments_url = issue_url + "/comments"
response = await api.post(comments_url, json=data)
response = api.post(comments_url, json=data)
comment = response.json()
return comment["id"], comment["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
id, url = await self.post_comment(
id, url = self.post_comment(
credentials,
input_data.issue_url,
input_data.comment,
@@ -111,229 +108,6 @@ class GithubCommentBlock(Block):
# --8<-- [end:GithubCommentBlockExample]
class GithubUpdateCommentBlock(Block):
class Input(BlockSchema):
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
comment_url: str = SchemaField(
description="URL of the GitHub comment",
placeholder="https://github.com/owner/repo/issues/1#issuecomment-123456789",
default="",
advanced=False,
)
issue_url: str = SchemaField(
description="URL of the GitHub issue or pull request",
placeholder="https://github.com/owner/repo/issues/1",
default="",
)
comment_id: str = SchemaField(
description="ID of the GitHub comment",
placeholder="123456789",
default="",
)
comment: str = SchemaField(
description="Comment to update",
placeholder="Enter your comment",
)
class Output(BlockSchema):
id: int = SchemaField(description="ID of the updated comment")
url: str = SchemaField(description="URL to the comment on GitHub")
error: str = SchemaField(
description="Error message if the comment update failed"
)
def __init__(self):
super().__init__(
id="b3f4d747-10e3-4e69-8c51-f2be1d99c9a7",
description="This block updates a comment on a specified GitHub issue or pull request.",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=GithubUpdateCommentBlock.Input,
output_schema=GithubUpdateCommentBlock.Output,
test_input={
"comment_url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
"comment": "This is an updated comment.",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("id", 123456789),
(
"url",
"https://github.com/owner/repo/issues/1#issuecomment-123456789",
),
],
test_mock={
"update_comment": lambda *args, **kwargs: (
123456789,
"https://github.com/owner/repo/issues/1#issuecomment-123456789",
)
},
)
@staticmethod
async def update_comment(
credentials: GithubCredentials, comment_url: str, body_text: str
) -> tuple[int, str]:
api = get_api(credentials, convert_urls=False)
data = {"body": body_text}
url = convert_comment_url_to_api_endpoint(comment_url)
logger.info(url)
response = await api.patch(url, json=data)
comment = response.json()
return comment["id"], comment["html_url"]
async def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
if (
not input_data.comment_url
and input_data.comment_id
and input_data.issue_url
):
parsed_url = urlparse(input_data.issue_url)
path_parts = parsed_url.path.strip("/").split("/")
owner, repo = path_parts[0], path_parts[1]
input_data.comment_url = f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{input_data.comment_id}"
elif (
not input_data.comment_url
and not input_data.comment_id
and input_data.issue_url
):
raise ValueError(
"Must provide either comment_url or comment_id and issue_url"
)
id, url = await self.update_comment(
credentials,
input_data.comment_url,
input_data.comment,
)
yield "id", id
yield "url", url
class GithubListCommentsBlock(Block):
class Input(BlockSchema):
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
issue_url: str = SchemaField(
description="URL of the GitHub issue or pull request",
placeholder="https://github.com/owner/repo/issues/1",
)
class Output(BlockSchema):
class CommentItem(TypedDict):
id: int
body: str
user: str
url: str
comment: CommentItem = SchemaField(
title="Comment", description="Comments with their ID, body, user, and URL"
)
comments: list[CommentItem] = SchemaField(
description="List of comments with their ID, body, user, and URL"
)
error: str = SchemaField(description="Error message if listing comments failed")
def __init__(self):
super().__init__(
id="c4b5fb63-0005-4a11-b35a-0c2467bd6b59",
description="This block lists all comments for a specified GitHub issue or pull request.",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=GithubListCommentsBlock.Input,
output_schema=GithubListCommentsBlock.Output,
test_input={
"issue_url": "https://github.com/owner/repo/issues/1",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"comment",
{
"id": 123456789,
"body": "This is a test comment.",
"user": "test_user",
"url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
},
),
(
"comments",
[
{
"id": 123456789,
"body": "This is a test comment.",
"user": "test_user",
"url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
}
],
),
],
test_mock={
"list_comments": lambda *args, **kwargs: [
{
"id": 123456789,
"body": "This is a test comment.",
"user": "test_user",
"url": "https://github.com/owner/repo/issues/1#issuecomment-123456789",
}
]
},
)
@staticmethod
async def list_comments(
credentials: GithubCredentials, issue_url: str
) -> list[Output.CommentItem]:
parsed_url = urlparse(issue_url)
path_parts = parsed_url.path.strip("/").split("/")
owner = path_parts[0]
repo = path_parts[1]
# GitHub API uses 'issues' for both issues and pull requests when it comes to comments
issue_number = path_parts[3] # Whether 'issues/123' or 'pull/123'
# Construct the proper API URL directly
api_url = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}/comments"
# Set convert_urls=False since we're already providing an API URL
api = get_api(credentials, convert_urls=False)
response = await api.get(api_url)
comments = response.json()
parsed_comments: list[GithubListCommentsBlock.Output.CommentItem] = [
{
"id": comment["id"],
"body": comment["body"],
"user": comment["user"]["login"],
"url": comment["html_url"],
}
for comment in comments
]
return parsed_comments
async def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
comments = await self.list_comments(
credentials,
input_data.issue_url,
)
for comment in comments:
yield "comment", comment
yield "comments", comments
class GithubMakeIssueBlock(Block):
class Input(BlockSchema):
credentials: GithubCredentialsInput = GithubCredentialsField("repo")
@@ -382,24 +156,24 @@ class GithubMakeIssueBlock(Block):
)
@staticmethod
async def create_issue(
def create_issue(
credentials: GithubCredentials, repo_url: str, title: str, body: str
) -> tuple[int, str]:
api = get_api(credentials)
data = {"title": title, "body": body}
issues_url = repo_url + "/issues"
response = await api.post(issues_url, json=data)
response = api.post(issues_url, json=data)
issue = response.json()
return issue["number"], issue["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
number, url = await self.create_issue(
number, url = self.create_issue(
credentials,
input_data.repo_url,
input_data.title,
@@ -452,25 +226,25 @@ class GithubReadIssueBlock(Block):
)
@staticmethod
async def read_issue(
def read_issue(
credentials: GithubCredentials, issue_url: str
) -> tuple[str, str, str]:
api = get_api(credentials)
response = await api.get(issue_url)
response = api.get(issue_url)
data = response.json()
title = data.get("title", "No title found")
body = data.get("body", "No body content found")
user = data.get("user", {}).get("login", "No user found")
return title, body, user
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
title, body, user = await self.read_issue(
title, body, user = self.read_issue(
credentials,
input_data.issue_url,
)
@@ -532,30 +306,30 @@ class GithubListIssuesBlock(Block):
)
@staticmethod
async def list_issues(
def list_issues(
credentials: GithubCredentials, repo_url: str
) -> list[Output.IssueItem]:
api = get_api(credentials)
issues_url = repo_url + "/issues"
response = await api.get(issues_url)
response = api.get(issues_url)
data = response.json()
issues: list[GithubListIssuesBlock.Output.IssueItem] = [
{"title": issue["title"], "url": issue["html_url"]} for issue in data
]
return issues
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
for issue in await self.list_issues(
issues = self.list_issues(
credentials,
input_data.repo_url,
):
yield "issue", issue
)
yield from (("issue", issue) for issue in issues)
class GithubAddLabelBlock(Block):
@@ -594,23 +368,21 @@ class GithubAddLabelBlock(Block):
)
@staticmethod
async def add_label(
credentials: GithubCredentials, issue_url: str, label: str
) -> str:
def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
api = get_api(credentials)
data = {"labels": [label]}
labels_url = issue_url + "/labels"
await api.post(labels_url, json=data)
api.post(labels_url, json=data)
return "Label added successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.add_label(
status = self.add_label(
credentials,
input_data.issue_url,
input_data.label,
@@ -656,22 +428,20 @@ class GithubRemoveLabelBlock(Block):
)
@staticmethod
async def remove_label(
credentials: GithubCredentials, issue_url: str, label: str
) -> str:
def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
api = get_api(credentials)
label_url = issue_url + f"/labels/{label}"
await api.delete(label_url)
api.delete(label_url)
return "Label removed successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.remove_label(
status = self.remove_label(
credentials,
input_data.issue_url,
input_data.label,
@@ -719,7 +489,7 @@ class GithubAssignIssueBlock(Block):
)
@staticmethod
async def assign_issue(
def assign_issue(
credentials: GithubCredentials,
issue_url: str,
assignee: str,
@@ -727,17 +497,17 @@ class GithubAssignIssueBlock(Block):
api = get_api(credentials)
assignees_url = issue_url + "/assignees"
data = {"assignees": [assignee]}
await api.post(assignees_url, json=data)
api.post(assignees_url, json=data)
return "Issue assigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.assign_issue(
status = self.assign_issue(
credentials,
input_data.issue_url,
input_data.assignee,
@@ -785,7 +555,7 @@ class GithubUnassignIssueBlock(Block):
)
@staticmethod
async def unassign_issue(
def unassign_issue(
credentials: GithubCredentials,
issue_url: str,
assignee: str,
@@ -793,17 +563,17 @@ class GithubUnassignIssueBlock(Block):
api = get_api(credentials)
assignees_url = issue_url + "/assignees"
data = {"assignees": [assignee]}
await api.delete(assignees_url, json=data)
api.delete(assignees_url, json=data)
return "Issue unassigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.unassign_issue(
status = self.unassign_issue(
credentials,
input_data.issue_url,
input_data.assignee,
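In the removed GithubUpdateCommentBlock above, run() derives an API URL when only issue_url and comment_id are supplied; as a standalone helper, that derivation is:

from urllib.parse import urlparse

def comment_api_url(issue_url: str, comment_id: str) -> str:
    # Mirrors the fallback in GithubUpdateCommentBlock.run above.
    path_parts = urlparse(issue_url).path.strip("/").split("/")
    owner, repo = path_parts[0], path_parts[1]
    return f"https://api.github.com/repos/{owner}/{repo}/issues/comments/{comment_id}"

print(comment_api_url("https://github.com/owner/repo/issues/1", "123456789"))
# https://api.github.com/repos/owner/repo/issues/comments/123456789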

View File

@@ -65,31 +65,28 @@ class GithubListPullRequestsBlock(Block):
)
@staticmethod
async def list_prs(
credentials: GithubCredentials, repo_url: str
) -> list[Output.PRItem]:
def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]:
api = get_api(credentials)
pulls_url = repo_url + "/pulls"
response = await api.get(pulls_url)
response = api.get(pulls_url)
data = response.json()
pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [
{"title": pr["title"], "url": pr["html_url"]} for pr in data
]
return pull_requests
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
pull_requests = await self.list_prs(
pull_requests = self.list_prs(
credentials,
input_data.repo_url,
)
for pr in pull_requests:
yield "pull_request", pr
yield from (("pull_request", pr) for pr in pull_requests)
class GithubMakePullRequestBlock(Block):
@@ -156,7 +153,7 @@ class GithubMakePullRequestBlock(Block):
)
@staticmethod
async def create_pr(
def create_pr(
credentials: GithubCredentials,
repo_url: str,
title: str,
@@ -167,11 +164,11 @@ class GithubMakePullRequestBlock(Block):
api = get_api(credentials)
pulls_url = repo_url + "/pulls"
data = {"title": title, "body": body, "head": head, "base": base}
response = await api.post(pulls_url, json=data)
response = api.post(pulls_url, json=data)
pr_data = response.json()
return pr_data["number"], pr_data["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
@@ -179,7 +176,7 @@ class GithubMakePullRequestBlock(Block):
**kwargs,
) -> BlockOutput:
try:
number, url = await self.create_pr(
number, url = self.create_pr(
credentials,
input_data.repo_url,
input_data.title,
@@ -203,7 +200,6 @@ class GithubReadPullRequestBlock(Block):
include_pr_changes: bool = SchemaField(
description="Whether to include the changes made in the pull request",
default=False,
advanced=False,
)
class Output(BlockSchema):
@@ -245,39 +241,39 @@ class GithubReadPullRequestBlock(Block):
)
@staticmethod
async def read_pr(
credentials: GithubCredentials, pr_url: str
) -> tuple[str, str, str]:
def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]:
api = get_api(credentials)
# Adjust the URL to access the issue endpoint for PR metadata
issue_url = pr_url.replace("/pull/", "/issues/")
response = await api.get(issue_url)
response = api.get(issue_url)
data = response.json()
title = data.get("title", "No title found")
body = data.get("body", "No body content found")
author = data.get("user", {}).get("login", "Unknown author")
author = data.get("user", {}).get("login", "No user found")
return title, body, author
@staticmethod
async def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
api = get_api(credentials)
files_url = prepare_pr_api_url(pr_url=pr_url, path="files")
response = await api.get(files_url)
response = api.get(files_url)
files = response.json()
changes = []
for file in files:
filename = file.get("filename", "")
status = file.get("status", "")
changes.append(f"{filename}: {status}")
return "\n".join(changes)
filename = file.get("filename")
patch = file.get("patch")
if filename and patch:
changes.append(f"File: {filename}\n{patch}")
return "\n\n".join(changes)
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
title, body, author = await self.read_pr(
title, body, author = self.read_pr(
credentials,
input_data.pr_url,
)
@@ -286,7 +282,7 @@ class GithubReadPullRequestBlock(Block):
yield "author", author
if input_data.include_pr_changes:
changes = await self.read_pr_changes(
changes = self.read_pr_changes(
credentials,
input_data.pr_url,
)
@@ -333,16 +329,16 @@ class GithubAssignPRReviewerBlock(Block):
)
@staticmethod
async def assign_reviewer(
def assign_reviewer(
credentials: GithubCredentials, pr_url: str, reviewer: str
) -> str:
api = get_api(credentials)
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
data = {"reviewers": [reviewer]}
await api.post(reviewers_url, json=data)
api.post(reviewers_url, json=data)
return "Reviewer assigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
@@ -350,7 +346,7 @@ class GithubAssignPRReviewerBlock(Block):
**kwargs,
) -> BlockOutput:
try:
status = await self.assign_reviewer(
status = self.assign_reviewer(
credentials,
input_data.pr_url,
input_data.reviewer,
@@ -400,16 +396,16 @@ class GithubUnassignPRReviewerBlock(Block):
)
@staticmethod
async def unassign_reviewer(
def unassign_reviewer(
credentials: GithubCredentials, pr_url: str, reviewer: str
) -> str:
api = get_api(credentials)
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
data = {"reviewers": [reviewer]}
await api.delete(reviewers_url, json=data)
api.delete(reviewers_url, json=data)
return "Reviewer unassigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
@@ -417,7 +413,7 @@ class GithubUnassignPRReviewerBlock(Block):
**kwargs,
) -> BlockOutput:
try:
status = await self.unassign_reviewer(
status = self.unassign_reviewer(
credentials,
input_data.pr_url,
input_data.reviewer,
@@ -480,12 +476,12 @@ class GithubListPRReviewersBlock(Block):
)
@staticmethod
async def list_reviewers(
def list_reviewers(
credentials: GithubCredentials, pr_url: str
) -> list[Output.ReviewerItem]:
api = get_api(credentials)
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
response = await api.get(reviewers_url)
response = api.get(reviewers_url)
data = response.json()
reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [
{"username": reviewer["login"], "url": reviewer["html_url"]}
@@ -493,18 +489,18 @@ class GithubListPRReviewersBlock(Block):
]
return reviewers
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
for reviewer in await self.list_reviewers(
reviewers = self.list_reviewers(
credentials,
input_data.pr_url,
):
yield "reviewer", reviewer
)
yield from (("reviewer", reviewer) for reviewer in reviewers)
def prepare_pr_api_url(pr_url: str, path: str) -> str:

View File

@@ -65,12 +65,12 @@ class GithubListTagsBlock(Block):
)
@staticmethod
async def list_tags(
def list_tags(
credentials: GithubCredentials, repo_url: str
) -> list[Output.TagItem]:
api = get_api(credentials)
tags_url = repo_url + "/tags"
response = await api.get(tags_url)
response = api.get(tags_url)
data = response.json()
repo_path = repo_url.replace("https://github.com/", "")
tags: list[GithubListTagsBlock.Output.TagItem] = [
@@ -82,19 +82,18 @@ class GithubListTagsBlock(Block):
]
return tags
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
tags = await self.list_tags(
tags = self.list_tags(
credentials,
input_data.repo_url,
)
for tag in tags:
yield "tag", tag
yield from (("tag", tag) for tag in tags)
class GithubListBranchesBlock(Block):
@@ -148,12 +147,12 @@ class GithubListBranchesBlock(Block):
)
@staticmethod
async def list_branches(
def list_branches(
credentials: GithubCredentials, repo_url: str
) -> list[Output.BranchItem]:
api = get_api(credentials)
branches_url = repo_url + "/branches"
response = await api.get(branches_url)
response = api.get(branches_url)
data = response.json()
repo_path = repo_url.replace("https://github.com/", "")
branches: list[GithubListBranchesBlock.Output.BranchItem] = [
@@ -165,19 +164,18 @@ class GithubListBranchesBlock(Block):
]
return branches
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
branches = await self.list_branches(
branches = self.list_branches(
credentials,
input_data.repo_url,
)
for branch in branches:
yield "branch", branch
yield from (("branch", branch) for branch in branches)
class GithubListDiscussionsBlock(Block):
@@ -236,7 +234,7 @@ class GithubListDiscussionsBlock(Block):
)
@staticmethod
async def list_discussions(
def list_discussions(
credentials: GithubCredentials, repo_url: str, num_discussions: int
) -> list[Output.DiscussionItem]:
api = get_api(credentials)
@@ -256,7 +254,7 @@ class GithubListDiscussionsBlock(Block):
}
"""
variables = {"owner": owner, "repo": repo, "num": num_discussions}
response = await api.post(
response = api.post(
"https://api.github.com/graphql",
json={"query": query, "variables": variables},
)
@@ -267,20 +265,17 @@ class GithubListDiscussionsBlock(Block):
]
return discussions
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
discussions = await self.list_discussions(
credentials,
input_data.repo_url,
input_data.num_discussions,
discussions = self.list_discussions(
credentials, input_data.repo_url, input_data.num_discussions
)
for discussion in discussions:
yield "discussion", discussion
yield from (("discussion", discussion) for discussion in discussions)
class GithubListReleasesBlock(Block):
@@ -334,31 +329,30 @@ class GithubListReleasesBlock(Block):
)
@staticmethod
async def list_releases(
def list_releases(
credentials: GithubCredentials, repo_url: str
) -> list[Output.ReleaseItem]:
api = get_api(credentials)
releases_url = repo_url + "/releases"
response = await api.get(releases_url)
response = api.get(releases_url)
data = response.json()
releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [
{"name": release["name"], "url": release["html_url"]} for release in data
]
return releases
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
releases = await self.list_releases(
releases = self.list_releases(
credentials,
input_data.repo_url,
)
for release in releases:
yield "release", release
yield from (("release", release) for release in releases)
class GithubReadFileBlock(Block):
@@ -411,40 +405,40 @@ class GithubReadFileBlock(Block):
)
@staticmethod
async def read_file(
def read_file(
credentials: GithubCredentials, repo_url: str, file_path: str, branch: str
) -> tuple[str, int]:
api = get_api(credentials)
content_url = repo_url + f"/contents/{file_path}?ref={branch}"
response = await api.get(content_url)
data = response.json()
response = api.get(content_url)
content = response.json()
if isinstance(data, list):
if isinstance(content, list):
# Multiple entries of different types exist at this path
if not (file := next((f for f in data if f["type"] == "file"), None)):
if not (file := next((f for f in content if f["type"] == "file"), None)):
raise TypeError("Not a file")
data = file
content = file
if data["type"] != "file":
if content["type"] != "file":
raise TypeError("Not a file")
return data["content"], data["size"]
return content["content"], content["size"]
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
content, size = await self.read_file(
raw_content, size = self.read_file(
credentials,
input_data.repo_url,
input_data.file_path,
input_data.file_path.lstrip("/"),
input_data.branch,
)
yield "raw_content", content
yield "text_content", base64.b64decode(content).decode("utf-8")
yield "raw_content", raw_content
yield "text_content", base64.b64decode(raw_content).decode("utf-8")
yield "size", size
@@ -521,55 +515,52 @@ class GithubReadFolderBlock(Block):
)
@staticmethod
async def read_folder(
def read_folder(
credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str
) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{folder_path}?ref={branch}"
response = await api.get(contents_url)
data = response.json()
response = api.get(contents_url)
content = response.json()
if not isinstance(data, list):
if not isinstance(content, list):
raise TypeError("Not a folder")
files: list[GithubReadFolderBlock.Output.FileEntry] = [
files = [
GithubReadFolderBlock.Output.FileEntry(
name=entry["name"],
path=entry["path"],
size=entry["size"],
)
for entry in data
for entry in content
if entry["type"] == "file"
]
dirs: list[GithubReadFolderBlock.Output.DirEntry] = [
dirs = [
GithubReadFolderBlock.Output.DirEntry(
name=entry["name"],
path=entry["path"],
)
for entry in data
for entry in content
if entry["type"] == "dir"
]
return files, dirs
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
files, dirs = await self.read_folder(
files, dirs = self.read_folder(
credentials,
input_data.repo_url,
input_data.folder_path.lstrip("/"),
input_data.branch,
)
for file in files:
yield "file", file
for dir in dirs:
yield "dir", dir
yield from (("file", file) for file in files)
yield from (("dir", dir) for dir in dirs)
class GithubMakeBranchBlock(Block):
@@ -615,35 +606,32 @@ class GithubMakeBranchBlock(Block):
)
@staticmethod
async def create_branch(
def create_branch(
credentials: GithubCredentials,
repo_url: str,
new_branch: str,
source_branch: str,
) -> str:
api = get_api(credentials)
# Get the SHA of the source branch
ref_url = repo_url + f"/git/refs/heads/{source_branch}"
response = await api.get(ref_url)
data = response.json()
sha = data["object"]["sha"]
response = api.get(ref_url)
sha = response.json()["object"]["sha"]
# Create the new branch
new_ref_url = repo_url + "/git/refs"
data = {
"ref": f"refs/heads/{new_branch}",
"sha": sha,
}
response = await api.post(new_ref_url, json=data)
create_ref_url = repo_url + "/git/refs"
data = {"ref": f"refs/heads/{new_branch}", "sha": sha}
response = api.post(create_ref_url, json=data)
return "Branch created successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.create_branch(
status = self.create_branch(
credentials,
input_data.repo_url,
input_data.new_branch,
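The branch creation above is a two-step dance against the Git refs endpoints: read the source ref's SHA, then create a new ref pointing at it. A hedged sketch (repo_api_url is assumed to be the API form, e.g. https://api.github.com/repos/{owner}/{repo}, and session a pre-authenticated requests.Session):

def create_branch(session, repo_api_url: str, new_branch: str, source_branch: str) -> None:
    # Step 1: resolve the source branch to a commit SHA.
    ref = session.get(f"{repo_api_url}/git/refs/heads/{source_branch}").json()
    sha = ref["object"]["sha"]
    # Step 2: create the new ref pointing at that SHA.
    session.post(
        f"{repo_api_url}/git/refs",
        json={"ref": f"refs/heads/{new_branch}", "sha": sha},
    )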
@@ -690,22 +678,22 @@ class GithubDeleteBranchBlock(Block):
)
@staticmethod
async def delete_branch(
def delete_branch(
credentials: GithubCredentials, repo_url: str, branch: str
) -> str:
api = get_api(credentials)
ref_url = repo_url + f"/git/refs/heads/{branch}"
await api.delete(ref_url)
api.delete(ref_url)
return "Branch deleted successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.delete_branch(
status = self.delete_branch(
credentials,
input_data.repo_url,
input_data.branch,
@@ -773,7 +761,7 @@ class GithubCreateFileBlock(Block):
)
@staticmethod
async def create_file(
def create_file(
credentials: GithubCredentials,
repo_url: str,
file_path: str,
@@ -782,18 +770,23 @@ class GithubCreateFileBlock(Block):
commit_message: str,
) -> tuple[str, str]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{file_path}"
content_base64 = base64.b64encode(content.encode()).decode()
# Convert content to base64
content_bytes = content.encode("utf-8")
content_base64 = base64.b64encode(content_bytes).decode("utf-8")
# Create the file using the GitHub API
contents_url = f"{repo_url}/contents/{file_path}"
data = {
"message": commit_message,
"content": content_base64,
"branch": branch,
}
response = await api.put(contents_url, json=data)
data = response.json()
return data["content"]["html_url"], data["commit"]["sha"]
response = api.put(contents_url, json=data)
result = response.json()
async def run(
return result["content"]["html_url"], result["commit"]["sha"]
def run(
self,
input_data: Input,
*,
@@ -801,7 +794,7 @@ class GithubCreateFileBlock(Block):
**kwargs,
) -> BlockOutput:
try:
url, sha = await self.create_file(
url, sha = self.create_file(
credentials,
input_data.repo_url,
input_data.file_path,
@@ -873,7 +866,7 @@ class GithubUpdateFileBlock(Block):
)
@staticmethod
async def update_file(
def update_file(
credentials: GithubCredentials,
repo_url: str,
file_path: str,
@@ -882,24 +875,30 @@ class GithubUpdateFileBlock(Block):
commit_message: str,
) -> tuple[str, str]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{file_path}"
# First get the current file to get its SHA
contents_url = f"{repo_url}/contents/{file_path}"
params = {"ref": branch}
response = await api.get(contents_url, params=params)
data = response.json()
response = api.get(contents_url, params=params)
current_file = response.json()
# Convert new content to base64
content_base64 = base64.b64encode(content.encode()).decode()
content_bytes = content.encode("utf-8")
content_base64 = base64.b64encode(content_bytes).decode("utf-8")
# Update the file
data = {
"message": commit_message,
"content": content_base64,
"sha": data["sha"],
"sha": current_file["sha"],
"branch": branch,
}
response = await api.put(contents_url, json=data)
data = response.json()
return data["content"]["html_url"], data["commit"]["sha"]
response = api.put(contents_url, json=data)
result = response.json()
async def run(
return result["content"]["html_url"], result["commit"]["sha"]
def run(
self,
input_data: Input,
*,
@@ -907,7 +906,7 @@ class GithubUpdateFileBlock(Block):
**kwargs,
) -> BlockOutput:
try:
url, sha = await self.update_file(
url, sha = self.update_file(
credentials,
input_data.repo_url,
input_data.file_path,
@@ -982,7 +981,7 @@ class GithubCreateRepositoryBlock(Block):
)
@staticmethod
async def create_repository(
def create_repository(
credentials: GithubCredentials,
name: str,
description: str,
@@ -990,19 +989,24 @@ class GithubCreateRepositoryBlock(Block):
auto_init: bool,
gitignore_template: str,
) -> tuple[str, str]:
api = get_api(credentials)
api = get_api(credentials, convert_urls=False) # Disable URL conversion
data = {
"name": name,
"description": description,
"private": private,
"auto_init": auto_init,
"gitignore_template": gitignore_template,
}
response = await api.post("https://api.github.com/user/repos", json=data)
data = response.json()
return data["html_url"], data["clone_url"]
async def run(
if gitignore_template:
data["gitignore_template"] = gitignore_template
# Create repository using the user endpoint
response = api.post("https://api.github.com/user/repos", json=data)
result = response.json()
return result["html_url"], result["clone_url"]
def run(
self,
input_data: Input,
*,
@@ -1010,7 +1014,7 @@ class GithubCreateRepositoryBlock(Block):
**kwargs,
) -> BlockOutput:
try:
url, clone_url = await self.create_repository(
url, clone_url = self.create_repository(
credentials,
input_data.name,
input_data.description,
@@ -1077,13 +1081,17 @@ class GithubListStargazersBlock(Block):
)
@staticmethod
async def list_stargazers(
def list_stargazers(
credentials: GithubCredentials, repo_url: str
) -> list[Output.StargazerItem]:
api = get_api(credentials)
stargazers_url = repo_url + "/stargazers"
response = await api.get(stargazers_url)
# Add /stargazers to the repo URL to get stargazers endpoint
stargazers_url = f"{repo_url}/stargazers"
# Set accept header to get starred_at timestamp
headers = {"Accept": "application/vnd.github.star+json"}
response = api.get(stargazers_url, headers=headers)
data = response.json()
stargazers: list[GithubListStargazersBlock.Output.StargazerItem] = [
{
"username": stargazer["login"],
@@ -1093,16 +1101,18 @@ class GithubListStargazersBlock(Block):
]
return stargazers
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
stargazers = await self.list_stargazers(
credentials,
input_data.repo_url,
)
for stargazer in stargazers:
yield "stargazer", stargazer
try:
stargazers = self.list_stargazers(
credentials,
input_data.repo_url,
)
yield from (("stargazer", stargazer) for stargazer in stargazers)
except Exception as e:
yield "error", str(e)

View File

@@ -1,182 +0,0 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from ._api import get_api
from ._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
GithubFineGrainedAPICredentials,
GithubFineGrainedAPICredentialsField,
GithubFineGrainedAPICredentialsInput,
)
class StatusState(Enum):
ERROR = "error"
FAILURE = "failure"
PENDING = "pending"
SUCCESS = "success"
class GithubCreateStatusBlock(Block):
"""Block for creating a commit status on a GitHub repository."""
class Input(BlockSchema):
credentials: GithubFineGrainedAPICredentialsInput = (
GithubFineGrainedAPICredentialsField("repo:status")
)
repo_url: str = SchemaField(
description="URL of the GitHub repository",
placeholder="https://github.com/owner/repo",
)
sha: str = SchemaField(
description="The SHA of the commit to set status for",
)
state: StatusState = SchemaField(
description="The state of the status (error, failure, pending, success)",
)
target_url: Optional[str] = SchemaField(
description="URL with additional details about this status",
default=None,
)
description: Optional[str] = SchemaField(
description="Short description of the status",
default=None,
)
check_name: Optional[str] = SchemaField(
description="Label to differentiate this status from others",
default="AutoGPT Platform Checks",
advanced=False,
)
class Output(BlockSchema):
class StatusResult(BaseModel):
id: int
url: str
state: str
context: str
description: Optional[str]
target_url: Optional[str]
created_at: str
updated_at: str
status: StatusResult = SchemaField(description="Details of the created status")
error: str = SchemaField(description="Error message if status creation failed")
def __init__(self):
super().__init__(
id="3d67f123-a4b5-4c89-9d01-2e34f5c67890", # Generated UUID
description="Creates a new commit status in a GitHub repository",
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=GithubCreateStatusBlock.Input,
output_schema=GithubCreateStatusBlock.Output,
test_input={
"repo_url": "https://github.com/owner/repo",
"sha": "ce587453ced02b1526dfb4cb910479d431683101",
"state": StatusState.SUCCESS.value,
"target_url": "https://example.com/build/status",
"description": "The build succeeded!",
"check_name": "continuous-integration/jenkins",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"status",
{
"id": 1234567890,
"url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101",
"state": "success",
"context": "continuous-integration/jenkins",
"description": "The build succeeded!",
"target_url": "https://example.com/build/status",
"created_at": "2024-01-21T10:00:00Z",
"updated_at": "2024-01-21T10:00:00Z",
},
),
],
test_mock={
"create_status": lambda *args, **kwargs: {
"id": 1234567890,
"url": "https://api.github.com/repos/owner/repo/statuses/ce587453ced02b1526dfb4cb910479d431683101",
"state": "success",
"context": "continuous-integration/jenkins",
"description": "The build succeeded!",
"target_url": "https://example.com/build/status",
"created_at": "2024-01-21T10:00:00Z",
"updated_at": "2024-01-21T10:00:00Z",
}
},
)
@staticmethod
async def create_status(
credentials: GithubFineGrainedAPICredentials,
repo_url: str,
sha: str,
state: StatusState,
target_url: Optional[str] = None,
description: Optional[str] = None,
context: str = "default",
) -> dict:
api = get_api(credentials)
class StatusData(BaseModel):
state: str
target_url: Optional[str] = None
description: Optional[str] = None
context: str
data = StatusData(
state=state.value,
context=context,
)
if target_url:
data.target_url = target_url
if description:
data.description = description
status_url = f"{repo_url}/statuses/{sha}"
response = await api.post(
status_url, data=data.model_dump_json(exclude_none=True)
)
result = response.json()
return {
"id": result["id"],
"url": result["url"],
"state": result["state"],
"context": result["context"],
"description": result.get("description"),
"target_url": result.get("target_url"),
"created_at": result["created_at"],
"updated_at": result["updated_at"],
}
async def run(
self,
input_data: Input,
*,
credentials: GithubFineGrainedAPICredentials,
**kwargs,
) -> BlockOutput:
try:
result = await self.create_status(
credentials=credentials,
repo_url=input_data.repo_url,
sha=input_data.sha,
state=input_data.state,
target_url=input_data.target_url,
description=input_data.description,
context=input_data.check_name or "AutoGPT Platform Checks",
)
yield "status", result
except Exception as e:
yield "error", str(e)

View File

@@ -12,7 +12,6 @@ from backend.data.block import (
BlockWebhookConfig,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from ._auth import (
TEST_CREDENTIALS,
@@ -37,7 +36,7 @@ class GitHubTriggerBase:
placeholder="{owner}/{repo}",
)
# --8<-- [start:example-payload-field]
payload: dict = SchemaField(hidden=True, default_factory=dict)
payload: dict = SchemaField(hidden=True, default={})
# --8<-- [end:example-payload-field]
class Output(BlockSchema):
@@ -53,7 +52,7 @@ class GitHubTriggerBase:
description="Error message if the payload could not be processed"
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "payload", input_data.payload
yield "triggered_by_user", input_data.payload["sender"]
@@ -124,7 +123,7 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
output_schema=GithubPullRequestTriggerBlock.Output,
# --8<-- [start:example-webhook_config]
webhook_config=BlockWebhookConfig(
provider=ProviderName.GITHUB,
provider="github",
webhook_type=GithubWebhookType.REPO,
resource_format="{repo}",
event_filter_input="events",
@@ -148,9 +147,8 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
async for name, value in super().run(input_data, **kwargs):
yield name, value
def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
yield from super().run(input_data, **kwargs)
yield "event", input_data.payload["action"]
yield "number", input_data.payload["number"]
yield "pull_request", input_data.payload["pull_request"]

View File

@@ -1,603 +0,0 @@
import asyncio
import enum
import uuid
from datetime import datetime, timedelta, timezone
from typing import Literal
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from pydantic import BaseModel
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.settings import AppEnvironment, Settings
from ._auth import (
GOOGLE_OAUTH_IS_CONFIGURED,
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
GoogleCredentials,
GoogleCredentialsField,
GoogleCredentialsInput,
)
class CalendarEvent(BaseModel):
"""Structured representation of a Google Calendar event."""
id: str
title: str
start_time: str
end_time: str
is_all_day: bool
location: str | None
description: str | None
organizer: str | None
attendees: list[str]
has_video_call: bool
video_link: str | None
calendar_link: str
is_recurring: bool
class GoogleCalendarReadEventsBlock(Block):
class Input(BlockSchema):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/calendar.readonly"]
)
calendar_id: str = SchemaField(
description="Calendar ID (use 'primary' for your main calendar)",
default="primary",
)
max_events: int = SchemaField(
description="Maximum number of events to retrieve", default=10
)
start_time: datetime = SchemaField(
description="Retrieve events starting from this time",
default_factory=lambda: datetime.now(tz=timezone.utc),
)
time_range_days: int = SchemaField(
description="Number of days to look ahead for events", default=30
)
search_term: str | None = SchemaField(
description="Optional search term to filter events by", default=None
)
page_token: str | None = SchemaField(
description="Page token from previous request to get the next batch of events. You can use this if you have lots of events you want to process in a loop",
default=None,
)
include_declined_events: bool = SchemaField(
description="Include events you've declined", default=False
)
class Output(BlockSchema):
events: list[CalendarEvent] = SchemaField(
description="List of calendar events in the requested time range",
default_factory=list,
)
event: CalendarEvent = SchemaField(
description="One of the calendar events in the requested time range"
)
next_page_token: str | None = SchemaField(
description="Token for retrieving the next page of events if more exist",
default=None,
)
error: str = SchemaField(
description="Error message if the request failed",
)
def __init__(self):
settings = Settings()
# Create realistic test data for events
test_now = datetime.now(tz=timezone.utc)
test_tomorrow = test_now + timedelta(days=1)
test_event_dict = {
"id": "event1id",
"title": "Team Meeting",
"start_time": test_tomorrow.strftime("%Y-%m-%d %H:%M"),
"end_time": (test_tomorrow + timedelta(hours=1)).strftime("%Y-%m-%d %H:%M"),
"is_all_day": False,
"location": "Conference Room A",
"description": "Weekly team sync",
"organizer": "manager@example.com",
"attendees": ["colleague1@example.com", "colleague2@example.com"],
"has_video_call": True,
"video_link": "https://meet.google.com/abc-defg-hij",
"calendar_link": "https://calendar.google.com/calendar/event?eid=event1id",
"is_recurring": True,
}
super().__init__(
id="80bc3ed1-e9a4-449e-8163-a8fc86f74f6a",
description="Retrieves upcoming events from a Google Calendar with filtering options",
categories={BlockCategory.PRODUCTIVITY, BlockCategory.DATA},
input_schema=GoogleCalendarReadEventsBlock.Input,
output_schema=GoogleCalendarReadEventsBlock.Output,
disabled=not GOOGLE_OAUTH_IS_CONFIGURED
or settings.config.app_env == AppEnvironment.PRODUCTION,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"calendar_id": "primary",
"max_events": 5,
"start_time": test_now.isoformat(),
"time_range_days": 7,
"search_term": None,
"include_declined_events": False,
"page_token": None,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("event", test_event_dict),
("events", [test_event_dict]),
],
test_mock={
"_read_calendar": lambda *args, **kwargs: {
"items": [
{
"id": "event1id",
"summary": "Team Meeting",
"start": {
"dateTime": test_tomorrow.isoformat(),
"timeZone": "UTC",
},
"end": {
"dateTime": (
test_tomorrow + timedelta(hours=1)
).isoformat(),
"timeZone": "UTC",
},
"location": "Conference Room A",
"description": "Weekly team sync",
"organizer": {"email": "manager@example.com"},
"attendees": [
{"email": "colleague1@example.com"},
{"email": "colleague2@example.com"},
],
"conferenceData": {
"conferenceUrl": "https://meet.google.com/abc-defg-hij"
},
"htmlLink": "https://calendar.google.com/calendar/event?eid=event1id",
"recurrence": ["RRULE:FREQ=WEEKLY;COUNT=10"],
}
],
"nextPageToken": None,
},
"_format_events": lambda *args, **kwargs: [test_event_dict],
},
)
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
try:
service = self._build_service(credentials, **kwargs)
# Calculate end time based on start time and time range
end_time = input_data.start_time + timedelta(
days=input_data.time_range_days
)
# Call Google Calendar API
result = await asyncio.to_thread(
self._read_calendar,
service=service,
calendarId=input_data.calendar_id,
time_min=input_data.start_time.isoformat(),
time_max=end_time.isoformat(),
max_results=input_data.max_events,
single_events=True,
search_term=input_data.search_term,
show_deleted=False,
show_hidden=input_data.include_declined_events,
page_token=input_data.page_token,
)
# Format events into a user-friendly structure
formatted_events = self._format_events(result.get("items", []))
# Include next page token if available
if next_page_token := result.get("nextPageToken"):
yield "next_page_token", next_page_token
for event in formatted_events:
yield "event", event
yield "events", formatted_events
except Exception as e:
yield "error", str(e)
@staticmethod
def _build_service(credentials: GoogleCredentials, **kwargs):
creds = Credentials(
token=(
credentials.access_token.get_secret_value()
if credentials.access_token
else None
),
refresh_token=(
credentials.refresh_token.get_secret_value()
if credentials.refresh_token
else None
),
token_uri="https://oauth2.googleapis.com/token",
client_id=Settings().secrets.google_client_id,
client_secret=Settings().secrets.google_client_secret,
scopes=credentials.scopes,
)
return build("calendar", "v3", credentials=creds)
def _read_calendar(
self,
service,
calendarId: str,
time_min: str,
time_max: str,
max_results: int,
single_events: bool,
search_term: str | None = None,
show_deleted: bool = False,
show_hidden: bool = False,
page_token: str | None = None,
) -> dict:
"""Read calendar events with optional filtering."""
calendar = service.events()
# Build query parameters
params = {
"calendarId": calendarId,
"timeMin": time_min,
"timeMax": time_max,
"maxResults": max_results,
"singleEvents": single_events,
"orderBy": "startTime",
"showDeleted": show_deleted,
"showHiddenInvitations": show_hidden,
**({"pageToken": page_token} if page_token else {}),
}
# Add search term if provided
if search_term:
params["q"] = search_term
result = calendar.list(**params).execute()
return result
def _format_events(self, events: list[dict]) -> list[CalendarEvent]:
"""Format Google Calendar API events into user-friendly structure."""
formatted_events = []
for event in events:
# Determine if all-day event
is_all_day = "date" in event.get("start", {})
# Format start and end times
if is_all_day:
start_time = event.get("start", {}).get("date", "")
end_time = event.get("end", {}).get("date", "")
else:
# Convert ISO format to more readable format
start_datetime = datetime.fromisoformat(
event.get("start", {}).get("dateTime", "").replace("Z", "+00:00")
)
end_datetime = datetime.fromisoformat(
event.get("end", {}).get("dateTime", "").replace("Z", "+00:00")
)
start_time = start_datetime.strftime("%Y-%m-%d %H:%M")
end_time = end_datetime.strftime("%Y-%m-%d %H:%M")
# Extract attendees
attendees = []
for attendee in event.get("attendees", []):
if email := attendee.get("email"):
attendees.append(email)
# Check for video call link
has_video_call = False
video_link = None
if conf_data := event.get("conferenceData"):
if conf_url := conf_data.get("conferenceUrl"):
has_video_call = True
video_link = conf_url
elif entry_points := conf_data.get("entryPoints", []):
for entry in entry_points:
if entry.get("entryPointType") == "video":
has_video_call = True
video_link = entry.get("uri")
break
# Create formatted event
formatted_event = CalendarEvent(
id=event.get("id", ""),
title=event.get("summary", "Untitled Event"),
start_time=start_time,
end_time=end_time,
is_all_day=is_all_day,
location=event.get("location"),
description=event.get("description"),
organizer=event.get("organizer", {}).get("email"),
attendees=attendees,
has_video_call=has_video_call,
video_link=video_link,
calendar_link=event.get("htmlLink", ""),
is_recurring=bool(event.get("recurrence")),
)
formatted_events.append(formatted_event)
return formatted_events
class ReminderPreset(enum.Enum):
"""Common reminder times before an event."""
TEN_MINUTES = 10
THIRTY_MINUTES = 30
ONE_HOUR = 60
ONE_DAY = 1440 # 24 hours in minutes
class RecurrenceFrequency(enum.Enum):
"""Frequency options for recurring events."""
DAILY = "DAILY"
WEEKLY = "WEEKLY"
MONTHLY = "MONTHLY"
YEARLY = "YEARLY"
class ExactTiming(BaseModel):
"""Model for specifying start and end times."""
discriminator: Literal["exact_timing"]
start_datetime: datetime
end_datetime: datetime
class DurationTiming(BaseModel):
"""Model for specifying start time and duration."""
discriminator: Literal["duration_timing"]
start_datetime: datetime
duration_minutes: int
class OneTimeEvent(BaseModel):
"""Model for a one-time event."""
discriminator: Literal["one_time"]
class RecurringEvent(BaseModel):
"""Model for a recurring event."""
discriminator: Literal["recurring"]
frequency: RecurrenceFrequency
count: int
class GoogleCalendarCreateEventBlock(Block):
class Input(BlockSchema):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/calendar"]
)
# Event Details
event_title: str = SchemaField(description="Title of the event")
location: str | None = SchemaField(
description="Location of the event", default=None
)
description: str | None = SchemaField(
description="Description of the event", default=None
)
# Timing
timing: ExactTiming | DurationTiming = SchemaField(
discriminator="discriminator",
advanced=False,
description="Specify when the event starts and ends",
default_factory=lambda: DurationTiming(
discriminator="duration_timing",
start_datetime=datetime.now().replace(microsecond=0, second=0, minute=0)
+ timedelta(hours=1),
duration_minutes=60,
),
)
# Calendar selection
calendar_id: str = SchemaField(
description="Calendar ID (use 'primary' for your main calendar)",
default="primary",
)
# Guests
guest_emails: list[str] = SchemaField(
description="Email addresses of guests to invite", default_factory=list
)
send_notifications: bool = SchemaField(
description="Send email notifications to guests", default=True
)
# Extras
add_google_meet: bool = SchemaField(
description="Include a Google Meet video conference link", default=False
)
recurrence: OneTimeEvent | RecurringEvent = SchemaField(
discriminator="discriminator",
description="Whether the event repeats",
default_factory=lambda: OneTimeEvent(discriminator="one_time"),
)
reminder_minutes: list[ReminderPreset] = SchemaField(
description="When to send reminders before the event",
default_factory=lambda: [ReminderPreset.TEN_MINUTES],
)
class Output(BlockSchema):
event_id: str = SchemaField(description="ID of the created event")
event_link: str = SchemaField(
description="Link to view the event in Google Calendar"
)
error: str = SchemaField(description="Error message if event creation failed")
def __init__(self):
settings = Settings()
super().__init__(
id="ed2ec950-fbff-4204-94c0-023fb1d625e0",
description="This block creates a new event in Google Calendar with customizable parameters.",
categories={BlockCategory.PRODUCTIVITY},
input_schema=GoogleCalendarCreateEventBlock.Input,
output_schema=GoogleCalendarCreateEventBlock.Output,
disabled=not GOOGLE_OAUTH_IS_CONFIGURED
or settings.config.app_env == AppEnvironment.PRODUCTION,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"event_title": "Team Meeting",
"location": "Conference Room A",
"description": "Weekly team sync-up",
"calendar_id": "primary",
"guest_emails": ["colleague1@example.com", "colleague2@example.com"],
"add_google_meet": True,
"send_notifications": True,
"reminder_minutes": [
ReminderPreset.TEN_MINUTES.value,
ReminderPreset.ONE_HOUR.value,
],
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("event_id", "abc123event_id"),
("event_link", "https://calendar.google.com/calendar/event?eid=abc123"),
],
test_mock={
"_create_event": lambda *args, **kwargs: {
"id": "abc123event_id",
"htmlLink": "https://calendar.google.com/calendar/event?eid=abc123",
}
},
)
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
try:
service = self._build_service(credentials, **kwargs)
# Create event body
# Get start and end times based on the timing option
if input_data.timing.discriminator == "exact_timing":
start_datetime = input_data.timing.start_datetime
end_datetime = input_data.timing.end_datetime
else: # duration_timing
start_datetime = input_data.timing.start_datetime
end_datetime = start_datetime + timedelta(
minutes=input_data.timing.duration_minutes
)
# Format datetimes for Google Calendar API
start_time_str = start_datetime.isoformat()
end_time_str = end_datetime.isoformat()
# Build the event body
event_body = {
"summary": input_data.event_title,
"start": {"dateTime": start_time_str},
"end": {"dateTime": end_time_str},
}
# Add optional fields
if input_data.location:
event_body["location"] = input_data.location
if input_data.description:
event_body["description"] = input_data.description
# Add guests
if input_data.guest_emails:
event_body["attendees"] = [
{"email": email} for email in input_data.guest_emails
]
# Add reminders
if input_data.reminder_minutes:
event_body["reminders"] = {
"useDefault": False,
"overrides": [
{"method": "popup", "minutes": reminder.value}
for reminder in input_data.reminder_minutes
],
}
# Add Google Meet
if input_data.add_google_meet:
event_body["conferenceData"] = {
"createRequest": {
"requestId": f"meet-{uuid.uuid4()}",
"conferenceSolutionKey": {"type": "hangoutsMeet"},
}
}
# Add recurrence
if input_data.recurrence.discriminator == "recurring":
rule = f"RRULE:FREQ={input_data.recurrence.frequency.value}"
rule += f";COUNT={input_data.recurrence.count}"
event_body["recurrence"] = [rule]
# Create the event
result = await asyncio.to_thread(
self._create_event,
service=service,
calendar_id=input_data.calendar_id,
event_body=event_body,
send_notifications=input_data.send_notifications,
conference_data_version=1 if input_data.add_google_meet else 0,
)
yield "event_id", result["id"]
yield "event_link", result["htmlLink"]
except Exception as e:
yield "error", str(e)
@staticmethod
def _build_service(credentials: GoogleCredentials, **kwargs):
creds = Credentials(
token=(
credentials.access_token.get_secret_value()
if credentials.access_token
else None
),
refresh_token=(
credentials.refresh_token.get_secret_value()
if credentials.refresh_token
else None
),
token_uri="https://oauth2.googleapis.com/token",
client_id=Settings().secrets.google_client_id,
client_secret=Settings().secrets.google_client_secret,
scopes=credentials.scopes,
)
return build("calendar", "v3", credentials=creds)
def _create_event(
self,
service,
calendar_id: str,
event_body: dict,
send_notifications: bool = False,
conference_data_version: int = 0,
) -> dict:
"""Create a new event in Google Calendar."""
calendar = service.events()
# Make the API call
result = calendar.insert(
calendarId=calendar_id,
body=event_body,
sendNotifications=send_notifications,
conferenceDataVersion=conference_data_version,
).execute()
return result
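The deleted calendar blocks wrapped the blocking googleapiclient calls in asyncio.to_thread so the event loop stays responsive. A minimal sketch of that pattern (service is a built calendar v3 client):

import asyncio

def _list_events(service, calendar_id: str, **params) -> dict:
    # googleapiclient is synchronous; this runs in a worker thread.
    return service.events().list(calendarId=calendar_id, **params).execute()

async def read_events(service, calendar_id: str = "primary") -> list[dict]:
    result = await asyncio.to_thread(
        _list_events,
        service,
        calendar_id,
        maxResults=10,
        singleEvents=True,
        orderBy="startTime",
    )
    return result.get("items", [])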

View File

@@ -1,4 +1,3 @@
import asyncio
import base64
from email.utils import parseaddr
from typing import List
@@ -9,7 +8,6 @@ from pydantic import BaseModel
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.settings import Settings
from ._auth import (
GOOGLE_OAUTH_IS_CONFIGURED,
@@ -129,13 +127,11 @@ class GmailReadBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
messages = await asyncio.to_thread(
self._read_emails, service, input_data.query, input_data.max_results
)
service = self._build_service(credentials, **kwargs)
messages = self._read_emails(service, input_data.query, input_data.max_results)
for email in messages:
yield "email", email
yield "emails", messages
@@ -154,8 +150,8 @@ class GmailReadBlock(Block):
else None
),
token_uri="https://oauth2.googleapis.com/token",
client_id=Settings().secrets.google_client_id,
client_secret=Settings().secrets.google_client_secret,
client_id=kwargs.get("client_id"),
client_secret=kwargs.get("client_secret"),
scopes=credentials.scopes,
)
return build("gmail", "v1", credentials=creds)
@@ -289,18 +285,14 @@ class GmailSendBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._send_email,
service,
input_data.to,
input_data.subject,
input_data.body,
send_result = self._send_email(
service, input_data.to, input_data.subject, input_data.body
)
yield "result", result
yield "result", send_result
def _send_email(self, service, to: str, subject: str, body: str) -> dict:
if not to or not subject or not body:
@@ -365,12 +357,12 @@ class GmailListLabelsBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(self._list_labels, service)
yield "result", result
labels = self._list_labels(service)
yield "result", labels
def _list_labels(self, service) -> list[dict]:
results = service.users().labels().list(userId="me").execute()
@@ -426,13 +418,11 @@ class GmailAddLabelBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._add_label, service, input_data.message_id, input_data.label_name
)
result = self._add_label(service, input_data.message_id, input_data.label_name)
yield "result", result
def _add_label(self, service, message_id: str, label_name: str) -> dict:
@@ -511,12 +501,12 @@ class GmailRemoveLabelBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._remove_label, service, input_data.message_id, input_data.label_name
result = self._remove_label(
service, input_data.message_id, input_data.label_name
)
yield "result", result

View File

@@ -1,11 +1,8 @@
import asyncio
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.settings import AppEnvironment, Settings
from ._auth import (
GOOGLE_OAUTH_IS_CONFIGURED,
@@ -38,15 +35,13 @@ class GoogleSheetsReadBlock(Block):
)
def __init__(self):
settings = Settings()
super().__init__(
id="5724e902-3635-47e9-a108-aaa0263a4988",
description="This block reads data from a Google Sheets spreadsheet.",
categories={BlockCategory.DATA},
input_schema=GoogleSheetsReadBlock.Input,
output_schema=GoogleSheetsReadBlock.Output,
disabled=not GOOGLE_OAUTH_IS_CONFIGURED
or settings.config.app_env == AppEnvironment.PRODUCTION,
disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
test_input={
"spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
"range": "Sheet1!A1:B2",
@@ -70,13 +65,11 @@ class GoogleSheetsReadBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = self._build_service(credentials, **kwargs)
data = await asyncio.to_thread(
self._read_sheet, service, input_data.spreadsheet_id, input_data.range
)
data = self._read_sheet(service, input_data.spreadsheet_id, input_data.range)
yield "result", data
@staticmethod
@@ -93,8 +86,8 @@ class GoogleSheetsReadBlock(Block):
else None
),
token_uri="https://oauth2.googleapis.com/token",
client_id=Settings().secrets.google_client_id,
client_secret=Settings().secrets.google_client_secret,
client_id=kwargs.get("client_id"),
client_secret=kwargs.get("client_secret"),
scopes=credentials.scopes,
)
return build("sheets", "v4", credentials=creds)
@@ -161,12 +154,11 @@ class GoogleSheetsWriteBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GoogleSheetsReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._write_sheet,
result = self._write_sheet(
service,
input_data.spreadsheet_id,
input_data.range,
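_read_sheet and _write_sheet boil down to the spreadsheets().values() surface of the Sheets v4 client. A minimal read sketch (service is a built sheets v4 client):

def read_range(service, spreadsheet_id: str, a1_range: str) -> list[list[str]]:
    result = (
        service.spreadsheets()
        .values()
        .get(spreadsheetId=spreadsheet_id, range=a1_range)
        .execute()
    )
    # "values" is absent when the range is empty.
    return result.get("values", [])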

View File

@@ -103,7 +103,7 @@ class GoogleMapsSearchBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
places = self.search_places(

View File

@@ -1,17 +1,14 @@
from typing import Any, Optional
from backend.util.request import Requests
from backend.util.request import requests
class GetRequest:
@classmethod
async def get_request(
def get_request(
cls, url: str, headers: Optional[dict] = None, json: bool = False
) -> Any:
if headers is None:
headers = {}
response = await Requests().get(url, headers=headers)
if json:
return response.json()
else:
return response.text()
response = requests.get(url, headers=headers)
return response.json() if json else response.text
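Usage sketch of the sync helper above; the json flag selects response.json() over the requests text property (endpoints are illustrative):

text = GetRequest.get_request("https://api.github.com/zen", json=False)
data = GetRequest.get_request("https://api.github.com/meta", json=True)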

View File

@@ -1,22 +1,10 @@
import json
import logging
from enum import Enum
from io import BytesIO
from pathlib import Path
import aiofiles
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.file import (
MediaFileType,
get_exec_file_path,
get_mime_type,
store_media_file,
)
from backend.util.request import Requests
logger = logging.getLogger(name=__name__)
from backend.util.request import requests
class HttpMethod(Enum):
@@ -41,131 +29,59 @@ class SendWebRequestBlock(Block):
)
headers: dict[str, str] = SchemaField(
description="The headers to include in the request",
default_factory=dict,
default={},
)
json_format: bool = SchemaField(
title="JSON format",
description="If true, send the body as JSON (unless files are also present).",
description="Whether to send and receive body as JSON",
default=True,
)
body: dict | None = SchemaField(
description="Form/JSON body payload. If files are supplied, this must be a mapping of formfields.",
body: Any = SchemaField(
description="The body of the request",
default=None,
)
files_name: str = SchemaField(
description="The name of the file field in the form data.",
default="file",
)
files: list[MediaFileType] = SchemaField(
description="Mapping of *form field name* → Image url / path / base64 url.",
default_factory=list,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
client_error: object = SchemaField(description="Errors on 4xx status codes")
server_error: object = SchemaField(description="Errors on 5xx status codes")
error: str = SchemaField(description="Errors for all other exceptions")
client_error: object = SchemaField(description="The error on 4xx status codes")
server_error: object = SchemaField(description="The error on 5xx status codes")
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="Make an HTTP request (JSON / form / multipart).",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
@staticmethod
async def _prepare_files(
graph_exec_id: str,
files_name: str,
files: list[MediaFileType],
) -> list[tuple[str, tuple[str, BytesIO, str]]]:
"""
Prepare files for the request by storing them and reading their content.
Returns a list of tuples in the format:
(files_name, (filename, BytesIO, mime_type))
"""
files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = []
for media in files:
# Store the media file locally so its bytes can be read below
rel_path = await store_media_file(
graph_exec_id, media, return_content=False
)
abs_path = get_exec_file_path(graph_exec_id, rel_path)
async with aiofiles.open(abs_path, "rb") as f:
content = await f.read()
handle = BytesIO(content)
mime = get_mime_type(abs_path)
files_payload.append((files_name, (Path(abs_path).name, handle, mime)))
return files_payload
async def run(
self, input_data: Input, *, graph_exec_id: str, **kwargs
) -> BlockOutput:
# ─── Parse/normalise body ────────────────────────────────────
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if isinstance(body, str):
try:
# Validate JSON string length to prevent DoS attacks
if len(body) > 10_000_000: # 10MB limit
raise ValueError("JSON body too large")
parsed_body = json.loads(body)
# Validate that parsed JSON is safe (basic object/array/primitive types)
if (
isinstance(parsed_body, (dict, list, str, int, float, bool))
or parsed_body is None
):
body = parsed_body
else:
# Unexpected type, treat as plain text
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
except (json.JSONDecodeError, ValueError):
# Invalid JSON or too large; treat as a form-field value instead
input_data.json_format = False
# ─── Prepare files (if any) ──────────────────────────────────
use_files = bool(input_data.files)
files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = []
if use_files:
files_payload = await self._prepare_files(
graph_exec_id, input_data.files_name, input_data.files
)
# Enforce body format rules
if use_files and input_data.json_format:
raise ValueError(
"json_format=True cannot be combined with file uploads; set json_format=False and put form fields in `body`."
)
# ─── Execute request ─────────────────────────────────────────
response = await Requests().request(
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
files=files_payload if use_files else None,
# * If files → multipart ⇒ pass form fields via data=
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
# * Else, choose JSON vs urlencoded based on flag
json=body if (input_data.json_format and not use_files) else None,
)
result = response.json() if input_data.json_format else response.text
# Decide how to parse the response
if response.headers.get("content-type", "").startswith("application/json"):
result = None if response.status == 204 else response.json()
else:
result = response.text()
# Yield according to status code bucket
if 200 <= response.status < 300:
if response.status_code // 100 == 2:
yield "response", result
elif 400 <= response.status < 500:
elif response.status_code // 100 == 4:
yield "client_error", result
else:
elif response.status_code // 100 == 5:
yield "server_error", result
else:
raise ValueError(f"Unexpected status code: {response.status_code}")
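The rewritten block buckets responses by the hundreds digit of the status code: status_code // 100 maps 2xx → 2, 4xx → 4, 5xx → 5, and anything else raises. Equivalently:

def bucket(status_code: int) -> str:
    family = status_code // 100
    if family == 2:
        return "response"
    if family == 4:
        return "client_error"
    if family == 5:
        return "server_error"
    raise ValueError(f"Unexpected status code: {status_code}")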

View File

@@ -5,7 +5,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class HubSpotCompanyBlock(Block):
@@ -15,8 +15,7 @@ class HubSpotCompanyBlock(Block):
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations",
default_factory=dict,
description="Company data for create/update operations", default={}
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
@@ -35,7 +34,7 @@ class HubSpotCompanyBlock(Block):
output_schema=HubSpotCompanyBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
@@ -45,7 +44,7 @@ class HubSpotCompanyBlock(Block):
}
if input_data.operation == "create":
response = await Requests().post(
response = requests.post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
@@ -67,16 +66,14 @@ class HubSpotCompanyBlock(Block):
}
]
}
search_response = await Requests().post(
search_url, headers=headers, json=search_data
)
search_result = search_response.json()
yield "search_company", search_result.get("results", [{}])[0]
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = await Requests().post(
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
@@ -93,11 +90,10 @@ class HubSpotCompanyBlock(Block):
]
},
)
search_result = search_response.json()
company_id = search_result.get("results", [{}])[0].get("id")
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = await Requests().patch(
response = requests.patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},

View File

@@ -5,7 +5,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class HubSpotContactBlock(Block):
@@ -15,8 +15,7 @@ class HubSpotContactBlock(Block):
description="Operation to perform (create, update, get)", default="get"
)
contact_data: dict = SchemaField(
description="Contact data for create/update operations",
default_factory=dict,
description="Contact data for create/update operations", default={}
)
email: str = SchemaField(
description="Email address for get/update operations", default=""
@@ -35,7 +34,7 @@ class HubSpotContactBlock(Block):
output_schema=HubSpotContactBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
@@ -45,7 +44,7 @@ class HubSpotContactBlock(Block):
}
if input_data.operation == "create":
response = await Requests().post(
response = requests.post(
base_url, headers=headers, json={"properties": input_data.contact_data}
)
result = response.json()
@@ -53,6 +52,7 @@ class HubSpotContactBlock(Block):
yield "status", "created"
elif input_data.operation == "get":
# Search for contact by email
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
@@ -67,15 +67,13 @@ class HubSpotContactBlock(Block):
}
]
}
response = await Requests().post(
search_url, headers=headers, json=search_data
)
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "contact", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
search_response = await Requests().post(
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
@@ -92,11 +90,10 @@ class HubSpotContactBlock(Block):
]
},
)
search_result = search_response.json()
contact_id = search_result.get("results", [{}])[0].get("id")
contact_id = search_response.json().get("results", [{}])[0].get("id")
if contact_id:
response = await Requests().patch(
response = requests.patch(
f"{base_url}/{contact_id}",
headers=headers,
json={"properties": input_data.contact_data},

View File

@@ -7,7 +7,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
@@ -19,7 +19,7 @@ class HubSpotEngagementBlock(Block):
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default_factory=dict,
default={},
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
@@ -27,6 +27,7 @@ class HubSpotEngagementBlock(Block):
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
optional=True,
)
class Output(BlockSchema):
@@ -42,7 +43,7 @@ class HubSpotEngagementBlock(Block):
output_schema=HubSpotEngagementBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
@@ -66,9 +67,7 @@ class HubSpotEngagementBlock(Block):
}
}
response = await Requests().post(
email_url, headers=headers, json=email_data
)
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
@@ -82,9 +81,7 @@ class HubSpotEngagementBlock(Block):
params = {"limit": 100, "after": from_date.isoformat()}
response = await Requests().get(
engagement_url, headers=headers, params=params
)
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics

View File

@@ -12,7 +12,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -142,16 +142,6 @@ class IdeogramModelBlock(Block):
title="Color Palette Preset",
advanced=True,
)
custom_color_palette: Optional[list[str]] = SchemaField(
description=(
"Only available for model version V_2 or V_2_TURBO. Provide one or more color hex codes "
"(e.g., ['#000030', '#1C0C47', '#9900FF', '#4285F4', '#FFFFFF']) to define a custom color "
"palette. Only used if 'color_palette_name' is 'NONE'."
),
default=None,
title="Custom Color Palette",
advanced=True,
)
class Output(BlockSchema):
result: str = SchemaField(description="Generated image URL")
@@ -161,7 +151,7 @@ class IdeogramModelBlock(Block):
super().__init__(
id="6ab085e2-20b3-4055-bc3e-08036e01eca6",
description="This block runs Ideogram models with both simple and advanced settings.",
categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
categories={BlockCategory.AI},
input_schema=IdeogramModelBlock.Input,
output_schema=IdeogramModelBlock.Output,
test_input={
@@ -174,13 +164,6 @@ class IdeogramModelBlock(Block):
"style_type": StyleType.AUTO,
"negative_prompt": None,
"color_palette_name": ColorPalettePreset.NONE,
"custom_color_palette": [
"#000030",
"#1C0C47",
"#9900FF",
"#4285F4",
"#FFFFFF",
],
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
@@ -190,19 +173,19 @@ class IdeogramModelBlock(Block):
),
],
test_mock={
"run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name, custom_colors: "https://ideogram.ai/api/images/test-generated-image-url.png",
"run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name: "https://ideogram.ai/api/images/test-generated-image-url.png",
"upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
seed = input_data.seed
# Step 1: Generate the image
result = await self.run_model(
result = self.run_model(
api_key=credentials.api_key,
model_name=input_data.ideogram_model_name.value,
prompt=input_data.prompt,
@@ -212,19 +195,18 @@ class IdeogramModelBlock(Block):
style_type=input_data.style_type.value,
negative_prompt=input_data.negative_prompt,
color_palette_name=input_data.color_palette_name.value,
custom_colors=input_data.custom_color_palette,
)
# Step 2: Upscale the image if requested
if input_data.upscale == UpscaleOption.AI_UPSCALE:
result = await self.upscale_image(
result = self.upscale_image(
api_key=credentials.api_key,
image_url=result,
)
yield "result", result
async def run_model(
def run_model(
self,
api_key: SecretStr,
model_name: str,
@@ -235,7 +217,6 @@ class IdeogramModelBlock(Block):
style_type: str,
negative_prompt: Optional[str],
color_palette_name: str,
custom_colors: Optional[list[str]],
):
url = "https://api.ideogram.ai/generate"
headers = {
@@ -260,19 +241,15 @@ class IdeogramModelBlock(Block):
data["image_request"]["negative_prompt"] = negative_prompt
if color_palette_name != "NONE":
data["color_palette"] = {"name": color_palette_name}
elif custom_colors:
data["color_palette"] = {
"members": [{"color_hex": color} for color in custom_colors]
}
data["image_request"]["color_palette"] = {"name": color_palette_name}
try:
response = await Requests().post(url, headers=headers, json=data)
response = requests.post(url, json=data, headers=headers)
return response.json()["data"][0]["url"]
except RequestException as e:
raise Exception(f"Failed to fetch image: {str(e)}")
async def upscale_image(self, api_key: SecretStr, image_url: str):
def upscale_image(self, api_key: SecretStr, image_url: str):
url = "https://api.ideogram.ai/upscale"
headers = {
"Api-Key": api_key.get_secret_value(),
@@ -280,22 +257,23 @@ class IdeogramModelBlock(Block):
try:
# Step 1: Download the image from the provided URL
response = await Requests().get(image_url)
image_content = response.content
image_response = requests.get(image_url)
# Step 2: Send the downloaded image to the upscale API
files = {
"image_file": ("image.png", image_content, "image/png"),
"image_file": ("image.png", image_response.content, "image/png"),
}
response = await Requests().post(
response = requests.post(
url,
headers=headers,
data={"image_request": "{}"},
data={
"image_request": "{}", # Empty JSON object
},
files=files,
)
return (response.json())["data"][0]["url"]
return response.json()["data"][0]["url"]
except RequestException as e:
raise Exception(f"Failed to upscale image: {str(e)}")
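upscale_image above is a download-then-reupload: fetch the generated image bytes, then post them as multipart form data alongside the required image_request field. A sketch mirroring the calls in the diff:

import requests

def upscale(api_key: str, image_url: str) -> str:
    image = requests.get(image_url).content
    resp = requests.post(
        "https://api.ideogram.ai/upscale",
        headers={"Api-Key": api_key},
        data={"image_request": "{}"},  # empty JSON object, per the block above
        files={"image_file": ("image.png", image, "image/png")},
    )
    return resp.json()["data"][0]["url"]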

View File

@@ -1,556 +0,0 @@
import copy
from datetime import date, time
from typing import Any, Optional
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.util.file import store_media_file
from backend.util.mock import MockObject
from backend.util.settings import Config
from backend.util.text import TextFormatter
from backend.util.type import LongTextType, MediaFileType, ShortTextType
formatter = TextFormatter()
config = Config()
class AgentInputBlock(Block):
"""
This block is used to provide input to the graph.
It takes in a value, name, description, a list of default values, and a flag that limits selection to those defaults.
It outputs the value passed as input.
"""
class Input(BlockSchema):
name: str = SchemaField(description="The name of the input.")
value: Any = SchemaField(
description="The value to be passed as input.",
default=None,
)
title: str | None = SchemaField(
description="The title of the input.", default=None, advanced=True
)
description: str | None = SchemaField(
description="The description of the input.",
default=None,
advanced=True,
)
placeholder_values: list = SchemaField(
description="The placeholder values to be passed as input.",
default_factory=list,
advanced=True,
hidden=True,
)
advanced: bool = SchemaField(
description="Whether to show the input in the advanced section, if the field is not required.",
default=False,
advanced=True,
)
secret: bool = SchemaField(
description="Whether the input should be treated as a secret.",
default=False,
advanced=True,
)
def generate_schema(self):
schema = copy.deepcopy(self.get_field_schema("value"))
if possible_values := self.placeholder_values:
schema["enum"] = possible_values
return schema
class Output(BlockSchema):
result: Any = SchemaField(description="The value passed as input.")
def __init__(self, **kwargs):
super().__init__(
**{
"id": "c0a8e994-ebf1-4a9c-a4d8-89d09c86741b",
"description": "Base block for user inputs.",
"input_schema": AgentInputBlock.Input,
"output_schema": AgentInputBlock.Output,
"test_input": [
{
"value": "Hello, World!",
"name": "input_1",
"description": "Example test input.",
"placeholder_values": [],
},
{
"value": "Hello, World!",
"name": "input_2",
"description": "Example test input with placeholders.",
"placeholder_values": ["Hello, World!"],
},
],
"test_output": [
("result", "Hello, World!"),
("result", "Hello, World!"),
],
"categories": {BlockCategory.INPUT, BlockCategory.BASIC},
"block_type": BlockType.INPUT,
"static_output": True,
**kwargs,
}
)
async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
if input_data.value is not None:
yield "result", input_data.value
class AgentOutputBlock(Block):
"""
Records the output of the graph for users to see.
Behavior:
If `format` is provided and the `value` is of a type that can be formatted,
the block attempts to format the value using the `format` string.
If formatting fails or no `format` is provided, the raw `value` is output.
"""
class Input(BlockSchema):
value: Any = SchemaField(
description="The value to be recorded as output.",
default=None,
advanced=False,
)
name: str = SchemaField(description="The name of the output.")
title: str | None = SchemaField(
description="The title of the output.",
default=None,
advanced=True,
)
description: str | None = SchemaField(
description="The description of the output.",
default=None,
advanced=True,
)
format: str = SchemaField(
description="The format string to be used to format the recorded_value. Use Jinja2 syntax.",
default="",
advanced=True,
)
advanced: bool = SchemaField(
description="Whether to treat the output as advanced.",
default=False,
advanced=True,
)
secret: bool = SchemaField(
description="Whether the output should be treated as a secret.",
default=False,
advanced=True,
)
def generate_schema(self):
return self.get_field_schema("value")
class Output(BlockSchema):
output: Any = SchemaField(description="The value recorded as output.")
name: Any = SchemaField(description="The name of the value recorded as output.")
def __init__(self):
super().__init__(
id="363ae599-353e-4804-937e-b2ee3cef3da4",
description="Stores the output of the graph for users to see.",
input_schema=AgentOutputBlock.Input,
output_schema=AgentOutputBlock.Output,
test_input=[
{
"value": "Hello, World!",
"name": "output_1",
"description": "This is a test output.",
"format": "{{ output_1 }}!!",
},
{
"value": "42",
"name": "output_2",
"description": "This is another test output.",
"format": "{{ output_2 }}",
},
{
"value": MockObject(value="!!", key="key"),
"name": "output_3",
"description": "This is a test output with a mock object.",
"format": "{{ output_3 }}",
},
],
test_output=[
("output", "Hello, World!!!"),
("output", "42"),
("output", MockObject(value="!!", key="key")),
],
categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
block_type=BlockType.OUTPUT,
static_output=True,
)
async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
"""
Attempts to format the recorded value using `format` if provided.
If formatting fails or no format string is given, outputs the original value.
"""
if input_data.format:
try:
yield "output", formatter.format_string(
input_data.format, {input_data.name: input_data.value}
)
except Exception as e:
yield "output", f"Error: {e}, {input_data.value}"
else:
yield "output", input_data.value
yield "name", input_data.name
class AgentShortTextInputBlock(AgentInputBlock):
class Input(AgentInputBlock.Input):
value: Optional[ShortTextType] = SchemaField(
description="Short text input.",
default=None,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: str = SchemaField(description="Short text result.")
def __init__(self):
super().__init__(
id="7fcd3bcb-8e1b-4e69-903d-32d3d4a92158",
description="Block for short text input (single-line).",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentShortTextInputBlock.Input,
output_schema=AgentShortTextInputBlock.Output,
test_input=[
{
"value": "Hello",
"name": "short_text_1",
"description": "Short text example 1",
"placeholder_values": [],
},
{
"value": "Quick test",
"name": "short_text_2",
"description": "Short text example 2",
"placeholder_values": ["Quick test", "Another option"],
},
],
test_output=[
("result", "Hello"),
("result", "Quick test"),
],
)
class AgentLongTextInputBlock(AgentInputBlock):
class Input(AgentInputBlock.Input):
value: Optional[LongTextType] = SchemaField(
description="Long text input (potentially multi-line).",
default=None,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: str = SchemaField(description="Long text result.")
def __init__(self):
super().__init__(
id="90a56ffb-7024-4b2b-ab50-e26c5e5ab8ba",
description="Block for long text input (multi-line).",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentLongTextInputBlock.Input,
output_schema=AgentLongTextInputBlock.Output,
test_input=[
{
"value": "Lorem ipsum dolor sit amet...",
"name": "long_text_1",
"description": "Long text example 1",
"placeholder_values": [],
},
{
"value": "Another multiline text input.",
"name": "long_text_2",
"description": "Long text example 2",
"placeholder_values": ["Another multiline text input."],
},
],
test_output=[
("result", "Lorem ipsum dolor sit amet..."),
("result", "Another multiline text input."),
],
)
class AgentNumberInputBlock(AgentInputBlock):
class Input(AgentInputBlock.Input):
value: Optional[int] = SchemaField(
description="Number input.",
default=None,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: int = SchemaField(description="Number result.")
def __init__(self):
super().__init__(
id="96dae2bb-97a2-41c2-bd2f-13a3b5a8ea98",
description="Block for number input.",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentNumberInputBlock.Input,
output_schema=AgentNumberInputBlock.Output,
test_input=[
{
"value": 42,
"name": "number_input_1",
"description": "Number example 1",
"placeholder_values": [],
},
{
"value": 314,
"name": "number_input_2",
"description": "Number example 2",
"placeholder_values": [314, 2718],
},
],
test_output=[
("result", 42),
("result", 314),
],
)
class AgentDateInputBlock(AgentInputBlock):
class Input(AgentInputBlock.Input):
value: Optional[date] = SchemaField(
description="Date input (YYYY-MM-DD).",
default=None,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: date = SchemaField(description="Date result.")
def __init__(self):
super().__init__(
id="7e198b09-4994-47db-8b4d-952d98241817",
description="Block for date input.",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentDateInputBlock.Input,
output_schema=AgentDateInputBlock.Output,
test_input=[
{
# If your system can parse JSON date strings to date objects
"value": str(date(2025, 3, 19)),
"name": "date_input_1",
"description": "Example date input 1",
},
{
"value": str(date(2023, 12, 31)),
"name": "date_input_2",
"description": "Example date input 2",
},
],
test_output=[
("result", date(2025, 3, 19)),
("result", date(2023, 12, 31)),
],
)
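Note that the date tests pass stringified dates in and expect date objects out; pydantic performs that coercion when validating the Input model. A minimal standalone illustration:

from datetime import date
from typing import Optional

from pydantic import BaseModel

class Example(BaseModel):
    value: Optional[date] = None

assert Example(value="2025-03-19").value == date(2025, 3, 19)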
class AgentTimeInputBlock(AgentInputBlock):
class Input(AgentInputBlock.Input):
value: Optional[time] = SchemaField(
description="Time input (HH:MM:SS).",
default=None,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: time = SchemaField(description="Time result.")
def __init__(self):
super().__init__(
id="2a1c757e-86cf-4c7e-aacf-060dc382e434",
description="Block for time input.",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentTimeInputBlock.Input,
output_schema=AgentTimeInputBlock.Output,
test_input=[
{
"value": str(time(9, 30, 0)),
"name": "time_input_1",
"description": "Time example 1",
},
{
"value": str(time(23, 59, 59)),
"name": "time_input_2",
"description": "Time example 2",
},
],
test_output=[
("result", time(9, 30, 0)),
("result", time(23, 59, 59)),
],
)
class AgentFileInputBlock(AgentInputBlock):
"""
A simplified file-upload block. In real usage, you might have a custom
file type or handle binary data. Here, we'll store a string path as the example.
"""
class Input(AgentInputBlock.Input):
value: Optional[MediaFileType] = SchemaField(
description="Path or reference to an uploaded file.",
default=None,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: str = SchemaField(description="File reference/path result.")
def __init__(self):
super().__init__(
id="95ead23f-8283-4654-aef3-10c053b74a31",
description="Block for file upload input (string path for example).",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentFileInputBlock.Input,
output_schema=AgentFileInputBlock.Output,
test_input=[
{
"value": "data:image/png;base64,MQ==",
"name": "file_upload_1",
"description": "Example file upload 1",
},
],
test_output=[
("result", str),
],
)
async def run(
self,
input_data: Input,
*,
graph_exec_id: str,
**kwargs,
) -> BlockOutput:
if not input_data.value:
return
file_path = await store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.value,
return_content=False,
)
yield "result", file_path
class AgentDropdownInputBlock(AgentInputBlock):
"""
A specialized text input block that relies on placeholder_values to present a dropdown.
"""
class Input(AgentInputBlock.Input):
value: Optional[str] = SchemaField(
description="Text selected from a dropdown.",
default=None,
advanced=False,
title="Default Value",
)
placeholder_values: list = SchemaField(
description="Possible values for the dropdown.",
default_factory=list,
advanced=False,
title="Dropdown Options",
)
class Output(AgentInputBlock.Output):
result: str = SchemaField(description="Selected dropdown value.")
def __init__(self):
super().__init__(
id="655d6fdf-a334-421c-b733-520549c07cd1",
description="Block for dropdown text selection.",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentDropdownInputBlock.Input,
output_schema=AgentDropdownInputBlock.Output,
test_input=[
{
"value": "Option A",
"name": "dropdown_1",
"placeholder_values": ["Option A", "Option B", "Option C"],
"description": "Dropdown example 1",
},
{
"value": "Option C",
"name": "dropdown_2",
"placeholder_values": ["Option A", "Option B", "Option C"],
"description": "Dropdown example 2",
},
],
test_output=[
("result", "Option A"),
("result", "Option C"),
],
)
class AgentToggleInputBlock(AgentInputBlock):
class Input(AgentInputBlock.Input):
value: bool = SchemaField(
description="Boolean toggle input.",
default=False,
advanced=False,
title="Default Value",
)
class Output(AgentInputBlock.Output):
result: bool = SchemaField(description="Boolean toggle result.")
def __init__(self):
super().__init__(
id="cbf36ab5-df4a-43b6-8a7f-f7ed8652116e",
description="Block for boolean toggle input.",
disabled=not config.enable_agent_input_subtype_blocks,
input_schema=AgentToggleInputBlock.Input,
output_schema=AgentToggleInputBlock.Output,
test_input=[
{
"value": True,
"name": "toggle_1",
"description": "Toggle example 1",
},
{
"value": False,
"name": "toggle_2",
"description": "Toggle example 2",
},
],
test_output=[
("result", True),
("result", False),
],
)
IO_BLOCK_IDs = [
AgentInputBlock().id,
AgentOutputBlock().id,
AgentShortTextInputBlock().id,
AgentLongTextInputBlock().id,
AgentNumberInputBlock().id,
AgentDateInputBlock().id,
AgentTimeInputBlock().id,
AgentFileInputBlock().id,
AgentDropdownInputBlock().id,
AgentToggleInputBlock().id,
]
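Since Block.run is an async generator yielding (name, value) pairs, exercising any of these input blocks by hand looks roughly like this (a sketch; the platform's executor layers credentials, execution IDs, and test mocking on top):

import asyncio

async def collect(block, input_data):
    return [pair async for pair in block.run(input_data)]

block = AgentShortTextInputBlock()
input_data = AgentShortTextInputBlock.Input(name="greeting", value="Hello")
print(asyncio.run(collect(block, input_data)))  # [('result', 'Hello')]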


@@ -11,13 +11,13 @@ class StepThroughItemsBlock(Block):
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
- default_factory=list,
+ default=[],
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
- default_factory=dict,
+ default={},
)
items_str: str = SchemaField(
advanced=False,
@@ -53,7 +53,7 @@ class StepThroughItemsBlock(Block):
test_mock={},
)
- async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+ def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
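The run body is truncated above, but the pattern it implies is to loop over whichever of the three inputs is populated and yield each element along with its key or index. A sketch of that iteration (output names are assumptions):

def step_through(data):
    # Dicts yield (key, value) pairs; lists yield (index, item) pairs.
    pairs = data.items() if isinstance(data, dict) else enumerate(data)
    for key, item in pairs:
        yield "item", item
        yield "key", key

assert list(step_through([1, 2])) == [
    ("item", 1), ("key", 0), ("item", 2), ("key", 1),
]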


@@ -5,7 +5,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
- from backend.util.request import Requests
+ from backend.util.request import requests
class JinaChunkingBlock(Block):
@@ -23,7 +23,7 @@ class JinaChunkingBlock(Block):
class Output(BlockSchema):
chunks: list = SchemaField(description="List of chunked texts")
tokens: list = SchemaField(
description="List of token information for each chunk",
description="List of token information for each chunk", optional=True
)
def __init__(self):
@@ -35,7 +35,7 @@ class JinaChunkingBlock(Block):
output_schema=JinaChunkingBlock.Output,
)
- async def run(
+ def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
@@ -55,7 +55,7 @@ class JinaChunkingBlock(Block):
"max_chunk_length": str(input_data.max_chunk_length),
}
- response = await Requests().post(url, headers=headers, json=data)
+ response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))
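For reference, the chunking request this hunk converts to synchronous requests looks roughly like the following. The endpoint, auth header, and max_chunk_length come from the diff; the content and return_chunks field names are assumptions:

import requests

def chunk_text(api_key: str, text: str, max_chunk_length: int = 1000) -> list[str]:
    response = requests.post(
        "https://segment.jina.ai/",
        headers={"Authorization": f"Bearer {api_key}"},
        json={
            "content": text,  # assumed field name
            "return_chunks": True,  # assumed field name
            "max_chunk_length": str(max_chunk_length),
        },
    )
    response.raise_for_status()
    return response.json().get("chunks", [])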


@@ -5,7 +5,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
- from backend.util.request import Requests
+ from backend.util.request import requests
class JinaEmbeddingBlock(Block):
@@ -29,7 +29,7 @@ class JinaEmbeddingBlock(Block):
output_schema=JinaEmbeddingBlock.Output,
)
- async def run(
+ def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.jina.ai/v1/embeddings"
@@ -38,6 +38,6 @@ class JinaEmbeddingBlock(Block):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
data = {"input": input_data.texts, "model": input_data.model}
- response = await Requests().post(url, headers=headers, json=data)
+ response = requests.post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
yield "embeddings", embeddings


@@ -1,5 +1,7 @@
from urllib.parse import quote
+ import requests
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
@@ -7,7 +9,6 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
- from backend.util.request import Requests
class FactCheckerBlock(Block):
@@ -34,7 +35,7 @@ class FactCheckerBlock(Block):
output_schema=FactCheckerBlock.Output,
)
- async def run(
+ def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
encoded_statement = quote(input_data.statement)
@@ -45,7 +46,8 @@ class FactCheckerBlock(Block):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
- response = await Requests().get(url, headers=headers)
+ response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
if "data" in data:


@@ -1,4 +1,4 @@
- from urllib.parse import quote
+ from groq._utils._utils import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
@@ -39,7 +39,7 @@ class SearchTheWebBlock(Block, GetRequest):
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
- async def run(
+ def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
@@ -51,7 +51,7 @@ class SearchTheWebBlock(Block, GetRequest):
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
- results = await self.get_request(jina_search_url, headers=headers, json=False)
+ results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
@@ -90,7 +90,7 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
- async def run(
+ def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
@@ -103,5 +103,5 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
- content = await self.get_request(url, json=False, headers=headers)
+ content = self.get_request(url, json=False, headers=headers)
yield "content", content


@@ -1,273 +0,0 @@
from __future__ import annotations
import json
from typing import Any, Dict, Optional
from backend.blocks.linear._auth import LinearCredentials
from backend.blocks.linear.models import (
CreateCommentResponse,
CreateIssueResponse,
Issue,
Project,
)
from backend.util.request import Requests
class LinearAPIException(Exception):
def __init__(self, message: str, status_code: int):
super().__init__(message)
self.status_code = status_code
class LinearClient:
"""Client for the Linear API
If you're looking for the schema: https://studio.apollographql.com/public/Linear-API/variant/current/schema
"""
API_URL = "https://api.linear.app/graphql"
def __init__(
self,
credentials: LinearCredentials | None = None,
custom_requests: Optional[Requests] = None,
):
if custom_requests:
self._requests = custom_requests
else:
headers: Dict[str, str] = {
"Content-Type": "application/json",
}
if credentials:
headers["Authorization"] = credentials.auth_header()
self._requests = Requests(
extra_headers=headers,
trusted_origins=["https://api.linear.app"],
raise_for_status=False,
)
async def _execute_graphql_request(
self, query: str, variables: dict | None = None
) -> Any:
"""
Executes a GraphQL request against the Linear API and returns the response data.
Args:
query: The GraphQL query string.
variables (optional): Any GraphQL query variables
Returns:
The parsed JSON response data, or raises a LinearAPIException on error.
"""
payload: Dict[str, Any] = {"query": query}
if variables:
payload["variables"] = variables
response = await self._requests.post(self.API_URL, json=payload)
if not response.ok:
try:
error_data = response.json()
error_message = error_data.get("errors", [{}])[0].get("message", "")
except json.JSONDecodeError:
error_message = response.text()
raise LinearAPIException(
f"Linear API request failed ({response.status}): {error_message}",
response.status,
)
response_data = response.json()
if "errors" in response_data:
error_messages = [
error.get("message", "") for error in response_data["errors"]
]
raise LinearAPIException(
f"Linear API returned errors: {', '.join(error_messages)}",
response.status,
)
return response_data["data"]
async def query(self, query: str, variables: Optional[dict] = None) -> dict:
"""Executes a GraphQL query.
Args:
query: The GraphQL query string.
variables: Query variables, if any.
Returns:
The response data.
"""
return await self._execute_graphql_request(query, variables)
async def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict:
"""Executes a GraphQL mutation.
Args:
mutation: The GraphQL mutation string.
variables: Query variables, if any.
Returns:
The response data.
"""
return await self._execute_graphql_request(mutation, variables)
async def try_create_comment(
self, issue_id: str, comment: str
) -> CreateCommentResponse:
try:
mutation = """
mutation CommentCreate($input: CommentCreateInput!) {
commentCreate(input: $input) {
success
comment {
id
body
}
}
}
"""
variables = {
"input": {
"body": comment,
"issueId": issue_id,
}
}
added_comment = await self.mutate(mutation, variables)
# Select the commentCreate field from the mutation response
return CreateCommentResponse(**added_comment["commentCreate"])
except LinearAPIException as e:
raise e
async def try_get_team_by_name(self, team_name: str) -> str:
try:
query = """
query GetTeamId($searchTerm: String!) {
teams(filter: {
or: [
{ name: { eqIgnoreCase: $searchTerm } },
{ key: { eqIgnoreCase: $searchTerm } }
]
}) {
nodes {
id
name
key
}
}
}
"""
variables: dict[str, Any] = {
"searchTerm": team_name,
}
team_id = await self.query(query, variables)
return team_id["teams"]["nodes"][0]["id"]
except LinearAPIException as e:
raise e
async def try_create_issue(
self,
team_id: str,
title: str,
description: str | None = None,
priority: int | None = None,
project_id: str | None = None,
) -> CreateIssueResponse:
try:
mutation = """
mutation IssueCreate($input: IssueCreateInput!) {
issueCreate(input: $input) {
issue {
title
description
id
identifier
priority
}
}
}
"""
variables: dict[str, Any] = {
"input": {
"teamId": team_id,
"title": title,
}
}
if project_id:
variables["input"]["projectId"] = project_id
if description:
variables["input"]["description"] = description
if priority:
variables["input"]["priority"] = priority
added_issue = await self.mutate(mutation, variables)
return CreateIssueResponse(**added_issue["issueCreate"])
except LinearAPIException as e:
raise e
async def try_search_projects(self, term: str) -> list[Project]:
try:
query = """
query SearchProjects($term: String!, $includeComments: Boolean!) {
searchProjects(term: $term, includeComments: $includeComments) {
nodes {
id
name
description
priority
progress
content
}
}
}
"""
variables: dict[str, Any] = {
"term": term,
"includeComments": True,
}
projects = await self.query(query, variables)
return [
Project(**project) for project in projects["searchProjects"]["nodes"]
]
except LinearAPIException as e:
raise e
async def try_search_issues(self, term: str) -> list[Issue]:
try:
query = """
query SearchIssues($term: String!, $includeComments: Boolean!) {
searchIssues(term: $term, includeComments: $includeComments) {
nodes {
id
identifier
title
description
priority
}
}
}
"""
variables: dict[str, Any] = {
"term": term,
"includeComments": True,
}
issues = await self.query(query, variables)
return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]]
except LinearAPIException as e:
raise e
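A hedged usage sketch of the deleted client, showing the intended call pattern; credential construction is elided and my_credentials stands in for a real LinearCredentials object:

import asyncio

async def main(my_credentials):
    client = LinearClient(credentials=my_credentials)
    team_id = await client.try_get_team_by_name("Platform")
    issue = await client.try_create_issue(
        team_id=team_id,
        title="Fix poetry.lock drift",
        description="The lock file keeps diverging between dev and the feature branch.",
        priority=2,
    )
    print(issue)

# asyncio.run(main(my_credentials))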


@@ -1,101 +0,0 @@
from enum import Enum
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
secrets = Secrets()
LINEAR_OAUTH_IS_CONFIGURED = bool(
secrets.linear_client_id and secrets.linear_client_secret
)
LinearCredentials = OAuth2Credentials | APIKeyCredentials
# LinearCredentialsInput = CredentialsMetaInput[
# Literal[ProviderName.LINEAR],
# Literal["oauth2", "api_key"] if LINEAR_OAUTH_IS_CONFIGURED else Literal["oauth2"],
# ]
LinearCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.LINEAR], Literal["oauth2"]
]
# (required) Comma separated list of scopes:
# read - (Default) Read access for the user's account. This scope will always be present.
# write - Write access for the user's account. If your application only needs to create comments, use a more targeted scope
# issues:create - Allows creating new issues and their attachments
# comments:create - Allows creating new issue comments
# timeSchedule:write - Allows creating and modifying time schedules
# admin - Full access to admin level endpoints. You should never ask for this permission unless it's absolutely needed
class LinearScope(str, Enum):
READ = "read"
WRITE = "write"
ISSUES_CREATE = "issues:create"
COMMENTS_CREATE = "comments:create"
TIME_SCHEDULE_WRITE = "timeSchedule:write"
ADMIN = "admin"
def LinearCredentialsField(scopes: list[LinearScope]) -> LinearCredentialsInput:
"""
Creates a Linear credentials input on a block.
Params:
scopes: The authorization scopes needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
required_scopes=set([LinearScope.READ.value]).union(
set([scope.value for scope in scopes])
),
description="The Linear integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
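In a block, these helpers combine like so (an illustrative sketch; the field name and schema layout follow the patterns elsewhere in this diff):

from backend.data.block import BlockSchema

class Input(BlockSchema):
    credentials: LinearCredentialsInput = LinearCredentialsField(
        scopes=[LinearScope.ISSUES_CREATE],
    )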
TEST_CREDENTIALS_OAUTH = OAuth2Credentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="linear",
title="Mock Linear API key",
username="mock-linear-username",
access_token=SecretStr("mock-linear-access-token"),
access_token_expires_at=None,
refresh_token=SecretStr("mock-linear-refresh-token"),
refresh_token_expires_at=None,
scopes=["mock-linear-scopes"],
)
TEST_CREDENTIALS_API_KEY = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="linear",
title="Mock Linear API key",
api_key=SecretStr("mock-linear-api-key"),
expires_at=None,
)
TEST_CREDENTIALS_INPUT_OAUTH = {
"provider": TEST_CREDENTIALS_OAUTH.provider,
"id": TEST_CREDENTIALS_OAUTH.id,
"type": TEST_CREDENTIALS_OAUTH.type,
"title": TEST_CREDENTIALS_OAUTH.type,
}
TEST_CREDENTIALS_INPUT_API_KEY = {
"provider": TEST_CREDENTIALS_API_KEY.provider,
"id": TEST_CREDENTIALS_API_KEY.id,
"type": TEST_CREDENTIALS_API_KEY.type,
"title": TEST_CREDENTIALS_API_KEY.type,
}
