Compare commits


61 Commits

Author SHA1 Message Date
Abhimanyu Yadav
54bbafc431 Merge branch 'dev' into ci-chromatic 2025-04-22 20:26:42 +05:30
Abhimanyu Yadav
5662783624 Merge branch 'dev' into ci-chromatic 2025-04-22 10:29:24 +05:30
Nicholas Tindle
a5f448af98 Merge branch 'dev' into ci-chromatic 2025-03-06 11:24:49 -06:00
Nicholas Tindle
c766bd66e1 fix(frontend): typechecking 2025-02-05 17:15:24 -06:00
Nicholas Tindle
6d11ad8051 fix(frontend): format 2025-02-05 17:12:33 -06:00
Nicholas Tindle
d476983bd2 fix: doesn't crash 2025-02-05 17:10:21 -06:00
Nicholas Tindle
3ac1ce5a3f fix: format 2025-02-05 16:50:51 -06:00
Nicholas Tindle
3b89e6d2b7 Merge branch 'ci-chromatic' of https://github.com/Significant-Gravitas/AutoGPT into ci-chromatic 2025-02-05 16:49:32 -06:00
Nicholas Tindle
c7a7652b9f Merge branch 'dev' into ci-chromatic 2025-02-05 16:47:46 -06:00
Nicholas Tindle
b6b0d0b209 Merge branch 'dev' into ci-chromatic 2025-02-03 11:55:31 -06:00
Nicholas Tindle
a5b1495062 Merge branch 'dev' into ci-chromatic 2025-02-03 07:13:53 -06:00
Nicholas Tindle
026f16c10f Merge branch 'dev' into ci-chromatic 2025-01-31 04:50:48 -06:00
Nicholas Tindle
c468201c53 Update mock_client.ts 2025-01-29 07:10:03 -06:00
Nicholas Tindle
5beb581d1c feat(frontend): minimocking 2025-01-29 07:04:38 -06:00
Nicholas Tindle
df2339c1cf feat: add mock backend for rendering the storybook stuff 2025-01-29 06:46:50 -06:00
Nicholas Tindle
327db54321 Merge branch 'open-2047-add-type-checking-step-to-front-end-ci' into ci-chromatic 2025-01-29 12:26:19 +00:00
Nicholas Tindle
234d6f78ba Merge branch 'dev' into open-2047-add-type-checking-step-to-front-end-ci 2025-01-29 12:17:23 +00:00
Nicholas Tindle
43088ddff8 fix: incorrect meshing of types and test 2025-01-29 06:16:28 -06:00
Nicholas Tindle
fd955fba25 ref: add providers to the story previews 2025-01-29 05:35:56 -06:00
Nicholas Tindle
83943d9ddb Merge branch 'open-2047-add-type-checking-step-to-front-end-ci' into ci-chromatic 2025-01-29 10:57:00 +00:00
Nicholas Tindle
60c26e62f6 Merge branch 'dev' into open-2047-add-type-checking-step-to-front-end-ci 2025-01-29 10:53:51 +00:00
Nicholas Tindle
1fc8f9ba66 fix: handle conditions better for feature flagging 2025-01-28 18:04:41 +00:00
Nicholas Tindle
33d747f457 ref: remove unused code 2025-01-28 16:39:11 +00:00
Nicholas Tindle
06fa001a37 ref: use data structure for copy and paste data 2025-01-28 16:36:07 +00:00
Nicholas Tindle
4e7b56b814 ref: pr changes 2025-01-28 16:31:25 +00:00
Nicholas Tindle
d6b03a4f18 ref: pr change request 2025-01-28 16:30:13 +00:00
Nicholas Tindle
fae9aeb49a fix: linting 2025-01-28 16:30:05 +00:00
Nicholas Tindle
5e8c1e274e fix: linting 2025-01-28 16:29:58 +00:00
Nicholas Tindle
55f7dc4853 fix: drop classname unused 2025-01-28 16:25:23 +00:00
Nicholas Tindle
b317adb9cf ref: remove classname from navbar link 2025-01-28 16:23:14 +00:00
Nicholas Tindle
c873ba04b8 ref: split out type-check step + fix tsc error 2025-01-28 15:38:05 +00:00
Nicholas Tindle
00f0311dd0 ref: split out type-check step + fix tsc error 2025-01-28 15:31:52 +00:00
Nicholas Tindle
9b2bd756fa Update platform-frontend-ci.yml 2025-01-28 15:17:42 +00:00
Nicholas Tindle
bceb83ca30 fix: workingdir required 2025-01-28 15:17:42 +00:00
Nicholas Tindle
eadbfcd920 Update platform-frontend-ci.yml 2025-01-28 15:17:41 +00:00
SwiftyOS
9768540b60 Merge branch 'dev' into open-2047-add-type-checking-step-to-front-end-ci 2025-01-28 15:46:21 +01:00
Nicholas Tindle
697436be07 Merge branch 'dev' into open-2047-add-type-checking-step-to-front-end-ci 2025-01-28 07:53:27 +00:00
Nicholas Tindle
d725e105a0 Merge branch 'dev' into open-2047-add-type-checking-step-to-front-end-ci 2025-01-26 15:27:50 +01:00
Nicholas Tindle
927f43f52f fix: formatting 2025-01-26 12:18:11 +00:00
Nicholas Tindle
eedcc92d6f fix: add secret to all the subschemas 2025-01-26 12:15:37 +00:00
Nicholas Tindle
f0c378c70d fix: missing type addition 2025-01-26 12:12:12 +00:00
Nicholas Tindle
c6c2b852df fix: missing inputs based on changes 2025-01-26 12:11:37 +00:00
Nicholas Tindle
aaab8b1e0e fix: more formatting 2025-01-26 11:56:07 +00:00
Nicholas Tindle
a4eeb4535a fix: formatting 2025-01-26 11:55:56 +00:00
Nicholas Tindle
db068c598c fix: missing types 2025-01-26 11:46:35 +00:00
Nicholas Tindle
d4d9efc73e fix: missing attribute 2025-01-26 11:46:25 +00:00
Nicholas Tindle
ffaf77df4e fix: type the params 2025-01-26 11:46:10 +00:00
Nicholas Tindle
2daf08434e fix: type the params 2025-01-26 11:46:01 +00:00
Nicholas Tindle
745137f4c2 fix: pass correct subclass 2025-01-26 11:45:46 +00:00
Nicholas Tindle
3a2c3deb0e fix: remove import + impossible case 2025-01-26 11:45:34 +00:00
Nicholas Tindle
66a15a7b8c fix: user correct object when deleting 2025-01-26 11:44:25 +00:00
Nicholas Tindle
669c61de76 fix: take in classnames as used by the outer component
we probbaly shouldn't be doing this?
2025-01-26 11:44:04 +00:00
Nicholas Tindle
e860bde3d4 fix: coalesce types and use a default
@aarushik93 is this okay?
2025-01-26 11:43:40 +00:00
Nicholas Tindle
f5394f6d65 fix: expose interface for sub object so it can be used in other places to fix type errors 2025-01-26 11:43:00 +00:00
Nicholas Tindle
06e845abe7 feat: take in props for navbar
Is this desired?
2025-01-26 11:42:28 +00:00
Nicholas Tindle
c2c3c29018 fix: use proper state object 2025-01-26 11:42:08 +00:00
Nicholas Tindle
31fd0b557a fix: add missing import 2025-01-26 11:41:44 +00:00
Nicholas Tindle
9350fe1d2b fix: fully disable unused page 2025-01-26 11:41:12 +00:00
Nicholas Tindle
5ae92820b4 fix: remove unused classnames 2025-01-26 11:40:57 +00:00
Nicholas Tindle
66a87e5a14 ci: typechecker for frontend 2025-01-26 11:22:38 +00:00
Nicholas Tindle
e1f8882e2d fix: stories being broken 2025-01-26 11:18:12 +00:00
1031 changed files with 31546 additions and 79787 deletions

View File

@@ -27,7 +27,7 @@
!autogpt_platform/frontend/src/
!autogpt_platform/frontend/public/
!autogpt_platform/frontend/package.json
!autogpt_platform/frontend/pnpm-lock.yaml
!autogpt_platform/frontend/yarn.lock
!autogpt_platform/frontend/tsconfig.json
!autogpt_platform/frontend/README.md
## config

View File

@@ -10,19 +10,17 @@ updates:
commit-message:
prefix: "chore(libs/deps)"
prefix-development: "chore(libs/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# backend (Poetry project)
- package-ecosystem: "pip"
@@ -34,19 +32,17 @@ updates:
commit-message:
prefix: "chore(backend/deps)"
prefix-development: "chore(backend/deps-dev)"
ignore:
- dependency-name: "poetry"
groups:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# frontend (Next.js project)
- package-ecosystem: "npm"
@@ -62,13 +58,13 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# infra (Terraform)
- package-ecosystem: "terraform"
@@ -85,13 +81,14 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# GitHub Actions
- package-ecosystem: "github-actions"
@@ -104,13 +101,14 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Docker
- package-ecosystem: "docker"
@@ -123,16 +121,16 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
# Docs
- package-ecosystem: "pip"
- package-ecosystem: 'pip'
directory: "docs/"
schedule:
interval: "weekly"
@@ -144,10 +142,10 @@ updates:
production-dependencies:
dependency-type: "production"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"
development-dependencies:
dependency-type: "development"
update-types:
- "minor"
- "patch"
- "minor"
- "patch"

.github/labeler.yml
View File

@@ -24,9 +24,8 @@ platform/frontend:
platform/backend:
- changed-files:
- all-globs-to-any-file:
- autogpt_platform/backend/**
- '!autogpt_platform/backend/backend/blocks/**'
- any-glob-to-any-file: autogpt_platform/backend/**
- all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
platform/blocks:
- changed-files:

View File

@@ -1,47 +0,0 @@
name: Claude Code
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
issues:
types: [opened, assigned]
pull_request_review:
types: [submitted]
jobs:
claude:
if: |
(
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
) && (
github.event.comment.author_association == 'OWNER' ||
github.event.comment.author_association == 'MEMBER' ||
github.event.comment.author_association == 'COLLABORATOR' ||
github.event.review.author_association == 'OWNER' ||
github.event.review.author_association == 'MEMBER' ||
github.event.review.author_association == 'COLLABORATOR' ||
github.event.issue.author_association == 'OWNER' ||
github.event.issue.author_association == 'MEMBER' ||
github.event.issue.author_association == 'COLLABORATOR'
)
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
issues: read
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@beta
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

View File

@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.11"]
python-version: ["3.10"]
runs-on: ubuntu-latest
services:
@@ -50,23 +50,6 @@ jobs:
env:
RABBITMQ_DEFAULT_USER: ${{ env.RABBITMQ_DEFAULT_USER }}
RABBITMQ_DEFAULT_PASS: ${{ env.RABBITMQ_DEFAULT_PASS }}
clamav:
image: clamav/clamav-debian:latest
ports:
- 3310:3310
env:
CLAMAV_NO_FRESHCLAMD: false
CLAMD_CONF_StreamMaxLength: 50M
CLAMD_CONF_MaxFileSize: 100M
CLAMD_CONF_MaxScanSize: 100M
CLAMD_CONF_MaxThreads: 4
CLAMD_CONF_ReadTimeout: 300
options: >-
--health-cmd "clamdscan --version || exit 1"
--health-interval 30s
--health-timeout 10s
--health-retries 5
--health-start-period 180s
steps:
- name: Checkout repository
@@ -98,12 +81,12 @@ jobs:
- name: Install Poetry (Unix)
run: |
# Extract Poetry version from backend/poetry.lock
HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
HEAD_POETRY_VERSION=$(head -n 1 poetry.lock | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"
if [ -n "$BASE_REF" ]; then
BASE_BRANCH=${BASE_REF/refs\/heads\//}
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | head -n 1 | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
else
@@ -148,35 +131,6 @@ jobs:
# outputs:
# DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET
- name: Wait for ClamAV to be ready
run: |
echo "Waiting for ClamAV daemon to start..."
max_attempts=60
attempt=0
until nc -z localhost 3310 || [ $attempt -eq $max_attempts ]; do
echo "ClamAV is unavailable - sleeping (attempt $((attempt+1))/$max_attempts)"
sleep 5
attempt=$((attempt+1))
done
if [ $attempt -eq $max_attempts ]; then
echo "ClamAV failed to start after $((max_attempts*5)) seconds"
echo "Checking ClamAV service logs..."
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
fi
echo "ClamAV is ready!"
# Verify ClamAV is responsive
echo "Testing ClamAV connection..."
timeout 10 bash -c 'echo "PING" | nc localhost 3310' || {
echo "ClamAV is not responding to PING"
docker logs $(docker ps -q --filter "ancestor=clamav/clamav-debian:latest") 2>&1 | tail -50 || echo "No ClamAV container found"
exit 1
}
- name: Run Database Migrations
run: poetry run prisma migrate dev --name updates
env:
@@ -190,9 +144,9 @@ jobs:
- name: Run pytest with coverage
run: |
if [[ "${{ runner.debug }}" == "1" ]]; then
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG
poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
else
poetry run pytest -s -vv
poetry run pytest -s -vv test
fi
if: success() || (failure() && steps.lint.outcome == 'failure')
env:
@@ -205,7 +159,6 @@ jobs:
REDIS_HOST: "localhost"
REDIS_PORT: "6379"
REDIS_PASSWORD: "testpassword"
ENCRYPTION_KEY: "dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw=" # DO NOT USE IN PRODUCTION!!
env:
CI: true
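The "Install Poetry (Unix)" step in this workflow resolves the Poetry version either through a dedicated lockfile-parsing script or by grepping the generator comment on the first line of `backend/poetry.lock`. A minimal Python sketch of that header-based approach, for context (file path and regex assumed from the step shown above):

```python
# Sketch only: read the Poetry version recorded in the poetry.lock header comment, e.g.
# "# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand."
import re
from pathlib import Path


def poetry_version_from_lock_header(lockfile: str = "poetry.lock") -> str | None:
    first_line = Path(lockfile).read_text(encoding="utf-8").splitlines()[0]
    match = re.search(r"Poetry (\d+\.\d+\.\d+)", first_line)
    return match.group(1) if match else None
```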

View File

@@ -1,198 +0,0 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher
on:
pull_request:
types: [closed]
issue_comment:
types: [created]
permissions:
issues: write
pull-requests: write
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Check comment permissions and deployment status
id: check_status
if: github.event_name == 'issue_comment' && github.event.issue.pull_request
uses: actions/github-script@v7
with:
script: |
const commentBody = context.payload.comment.body.trim();
const commentUser = context.payload.comment.user.login;
const prAuthor = context.payload.issue.user.login;
const authorAssociation = context.payload.comment.author_association;
// Check permissions
const hasPermission = (
authorAssociation === 'OWNER' ||
authorAssociation === 'MEMBER' ||
authorAssociation === 'COLLABORATOR'
);
core.setOutput('comment_body', commentBody);
core.setOutput('has_permission', hasPermission);
if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
core.setOutput('permission_denied', 'true');
return;
}
if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
return;
}
// Process deploy command
if (commentBody === '!deploy') {
core.setOutput('should_deploy', 'true');
}
// Process undeploy command
else if (commentBody === '!undeploy') {
core.setOutput('should_undeploy', 'true');
}
- name: Post permission denied comment
if: steps.check_status.outputs.permission_denied == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
});
- name: Get PR details for deployment
id: pr_details
if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
const pr = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: context.issue.number
});
core.setOutput('pr_number', pr.data.number);
core.setOutput('pr_title', pr.data.title);
core.setOutput('pr_state', pr.data.state);
- name: Dispatch Deploy Event
if: steps.check_status.outputs.should_deploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "deploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post deploy success comment
if: steps.check_status.outputs.should_deploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
});
- name: Dispatch Undeploy Event (from comment)
if: steps.check_status.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ steps.pr_details.outputs.pr_number }}",
"pr_title": "${{ steps.pr_details.outputs.pr_title }}",
"pr_state": "${{ steps.pr_details.outputs.pr_state }}",
"repo": "${{ github.repository }}"
}
- name: Post undeploy success comment
if: steps.check_status.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
});
- name: Check deployment status on PR close
id: check_pr_close
if: github.event_name == 'pull_request' && github.event.action == 'closed'
uses: actions/github-script@v7
with:
script: |
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number
});
let lastDeployIndex = -1;
let lastUndeployIndex = -1;
comments.data.forEach((comment, index) => {
if (comment.body.trim() === '!deploy') {
lastDeployIndex = index;
} else if (comment.body.trim() === '!undeploy') {
lastUndeployIndex = index;
}
});
// Should undeploy if there's a !deploy without a subsequent !undeploy
const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
core.setOutput('should_undeploy', shouldUndeploy);
- name: Dispatch Undeploy Event (PR closed with active deployment)
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.DISPATCH_TOKEN }}
repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
event-type: pr-event
client-payload: |
{
"action": "undeploy",
"pr_number": "${{ github.event.pull_request.number }}",
"pr_title": "${{ github.event.pull_request.title }}",
"pr_state": "${{ github.event.pull_request.state }}",
"repo": "${{ github.repository }}"
}
- name: Post PR close undeploy comment
if: >-
github.event_name == 'pull_request' &&
github.event.action == 'closed' &&
steps.check_pr_close.outputs.should_undeploy == 'true'
uses: actions/github-script@v7
with:
script: |
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
});

View File

@@ -18,107 +18,49 @@ defaults:
working-directory: autogpt_platform/frontend
jobs:
setup:
runs-on: ubuntu-latest
outputs:
cache-key: ${{ steps.cache-key.outputs.key }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Generate cache key
id: cache-key
run: echo "key=${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ steps.cache-key.outputs.key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
lint:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Run lint
run: pnpm lint
run: |
yarn lint
type-check:
runs-on: ubuntu-latest
needs: setup
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Run tsc check
run: pnpm type-check
run: |
yarn type-check
chromatic:
design:
runs-on: ubuntu-latest
needs: setup
# Only run on dev branch pushes or PRs targeting dev
if: github.ref == 'refs/heads/dev' || github.base_ref == 'dev'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -127,31 +69,19 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Run Chromatic
uses: chromaui/action@latest
with:
projectToken: chpt_9e7c1a76478c9c8
onlyChanged: true
# ⚠️ Make sure to configure a `CHROMATIC_PROJECT_TOKEN` repository secret
projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }}
workingDir: autogpt_platform/frontend
token: ${{ secrets.GITHUB_TOKEN }}
test:
runs-on: ubuntu-latest
needs: setup
strategy:
fail-fast: false
matrix:
@@ -168,9 +98,6 @@ jobs:
with:
node-version: "21"
- name: Enable corepack
run: corepack enable
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
@@ -189,35 +116,26 @@ jobs:
run: |
docker compose -f ../docker-compose.yml up -d
- name: Restore dependencies cache
uses: actions/cache@v4
with:
path: ~/.pnpm-store
key: ${{ needs.setup.outputs.cache-key }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install --frozen-lockfile
run: |
yarn install --frozen-lockfile
- name: Setup .env
run: cp .env.example .env
- name: Build frontend
run: pnpm build --turbo
# uses Turbopack, much faster and safe enough for a test pipeline
- name: Setup Builder .env
run: |
cp .env.example .env
- name: Install Browser '${{ matrix.browser }}'
run: pnpm playwright install --with-deps ${{ matrix.browser }}
run: yarn playwright install --with-deps ${{ matrix.browser }}
- name: Run Playwright tests
run: pnpm test:no-build --project=${{ matrix.browser }}
env:
BROWSER_TYPE: ${{ matrix.browser }}
- name: Run tests
timeout-minutes: 20
run: |
yarn test --project=${{ matrix.browser }}
- name: Print Final Docker Compose logs
if: always()
run: docker compose -f ../docker-compose.yml logs
run: |
docker compose -f ../docker-compose.yml logs
- uses: actions/upload-artifact@v4
if: ${{ !cancelled() }}

View File

@@ -16,7 +16,7 @@ jobs:
# operations-per-run: 5000
stale-issue-message: >
This issue has automatically been marked as _stale_ because it has not had
any activity in the last 170 days. You can _unstale_ it by commenting or
any activity in the last 50 days. You can _unstale_ it by commenting or
removing the label. Otherwise, this issue will be closed in 10 days.
stale-pr-message: >
This pull request has automatically been marked as _stale_ because it has
@@ -25,7 +25,7 @@ jobs:
close-issue-message: >
This issue was closed automatically because it has been stale for 10 days
with no activity.
days-before-stale: 170
days-before-stale: 100
days-before-close: 10
# Do not touch meta issues:
exempt-issue-labels: meta,fridge,project management

View File

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import sys
if sys.version_info < (3, 11):
print("Python version 3.11 or higher required")
sys.exit(1)
import tomllib
def get_package_version(package_name: str, lockfile_path: str) -> str | None:
"""Extract package version from poetry.lock file."""
try:
if lockfile_path == "-":
data = tomllib.load(sys.stdin.buffer)
else:
with open(lockfile_path, "rb") as f:
data = tomllib.load(f)
except FileNotFoundError:
print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
print(f"Error parsing TOML file: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"Error reading file: {e}", file=sys.stderr)
sys.exit(1)
# Look for the package in the packages list
packages = data.get("package", [])
for package in packages:
if package.get("name", "").lower() == package_name.lower():
return package.get("version")
return None
def main():
if len(sys.argv) not in (2, 3):
print(
"Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
" cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
file=sys.stderr,
)
sys.exit(1)
package_name = sys.argv[1]
lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"
version = get_package_version(package_name, lockfile_path)
if version:
print(version)
else:
print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

.gitignore
View File

@@ -165,7 +165,7 @@ package-lock.json
# Allow for locally private items
# private
pri*
pri*
# ignore
ig*
.github_access_token
@@ -176,4 +176,3 @@ autogpt_platform/backend/settings.py
*.ign.*
.test-contents
.claude/settings.local.json

View File

@@ -17,7 +17,7 @@ repos:
name: Detect secrets
description: Detects high entropy strings that are likely to be passwords.
files: ^autogpt_platform/
stages: [pre-push]
stages: [push]
- repo: local
# For proper type checking, all dependencies need to be up-to-date.
@@ -235,44 +235,44 @@ repos:
hooks:
- id: tsc
name: Typecheck - AutoGPT Platform - Frontend
entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
files: ^autogpt_platform/frontend/
types: [file]
language: system
pass_filenames: false
# - repo: local
# hooks:
# - id: pytest
# name: Run tests - AutoGPT Platform - Backend
# alias: pytest-platform-backend
# entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
# files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- repo: local
hooks:
- id: pytest
name: Run tests - AutoGPT Platform - Backend
alias: pytest-platform-backend
entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
# include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - AutoGPT (excl. slow tests)
# alias: pytest-classic-autogpt
# entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# # include forge source (since it's a path dependency) but exclude *_test.py files:
# files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - AutoGPT (excl. slow tests)
alias: pytest-classic-autogpt
entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
# include forge source (since it's a path dependency) but exclude *_test.py files:
files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Forge (excl. slow tests)
# alias: pytest-classic-forge
# entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
# files: ^classic/forge/(forge/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - Forge (excl. slow tests)
alias: pytest-classic-forge
entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
files: ^classic/forge/(forge/|tests/|poetry\.lock$)
language: system
pass_filenames: false
# - id: pytest
# name: Run tests - Classic - Benchmark
# alias: pytest-classic-benchmark
# entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
# files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
# language: system
# pass_filenames: false
- id: pytest
name: Run tests - Classic - Benchmark
alias: pytest-classic-benchmark
entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
language: system
pass_filenames: false

.vscode/launch.json
View File

@@ -32,9 +32,9 @@
"type": "debugpy",
"request": "launch",
"module": "backend.app",
"env": {
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
},
// "env": {
// "ENV": "dev"
// },
"envFile": "${workspaceFolder}/backend/.env",
"justMyCode": false,
"cwd": "${workspaceFolder}/autogpt_platform/backend"

View File

@@ -1,53 +0,0 @@
# AutoGPT Platform Contribution Guide
This guide provides context for Codex when updating the **autogpt_platform** folder.
## Directory overview
- `autogpt_platform/backend` FastAPI based backend service.
- `autogpt_platform/autogpt_libs` Shared Python libraries.
- `autogpt_platform/frontend` Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` development stack.
See `docs/content/platform/getting-started.md` for setup instructions.
## Code style
- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.
## Testing
- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.
Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks
## Pull requests
- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the linear ticket branch structure if given codex/open-1668-resume-dropped-runs

View File

@@ -15,35 +15,8 @@
> Setting up and hosting the AutoGPT Platform yourself is a technical process.
> If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.
### System Requirements
Before proceeding with the installation, ensure your system meets the following requirements:
#### Hardware Requirements
- CPU: 4+ cores recommended
- RAM: Minimum 8GB, 16GB recommended
- Storage: At least 10GB of free space
#### Software Requirements
- Operating Systems:
- Linux (Ubuntu 20.04 or newer recommended)
- macOS (10.15 or newer)
- Windows 10/11 with WSL2
- Required Software (with minimum versions):
- Docker Engine (20.10.0 or newer)
- Docker Compose (2.0.0 or newer)
- Git (2.30 or newer)
- Node.js (16.x or newer)
- npm (8.x or newer)
- VSCode (1.60 or newer) or any modern code editor
#### Network Requirements
- Stable internet connection
- Access to required ports (will be configured in Docker)
- Ability to make outbound HTTPS connections
### Updated Setup Instructions:
We've moved to a fully maintained and regularly updated documentation site.
We’ve moved to a fully maintained and regularly updated documentation site.
👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)
@@ -179,7 +152,7 @@ Just clone the repo, install dependencies with `./run setup`, and you should be
[![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.
To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic.
## 🤝 Sister projects

View File

@@ -1,147 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Repository Overview
AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities
## Essential Commands
### Backend Development
```bash
# Install dependencies
cd backend && poetry install
# Run database migrations
poetry run prisma migrate dev
# Start all services (database, redis, rabbitmq, clamav)
docker compose up -d
# Run the backend server
poetry run serve
# Run tests
poetry run test
# Run specific test
poetry run pytest path/to/test_file.py::test_function_name
# Lint and format
# prefer format if you want to just "fix" it and only get the errors that can't be autofixed
poetry run format # Black + isort
poetry run lint # ruff
```
More details can be found in TESTING.md
#### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Frontend Development
```bash
# Install dependencies
cd frontend && npm install
# Start development server
npm run dev
# Run E2E tests
npm run test
# Run Storybook for component development
npm run storybook
# Build production
npm run build
# Type checking
npm run type-check
```
## Architecture Overview
### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration
- **Security**: Cache protection middleware prevents sensitive data caching in browsers/proxies
### Frontend Architecture
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration
### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates
5. **Virus Scanning**: ClamAV integration for file upload security
### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook
### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents
### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services
### Common Development Tasks
**Adding a new block:**
1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
4. Implement `run` method
5. Register in block registry
6. Generate the block uuid using `uuid.uuid4()`
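A minimal, hypothetical sketch of such a block, assuming `Block`, `BlockSchema`, and `BlockOutput` are importable from `backend.data.block` (the import path and exact constructor arguments are assumptions, not taken from this repository state):
```python
from backend.data.block import Block, BlockOutput, BlockSchema  # import path assumed


class ReverseTextBlock(Block):
    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        reversed_text: str

    def __init__(self):
        super().__init__(
            # id generated once with uuid.uuid4() and pasted in, per step 6 above
            id="0a4dbf3e-6bc5-4c2a-9ac6-2f1f5f0a1b9d",
            input_schema=ReverseTextBlock.Input,
            output_schema=ReverseTextBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks emit (output_name, value) pairs.
        yield "reversed_text", input_data.text[::-1]
```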
**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify
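As a hypothetical illustration of steps 1–2 (the route path, module layout, and model name below are invented for the example):
```python
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()


class GraphSummary(BaseModel):
    id: str
    name: str
    version: int


@router.get("/graphs/{graph_id}/summary", response_model=GraphSummary)
async def get_graph_summary(graph_id: str) -> GraphSummary:
    # A real handler would query Prisma and enforce the user ID checks
    # called out elsewhere in this guide.
    return GraphSummary(id=graph_id, name="example", version=1)
```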
**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing
### Security Implementation
**Cache Protection Middleware:**
- Located in `/backend/backend/server/middleware/security.py`
- Default behavior: Disables caching for ALL endpoints with `Cache-Control: no-store, no-cache, must-revalidate, private`
- Uses an allow list approach - only explicitly permitted paths can be cached
- Cacheable paths include: static assets (`/static/*`, `/_next/static/*`), health checks, public store pages, documentation
- Prevents sensitive data (auth tokens, API keys, user data) from being cached by browsers/proxies
- To allow caching for a new endpoint, add it to `CACHEABLE_PATHS` in the middleware
- Applied to both main API server and external API applications
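A rough sketch of this allow-list pattern as a Starlette middleware; the actual implementation in `/backend/backend/server/middleware/security.py` may differ in structure, and the path list below is illustrative only:
```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

# Illustrative subset; the real CACHEABLE_PATHS allow list lives in the middleware module.
CACHEABLE_PATHS = ("/static/", "/_next/static/", "/health", "/docs")


class CacheProtectionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        response = await call_next(request)
        if not any(request.url.path.startswith(prefix) for prefix in CACHEABLE_PATHS):
            # Default-deny: anything not on the allow list must not be cached.
            response.headers["Cache-Control"] = "no-store, no-cache, must-revalidate, private"
        return response
```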

View File

@@ -15,63 +15,44 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
To run the AutoGPT Platform, follow these steps:
1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:
```
git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
cd AutoGPT/autogpt_platform
```
2. Run the following command:
```
cp .env.example .env
```
This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.
3. Run the following command:
```
docker compose up -d
```
This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.
4. Navigate to `frontend` within the `autogpt_platform` directory:
```
cd frontend
```
You will need to run your frontend application separately on your local machine.
5. Run the following command:
5. Run the following command:
```
cp .env.example .env.local
```
This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.
6. Run the following command:
Enable corepack and install dependencies by running:
```
corepack enable
pnpm i
npm install
npm run dev
```
Generate the API client (this step is required before running the frontend):
This command will install the necessary dependencies and start the frontend application in development mode.
If you are using Yarn, you can run the following commands instead:
```
pnpm generate:api-client
```
Then start the frontend application in development mode:
```
pnpm dev
yarn install && yarn dev
```
7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
@@ -87,52 +68,43 @@ Here are some useful Docker Compose commands for managing your AutoGPT Platform:
- `docker compose down`: Stop and remove containers, networks, and volumes.
- `docker compose watch`: Watch for changes in your services and automatically update them.
### Sample Scenarios
Here are some common scenarios where you might use multiple Docker Compose commands:
1. Updating and restarting a specific service:
```
docker compose build api_srv
docker compose up -d --no-deps api_srv
```
This rebuilds the `api_srv` service and restarts it without affecting other services.
2. Viewing logs for troubleshooting:
```
docker compose logs -f api_srv ws_srv
```
This shows and follows the logs for both `api_srv` and `ws_srv` services.
3. Scaling a service for increased load:
```
docker compose up -d --scale executor=3
```
This scales the `executor` service to 3 instances to handle increased load.
4. Stopping the entire system for maintenance:
```
docker compose stop
docker compose rm -f
docker compose pull
docker compose up -d
```
This stops all services, removes containers, pulls the latest images, and restarts the system.
5. Developing with live updates:
```
docker compose watch
```
This watches for changes in your code and automatically updates the relevant services.
6. Checking the status of services:
@@ -143,6 +115,7 @@ Here are some common scenarios where you might use multiple Docker Compose comma
These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.
### Persisting Data
To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
@@ -170,27 +143,3 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
3. Save the file and run `docker compose up -d` to apply the changes.
This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
### API Client Generation
The platform includes scripts for generating and managing the API client:
- `pnpm fetch:openapi`: Fetches the OpenAPI specification from the backend service (requires backend to be running on port 8006)
- `pnpm generate:api-client`: Generates the TypeScript API client from the OpenAPI specification using Orval
- `pnpm generate:api-all`: Runs both fetch and generate commands in sequence
#### Manual API Client Updates
If you need to update the API client after making changes to the backend API:
1. Ensure the backend services are running:
```
docker compose up -d
```
2. Generate the updated API client:
```
pnpm generate:api-all
```
This will fetch the latest OpenAPI specification and regenerate the TypeScript client code.

View File

@@ -1,3 +1,3 @@
# AutoGPT Libs
This is a new project to store shared functionality across different services in the AutoGPT Platform (e.g. authentication)
This is a new project to store shared functionality across different services in NextGen AutoGPT (e.g. authentication)

View File

@@ -31,5 +31,4 @@ class APIKeyManager:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
return secrets.compare_digest(provided_hash, stored_hash)
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
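The two versions of this method differ only in how the hashes are compared. A self-contained sketch of the constant-time variant (the prefix value below is a placeholder, not the real one):

```python
import hashlib
import secrets

PREFIX = "agpt_"  # placeholder; the real APIKeyManager defines its own prefix


def verify_api_key(provided_key: str, stored_hash: str) -> bool:
    if not provided_key.startswith(PREFIX):
        return False
    provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
    # compare_digest takes time independent of how many characters match,
    # avoiding a timing side channel on the comparison.
    return secrets.compare_digest(provided_hash, stored_hash)
```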

View File

@@ -1,6 +1,5 @@
import inspect
import logging
import secrets
from typing import Any, Callable, Optional
from fastapi import HTTPException, Request, Security
@@ -17,7 +16,7 @@ logger = logging.getLogger(__name__)
async def auth_middleware(request: Request):
if not settings.ENABLE_AUTH:
# If authentication is disabled, allow the request to proceed
logger.warning("Auth disabled")
logger.warn("Auth disabled")
return {}
security = HTTPBearer()
@@ -94,11 +93,7 @@ class APIKeyValidator:
self.error_message = error_message
async def default_validator(self, api_key: str) -> bool:
if not self.expected_token:
raise ValueError(
"Expected Token Required to be set when uisng API Key Validator default validation"
)
return secrets.compare_digest(api_key, self.expected_token)
return api_key == self.expected_token
async def __call__(
self, request: Request, api_key: str = Security(APIKeyHeader)

View File

@@ -1,15 +1,15 @@
import asyncio
from contextlib import asynccontextmanager
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis.asyncio import Redis as AsyncRedis
from redis.asyncio.lock import Lock as AsyncRedisLock
from redis import Redis
from redis.lock import Lock as RedisLock
class AsyncRedisKeyedMutex:
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
@@ -17,45 +17,41 @@ class AsyncRedisKeyedMutex:
in case the key is not unlocked for a specified duration, to prevent memory leaks.
"""
def __init__(self, redis: "AsyncRedis", timeout: int | None = 60):
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "AsyncRedisLock"] = ExpiringDict(
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = asyncio.Lock()
self.locks_lock = Lock()
@asynccontextmanager
async def locked(self, key: Any):
lock = await self.acquire(key)
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if (await lock.locked()) and (await lock.owned()):
await lock.release()
if lock.locked():
lock.release()
async def acquire(self, key: Any) -> "AsyncRedisLock":
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
async with self.locks_lock:
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
await lock.acquire()
lock.acquire()
return lock
async def release(self, key: Any):
if (
(lock := self.locks.get(key))
and (await lock.locked())
and (await lock.owned())
):
await lock.release()
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
async def release_all_locks(self):
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
async with self.locks_lock:
for lock in self.locks.values():
if (await lock.locked()) and (await lock.owned()):
await lock.release()
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
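For orientation, a hypothetical usage sketch of the async variant of this class; the import path is an assumption, and a local Redis on the default port is assumed to be running:

```python
import asyncio

from redis.asyncio import Redis

from autogpt_libs.utils.synchronize import AsyncRedisKeyedMutex  # import path assumed


async def main() -> None:
    redis = Redis(host="localhost", port=6379)
    mutex = AsyncRedisKeyedMutex(redis, timeout=60)
    async with mutex.locked("credentials:user-123"):
        ...  # critical section, serialized per key across processes
    await redis.aclose()


asyncio.run(main())
```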

View File

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -177,7 +177,7 @@ files = [
{file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
{file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
]
markers = {main = "python_version < \"3.11\"", dev = "python_full_version < \"3.11.3\""}
markers = {main = "python_version == \"3.10\"", dev = "python_full_version < \"3.11.3\""}
[[package]]
name = "attrs"
@@ -323,21 +323,6 @@ files = [
{file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
]
[[package]]
name = "click"
version = "8.2.1"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"},
{file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.6"
@@ -390,7 +375,7 @@ description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["main"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -414,27 +399,6 @@ files = [
[package.extras]
tests = ["coverage", "coveralls", "dill", "mock", "nose"]
[[package]]
name = "fastapi"
version = "0.115.12"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d"},
{file = "fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681"},
]
[package.dependencies]
pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
starlette = ">=0.40.0,<0.47.0"
typing-extensions = ">=4.8.0"
[package.extras]
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
[[package]]
name = "frozenlist"
version = "1.4.1"
@@ -598,19 +562,19 @@ protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4
[[package]]
name = "google-cloud-audit-log"
version = "0.3.2"
version = "0.3.0"
description = "Google Cloud Audit Protos"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_cloud_audit_log-0.3.2-py3-none-any.whl", hash = "sha256:daaedfb947a0d77f524e1bd2b560242ab4836fe1afd6b06b92f152b9658554ed"},
{file = "google_cloud_audit_log-0.3.2.tar.gz", hash = "sha256:2598f1533a7d7cdd6c7bf448c12e5519c1d53162d78784e10bcdd1df67791bc3"},
{file = "google_cloud_audit_log-0.3.0-py2.py3-none-any.whl", hash = "sha256:8340793120a1d5aa143605def8704ecdcead15106f754ef1381ae3bab533722f"},
{file = "google_cloud_audit_log-0.3.0.tar.gz", hash = "sha256:901428b257020d8c1d1133e0fa004164a555e5a395c7ca3cdbb8486513df3a65"},
]
[package.dependencies]
googleapis-common-protos = ">=1.56.2,<2.0.0"
protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
googleapis-common-protos = ">=1.56.2,<2.0dev"
protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
[[package]]
name = "google-cloud-core"
@@ -633,30 +597,30 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]
[[package]]
name = "google-cloud-logging"
version = "3.12.1"
version = "3.11.4"
description = "Stackdriver Logging API client library"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "google_cloud_logging-3.12.1-py2.py3-none-any.whl", hash = "sha256:6817878af76ec4e7568976772839ab2c43ddfd18fbbf2ce32b13ef549cd5a862"},
{file = "google_cloud_logging-3.12.1.tar.gz", hash = "sha256:36efc823985055b203904e83e1c8f9f999b3c64270bcda39d57386ca4effd678"},
{file = "google_cloud_logging-3.11.4-py2.py3-none-any.whl", hash = "sha256:1d465ac62df29fb94bba4d6b4891035e57d573d84541dd8a40eebbc74422b2f0"},
{file = "google_cloud_logging-3.11.4.tar.gz", hash = "sha256:32305d989323f3c58603044e2ac5d9cf23e9465ede511bbe90b4309270d3195c"},
]
[package.dependencies]
google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras = ["grpc"]}
google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0"
google-cloud-appengine-logging = ">=0.1.3,<2.0.0"
google-cloud-audit-log = ">=0.3.1,<1.0.0"
google-cloud-core = ">=2.0.0,<3.0.0"
grpc-google-iam-v1 = ">=0.12.4,<1.0.0"
google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]}
google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev"
google-cloud-appengine-logging = ">=0.1.3,<2.0.0dev"
google-cloud-audit-log = ">=0.2.4,<1.0.0dev"
google-cloud-core = ">=2.0.0,<3.0.0dev"
grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev"
opentelemetry-api = ">=1.9.0"
proto-plus = [
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
{version = ">=1.22.2,<2.0.0", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
{version = ">=1.22.0,<2.0.0", markers = "python_version < \"3.11\""},
{version = ">=1.25.0,<2.0.0dev", markers = "python_version >= \"3.13\""},
{version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
{version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""},
]
protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
[[package]]
name = "googleapis-common-protos"
@@ -931,47 +895,6 @@ files = [
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "launchdarkly-eventsource"
version = "1.2.4"
description = "LaunchDarkly SSE Client"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "launchdarkly_eventsource-1.2.4-py3-none-any.whl", hash = "sha256:048ef8c4440d0d8219778661ee4d4b5e12aa6ed2c29a3004417ede44c2386e8c"},
{file = "launchdarkly_eventsource-1.2.4.tar.gz", hash = "sha256:b8b9342681f55e1d35c56243431cbbaca4eb9812d6785f8de204af322104e066"},
]
[package.dependencies]
urllib3 = ">=1.26.0,<3"
[[package]]
name = "launchdarkly-server-sdk"
version = "9.11.1"
description = "LaunchDarkly SDK for Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "launchdarkly_server_sdk-9.11.1-py3-none-any.whl", hash = "sha256:128569cebf666dd115cc0ba03c48ff75f6acc9788301a7e2c3a54d06107e445a"},
{file = "launchdarkly_server_sdk-9.11.1.tar.gz", hash = "sha256:150e29656cb8c506d1967f3c59e62b69310d345ec27217640a6146dd1db5d250"},
]
[package.dependencies]
certifi = ">=2018.4.16"
expiringdict = ">=1.1.4"
launchdarkly-eventsource = ">=1.2.4,<2.0.0"
pyRFC3339 = ">=1.0"
semver = ">=2.10.2"
urllib3 = ">=1.26.0,<3"
[package.extras]
consul = ["python-consul (>=1.0.1)"]
dynamodb = ["boto3 (>=1.9.71)"]
redis = ["redis (>=2.10.5)"]
test-filesource = ["pyyaml (>=5.3.1)", "watchdog (>=3.0.0)"]
[[package]]
name = "multidict"
version = "6.1.0"
@@ -1315,19 +1238,19 @@ pyasn1 = ">=0.4.6,<0.7.0"
[[package]]
name = "pydantic"
version = "2.11.4"
version = "2.11.1"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
{file = "pydantic-2.11.1-py3-none-any.whl", hash = "sha256:5b6c415eee9f8123a14d859be0c84363fec6b1feb6b688d6435801230b56e0b8"},
{file = "pydantic-2.11.1.tar.gz", hash = "sha256:442557d2910e75c991c39f4b4ab18963d57b9b55122c8b2a9cd176d8c29ce968"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.33.2"
pydantic-core = "2.33.0"
typing-extensions = ">=4.12.2"
typing-inspection = ">=0.4.0"
@@ -1337,111 +1260,111 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows
[[package]]
name = "pydantic-core"
version = "2.33.2"
version = "2.33.0"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
{file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
{file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
{file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
{file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
{file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
{file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
{file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
{file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
{file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
{file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
{file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
{file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
{file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
{file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
{file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
{file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
{file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
{file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
{file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
{file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
{file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
{file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
{file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
{file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
{file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
{file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
{file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
{file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
{file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
{file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
{file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
{file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
{file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
{file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
{file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
{file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
{file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
{file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
{file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
{file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
{file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
{file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
{file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
{file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
{file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
{file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
{file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
{file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
{file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
{file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
{file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
{file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
{file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
{file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
{file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
{file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
{file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
{file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
{file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
{file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
{file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
{file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
{file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
{file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
{file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
{file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
{file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
{file = "pydantic_core-2.33.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71dffba8fe9ddff628c68f3abd845e91b028361d43c5f8e7b3f8b91d7d85413e"},
{file = "pydantic_core-2.33.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:abaeec1be6ed535a5d7ffc2e6c390083c425832b20efd621562fbb5bff6dc518"},
{file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759871f00e26ad3709efc773ac37b4d571de065f9dfb1778012908bcc36b3a73"},
{file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dcfebee69cd5e1c0b76a17e17e347c84b00acebb8dd8edb22d4a03e88e82a207"},
{file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b1262b912435a501fa04cd213720609e2cefa723a07c92017d18693e69bf00b"},
{file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4726f1f3f42d6a25678c67da3f0b10f148f5655813c5aca54b0d1742ba821b8f"},
{file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e790954b5093dff1e3a9a2523fddc4e79722d6f07993b4cd5547825c3cbf97b5"},
{file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34e7fb3abe375b5c4e64fab75733d605dda0f59827752debc99c17cb2d5f3276"},
{file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ecb158fb9b9091b515213bed3061eb7deb1d3b4e02327c27a0ea714ff46b0760"},
{file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:4d9149e7528af8bbd76cc055967e6e04617dcb2a2afdaa3dea899406c5521faa"},
{file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e81a295adccf73477220e15ff79235ca9dcbcee4be459eb9d4ce9a2763b8386c"},
{file = "pydantic_core-2.33.0-cp310-cp310-win32.whl", hash = "sha256:f22dab23cdbce2005f26a8f0c71698457861f97fc6318c75814a50c75e87d025"},
{file = "pydantic_core-2.33.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cb2390355ba084c1ad49485d18449b4242da344dea3e0fe10babd1f0db7dcfc"},
{file = "pydantic_core-2.33.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a608a75846804271cf9c83e40bbb4dab2ac614d33c6fd5b0c6187f53f5c593ef"},
{file = "pydantic_core-2.33.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e1c69aa459f5609dec2fa0652d495353accf3eda5bdb18782bc5a2ae45c9273a"},
{file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9ec80eb5a5f45a2211793f1c4aeddff0c3761d1c70d684965c1807e923a588b"},
{file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e925819a98318d17251776bd3d6aa9f3ff77b965762155bdad15d1a9265c4cfd"},
{file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bf68bb859799e9cec3d9dd8323c40c00a254aabb56fe08f907e437005932f2b"},
{file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1b2ea72dea0825949a045fa4071f6d5b3d7620d2a208335207793cf29c5a182d"},
{file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1583539533160186ac546b49f5cde9ffc928062c96920f58bd95de32ffd7bffd"},
{file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:23c3e77bf8a7317612e5c26a3b084c7edeb9552d645742a54a5867635b4f2453"},
{file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a7a7f2a3f628d2f7ef11cb6188bcf0b9e1558151d511b974dfea10a49afe192b"},
{file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:f1fb026c575e16f673c61c7b86144517705865173f3d0907040ac30c4f9f5915"},
{file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:635702b2fed997e0ac256b2cfbdb4dd0bf7c56b5d8fba8ef03489c03b3eb40e2"},
{file = "pydantic_core-2.33.0-cp311-cp311-win32.whl", hash = "sha256:07b4ced28fccae3f00626eaa0c4001aa9ec140a29501770a88dbbb0966019a86"},
{file = "pydantic_core-2.33.0-cp311-cp311-win_amd64.whl", hash = "sha256:4927564be53239a87770a5f86bdc272b8d1fbb87ab7783ad70255b4ab01aa25b"},
{file = "pydantic_core-2.33.0-cp311-cp311-win_arm64.whl", hash = "sha256:69297418ad644d521ea3e1aa2e14a2a422726167e9ad22b89e8f1130d68e1e9a"},
{file = "pydantic_core-2.33.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6c32a40712e3662bebe524abe8abb757f2fa2000028d64cc5a1006016c06af43"},
{file = "pydantic_core-2.33.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ec86b5baa36f0a0bfb37db86c7d52652f8e8aa076ab745ef7725784183c3fdd"},
{file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4deac83a8cc1d09e40683be0bc6d1fa4cde8df0a9bf0cda5693f9b0569ac01b6"},
{file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:175ab598fb457a9aee63206a1993874badf3ed9a456e0654273e56f00747bbd6"},
{file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f36afd0d56a6c42cf4e8465b6441cf546ed69d3a4ec92724cc9c8c61bd6ecf4"},
{file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a98257451164666afafc7cbf5fb00d613e33f7e7ebb322fbcd99345695a9a61"},
{file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecc6d02d69b54a2eb83ebcc6f29df04957f734bcf309d346b4f83354d8376862"},
{file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a69b7596c6603afd049ce7f3835bcf57dd3892fc7279f0ddf987bebed8caa5a"},
{file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea30239c148b6ef41364c6f51d103c2988965b643d62e10b233b5efdca8c0099"},
{file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:abfa44cf2f7f7d7a199be6c6ec141c9024063205545aa09304349781b9a125e6"},
{file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20d4275f3c4659d92048c70797e5fdc396c6e4446caf517ba5cad2db60cd39d3"},
{file = "pydantic_core-2.33.0-cp312-cp312-win32.whl", hash = "sha256:918f2013d7eadea1d88d1a35fd4a1e16aaf90343eb446f91cb091ce7f9b431a2"},
{file = "pydantic_core-2.33.0-cp312-cp312-win_amd64.whl", hash = "sha256:aec79acc183865bad120b0190afac467c20b15289050648b876b07777e67ea48"},
{file = "pydantic_core-2.33.0-cp312-cp312-win_arm64.whl", hash = "sha256:5461934e895968655225dfa8b3be79e7e927e95d4bd6c2d40edd2fa7052e71b6"},
{file = "pydantic_core-2.33.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f00e8b59e1fc8f09d05594aa7d2b726f1b277ca6155fc84c0396db1b373c4555"},
{file = "pydantic_core-2.33.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a73be93ecef45786d7d95b0c5e9b294faf35629d03d5b145b09b81258c7cd6d"},
{file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff48a55be9da6930254565ff5238d71d5e9cd8c5487a191cb85df3bdb8c77365"},
{file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a4ea04195638dcd8c53dadb545d70badba51735b1594810e9768c2c0b4a5da"},
{file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41d698dcbe12b60661f0632b543dbb119e6ba088103b364ff65e951610cb7ce0"},
{file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae62032ef513fe6281ef0009e30838a01057b832dc265da32c10469622613885"},
{file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f225f3a3995dbbc26affc191d0443c6c4aa71b83358fd4c2b7d63e2f6f0336f9"},
{file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5bdd36b362f419c78d09630cbaebc64913f66f62bda6d42d5fbb08da8cc4f181"},
{file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2a0147c0bef783fd9abc9f016d66edb6cac466dc54a17ec5f5ada08ff65caf5d"},
{file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:c860773a0f205926172c6644c394e02c25421dc9a456deff16f64c0e299487d3"},
{file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:138d31e3f90087f42aa6286fb640f3c7a8eb7bdae829418265e7e7474bd2574b"},
{file = "pydantic_core-2.33.0-cp313-cp313-win32.whl", hash = "sha256:d20cbb9d3e95114325780f3cfe990f3ecae24de7a2d75f978783878cce2ad585"},
{file = "pydantic_core-2.33.0-cp313-cp313-win_amd64.whl", hash = "sha256:ca1103d70306489e3d006b0f79db8ca5dd3c977f6f13b2c59ff745249431a606"},
{file = "pydantic_core-2.33.0-cp313-cp313-win_arm64.whl", hash = "sha256:6291797cad239285275558e0a27872da735b05c75d5237bbade8736f80e4c225"},
{file = "pydantic_core-2.33.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7b79af799630af263eca9ec87db519426d8c9b3be35016eddad1832bac812d87"},
{file = "pydantic_core-2.33.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eabf946a4739b5237f4f56d77fa6668263bc466d06a8036c055587c130a46f7b"},
{file = "pydantic_core-2.33.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8a1d581e8cdbb857b0e0e81df98603376c1a5c34dc5e54039dcc00f043df81e7"},
{file = "pydantic_core-2.33.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:7c9c84749f5787781c1c45bb99f433402e484e515b40675a5d121ea14711cf61"},
{file = "pydantic_core-2.33.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64672fa888595a959cfeff957a654e947e65bbe1d7d82f550417cbd6898a1d6b"},
{file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26bc7367c0961dec292244ef2549afa396e72e28cc24706210bd44d947582c59"},
{file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce72d46eb201ca43994303025bd54d8a35a3fc2a3495fac653d6eb7205ce04f4"},
{file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14229c1504287533dbf6b1fc56f752ce2b4e9694022ae7509631ce346158de11"},
{file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:085d8985b1c1e48ef271e98a658f562f29d89bda98bf120502283efbc87313eb"},
{file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31860fbda80d8f6828e84b4a4d129fd9c4535996b8249cfb8c720dc2a1a00bb8"},
{file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f200b2f20856b5a6c3a35f0d4e344019f805e363416e609e9b47c552d35fd5ea"},
{file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f72914cfd1d0176e58ddc05c7a47674ef4222c8253bf70322923e73e14a4ac3"},
{file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:91301a0980a1d4530d4ba7e6a739ca1a6b31341252cb709948e0aca0860ce0ae"},
{file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7419241e17c7fbe5074ba79143d5523270e04f86f1b3a0dff8df490f84c8273a"},
{file = "pydantic_core-2.33.0-cp39-cp39-win32.whl", hash = "sha256:7a25493320203005d2a4dac76d1b7d953cb49bce6d459d9ae38e30dd9f29bc9c"},
{file = "pydantic_core-2.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:82a4eba92b7ca8af1b7d5ef5f3d9647eee94d1f74d21ca7c21e3a2b92e008358"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2762c568596332fdab56b07060c8ab8362c56cf2a339ee54e491cd503612c50"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bf637300ff35d4f59c006fff201c510b2b5e745b07125458a5389af3c0dff8c"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c151ce3d59ed56ebd7ce9ce5986a409a85db697d25fc232f8e81f195aa39a1"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee65f0cc652261744fd07f2c6e6901c914aa6c5ff4dcfaf1136bc394d0dd26b"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:024d136ae44d233e6322027bbf356712b3940bee816e6c948ce4b90f18471b3d"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e37f10f6d4bc67c58fbd727108ae1d8b92b397355e68519f1e4a7babb1473442"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:502ed542e0d958bd12e7c3e9a015bce57deaf50eaa8c2e1c439b512cb9db1e3a"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:715c62af74c236bf386825c0fdfa08d092ab0f191eb5b4580d11c3189af9d330"},
{file = "pydantic_core-2.33.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bccc06fa0372151f37f6b69834181aa9eb57cf8665ed36405fb45fbf6cac3bae"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d8dc9f63a26f7259b57f46a7aab5af86b2ad6fbe48487500bb1f4b27e051e4c"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:30369e54d6d0113d2aa5aee7a90d17f225c13d87902ace8fcd7bbf99b19124db"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3eb479354c62067afa62f53bb387827bee2f75c9c79ef25eef6ab84d4b1ae3b"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0310524c833d91403c960b8a3cf9f46c282eadd6afd276c8c5edc617bd705dc9"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eddb18a00bbb855325db27b4c2a89a4ba491cd6a0bd6d852b225172a1f54b36c"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ade5dbcf8d9ef8f4b28e682d0b29f3008df9842bb5ac48ac2c17bc55771cc976"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2c0afd34f928383e3fd25740f2050dbac9d077e7ba5adbaa2227f4d4f3c8da5c"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7da333f21cd9df51d5731513a6d39319892947604924ddf2e24a4612975fb936"},
{file = "pydantic_core-2.33.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4b6d77c75a57f041c5ee915ff0b0bb58eabb78728b69ed967bc5b780e8f701b8"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba95691cf25f63df53c1d342413b41bd7762d9acb425df8858d7efa616c0870e"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f1ab031feb8676f6bd7c85abec86e2935850bf19b84432c64e3e239bffeb1ec"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c1151827eef98b83d49b6ca6065575876a02d2211f259fb1a6b7757bd24dd8"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a66d931ea2c1464b738ace44b7334ab32a2fd50be023d863935eb00f42be1778"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0bcf0bab28995d483f6c8d7db25e0d05c3efa5cebfd7f56474359e7137f39856"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:89670d7a0045acb52be0566df5bc8b114ac967c662c06cf5e0c606e4aadc964b"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:b716294e721d8060908dbebe32639b01bfe61b15f9f57bcc18ca9a0e00d9520b"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fc53e05c16697ff0c1c7c2b98e45e131d4bfb78068fffff92a82d169cbb4c7b7"},
{file = "pydantic_core-2.33.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:68504959253303d3ae9406b634997a2123a0b0c1da86459abbd0ffc921695eac"},
{file = "pydantic_core-2.33.0.tar.gz", hash = "sha256:40eb8af662ba409c3cbf4a8150ad32ae73514cd7cb1f1a2113af39763dd616b3"},
]
[package.dependencies]
@@ -1449,25 +1372,22 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pydantic-settings"
version = "2.9.1"
version = "2.8.1"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"},
{file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"},
{file = "pydantic_settings-2.8.1-py3-none-any.whl", hash = "sha256:81942d5ac3d905f7f3ee1a70df5dfb62d5569c12f51a5a647defc1c3d9ee2e9c"},
{file = "pydantic_settings-2.8.1.tar.gz", hash = "sha256:d5c663dfbe9db9d5e1c646b2e161da12f0d734d422ee56f567d0ea2cee4e8585"},
]
[package.dependencies]
pydantic = ">=2.7.0"
python-dotenv = ">=0.21.0"
typing-inspection = ">=0.4.0"
[package.extras]
aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"]
azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"]
gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"]
toml = ["tomli (>=2.0.1)"]
yaml = ["pyyaml (>=6.0.1)"]
@@ -1489,18 +1409,6 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pyrfc3339"
version = "2.0.1"
description = "Generate and parse RFC 3339 timestamps"
optional = false
python-versions = "*"
groups = ["main"]
files = [
{file = "pyRFC3339-2.0.1-py3-none-any.whl", hash = "sha256:30b70a366acac3df7386b558c21af871522560ed7f3f73cf344b8c2cbb8b0c9d"},
{file = "pyrfc3339-2.0.1.tar.gz", hash = "sha256:e47843379ea35c1296c3b6c67a948a1a490ae0584edfcbdea0eaffb5dd29960b"},
]
[[package]]
name = "pytest"
version = "8.3.3"
@@ -1667,42 +1575,30 @@ pyasn1 = ">=0.1.3"
[[package]]
name = "ruff"
version = "0.12.2"
version = "0.11.2"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
groups = ["dev"]
files = [
{file = "ruff-0.12.2-py3-none-linux_armv6l.whl", hash = "sha256:093ea2b221df1d2b8e7ad92fc6ffdca40a2cb10d8564477a987b44fd4008a7be"},
{file = "ruff-0.12.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:09e4cf27cc10f96b1708100fa851e0daf21767e9709e1649175355280e0d950e"},
{file = "ruff-0.12.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8ae64755b22f4ff85e9c52d1f82644abd0b6b6b6deedceb74bd71f35c24044cc"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eb3a6b2db4d6e2c77e682f0b988d4d61aff06860158fdb413118ca133d57922"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:73448de992d05517170fc37169cbca857dfeaeaa8c2b9be494d7bcb0d36c8f4b"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b8b94317cbc2ae4a2771af641739f933934b03555e51515e6e021c64441532d"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45fc42c3bf1d30d2008023a0a9a0cfb06bf9835b147f11fe0679f21ae86d34b1"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce48f675c394c37e958bf229fb5c1e843e20945a6d962cf3ea20b7a107dcd9f4"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:793d8859445ea47591272021a81391350205a4af65a9392401f418a95dfb75c9"},
{file = "ruff-0.12.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6932323db80484dda89153da3d8e58164d01d6da86857c79f1961934354992da"},
{file = "ruff-0.12.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:6aa7e623a3a11538108f61e859ebf016c4f14a7e6e4eba1980190cacb57714ce"},
{file = "ruff-0.12.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2a4a20aeed74671b2def096bdf2eac610c7d8ffcbf4fb0e627c06947a1d7078d"},
{file = "ruff-0.12.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:71a4c550195612f486c9d1f2b045a600aeba851b298c667807ae933478fcef04"},
{file = "ruff-0.12.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4987b8f4ceadf597c927beee65a5eaf994c6e2b631df963f86d8ad1bdea99342"},
{file = "ruff-0.12.2-py3-none-win32.whl", hash = "sha256:369ffb69b70cd55b6c3fc453b9492d98aed98062db9fec828cdfd069555f5f1a"},
{file = "ruff-0.12.2-py3-none-win_amd64.whl", hash = "sha256:dca8a3b6d6dc9810ed8f328d406516bf4d660c00caeaef36eb831cf4871b0639"},
{file = "ruff-0.12.2-py3-none-win_arm64.whl", hash = "sha256:48d6c6bfb4761df68bc05ae630e24f506755e702d4fb08f08460be778c7ccb12"},
{file = "ruff-0.12.2.tar.gz", hash = "sha256:d7b4f55cd6f325cb7621244f19c873c565a08aff5a4ba9c69aa7355f3f7afd3e"},
]
[[package]]
name = "semver"
version = "3.0.4"
description = "Python helper for Semantic Versioning (https://semver.org)"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "semver-3.0.4-py3-none-any.whl", hash = "sha256:9c824d87ba7f7ab4a1890799cec8596f15c1241cb473404ea1cb0c55e4b04746"},
{file = "semver-3.0.4.tar.gz", hash = "sha256:afc7d8c584a5ed0a11033af086e8af226a9c0b206f313e0301f8dd7b6b589602"},
{file = "ruff-0.11.2-py3-none-linux_armv6l.whl", hash = "sha256:c69e20ea49e973f3afec2c06376eb56045709f0212615c1adb0eda35e8a4e477"},
{file = "ruff-0.11.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2c5424cc1c4eb1d8ecabe6d4f1b70470b4f24a0c0171356290b1953ad8f0e272"},
{file = "ruff-0.11.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:ecf20854cc73f42171eedb66f006a43d0a21bfb98a2523a809931cda569552d9"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c543bf65d5d27240321604cee0633a70c6c25c9a2f2492efa9f6d4b8e4199bb"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20967168cc21195db5830b9224be0e964cc9c8ecf3b5a9e3ce19876e8d3a96e3"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:955a9ce63483999d9f0b8f0b4a3ad669e53484232853054cc8b9d51ab4c5de74"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:86b3a27c38b8fce73bcd262b0de32e9a6801b76d52cdb3ae4c914515f0cef608"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3b66a03b248c9fcd9d64d445bafdf1589326bee6fc5c8e92d7562e58883e30f"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0397c2672db015be5aa3d4dac54c69aa012429097ff219392c018e21f5085147"},
{file = "ruff-0.11.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869bcf3f9abf6457fbe39b5a37333aa4eecc52a3b99c98827ccc371a8e5b6f1b"},
{file = "ruff-0.11.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2a2b50ca35457ba785cd8c93ebbe529467594087b527a08d487cf0ee7b3087e9"},
{file = "ruff-0.11.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7c69c74bf53ddcfbc22e6eb2f31211df7f65054bfc1f72288fc71e5f82db3eab"},
{file = "ruff-0.11.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6e8fb75e14560f7cf53b15bbc55baf5ecbe373dd5f3aab96ff7aa7777edd7630"},
{file = "ruff-0.11.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:842a472d7b4d6f5924e9297aa38149e5dcb1e628773b70e6387ae2c97a63c58f"},
{file = "ruff-0.11.2-py3-none-win32.whl", hash = "sha256:aca01ccd0eb5eb7156b324cfaa088586f06a86d9e5314b0eb330cb48415097cc"},
{file = "ruff-0.11.2-py3-none-win_amd64.whl", hash = "sha256:3170150172a8f994136c0c66f494edf199a0bbea7a409f649e4bc8f4d7084080"},
{file = "ruff-0.11.2-py3-none-win_arm64.whl", hash = "sha256:52933095158ff328f4c77af3d74f0379e34fd52f175144cefc1b192e7ccd32b4"},
{file = "ruff-0.11.2.tar.gz", hash = "sha256:ec47591497d5a1050175bdf4e1a4e6272cddff7da88a2ad595e1e326041d8d94"},
]
[[package]]
@@ -1729,24 +1625,6 @@ files = [
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "starlette"
version = "0.46.2"
description = "The little ASGI library that shines."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"},
{file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"},
]
[package.dependencies]
anyio = ">=3.6.2,<5"
[package.extras]
full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"]
[[package]]
name = "storage3"
version = "0.11.0"
@@ -1782,14 +1660,14 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]
[[package]]
name = "supabase"
version = "2.15.1"
version = "2.15.0"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
{file = "supabase-2.15.1-py3-none-any.whl", hash = "sha256:749299cdd74ecf528f52045c1e60d9dba81cc2054656f754c0ca7fba0dd34827"},
{file = "supabase-2.15.1.tar.gz", hash = "sha256:66e847dab9346062aa6a25b4e81ac786b972c5d4299827c57d1d5bd6a0346070"},
{file = "supabase-2.15.0-py3-none-any.whl", hash = "sha256:a665c7ab6c8ad1d80609ab62ad657f66fdaf38070ec9e0db5c7887fd72b109c0"},
{file = "supabase-2.15.0.tar.gz", hash = "sha256:2e66289ad74ae9c4cb04a69f9de00cd2ce880cd890de23269a40ac5b69151d26"},
]
[package.dependencies]
@@ -1823,7 +1701,7 @@ description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version < \"3.11\""
markers = "python_version == \"3.10\""
files = [
{file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
{file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
@@ -1874,26 +1752,6 @@ h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "uvicorn"
version = "0.34.3"
description = "The lightning-fast ASGI server."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "uvicorn-0.34.3-py3-none-any.whl", hash = "sha256:16246631db62bdfbf069b0645177d6e8a77ba950cfedbfd093acef9444e4d885"},
{file = "uvicorn-0.34.3.tar.gz", hash = "sha256:35919a9a979d7a59334b6b10e05d77c1d0d574c50e0fc98b8b1a0f165708b55a"},
]
[package.dependencies]
click = ">=7.0"
h11 = ">=0.8"
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
[package.extras]
standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"]
[[package]]
name = "websockets"
version = "12.0"
@@ -2176,4 +2034,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<4.0"
content-hash = "574057127b05f28c2ae39f7b11aa0d7c52f857655e9223e23a27c9989b2ac10f"
content-hash = "c8e23c0609cae0717447f575849b658bee9203b784ec7270b62629cddbbbd9ca"


@@ -7,23 +7,20 @@ readme = "README.md"
packages = [{ include = "autogpt_libs" }]
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
colorama = "^0.4.6"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.12.1"
pydantic = "^2.11.4"
pydantic-settings = "^2.9.1"
google-cloud-logging = "^3.11.4"
pydantic = "^2.11.1"
pydantic-settings = "^2.8.1"
pyjwt = "^2.10.1"
pytest-asyncio = "^0.26.0"
pytest-mock = "^3.14.0"
supabase = "^2.15.1"
launchdarkly-server-sdk = "^9.11.1"
fastapi = "^0.115.12"
uvicorn = "^0.34.3"
python = ">=3.10,<4.0"
supabase = "^2.15.0"
[tool.poetry.group.dev.dependencies]
redis = "^5.2.1"
ruff = "^0.12.2"
ruff = "^0.11.0"
[build-system]
requires = ["poetry-core"]


@@ -13,6 +13,7 @@ PRISMA_SCHEMA="postgres/schema.prisma"
# EXECUTOR
NUM_GRAPH_WORKERS=10
NUM_NODE_WORKERS=3
BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]
@@ -65,13 +66,6 @@ MEDIA_GCS_BUCKET_NAME=
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=http://localhost:3000
## Cloudflare Turnstile (CAPTCHA) Configuration
## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
## This is the backend secret key
TURNSTILE_SECRET_KEY=
## This is the verify URL
TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
# integration to work.
@@ -126,10 +120,8 @@ TODOIST_CLIENT_SECRET=
# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
AIML_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=
LLAMA_API_KEY=
# Reddit
# Go to https://www.reddit.com/prefs/apps and create a new app


@@ -1,237 +0,0 @@
# Backend Testing Guide
This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.
## Table of Contents
- [Overview](#overview)
- [Running Tests](#running-tests)
- [Snapshot Testing](#snapshot-testing)
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
- [Best Practices](#best-practices)
## Overview
The backend uses pytest for testing with the following key libraries:
- `pytest` - Test framework
- `pytest-asyncio` - Async test support
- `pytest-mock` - Mocking support
- `pytest-snapshot` - Snapshot testing for API responses
## Running Tests
### Run all tests
```bash
poetry run test
```
### Run specific test file
```bash
poetry run pytest path/to/test_file.py
```
### Run with verbose output
```bash
poetry run pytest -v
```
### Run with coverage
```bash
poetry run pytest --cov=backend
```
## Snapshot Testing
Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.
### How Snapshot Testing Works
1. First run: Creates snapshot files in `snapshots/` directories
2. Subsequent runs: Compares output against saved snapshots
3. Changes detected: Test fails if output differs from snapshot
### Creating/Updating Snapshots
When you first write a test or when the expected output changes:
```bash
poetry run pytest path/to/test.py --snapshot-update
```
⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.
### Snapshot Test Example
```python
import json
from pytest_snapshot.plugin import Snapshot
def test_api_endpoint(snapshot: Snapshot):
response = client.get("/api/endpoint")
# Snapshot the response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"endpoint_response"
)
```
### Best Practices for Snapshots
1. **Use descriptive names**: `"user_list_response"` not `"response1"`
2. **Sort JSON keys**: Ensures consistent snapshots
3. **Format JSON**: Use `indent=2` for readable diffs
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs
Example of excluding dynamic data:
```python
response_data = response.json()
# Remove dynamic fields for snapshot
response_data.pop("created_at", None)
response_data.pop("id", None)
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response_data, indent=2, sort_keys=True),
"static_response_data"
)
```
## Writing Tests for API Routes
### Basic Structure
```python
import json
import fastapi
import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot
from backend.server.v2.myroute import router
app = fastapi.FastAPI()
app.include_router(router)
client = fastapi.testclient.TestClient(app)
def test_endpoint_success(snapshot: Snapshot):
response = client.get("/endpoint")
assert response.status_code == 200
# Test specific fields
data = response.json()
assert data["status"] == "success"
# Snapshot the full response
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(data, indent=2, sort_keys=True),
"endpoint_success_response"
)
```
### Testing with Authentication
```python
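# Note: auth_middleware and get_user_id stand for whatever auth dependencies the
# route under test declares; import them from their actual location in the
# backend before overriding them.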
def override_auth_middleware():
return {"sub": "test-user-id"}
def override_get_user_id():
return "test-user-id"
app.dependency_overrides[auth_middleware] = override_auth_middleware
app.dependency_overrides[get_user_id] = override_get_user_id
```
### Mocking External Services
```python
def test_external_api_call(mocker, snapshot):
# Mock external service
mock_response = {"external": "data"}
mocker.patch(
"backend.services.external_api.call",
return_value=mock_response
)
response = client.post("/api/process")
assert response.status_code == 200
snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
json.dumps(response.json(), indent=2, sort_keys=True),
"process_with_external_response"
)
```
## Best Practices
### 1. Test Organization
- Place tests next to the code: `routes.py` → `routes_test.py`
- Use descriptive test names: `test_create_user_with_invalid_email`
- Group related tests in classes when appropriate
### 2. Test Coverage
- Test the happy path and error cases (see the error-case sketch after this list)
- Test edge cases (empty data, invalid formats)
- Test authentication and authorization
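A minimal error-case sketch, reusing the `client`, `json`, and `Snapshot` setup from the Basic Structure example above; the `/users` route and the 422 validation response are hypothetical and should be adapted to the real API:
```python
def test_create_user_with_invalid_email(snapshot: Snapshot):
    # Hypothetical route and payload; adjust to the actual endpoint under test
    response = client.post(
        "/users", json={"email": "not-an-email", "name": "Test User"}
    )

    # FastAPI typically rejects invalid payloads with a 422 validation error
    assert response.status_code == 422

    # Snapshot the error body so regressions in the error shape are caught too
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response.json(), indent=2, sort_keys=True),
        "create_user_invalid_email_response",
    )
```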
### 3. Snapshot Testing Guidelines
- Review all snapshot changes carefully
- Don't snapshot sensitive data
- Keep snapshots focused and minimal
- Update snapshots intentionally, not accidentally
### 4. Async Testing
- Use regular `def` for FastAPI TestClient tests
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly, as in the sketch below
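A minimal sketch of a direct async test; `add_numbers` is a stand-in coroutine used only for illustration, not a real project function:
```python
import pytest

async def add_numbers(a: int, b: int) -> int:
    # Stand-in coroutine representing any async function under test
    return a + b

@pytest.mark.asyncio
async def test_add_numbers():
    result = await add_numbers(2, 3)
    assert result == 5
```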
### 5. Fixtures
Create reusable fixtures for common test data:
```python
@pytest.fixture
def sample_user():
return {
"email": "test@example.com",
"name": "Test User"
}
def test_create_user(sample_user, snapshot):
response = client.post("/users", json=sample_user)
# ... test implementation
```
## CI/CD Integration
The GitHub Actions workflow automatically runs tests on:
- Pull requests
- Pushes to main branch
Snapshot tests work in CI by:
1. Committing snapshot files to the repository
2. Comparing test output against the committed snapshots
3. Failing the job when the output and snapshots don't match
## Troubleshooting
### Snapshot Mismatches
- Review the diff carefully
- If changes are expected: `poetry run pytest --snapshot-update`
- If changes are unexpected: Fix the code causing the difference
### Async Test Issues
- Ensure async functions use `@pytest.mark.asyncio`
- Use `AsyncMock` for mocking async functions (a sketch follows this list)
- FastAPI TestClient handles async automatically
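A sketch of mocking an async dependency with `AsyncMock` via `pytest-mock`; the patched path `backend.services.external_api.call_async` and the `/api/process` route are hypothetical, and `client` is the TestClient from the Basic Structure example:
```python
from unittest.mock import AsyncMock

def test_async_dependency_is_mocked(mocker):
    # Patch a (hypothetical) async call so the route never hits the real service;
    # extra kwargs are forwarded to the AsyncMock constructor.
    mocker.patch(
        "backend.services.external_api.call_async",
        new_callable=AsyncMock,
        return_value={"external": "data"},
    )

    response = client.post("/api/process")
    assert response.status_code == 200
```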
### Import Errors
- Check that all dependencies are in `pyproject.toml`
- Run `poetry install` to ensure dependencies are installed
- Verify import paths are correct
## Summary
Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.
Remember: Good tests are as important as good code!


@@ -1,4 +1,3 @@
import functools
import importlib
import os
import re
@@ -11,16 +10,22 @@ if TYPE_CHECKING:
T = TypeVar("T")
@functools.cache
_AVAILABLE_BLOCKS: dict[str, type["Block"]] = {}
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
if _AVAILABLE_BLOCKS:
return _AVAILABLE_BLOCKS
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py" and not f.name.startswith("test_")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
@@ -30,9 +35,9 @@ def load_all_blocks() -> dict[str, type["Block"]]:
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
available_blocks: dict[str, type["Block"]] = {}
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
@@ -53,7 +58,7 @@ def load_all_blocks() -> dict[str, type["Block"]]:
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in available_blocks:
if block.id in _AVAILABLE_BLOCKS:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
@@ -84,9 +89,9 @@ def load_all_blocks() -> dict[str, type["Block"]]:
f"{block.name} has a boolean field with no default value"
)
available_blocks[block.id] = block_cls
_AVAILABLE_BLOCKS[block.id] = block_cls
return available_blocks
return _AVAILABLE_BLOCKS
__all__ = ["load_all_blocks"]


@@ -1,8 +1,5 @@
import asyncio
import logging
from typing import Any, Optional
from pydantic import JsonValue
from typing import Any
from backend.data.block import (
Block,
@@ -15,9 +12,9 @@ from backend.data.block import (
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json, retry
from backend.util import json
_logger = logging.getLogger(__name__)
logger = logging.getLogger(__name__)
class AgentExecutorBlock(Block):
@@ -26,21 +23,17 @@ class AgentExecutorBlock(Block):
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
inputs: BlockInput = SchemaField(description="Input data for the graph")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
nodes_input_masks: Optional[dict[str, dict[str, JsonValue]]] = SchemaField(
default=None, hidden=True
)
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("inputs", {})
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
@@ -64,94 +57,36 @@ class AgentExecutorBlock(Block):
categories={BlockCategory.AGENT},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.executor import utils as execution_utils
graph_exec = await execution_utils.add_graph_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
inputs=input_data.inputs,
nodes_input_masks=input_data.nodes_input_masks,
use_db_query=False,
)
logger = execution_utils.LogMetadata(
logger=_logger,
user_id=input_data.user_id,
graph_eid=graph_exec.id,
graph_id=input_data.graph_id,
node_eid="*",
node_id="*",
block_name=self.name,
)
try:
async for name, data in self._run(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
logger=logger,
):
yield name, data
except asyncio.CancelledError:
await self._stop(
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
logger=logger,
)
logger.warning(
f"Execution of graph {input_data.graph_id}v{input_data.graph_version} was cancelled."
)
except Exception as e:
await self._stop(
graph_exec_id=graph_exec.id,
user_id=input_data.user_id,
logger=logger,
)
logger.error(
f"Execution of graph {input_data.graph_id}v{input_data.graph_version} failed: {e}, execution is stopped."
)
raise
async def _run(
self,
graph_id: str,
graph_version: int,
graph_exec_id: str,
user_id: str,
logger,
) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
from backend.data.execution import ExecutionEventType
from backend.executor import utils as execution_utils
event_bus = execution_utils.get_async_execution_event_bus()
event_bus = execution_utils.get_execution_event_bus()
log_id = f"Graph #{graph_id}-V{graph_version}, exec-id: {graph_exec_id}"
graph_exec = execution_utils.add_graph_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
inputs=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.id}"
logger.info(f"Starting execution of {log_id}")
async for event in event_bus.listen(
user_id=user_id,
graph_id=graph_id,
graph_exec_id=graph_exec_id,
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.id,
):
if event.status not in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.debug(
f"Execution {log_id} received event {event.event_type} with status {event.status}"
)
continue
if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
# If the graph execution is COMPLETED, TERMINATED, or FAILED,
# we can stop listening for further events.
break
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
logger.debug(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
@@ -175,25 +110,3 @@ class AgentExecutorBlock(Block):
f"Execution {log_id} produced {output_name}: {output_data}"
)
yield output_name, output_data
@retry.func_retry
async def _stop(
self,
graph_exec_id: str,
user_id: str,
logger,
) -> None:
from backend.executor import utils as execution_utils
log_id = f"Graph exec-id: {graph_exec_id}"
logger.info(f"Stopping execution of {log_id}")
try:
await execution_utils.stop_graph_execution(
graph_exec_id=graph_exec_id,
user_id=user_id,
use_db_query=False,
)
logger.info(f"Execution {log_id} stopped successfully.")
except Exception as e:
logger.error(f"Failed to stop execution {log_id}: {e}")

View File
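
For reference, a minimal runnable sketch of the listen-until-terminal pattern that AgentExecutorBlock.run uses in the hunks above; the event stream, Event type, and statuses below are stand-ins, not the real backend.executor utilities.

import asyncio
from dataclasses import dataclass
from enum import Enum

class ExecutionStatus(str, Enum):
    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    TERMINATED = "TERMINATED"
    FAILED = "FAILED"

@dataclass
class Event:
    status: ExecutionStatus
    output_data: dict

async def fake_event_stream():
    # Stand-in for event_bus.listen(...): a few progress events, then a terminal one.
    yield Event(ExecutionStatus.RUNNING, {})
    yield Event(ExecutionStatus.RUNNING, {"partial": 1})
    yield Event(ExecutionStatus.COMPLETED, {"result": 42})

async def run_until_terminal():
    terminal = {ExecutionStatus.COMPLETED, ExecutionStatus.TERMINATED, ExecutionStatus.FAILED}
    async for event in fake_event_stream():
        if event.status not in terminal:
            continue  # skip intermediate progress events
        print(f"Execution ended with status {event.status}")
        return event.output_data

print(asyncio.run(run_until_terminal()))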

@@ -1,8 +1,8 @@
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.data.block import Block, BlockCategory, BlockSchema
@@ -165,15 +165,15 @@ class AIImageGeneratorBlock(Block):
},
)
async def _run_client(
def _run_client(
self, credentials: APIKeyCredentials, model_name: str, input_params: dict
):
try:
# Initialize Replicate client
client = ReplicateClient(api_token=credentials.api_key.get_secret_value())
client = replicate.Client(api_token=credentials.api_key.get_secret_value())
# Run the model with input parameters
output = await client.async_run(model_name, input=input_params, wait=False)
output = client.run(model_name, input=input_params, wait=False)
# Process output
if isinstance(output, list) and len(output) > 0:
@@ -195,7 +195,7 @@ class AIImageGeneratorBlock(Block):
except Exception as e:
raise RuntimeError(f"Unexpected error during model execution: {e}")
async def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
try:
# Handle style-based prompt modification for models without native style support
modified_prompt = input_data.prompt
@@ -213,7 +213,7 @@ class AIImageGeneratorBlock(Block):
"steps": 40,
"cfg_scale": 7.0,
}
output = await self._run_client(
output = self._run_client(
credentials,
"stability-ai/stable-diffusion-3.5-medium",
input_params,
@@ -231,7 +231,7 @@ class AIImageGeneratorBlock(Block):
"output_format": "jpg", # Set to jpg for Flux models
"output_quality": 90,
}
output = await self._run_client(
output = self._run_client(
credentials, "black-forest-labs/flux-1.1-pro", input_params
)
return output
@@ -246,7 +246,7 @@ class AIImageGeneratorBlock(Block):
"output_format": "jpg",
"output_quality": 90,
}
output = await self._run_client(
output = self._run_client(
credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
)
return output
@@ -257,7 +257,7 @@ class AIImageGeneratorBlock(Block):
"size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
"style": input_data.style.value,
}
output = await self._run_client(
output = self._run_client(
credentials, "recraft-ai/recraft-v3", input_params
)
return output
@@ -296,9 +296,9 @@ class AIImageGeneratorBlock(Block):
style_text = style_map.get(style, "")
return f"{style_text} of" if style_text else ""
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
try:
url = await self.generate_image(input_data, credentials)
url = self.generate_image(input_data, credentials)
if url:
yield "image_url", url
else:

View File
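
The image-generator hunks above swap the blocking ReplicateClient.run call for the awaited async_run variant. A minimal sketch of the two call shapes, assuming a valid Replicate API token; the model slug and parameters are taken from the diff purely for illustration.

from replicate.client import Client as ReplicateClient

MODEL = "stability-ai/stable-diffusion-3.5-medium"
PARAMS = {"prompt": "a lighthouse at dusk", "steps": 40, "cfg_scale": 7.0}

def run_sync(api_token: str):
    # Blocking call, as on the synchronous side of the diff.
    client = ReplicateClient(api_token=api_token)
    return client.run(MODEL, input=PARAMS, wait=False)

async def run_async(api_token: str):
    # Non-blocking call, as on the async side of the diff.
    client = ReplicateClient(api_token=api_token)
    return await client.async_run(MODEL, input=PARAMS, wait=False)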

@@ -1,10 +1,10 @@
import asyncio
import logging
import time
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
@@ -142,7 +142,7 @@ class AIMusicGeneratorBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
max_retries = 3
@@ -154,7 +154,7 @@ class AIMusicGeneratorBlock(Block):
logger.debug(
f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
)
result = await self.run_model(
result = self.run_model(
api_key=credentials.api_key,
music_gen_model_version=input_data.music_gen_model_version,
prompt=input_data.prompt,
@@ -176,13 +176,13 @@ class AIMusicGeneratorBlock(Block):
last_error = f"Unexpected error: {str(e)}"
logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
if attempt < max_retries - 1:
await asyncio.sleep(retry_delay)
time.sleep(retry_delay)
continue
# If we've exhausted all retries, yield the error
yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"
async def run_model(
def run_model(
self,
api_key: SecretStr,
music_gen_model_version: MusicGenModelVersion,
@@ -196,10 +196,10 @@ class AIMusicGeneratorBlock(Block):
normalization_strategy: NormalizationStrategy,
):
# Initialize Replicate client with the API key
client = ReplicateClient(api_token=api_key.get_secret_value())
client = replicate.Client(api_token=api_key.get_secret_value())
# Run the model with parameters
output = await client.async_run(
output = client.run(
"meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
input={
"prompt": prompt,

View File
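
The music-generator hunks above keep the same retry loop and only swap asyncio.sleep for time.sleep. A hedged sketch of that loop, with call_model standing in for the Replicate invocation:

import asyncio

async def generate_with_retries(call_model, max_retries: int = 3, retry_delay: float = 5.0):
    last_error = None
    for attempt in range(max_retries):
        try:
            result = await call_model()
            if result:
                return result
            last_error = "Model returned empty output"
        except Exception as e:  # mirror the broad catch used by the block
            last_error = f"Unexpected error: {e}"
        if attempt < max_retries - 1:
            await asyncio.sleep(retry_delay)  # time.sleep(retry_delay) on the sync side
    raise RuntimeError(f"Failed after {max_retries} attempts. Last error: {last_error}")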

@@ -1,4 +1,3 @@
import asyncio
import logging
import time
from enum import Enum
@@ -14,7 +13,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -53,7 +52,6 @@ class AudioTrack(str, Enum):
REFRESHER = ("Refresher",)
TOURIST = ("Tourist",)
TWIN_TYCHES = ("Twin Tyches",)
DONT_STOP_ME_ABSTRACT_FUTURE_BASS = ("Dont Stop Me Abstract Future Bass",)
@property
def audio_url(self):
@@ -79,7 +77,6 @@ class AudioTrack(str, Enum):
AudioTrack.REFRESHER: "https://cdn.tfrv.xyz/audio/refresher.mp3",
AudioTrack.TOURIST: "https://cdn.tfrv.xyz/audio/tourist.mp3",
AudioTrack.TWIN_TYCHES: "https://cdn.tfrv.xyz/audio/twin-tynches.mp3",
AudioTrack.DONT_STOP_ME_ABSTRACT_FUTURE_BASS: "https://cdn.revid.ai/audio/_dont-stop-me-abstract-future-bass.mp3",
}
return audio_urls[self]
@@ -107,7 +104,6 @@ class GenerationPreset(str, Enum):
MOVIE = ("Movie",)
STYLIZED_ILLUSTRATION = ("Stylized Illustration",)
MANGA = ("Manga",)
DEFAULT = ("DEFAULT",)
class Voice(str, Enum):
@@ -117,7 +113,6 @@ class Voice(str, Enum):
JESSICA = "Jessica"
CHARLOTTE = "Charlotte"
CALLUM = "Callum"
EVA = "Eva"
@property
def voice_id(self):
@@ -128,7 +123,6 @@ class Voice(str, Enum):
Voice.JESSICA: "cgSgspJ2msm6clMCkdW9",
Voice.CHARLOTTE: "XB0fDUnXU5powFXDhCwa",
Voice.CALLUM: "N2lVS1w4EtoT3dr4eOWO",
Voice.EVA: "FGY2WhTYpPnrIDTdsKH5",
}
return voice_id_map[self]
@@ -146,8 +140,6 @@ logger = logging.getLogger(__name__)
class AIShortformVideoCreatorBlock(Block):
"""Creates a shortform texttovideo clip using stock or AI imagery."""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REVID], Literal["api_key"]
@@ -191,58 +183,6 @@ class AIShortformVideoCreatorBlock(Block):
video_url: str = SchemaField(description="The URL of the created video")
error: str = SchemaField(description="Error message if the request failed")
async def create_webhook(self) -> tuple[str, str]:
"""Create a new webhook URL for receiving notifications."""
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
"""Create a video using the Revid API."""
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
"""Check the status of a video creation job."""
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
return response.json()
async def wait_for_video(
self,
api_key: SecretStr,
pid: str,
max_wait_time: int = 1000,
) -> str:
"""Wait for video creation to complete and return the video URL."""
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def __init__(self):
super().__init__(
id="361697fb-0c4f-4feb-aed3-8320c88c771b",
@@ -261,41 +201,91 @@ class AIShortformVideoCreatorBlock(Block):
"voice": Voice.LILY,
"video_style": VisualMediaType.STOCK_VIDEOS,
},
test_output=("video_url", "https://example.com/video.mp4"),
test_output=(
"video_url",
"https://example.com/video.mp4",
),
test_mock={
"create_webhook": lambda *args, **kwargs: (
"create_webhook": lambda: (
"test_uuid",
"https://webhook.site/test_uuid",
),
"create_video": lambda *args, **kwargs: {"pid": "test_pid"},
"check_video_status": lambda *args, **kwargs: {
"status": "ready",
"videoUrl": "https://example.com/video.mp4",
},
"wait_for_video": lambda *args, **kwargs: "https://example.com/video.mp4",
"create_video": lambda api_key, payload: {"pid": "test_pid"},
"wait_for_video": lambda api_key, pid, webhook_token, max_wait_time=1000: "https://example.com/video.mp4",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(
def create_webhook(self):
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = requests.post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
def create_video(self, api_key: SecretStr, payload: dict) -> dict:
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = requests.post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status_code}, Content: {response.text}"
)
return response.json()
def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = requests.get(url, headers=headers)
return response.json()
def wait_for_video(
self,
api_key: SecretStr,
pid: str,
webhook_token: str,
max_wait_time: int = 1000,
) -> str:
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
time.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
# Create a new Webhook.site URL
webhook_token, webhook_url = await self.create_webhook()
webhook_token, webhook_url = self.create_webhook()
logger.debug(f"Webhook URL: {webhook_url}")
audio_url = input_data.background_music.audio_url
payload = {
"frameRate": input_data.frame_rate,
"resolution": input_data.resolution,
"frameDurationMultiplier": 18,
"webhook": None,
"webhook": webhook_url,
"creationParams": {
"mediaType": input_data.video_style,
"captionPresetName": "Wrap 1",
"selectedVoice": input_data.voice.voice_id,
"hasEnhancedGeneration": True,
"generationPreset": input_data.generation_preset.name,
"selectedAudio": input_data.background_music.value,
"selectedAudio": input_data.background_music,
"origin": "/create",
"inputText": input_data.script,
"flowType": "text-to-video",
@@ -311,12 +301,12 @@ class AIShortformVideoCreatorBlock(Block):
"selectedStoryStyle": {"value": "custom", "label": "Custom"},
"hasToGenerateVideos": input_data.video_style
!= VisualMediaType.STOCK_VIDEOS,
"audioUrl": input_data.background_music.audio_url,
"audioUrl": audio_url,
},
}
logger.debug("Creating video...")
response = await self.create_video(credentials.api_key, payload)
response = self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
@@ -328,370 +318,6 @@ class AIShortformVideoCreatorBlock(Block):
logger.debug(
f"Video created with project ID: {pid}. Waiting for completion..."
)
video_url = await self.wait_for_video(credentials.api_key, pid)
video_url = self.wait_for_video(credentials.api_key, pid, webhook_token)
logger.debug(f"Video ready: {video_url}")
yield "video_url", video_url
class AIAdMakerVideoCreatorBlock(Block):
"""Generates a 30second vertical AI advert using optional usersupplied imagery."""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REVID], Literal["api_key"]
] = CredentialsField(
description="Credentials for Revid.ai API access.",
)
script: str = SchemaField(
description="Short advertising copy. Line breaks create new scenes.",
placeholder="Introducing Foobar [show product photo] the gadget that does it all.",
)
ratio: str = SchemaField(description="Aspect ratio", default="9 / 16")
target_duration: int = SchemaField(
description="Desired length of the ad in seconds.", default=30
)
voice: Voice = SchemaField(
description="Narration voice", default=Voice.EVA, placeholder=Voice.EVA
)
background_music: AudioTrack = SchemaField(
description="Background track",
default=AudioTrack.DONT_STOP_ME_ABSTRACT_FUTURE_BASS,
)
input_media_urls: list[str] = SchemaField(
description="List of image URLs to feature in the advert.", default=[]
)
use_only_provided_media: bool = SchemaField(
description="Restrict visuals to supplied images only.", default=True
)
class Output(BlockSchema):
video_url: str = SchemaField(description="URL of the finished advert")
error: str = SchemaField(description="Error message on failure")
async def create_webhook(self) -> tuple[str, str]:
"""Create a new webhook URL for receiving notifications."""
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
"""Create a video using the Revid API."""
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
"""Check the status of a video creation job."""
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
return response.json()
async def wait_for_video(
self,
api_key: SecretStr,
pid: str,
max_wait_time: int = 1000,
) -> str:
"""Wait for video creation to complete and return the video URL."""
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def __init__(self):
super().__init__(
id="58bd2a19-115d-4fd1-8ca4-13b9e37fa6a0",
description="Creates an AIgenerated 30second advert (text + images)",
categories={BlockCategory.MARKETING, BlockCategory.AI},
input_schema=AIAdMakerVideoCreatorBlock.Input,
output_schema=AIAdMakerVideoCreatorBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"script": "Test product launch!",
"input_media_urls": [
"https://cdn.revid.ai/uploads/1747076315114-image.png",
],
},
test_output=("video_url", "https://example.com/ad.mp4"),
test_mock={
"create_webhook": lambda *args, **kwargs: (
"test_uuid",
"https://webhook.site/test_uuid",
),
"create_video": lambda *args, **kwargs: {"pid": "test_pid"},
"check_video_status": lambda *args, **kwargs: {
"status": "ready",
"videoUrl": "https://example.com/ad.mp4",
},
"wait_for_video": lambda *args, **kwargs: "https://example.com/ad.mp4",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
webhook_token, webhook_url = await self.create_webhook()
payload = {
"webhook": webhook_url,
"creationParams": {
"targetDuration": input_data.target_duration,
"ratio": input_data.ratio,
"mediaType": "aiVideo",
"inputText": input_data.script,
"flowType": "text-to-video",
"slug": "ai-ad-generator",
"slugNew": "",
"isCopiedFrom": False,
"hasToGenerateVoice": True,
"hasToTranscript": False,
"hasToSearchMedia": True,
"hasAvatar": False,
"hasWebsiteRecorder": False,
"hasTextSmallAtBottom": False,
"selectedAudio": input_data.background_music.value,
"selectedVoice": input_data.voice.voice_id,
"selectedAvatar": "https://cdn.revid.ai/avatars/young-woman.mp4",
"selectedAvatarType": "video/mp4",
"websiteToRecord": "",
"hasToGenerateCover": True,
"nbGenerations": 1,
"disableCaptions": False,
"mediaMultiplier": "medium",
"characters": [],
"captionPresetName": "Revid",
"sourceType": "contentScraping",
"selectedStoryStyle": {"value": "custom", "label": "General"},
"generationPreset": "DEFAULT",
"hasToGenerateMusic": False,
"isOptimizedForChinese": False,
"generationUserPrompt": "",
"enableNsfwFilter": False,
"addStickers": False,
"typeMovingImageAnim": "dynamic",
"hasToGenerateSoundEffects": False,
"forceModelType": "gpt-image-1",
"selectedCharacters": [],
"lang": "",
"voiceSpeed": 1,
"disableAudio": False,
"disableVoice": False,
"useOnlyProvidedMedia": input_data.use_only_provided_media,
"imageGenerationModel": "ultra",
"videoGenerationModel": "pro",
"hasEnhancedGeneration": True,
"hasEnhancedGenerationPro": True,
"inputMedias": [
{"url": url, "title": "", "type": "image"}
for url in input_data.input_media_urls
],
"hasToGenerateVideos": True,
"audioUrl": input_data.background_music.audio_url,
"watermark": None,
},
}
response = await self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
raise RuntimeError("Failed to create video: No project ID returned")
video_url = await self.wait_for_video(credentials.api_key, pid)
yield "video_url", video_url
class AIScreenshotToVideoAdBlock(Block):
"""Creates an advert where the supplied screenshot is narrated by an AI avatar."""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REVID], Literal["api_key"]
] = CredentialsField(description="Revid.ai API key")
script: str = SchemaField(
description="Narration that will accompany the screenshot.",
placeholder="Check out these amazing stats!",
)
screenshot_url: str = SchemaField(
description="Screenshot or image URL to showcase."
)
ratio: str = SchemaField(default="9 / 16")
target_duration: int = SchemaField(default=30)
voice: Voice = SchemaField(default=Voice.EVA)
background_music: AudioTrack = SchemaField(
default=AudioTrack.DONT_STOP_ME_ABSTRACT_FUTURE_BASS
)
class Output(BlockSchema):
video_url: str = SchemaField(description="Rendered video URL")
error: str = SchemaField(description="Error, if encountered")
async def create_webhook(self) -> tuple[str, str]:
"""Create a new webhook URL for receiving notifications."""
url = "https://webhook.site/token"
headers = {"Accept": "application/json", "Content-Type": "application/json"}
response = await Requests().post(url, headers=headers)
webhook_data = response.json()
return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"
async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
"""Create a video using the Revid API."""
url = "https://www.revid.ai/api/public/v2/render"
headers = {"key": api_key.get_secret_value()}
response = await Requests().post(url, json=payload, headers=headers)
logger.debug(
f"API Response Status Code: {response.status}, Content: {response.text}"
)
return response.json()
async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
"""Check the status of a video creation job."""
url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
headers = {"key": api_key.get_secret_value()}
response = await Requests().get(url, headers=headers)
return response.json()
async def wait_for_video(
self,
api_key: SecretStr,
pid: str,
max_wait_time: int = 1000,
) -> str:
"""Wait for video creation to complete and return the video URL."""
start_time = time.time()
while time.time() - start_time < max_wait_time:
status = await self.check_video_status(api_key, pid)
logger.debug(f"Video status: {status}")
if status.get("status") == "ready" and "videoUrl" in status:
return status["videoUrl"]
elif status.get("status") == "error":
error_message = status.get("error", "Unknown error occurred")
logger.error(f"Video creation failed: {error_message}")
raise ValueError(f"Video creation failed: {error_message}")
elif status.get("status") in ["FAILED", "CANCELED"]:
logger.error(f"Video creation failed: {status.get('message')}")
raise ValueError(f"Video creation failed: {status.get('message')}")
await asyncio.sleep(10)
logger.error("Video creation timed out")
raise TimeoutError("Video creation timed out")
def __init__(self):
super().__init__(
id="0f3e4635-e810-43d9-9e81-49e6f4e83b7c",
description="Turns a screenshot into an engaging, avatarnarrated video advert.",
categories={BlockCategory.AI, BlockCategory.MARKETING},
input_schema=AIScreenshotToVideoAdBlock.Input,
output_schema=AIScreenshotToVideoAdBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"script": "Amazing numbers!",
"screenshot_url": "https://cdn.revid.ai/uploads/1747080376028-image.png",
},
test_output=("video_url", "https://example.com/screenshot.mp4"),
test_mock={
"create_webhook": lambda *args, **kwargs: (
"test_uuid",
"https://webhook.site/test_uuid",
),
"create_video": lambda *args, **kwargs: {"pid": "test_pid"},
"check_video_status": lambda *args, **kwargs: {
"status": "ready",
"videoUrl": "https://example.com/screenshot.mp4",
},
"wait_for_video": lambda *args, **kwargs: "https://example.com/screenshot.mp4",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
webhook_token, webhook_url = await self.create_webhook()
payload = {
"webhook": webhook_url,
"creationParams": {
"targetDuration": input_data.target_duration,
"ratio": input_data.ratio,
"mediaType": "aiVideo",
"hasAvatar": True,
"removeAvatarBackground": True,
"inputText": input_data.script,
"flowType": "text-to-video",
"slug": "ai-ad-generator",
"slugNew": "screenshot-to-video-ad",
"isCopiedFrom": "ai-ad-generator",
"hasToGenerateVoice": True,
"hasToTranscript": False,
"hasToSearchMedia": True,
"hasWebsiteRecorder": False,
"hasTextSmallAtBottom": False,
"selectedAudio": input_data.background_music.value,
"selectedVoice": input_data.voice.voice_id,
"selectedAvatar": "https://cdn.revid.ai/avatars/young-woman.mp4",
"selectedAvatarType": "video/mp4",
"websiteToRecord": "",
"hasToGenerateCover": True,
"nbGenerations": 1,
"disableCaptions": False,
"mediaMultiplier": "medium",
"characters": [],
"captionPresetName": "Revid",
"sourceType": "contentScraping",
"selectedStoryStyle": {"value": "custom", "label": "General"},
"generationPreset": "DEFAULT",
"hasToGenerateMusic": False,
"isOptimizedForChinese": False,
"generationUserPrompt": "",
"enableNsfwFilter": False,
"addStickers": False,
"typeMovingImageAnim": "dynamic",
"hasToGenerateSoundEffects": False,
"forceModelType": "gpt-image-1",
"selectedCharacters": [],
"lang": "",
"voiceSpeed": 1,
"disableAudio": False,
"disableVoice": False,
"useOnlyProvidedMedia": True,
"imageGenerationModel": "ultra",
"videoGenerationModel": "ultra",
"hasEnhancedGeneration": True,
"hasEnhancedGenerationPro": True,
"inputMedias": [
{"url": input_data.screenshot_url, "title": "", "type": "image"}
],
"hasToGenerateVideos": True,
"audioUrl": input_data.background_music.audio_url,
"watermark": None,
},
}
response = await self.create_video(credentials.api_key, payload)
pid = response.get("pid")
if not pid:
raise RuntimeError("Failed to create video: No project ID returned")
video_url = await self.wait_for_video(credentials.api_key, pid)
yield "video_url", video_url

View File
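
All three Revid blocks above share the same status-polling helper. A condensed sketch, with check_status standing in for the GET https://www.revid.ai/api/public/v2/status call:

import time

def wait_for_video(check_status, pid: str, max_wait_time: int = 1000, poll_interval: int = 10) -> str:
    start = time.time()
    while time.time() - start < max_wait_time:
        status = check_status(pid)
        if status.get("status") == "ready" and "videoUrl" in status:
            return status["videoUrl"]
        if status.get("status") in ("error", "FAILED", "CANCELED"):
            raise ValueError(f"Video creation failed: {status.get('error') or status.get('message')}")
        time.sleep(poll_interval)  # back off between polls
    raise TimeoutError("Video creation timed out")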

@@ -4,7 +4,6 @@ from typing import List
from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
Contact,
EnrichPersonRequest,
Organization,
SearchOrganizationsRequest,
SearchOrganizationsResponse,
@@ -28,15 +27,14 @@ class ApolloClient:
def _get_headers(self) -> dict[str, str]:
return {"x-api-key": self.credentials.api_key.get_secret_value()}
async def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
"""Search for people in Apollo"""
response = await self.requests.post(
response = self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
parsed_response = SearchPeopleResponse(**response.json())
if parsed_response.pagination.total_entries == 0:
return []
@@ -54,29 +52,27 @@ class ApolloClient:
and len(parsed_response.people) > 0
):
query.page += 1
response = await self.requests.post(
response = self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
parsed_response = SearchPeopleResponse(**response.json())
people.extend(parsed_response.people[: query.max_results - len(people)])
logger.info(f"Found {len(people)} people")
return people[: query.max_results] if query.max_results else people
async def search_organizations(
def search_organizations(
self, query: SearchOrganizationsRequest
) -> List[Organization]:
"""Search for organizations in Apollo"""
response = await self.requests.post(
response = self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
parsed_response = SearchOrganizationsResponse(**response.json())
if parsed_response.pagination.total_entries == 0:
return []
@@ -94,13 +90,12 @@ class ApolloClient:
and len(parsed_response.organizations) > 0
):
query.page += 1
response = await self.requests.post(
response = self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
json=query.model_dump(exclude={"max_results"}),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
parsed_response = SearchOrganizationsResponse(**response.json())
organizations.extend(
parsed_response.organizations[
: query.max_results - len(organizations)
@@ -111,21 +106,3 @@ class ApolloClient:
return (
organizations[: query.max_results] if query.max_results else organizations
)
async def enrich_person(self, query: EnrichPersonRequest) -> Contact:
"""Enrich a person's data including email & phone reveal"""
response = await self.requests.post(
f"{self.API_URL}/people/match",
headers=self._get_headers(),
json=query.model_dump(),
params={
"reveal_personal_emails": "true",
},
)
data = response.json()
if "person" not in data:
raise ValueError(f"Person not found or enrichment failed: {data}")
contact = Contact(**data["person"])
contact.email = contact.email or "-"
return contact

View File
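
Both ApolloClient search methods above page through results the same way. A simplified sketch, assuming fetch_page wraps the HTTP call and returns a dict with pagination.total_entries and an items list (the real responses use people / organizations instead):

def collect_paginated(fetch_page, max_results: int, max_pages: int = 5) -> list:
    # The first page decides whether there is anything to collect at all.
    page = 1
    response = fetch_page(page)
    if response["pagination"]["total_entries"] == 0:
        return []
    results = list(response["items"][:max_results])
    # Keep paging (capped at max_pages) until enough results are gathered
    # or a page comes back empty.
    while len(results) < max_results and page < max_pages and response["items"]:
        page += 1
        response = fetch_page(page)
        results.extend(response["items"][: max_results - len(results)])
    return results[:max_results] if max_results else results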

@@ -1,31 +1,17 @@
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel as OriginalBaseModel
from pydantic import ConfigDict
from pydantic import BaseModel, ConfigDict
from backend.data.model import SchemaField
class BaseModel(OriginalBaseModel):
def model_dump(self, *args, exclude: set[str] | None = None, **kwargs):
if exclude is None:
exclude = set("credentials")
else:
exclude.add("credentials")
kwargs.setdefault("exclude_none", True)
kwargs.setdefault("exclude_unset", True)
kwargs.setdefault("exclude_defaults", True)
return super().model_dump(*args, exclude=exclude, **kwargs)
class PrimaryPhone(BaseModel):
"""A primary phone in Apollo"""
number: Optional[str] = ""
source: Optional[str] = ""
sanitized_number: Optional[str] = ""
number: str
source: str
sanitized_number: str
class SenorityLevels(str, Enum):
@@ -56,102 +42,102 @@ class ContactEmailStatuses(str, Enum):
class RuleConfigStatus(BaseModel):
"""A rule config status in Apollo"""
_id: Optional[str] = ""
created_at: Optional[str] = ""
rule_action_config_id: Optional[str] = ""
rule_config_id: Optional[str] = ""
status_cd: Optional[str] = ""
updated_at: Optional[str] = ""
id: Optional[str] = ""
key: Optional[str] = ""
_id: str
created_at: str
rule_action_config_id: str
rule_config_id: str
status_cd: str
updated_at: str
id: str
key: str
class ContactCampaignStatus(BaseModel):
"""A contact campaign status in Apollo"""
id: Optional[str] = ""
emailer_campaign_id: Optional[str] = ""
send_email_from_user_id: Optional[str] = ""
inactive_reason: Optional[str] = ""
status: Optional[str] = ""
added_at: Optional[str] = ""
added_by_user_id: Optional[str] = ""
finished_at: Optional[str] = ""
paused_at: Optional[str] = ""
auto_unpause_at: Optional[str] = ""
send_email_from_email_address: Optional[str] = ""
send_email_from_email_account_id: Optional[str] = ""
manually_set_unpause: Optional[str] = ""
failure_reason: Optional[str] = ""
current_step_id: Optional[str] = ""
in_response_to_emailer_message_id: Optional[str] = ""
cc_emails: Optional[str] = ""
bcc_emails: Optional[str] = ""
to_emails: Optional[str] = ""
id: str
emailer_campaign_id: str
send_email_from_user_id: str
inactive_reason: str
status: str
added_at: str
added_by_user_id: str
finished_at: str
paused_at: str
auto_unpause_at: str
send_email_from_email_address: str
send_email_from_email_account_id: str
manually_set_unpause: str
failure_reason: str
current_step_id: str
in_response_to_emailer_message_id: str
cc_emails: str
bcc_emails: str
to_emails: str
class Account(BaseModel):
"""An account in Apollo"""
id: Optional[str] = ""
name: Optional[str] = ""
website_url: Optional[str] = ""
blog_url: Optional[str] = ""
angellist_url: Optional[str] = ""
linkedin_url: Optional[str] = ""
twitter_url: Optional[str] = ""
facebook_url: Optional[str] = ""
primary_phone: Optional[PrimaryPhone] = PrimaryPhone()
languages: Optional[list[str]] = []
alexa_ranking: Optional[int] = 0
phone: Optional[str] = ""
linkedin_uid: Optional[str] = ""
founded_year: Optional[int] = 0
publicly_traded_symbol: Optional[str] = ""
publicly_traded_exchange: Optional[str] = ""
logo_url: Optional[str] = ""
chrunchbase_url: Optional[str] = ""
primary_domain: Optional[str] = ""
domain: Optional[str] = ""
team_id: Optional[str] = ""
organization_id: Optional[str] = ""
account_stage_id: Optional[str] = ""
source: Optional[str] = ""
original_source: Optional[str] = ""
creator_id: Optional[str] = ""
owner_id: Optional[str] = ""
created_at: Optional[str] = ""
phone_status: Optional[str] = ""
hubspot_id: Optional[str] = ""
salesforce_id: Optional[str] = ""
crm_owner_id: Optional[str] = ""
parent_account_id: Optional[str] = ""
sanitized_phone: Optional[str] = ""
id: str
name: str
website_url: str
blog_url: str
angellist_url: str
linkedin_url: str
twitter_url: str
facebook_url: str
primary_phone: PrimaryPhone
languages: list[str]
alexa_ranking: int
phone: str
linkedin_uid: str
founded_year: int
publicly_traded_symbol: str
publicly_traded_exchange: str
logo_url: str
chrunchbase_url: str
primary_domain: str
domain: str
team_id: str
organization_id: str
account_stage_id: str
source: str
original_source: str
creator_id: str
owner_id: str
created_at: str
phone_status: str
hubspot_id: str
salesforce_id: str
crm_owner_id: str
parent_account_id: str
sanitized_phone: str
# no listed type on the API docs
account_playbook_statues: Optional[list[Any]] = []
account_rule_config_statuses: Optional[list[RuleConfigStatus]] = []
existence_level: Optional[str] = ""
label_ids: Optional[list[str]] = []
typed_custom_fields: Optional[Any] = {}
custom_field_errors: Optional[Any] = {}
modality: Optional[str] = ""
source_display_name: Optional[str] = ""
salesforce_record_id: Optional[str] = ""
crm_record_url: Optional[str] = ""
account_playbook_statues: list[Any]
account_rule_config_statuses: list[RuleConfigStatus]
existence_level: str
label_ids: list[str]
typed_custom_fields: Any
custom_field_errors: Any
modality: str
source_display_name: str
salesforce_record_id: str
crm_record_url: str
class ContactEmail(BaseModel):
"""A contact email in Apollo"""
email: Optional[str] = ""
email_md5: Optional[str] = ""
email_sha256: Optional[str] = ""
email_status: Optional[str] = ""
email_source: Optional[str] = ""
extrapolated_email_confidence: Optional[str] = ""
position: Optional[int] = 0
email_from_customer: Optional[str] = ""
free_domain: Optional[bool] = True
email: str = ""
email_md5: str = ""
email_sha256: str = ""
email_status: str = ""
email_source: str = ""
extrapolated_email_confidence: str = ""
position: int = 0
email_from_customer: str = ""
free_domain: bool = True
class EmploymentHistory(BaseModel):
@@ -164,40 +150,40 @@ class EmploymentHistory(BaseModel):
populate_by_name=True,
)
_id: Optional[str] = ""
created_at: Optional[str] = ""
current: Optional[bool] = False
degree: Optional[str] = ""
description: Optional[str] = ""
emails: Optional[str] = ""
end_date: Optional[str] = ""
grade_level: Optional[str] = ""
kind: Optional[str] = ""
major: Optional[str] = ""
organization_id: Optional[str] = ""
organization_name: Optional[str] = ""
raw_address: Optional[str] = ""
start_date: Optional[str] = ""
title: Optional[str] = ""
updated_at: Optional[str] = ""
id: Optional[str] = ""
key: Optional[str] = ""
_id: Optional[str] = None
created_at: Optional[str] = None
current: Optional[bool] = None
degree: Optional[str] = None
description: Optional[str] = None
emails: Optional[str] = None
end_date: Optional[str] = None
grade_level: Optional[str] = None
kind: Optional[str] = None
major: Optional[str] = None
organization_id: Optional[str] = None
organization_name: Optional[str] = None
raw_address: Optional[str] = None
start_date: Optional[str] = None
title: Optional[str] = None
updated_at: Optional[str] = None
id: Optional[str] = None
key: Optional[str] = None
class Breadcrumb(BaseModel):
"""A breadcrumb in Apollo"""
label: Optional[str] = ""
signal_field_name: Optional[str] = ""
value: str | list | None = ""
display_name: Optional[str] = ""
label: Optional[str] = "N/A"
signal_field_name: Optional[str] = "N/A"
value: str | list | None = "N/A"
display_name: Optional[str] = "N/A"
class TypedCustomField(BaseModel):
"""A typed custom field in Apollo"""
id: Optional[str] = ""
value: Optional[str] = ""
id: Optional[str] = "N/A"
value: Optional[str] = "N/A"
class Pagination(BaseModel):
@@ -219,23 +205,23 @@ class Pagination(BaseModel):
class DialerFlags(BaseModel):
"""A dialer flags in Apollo"""
country_name: Optional[str] = ""
country_enabled: Optional[bool] = True
high_risk_calling_enabled: Optional[bool] = True
potential_high_risk_number: Optional[bool] = True
country_name: str
country_enabled: bool
high_risk_calling_enabled: bool
potential_high_risk_number: bool
class PhoneNumber(BaseModel):
"""A phone number in Apollo"""
raw_number: Optional[str] = ""
sanitized_number: Optional[str] = ""
type: Optional[str] = ""
position: Optional[int] = 0
status: Optional[str] = ""
dnc_status: Optional[str] = ""
dnc_other_info: Optional[str] = ""
dailer_flags: Optional[DialerFlags] = DialerFlags(
raw_number: str = ""
sanitized_number: str = ""
type: str = ""
position: int = 0
status: str = ""
dnc_status: str = ""
dnc_other_info: str = ""
dailer_flags: DialerFlags = DialerFlags(
country_name="",
country_enabled=True,
high_risk_calling_enabled=True,
@@ -253,31 +239,33 @@ class Organization(BaseModel):
populate_by_name=True,
)
id: Optional[str] = ""
name: Optional[str] = ""
website_url: Optional[str] = ""
blog_url: Optional[str] = ""
angellist_url: Optional[str] = ""
linkedin_url: Optional[str] = ""
twitter_url: Optional[str] = ""
facebook_url: Optional[str] = ""
primary_phone: Optional[PrimaryPhone] = PrimaryPhone()
languages: Optional[list[str]] = []
id: Optional[str] = "N/A"
name: Optional[str] = "N/A"
website_url: Optional[str] = "N/A"
blog_url: Optional[str] = "N/A"
angellist_url: Optional[str] = "N/A"
linkedin_url: Optional[str] = "N/A"
twitter_url: Optional[str] = "N/A"
facebook_url: Optional[str] = "N/A"
primary_phone: Optional[PrimaryPhone] = PrimaryPhone(
number="N/A", source="N/A", sanitized_number="N/A"
)
languages: list[str] = []
alexa_ranking: Optional[int] = 0
phone: Optional[str] = ""
linkedin_uid: Optional[str] = ""
phone: Optional[str] = "N/A"
linkedin_uid: Optional[str] = "N/A"
founded_year: Optional[int] = 0
publicly_traded_symbol: Optional[str] = ""
publicly_traded_exchange: Optional[str] = ""
logo_url: Optional[str] = ""
chrunchbase_url: Optional[str] = ""
primary_domain: Optional[str] = ""
sanitized_phone: Optional[str] = ""
owned_by_organization_id: Optional[str] = ""
intent_strength: Optional[str] = ""
show_intent: Optional[bool] = True
publicly_traded_symbol: Optional[str] = "N/A"
publicly_traded_exchange: Optional[str] = "N/A"
logo_url: Optional[str] = "N/A"
chrunchbase_url: Optional[str] = "N/A"
primary_domain: Optional[str] = "N/A"
sanitized_phone: Optional[str] = "N/A"
owned_by_organization_id: Optional[str] = "N/A"
intent_strength: Optional[str] = "N/A"
show_intent: bool = True
has_intent_signal_account: Optional[bool] = True
intent_signal_account: Optional[str] = ""
intent_signal_account: Optional[str] = "N/A"
class Contact(BaseModel):
@@ -290,95 +278,95 @@ class Contact(BaseModel):
populate_by_name=True,
)
contact_roles: Optional[list[Any]] = []
id: Optional[str] = ""
first_name: Optional[str] = ""
last_name: Optional[str] = ""
name: Optional[str] = ""
linkedin_url: Optional[str] = ""
title: Optional[str] = ""
contact_stage_id: Optional[str] = ""
owner_id: Optional[str] = ""
creator_id: Optional[str] = ""
person_id: Optional[str] = ""
email_needs_tickling: Optional[bool] = True
organization_name: Optional[str] = ""
source: Optional[str] = ""
original_source: Optional[str] = ""
organization_id: Optional[str] = ""
headline: Optional[str] = ""
photo_url: Optional[str] = ""
present_raw_address: Optional[str] = ""
linkededin_uid: Optional[str] = ""
extrapolated_email_confidence: Optional[float] = 0.0
salesforce_id: Optional[str] = ""
salesforce_lead_id: Optional[str] = ""
salesforce_contact_id: Optional[str] = ""
saleforce_account_id: Optional[str] = ""
crm_owner_id: Optional[str] = ""
created_at: Optional[str] = ""
emailer_campaign_ids: Optional[list[str]] = []
direct_dial_status: Optional[str] = ""
direct_dial_enrichment_failed_at: Optional[str] = ""
email_status: Optional[str] = ""
email_source: Optional[str] = ""
account_id: Optional[str] = ""
last_activity_date: Optional[str] = ""
hubspot_vid: Optional[str] = ""
hubspot_company_id: Optional[str] = ""
crm_id: Optional[str] = ""
sanitized_phone: Optional[str] = ""
merged_crm_ids: Optional[str] = ""
updated_at: Optional[str] = ""
queued_for_crm_push: Optional[bool] = True
suggested_from_rule_engine_config_id: Optional[str] = ""
email_unsubscribed: Optional[str] = ""
label_ids: Optional[list[Any]] = []
has_pending_email_arcgate_request: Optional[bool] = True
has_email_arcgate_request: Optional[bool] = True
existence_level: Optional[str] = ""
email: Optional[str] = ""
email_from_customer: Optional[str] = ""
typed_custom_fields: Optional[list[TypedCustomField]] = []
custom_field_errors: Optional[Any] = {}
salesforce_record_id: Optional[str] = ""
crm_record_url: Optional[str] = ""
email_status_unavailable_reason: Optional[str] = ""
email_true_status: Optional[str] = ""
updated_email_true_status: Optional[bool] = True
contact_rule_config_statuses: Optional[list[RuleConfigStatus]] = []
source_display_name: Optional[str] = ""
twitter_url: Optional[str] = ""
contact_campaign_statuses: Optional[list[ContactCampaignStatus]] = []
state: Optional[str] = ""
city: Optional[str] = ""
country: Optional[str] = ""
account: Optional[Account] = Account()
contact_emails: Optional[list[ContactEmail]] = []
organization: Optional[Organization] = Organization()
employment_history: Optional[list[EmploymentHistory]] = []
time_zone: Optional[str] = ""
intent_strength: Optional[str] = ""
show_intent: Optional[bool] = True
phone_numbers: Optional[list[PhoneNumber]] = []
account_phone_note: Optional[str] = ""
free_domain: Optional[bool] = True
is_likely_to_engage: Optional[bool] = True
email_domain_catchall: Optional[bool] = True
contact_job_change_event: Optional[str] = ""
contact_roles: list[Any] = []
id: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
name: Optional[str] = None
linkedin_url: Optional[str] = None
title: Optional[str] = None
contact_stage_id: Optional[str] = None
owner_id: Optional[str] = None
creator_id: Optional[str] = None
person_id: Optional[str] = None
email_needs_tickling: bool = True
organization_name: Optional[str] = None
source: Optional[str] = None
original_source: Optional[str] = None
organization_id: Optional[str] = None
headline: Optional[str] = None
photo_url: Optional[str] = None
present_raw_address: Optional[str] = None
linkededin_uid: Optional[str] = None
extrapolated_email_confidence: Optional[float] = None
salesforce_id: Optional[str] = None
salesforce_lead_id: Optional[str] = None
salesforce_contact_id: Optional[str] = None
saleforce_account_id: Optional[str] = None
crm_owner_id: Optional[str] = None
created_at: Optional[str] = None
emailer_campaign_ids: list[str] = []
direct_dial_status: Optional[str] = None
direct_dial_enrichment_failed_at: Optional[str] = None
email_status: Optional[str] = None
email_source: Optional[str] = None
account_id: Optional[str] = None
last_activity_date: Optional[str] = None
hubspot_vid: Optional[str] = None
hubspot_company_id: Optional[str] = None
crm_id: Optional[str] = None
sanitized_phone: Optional[str] = None
merged_crm_ids: Optional[str] = None
updated_at: Optional[str] = None
queued_for_crm_push: bool = True
suggested_from_rule_engine_config_id: Optional[str] = None
email_unsubscribed: Optional[str] = None
label_ids: list[Any] = []
has_pending_email_arcgate_request: bool = True
has_email_arcgate_request: bool = True
existence_level: Optional[str] = None
email: Optional[str] = None
email_from_customer: Optional[str] = None
typed_custom_fields: list[TypedCustomField] = []
custom_field_errors: Any = None
salesforce_record_id: Optional[str] = None
crm_record_url: Optional[str] = None
email_status_unavailable_reason: Optional[str] = None
email_true_status: Optional[str] = None
updated_email_true_status: bool = True
contact_rule_config_statuses: list[RuleConfigStatus] = []
source_display_name: Optional[str] = None
twitter_url: Optional[str] = None
contact_campaign_statuses: list[ContactCampaignStatus] = []
state: Optional[str] = None
city: Optional[str] = None
country: Optional[str] = None
account: Optional[Account] = None
contact_emails: list[ContactEmail] = []
organization: Optional[Organization] = None
employment_history: list[EmploymentHistory] = []
time_zone: Optional[str] = None
intent_strength: Optional[str] = None
show_intent: bool = True
phone_numbers: list[PhoneNumber] = []
account_phone_note: Optional[str] = None
free_domain: bool = True
is_likely_to_engage: bool = True
email_domain_catchall: bool = True
contact_job_change_event: Optional[str] = None
class SearchOrganizationsRequest(BaseModel):
"""Request for Apollo's search organizations API"""
organization_num_employees_range: Optional[list[int]] = SchemaField(
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default=[0, 1000000],
)
organization_locations: Optional[list[str]] = SchemaField(
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, any Boston-based companies will not appear in your search results, even if they match other parameters.
@@ -387,30 +375,28 @@ To exclude companies based on location, use the organization_not_locations param
""",
default_factory=list,
)
organizations_not_locations: Optional[list[str]] = SchemaField(
organizations_not_locations: list[str] = SchemaField(
description="""Exclude companies from search results based on the location of the company headquarters. You can use cities, US states, and countries as locations to exclude.
This parameter is useful for ensuring you do not prospect in an undesirable territory. For example, if you use ireland as a value, no Ireland-based companies will appear in your search results.
""",
default_factory=list,
)
q_organization_keyword_tags: Optional[list[str]] = SchemaField(
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry.""",
default_factory=list,
q_organization_keyword_tags: list[str] = SchemaField(
description="""Filter search results based on keywords associated with companies. For example, you can enter mining as a value to return only companies that have an association with the mining industry."""
)
q_organization_name: Optional[str] = SchemaField(
q_organization_name: str = SchemaField(
description="""Filter search results to include a specific company name.
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible.""",
default="",
If the value you enter for this parameter does not match with a company's name, the company will not appear in search results, even if it matches other parameters. Partial matches are accepted. For example, if you filter by the value marketing, a company called NY Marketing Unlimited would still be eligible as a search result, but NY Market Analysis would not be eligible."""
)
organization_ids: Optional[list[str]] = SchemaField(
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, identify the values for organization_id when you call this endpoint.""",
default_factory=list,
)
max_results: Optional[int] = SchemaField(
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
@@ -435,11 +421,11 @@ Use the page parameter to search the different pages of data.""",
class SearchOrganizationsResponse(BaseModel):
"""Response from Apollo's search organizations API"""
breadcrumbs: Optional[list[Breadcrumb]] = []
partial_results_only: Optional[bool] = True
has_join: Optional[bool] = True
disable_eu_prospecting: Optional[bool] = True
partial_results_limit: Optional[int] = 0
breadcrumbs: list[Breadcrumb] = []
partial_results_only: bool = True
has_join: bool = True
disable_eu_prospecting: bool = True
partial_results_limit: int = 0
pagination: Pagination = Pagination(
page=0, per_page=0, total_entries=0, total_pages=0
)
@@ -447,14 +433,14 @@ class SearchOrganizationsResponse(BaseModel):
accounts: list[Any] = []
organizations: list[Organization] = []
models_ids: list[str] = []
num_fetch_result: Optional[str] = ""
derived_params: Optional[str] = ""
num_fetch_result: Optional[str] = "N/A"
derived_params: Optional[str] = "N/A"
class SearchPeopleRequest(BaseModel):
"""Request for Apollo's search people API"""
person_titles: Optional[list[str]] = SchemaField(
person_titles: list[str] = SchemaField(
description="""Job titles held by the people you want to find. For a person to be included in search results, they only need to match 1 of the job titles you add. Adding more job titles expands your search results.
Results also include job titles with the same terms, even if they are not exact matches. For example, searching for marketing manager might return people with the job title content marketing manager.
@@ -464,13 +450,13 @@ Use this parameter in combination with the person_seniorities[] parameter to fin
default_factory=list,
placeholder="marketing manager",
)
person_locations: Optional[list[str]] = SchemaField(
person_locations: list[str] = SchemaField(
description="""The location where people live. You can search across cities, US states, and countries.
To find people based on the headquarters locations of their current employer, use the organization_locations parameter.""",
default_factory=list,
)
person_seniorities: Optional[list[SenorityLevels]] = SchemaField(
person_seniorities: list[SenorityLevels] = SchemaField(
description="""The job seniority that people hold within their current employer. This enables you to find people that currently hold positions at certain reporting levels, such as Director level or senior IC level.
For a person to be included in search results, they only need to match 1 of the seniorities you add. Adding more seniorities expands your search results.
@@ -480,7 +466,7 @@ Searches only return results based on their current job title, so searching for
Use this parameter in combination with the person_titles[] parameter to find people based on specific job functions and seniority levels.""",
default_factory=list,
)
organization_locations: Optional[list[str]] = SchemaField(
organization_locations: list[str] = SchemaField(
description="""The location of the company headquarters for a person's current employer. You can search across cities, US states, and countries.
If a company has several office locations, results are still based on the headquarters location. For example, if you search chicago but a company's HQ location is in boston, people that work for the Boston-based company will not appear in your results, even if they match other parameters.
@@ -488,7 +474,7 @@ If a company has several office locations, results are still based on the headqu
To find people based on their personal location, use the person_locations parameter.""",
default_factory=list,
)
q_organization_domains: Optional[list[str]] = SchemaField(
q_organization_domains: list[str] = SchemaField(
description="""The domain name for the person's employer. This can be the current employer or a previous employer. Do not include www., the @ symbol, or similar.
You can add multiple domains to search across companies.
@@ -496,23 +482,23 @@ You can add multiple domains to search across companies.
Examples: apollo.io and microsoft.com""",
default_factory=list,
)
contact_email_statuses: Optional[list[ContactEmailStatuses]] = SchemaField(
contact_email_statuses: list[ContactEmailStatuses] = SchemaField(
description="""The email statuses for the people you want to find. You can add multiple statuses to expand your search.""",
default_factory=list,
)
organization_ids: Optional[list[str]] = SchemaField(
organization_ids: list[str] = SchemaField(
description="""The Apollo IDs for the companies (employers) you want to include in your search results. Each company in the Apollo database is assigned a unique ID.
To find IDs, call the Organization Search endpoint and identify the values for organization_id.""",
default_factory=list,
)
organization_num_employees_range: Optional[list[int]] = SchemaField(
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
default_factory=list,
)
q_keywords: Optional[str] = SchemaField(
q_keywords: str = SchemaField(
description="""A string of words over which we want to filter the results""",
default="",
)
@@ -528,7 +514,7 @@ Use this parameter in combination with the per_page parameter to make search res
Use the page parameter to search the different pages of data.""",
default=100,
)
max_results: Optional[int] = SchemaField(
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
@@ -547,61 +533,16 @@ class SearchPeopleResponse(BaseModel):
populate_by_name=True,
)
breadcrumbs: Optional[list[Breadcrumb]] = []
partial_results_only: Optional[bool] = True
has_join: Optional[bool] = True
disable_eu_prospecting: Optional[bool] = True
partial_results_limit: Optional[int] = 0
breadcrumbs: list[Breadcrumb] = []
partial_results_only: bool = True
has_join: bool = True
disable_eu_prospecting: bool = True
partial_results_limit: int = 0
pagination: Pagination = Pagination(
page=0, per_page=0, total_entries=0, total_pages=0
)
contacts: list[Contact] = []
people: list[Contact] = []
model_ids: list[str] = []
num_fetch_result: Optional[str] = ""
derived_params: Optional[str] = ""
class EnrichPersonRequest(BaseModel):
"""Request for Apollo's person enrichment API"""
person_id: Optional[str] = SchemaField(
description="Apollo person ID to enrich (most accurate method)",
default="",
)
first_name: Optional[str] = SchemaField(
description="First name of the person to enrich",
default="",
)
last_name: Optional[str] = SchemaField(
description="Last name of the person to enrich",
default="",
)
name: Optional[str] = SchemaField(
description="Full name of the person to enrich",
default="",
)
email: Optional[str] = SchemaField(
description="Email address of the person to enrich",
default="",
)
domain: Optional[str] = SchemaField(
description="Company domain of the person to enrich",
default="",
)
company: Optional[str] = SchemaField(
description="Company name of the person to enrich",
default="",
)
linkedin_url: Optional[str] = SchemaField(
description="LinkedIn URL of the person to enrich",
default="",
)
organization_id: Optional[str] = SchemaField(
description="Apollo organization ID of the person's company",
default="",
)
title: Optional[str] = SchemaField(
description="Job title of the person to enrich",
default="",
)
num_fetch_result: Optional[str] = "N/A"
derived_params: Optional[str] = "N/A"

View File
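
The models file above adds a BaseModel subclass whose model_dump always drops credentials and skips unset, None, and default fields. A standalone sketch of that override; Example is a made-up model used only to show the effect (note that a set literal is used for the exclusion, since set("credentials") would build a set of single characters):

from pydantic import BaseModel as OriginalBaseModel

class BaseModel(OriginalBaseModel):
    def model_dump(self, *args, exclude: set[str] | None = None, **kwargs):
        # Always drop the credentials field, whatever the caller passes.
        exclude = {"credentials"} if exclude is None else exclude | {"credentials"}
        kwargs.setdefault("exclude_none", True)
        kwargs.setdefault("exclude_unset", True)
        kwargs.setdefault("exclude_defaults", True)
        return super().model_dump(*args, exclude=exclude, **kwargs)

class Example(BaseModel):
    credentials: str = "secret"
    name: str = "Acme"

print(Example(name="NY Marketing", credentials="secret").model_dump())  # -> {'name': 'NY Marketing'}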

@@ -11,14 +11,14 @@ from backend.blocks.apollo.models import (
SearchOrganizationsRequest,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, SchemaField
from backend.data.model import SchemaField
class SearchOrganizationsBlock(Block):
"""Search for organizations in Apollo"""
class Input(BlockSchema):
organization_num_employees_range: list[int] = SchemaField(
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
@@ -65,7 +65,7 @@ To find IDs, identify the values for organization_id when you call this endpoint
le=50000,
advanced=True,
)
credentials: ApolloCredentialsInput = CredentialsField(
credentials: ApolloCredentialsInput = SchemaField(
description="Apollo credentials",
)
@@ -201,17 +201,19 @@ To find IDs, identify the values for organization_id when you call this endpoint
)
@staticmethod
async def search_organizations(
def search_organizations(
query: SearchOrganizationsRequest, credentials: ApolloCredentials
) -> list[Organization]:
client = ApolloClient(credentials)
return await client.search_organizations(query)
return client.search_organizations(query)
async def run(
def run(
self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
) -> BlockOutput:
query = SearchOrganizationsRequest(**input_data.model_dump())
organizations = await self.search_organizations(query, credentials)
query = SearchOrganizationsRequest(
**input_data.model_dump(exclude={"credentials"})
)
organizations = self.search_organizations(query, credentials)
for organization in organizations:
yield "organization", organization
yield "organizations", organizations

View File

@@ -1,5 +1,3 @@
import asyncio
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
@@ -10,12 +8,11 @@ from backend.blocks.apollo._auth import (
from backend.blocks.apollo.models import (
Contact,
ContactEmailStatuses,
EnrichPersonRequest,
SearchPeopleRequest,
SenorityLevels,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, SchemaField
from backend.data.model import SchemaField
class SearchPeopleBlock(Block):
@@ -80,7 +77,7 @@ class SearchPeopleBlock(Block):
default_factory=list,
advanced=False,
)
organization_num_employees_range: list[int] = SchemaField(
organization_num_empoloyees_range: list[int] = SchemaField(
description="""The number range of employees working for the company. This enables you to find companies based on headcount. You can add multiple ranges to expand your search results.
Each range you add needs to be a string, with the upper and lower numbers of the range separated only by a comma.""",
@@ -93,19 +90,14 @@ class SearchPeopleBlock(Block):
advanced=False,
)
max_results: int = SchemaField(
description="""The maximum number of results to return. If you don't specify this parameter, the default is 25. Limited to 500 to prevent overspending.""",
default=25,
description="""The maximum number of results to return. If you don't specify this parameter, the default is 100.""",
default=100,
ge=1,
le=500,
advanced=True,
)
enrich_info: bool = SchemaField(
description="""Whether to enrich contacts with detailed information including real email addresses. This will double the search cost.""",
default=False,
le=50000,
advanced=True,
)
credentials: ApolloCredentialsInput = CredentialsField(
credentials: ApolloCredentialsInput = SchemaField(
description="Apollo credentials",
)
@@ -114,6 +106,9 @@ class SearchPeopleBlock(Block):
description="List of people found",
default_factory=list,
)
person: Contact = SchemaField(
description="Each found person, one at a time",
)
error: str = SchemaField(
description="Error message if the search failed",
default="",
@@ -129,6 +124,87 @@ class SearchPeopleBlock(Block):
test_credentials=TEST_CREDENTIALS,
test_input={"credentials": TEST_CREDENTIALS_INPUT},
test_output=[
(
"person",
Contact(
contact_roles=[],
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
linkedin_url="https://www.linkedin.com/in/johndoe",
title="Software Engineer",
organization_name="Google",
organization_id="123456",
contact_stage_id="1",
owner_id="1",
creator_id="1",
person_id="1",
email_needs_tickling=True,
source="apollo",
original_source="apollo",
headline="Software Engineer",
photo_url="https://www.linkedin.com/in/johndoe",
present_raw_address="123 Main St, Anytown, USA",
linkededin_uid="123456",
extrapolated_email_confidence=0.8,
salesforce_id="123456",
salesforce_lead_id="123456",
salesforce_contact_id="123456",
saleforce_account_id="123456",
crm_owner_id="123456",
created_at="2021-01-01",
emailer_campaign_ids=[],
direct_dial_status="active",
direct_dial_enrichment_failed_at="2021-01-01",
email_status="active",
email_source="apollo",
account_id="123456",
last_activity_date="2021-01-01",
hubspot_vid="123456",
hubspot_company_id="123456",
crm_id="123456",
sanitized_phone="123456",
merged_crm_ids="123456",
updated_at="2021-01-01",
queued_for_crm_push=True,
suggested_from_rule_engine_config_id="123456",
email_unsubscribed=None,
label_ids=[],
has_pending_email_arcgate_request=True,
has_email_arcgate_request=True,
existence_level=None,
email=None,
email_from_customer=None,
typed_custom_fields=[],
custom_field_errors=None,
salesforce_record_id=None,
crm_record_url=None,
email_status_unavailable_reason=None,
email_true_status=None,
updated_email_true_status=True,
contact_rule_config_statuses=[],
source_display_name=None,
twitter_url=None,
contact_campaign_statuses=[],
state=None,
city=None,
country=None,
account=None,
contact_emails=[],
organization=None,
employment_history=[],
time_zone=None,
intent_strength=None,
show_intent=True,
phone_numbers=[],
account_phone_note=None,
free_domain=True,
is_likely_to_engage=True,
email_domain_catchall=True,
contact_job_change_event=None,
),
),
(
"people",
[
@@ -297,41 +373,13 @@ class SearchPeopleBlock(Block):
)
@staticmethod
async def search_people(
def search_people(
query: SearchPeopleRequest, credentials: ApolloCredentials
) -> list[Contact]:
client = ApolloClient(credentials)
return await client.search_people(query)
return client.search_people(query)
@staticmethod
async def enrich_person(
query: EnrichPersonRequest, credentials: ApolloCredentials
) -> Contact:
client = ApolloClient(credentials)
return await client.enrich_person(query)
@staticmethod
def merge_contact_data(original: Contact, enriched: Contact) -> Contact:
"""
Merge contact data from original search with enriched data.
Enriched data complements original data, only filling in missing values.
"""
merged_data = original.model_dump()
enriched_data = enriched.model_dump()
# Only update fields that are None, empty string, empty list, or default values in original
for key, enriched_value in enriched_data.items():
# Skip if enriched value is None, empty string, or empty list
if enriched_value is None or enriched_value == "" or enriched_value == []:
continue
# Update if original value is None, empty string, empty list, or zero
if enriched_value:
merged_data[key] = enriched_value
return Contact(**merged_data)
async def run(
def run(
self,
input_data: Input,
*,
@@ -339,25 +387,8 @@ class SearchPeopleBlock(Block):
**kwargs,
) -> BlockOutput:
query = SearchPeopleRequest(**input_data.model_dump())
people = await self.search_people(query, credentials)
# Enrich with detailed info if requested
if input_data.enrich_info:
async def enrich_or_fallback(person: Contact):
try:
enrich_query = EnrichPersonRequest(person_id=person.id)
enriched_person = await self.enrich_person(
enrich_query, credentials
)
# Merge enriched data with original data, complementing instead of replacing
return self.merge_contact_data(person, enriched_person)
except Exception:
return person # If enrichment fails, use original person data
people = await asyncio.gather(
*(enrich_or_fallback(person) for person in people)
)
query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
people = self.search_people(query, credentials)
for person in people:
yield "person", person
yield "people", people

View File

@@ -1,138 +0,0 @@
from backend.blocks.apollo._api import ApolloClient
from backend.blocks.apollo._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
ApolloCredentials,
ApolloCredentialsInput,
)
from backend.blocks.apollo.models import Contact, EnrichPersonRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import CredentialsField, SchemaField
class GetPersonDetailBlock(Block):
"""Get detailed person data with Apollo API, including email reveal"""
class Input(BlockSchema):
person_id: str = SchemaField(
description="Apollo person ID to enrich (most accurate method)",
default="",
advanced=False,
)
first_name: str = SchemaField(
description="First name of the person to enrich",
default="",
advanced=False,
)
last_name: str = SchemaField(
description="Last name of the person to enrich",
default="",
advanced=False,
)
name: str = SchemaField(
description="Full name of the person to enrich (alternative to first_name + last_name)",
default="",
advanced=False,
)
email: str = SchemaField(
description="Known email address of the person (helps with matching)",
default="",
advanced=False,
)
domain: str = SchemaField(
description="Company domain of the person (e.g., 'google.com')",
default="",
advanced=False,
)
company: str = SchemaField(
description="Company name of the person",
default="",
advanced=False,
)
linkedin_url: str = SchemaField(
description="LinkedIn URL of the person",
default="",
advanced=False,
)
organization_id: str = SchemaField(
description="Apollo organization ID of the person's company",
default="",
advanced=True,
)
title: str = SchemaField(
description="Job title of the person to enrich",
default="",
advanced=True,
)
credentials: ApolloCredentialsInput = CredentialsField(
description="Apollo credentials",
)
class Output(BlockSchema):
contact: Contact = SchemaField(
description="Enriched contact information",
)
error: str = SchemaField(
description="Error message if enrichment failed",
default="",
)
def __init__(self):
super().__init__(
id="3b18d46c-3db6-42ae-a228-0ba441bdd176",
description="Get detailed person data with Apollo API, including email reveal",
categories={BlockCategory.SEARCH},
input_schema=GetPersonDetailBlock.Input,
output_schema=GetPersonDetailBlock.Output,
test_credentials=TEST_CREDENTIALS,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"first_name": "John",
"last_name": "Doe",
"company": "Google",
},
test_output=[
(
"contact",
Contact(
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
email="john.doe@gmail.com",
title="Software Engineer",
organization_name="Google",
linkedin_url="https://www.linkedin.com/in/johndoe",
),
),
],
test_mock={
"enrich_person": lambda query, credentials: Contact(
id="1",
name="John Doe",
first_name="John",
last_name="Doe",
email="john.doe@gmail.com",
title="Software Engineer",
organization_name="Google",
linkedin_url="https://www.linkedin.com/in/johndoe",
)
},
)
@staticmethod
async def enrich_person(
query: EnrichPersonRequest, credentials: ApolloCredentials
) -> Contact:
client = ApolloClient(credentials)
return await client.enrich_person(query)
async def run(
self,
input_data: Input,
*,
credentials: ApolloCredentials,
**kwargs,
) -> BlockOutput:
query = EnrichPersonRequest(**input_data.model_dump())
yield "contact", await self.enrich_person(query, credentials)

View File

@@ -1,9 +1,11 @@
import enum
from typing import Any
from typing import Any, List
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.util import json
from backend.util.file import store_media_file
from backend.util.mock import MockObject
from backend.util.type import MediaFileType, convert
@@ -12,12 +14,6 @@ class FileStoreBlock(Block):
file_in: MediaFileType = SchemaField(
description="The file to store in the temporary directory, it can be a URL, data URI, or local path."
)
base_64: bool = SchemaField(
description="Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks).",
default=False,
advanced=True,
title="Produce Base64 Output",
)
class Output(BlockSchema):
file_out: MediaFileType = SchemaField(
@@ -34,18 +30,19 @@ class FileStoreBlock(Block):
static_output=True,
)
async def run(
def run(
self,
input_data: Input,
*,
graph_exec_id: str,
**kwargs,
) -> BlockOutput:
yield "file_out", await store_media_file(
file_path = store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.file_in,
return_content=input_data.base_64,
return_content=False,
)
yield "file_out", file_path
class StoreValueBlock(Block):
@@ -87,35 +84,268 @@ class StoreValueBlock(Block):
static_output=True,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.data or input_data.input
class PrintToConsoleBlock(Block):
class FindInDictionaryBlock(Block):
class Input(BlockSchema):
text: Any = SchemaField(description="The data to print to the console.")
input: Any = SchemaField(description="Dictionary to lookup from")
key: str | int = SchemaField(description="Key to lookup in the dictionary")
class Output(BlockSchema):
output: Any = SchemaField(description="The data printed to the console.")
status: str = SchemaField(description="The status of the print operation.")
output: Any = SchemaField(description="Value found for the given key")
missing: Any = SchemaField(
description="Value of the input that missing the key"
)
def __init__(self):
super().__init__(
id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
description="Print the given text to the console, this is used for a debugging purpose.",
categories={BlockCategory.BASIC},
input_schema=PrintToConsoleBlock.Input,
output_schema=PrintToConsoleBlock.Output,
test_input={"text": "Hello, World!"},
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
description="Lookup the given key in the input dictionary/object/list and return the value.",
input_schema=FindInDictionaryBlock.Input,
output_schema=FindInDictionaryBlock.Output,
test_input=[
{"input": {"apple": 1, "banana": 2, "cherry": 3}, "key": "banana"},
{"input": {"x": 10, "y": 20, "z": 30}, "key": "w"},
{"input": [1, 2, 3], "key": 1},
{"input": [1, 2, 3], "key": 3},
{"input": MockObject(value="!!", key="key"), "key": "key"},
{"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"},
],
test_output=[
("output", "Hello, World!"),
("status", "printed"),
("output", 2),
("missing", {"x": 10, "y": 20, "z": 30}),
("output", 2),
("missing", [1, 2, 3]),
("output", "key"),
("output", ["v1", "v3"]),
],
categories={BlockCategory.BASIC},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
obj = input_data.input
key = input_data.key
if isinstance(obj, str):
obj = json.loads(obj)
if isinstance(obj, dict) and key in obj:
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, str):
if len(obj) == 0:
yield "output", []
elif isinstance(obj[0], dict) and key in obj[0]:
yield "output", [item[key] for item in obj if key in item]
else:
yield "output", [getattr(val, key) for val in obj if hasattr(val, key)]
elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key):
yield "output", getattr(obj, key)
else:
yield "missing", input_data.input
class AddToDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
default_factory=dict,
description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
)
key: str = SchemaField(
default="",
description="The key for the new entry.",
placeholder="new_key",
advanced=False,
)
value: Any = SchemaField(
default=None,
description="The value for the new entry.",
placeholder="new_value",
advanced=False,
)
entries: dict[Any, Any] = SchemaField(
default_factory=dict,
description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
advanced=True,
)
class Output(BlockSchema):
updated_dictionary: dict = SchemaField(
description="The dictionary with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="31d1064e-7446-4693-a7d4-65e5ca1180d1",
description="Adds a new key-value pair to a dictionary. If no dictionary is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToDictionaryBlock.Input,
output_schema=AddToDictionaryBlock.Output,
test_input=[
{
"dictionary": {"existing_key": "existing_value"},
"key": "new_key",
"value": "new_value",
},
{"key": "first_key", "value": "first_value"},
{
"dictionary": {"existing_key": "existing_value"},
"entries": {"new_key": "new_value", "first_key": "first_value"},
},
],
test_output=[
(
"updated_dictionary",
{"existing_key": "existing_value", "new_key": "new_value"},
),
("updated_dictionary", {"first_key": "first_value"}),
(
"updated_dictionary",
{
"existing_key": "existing_value",
"new_key": "new_value",
"first_key": "first_value",
},
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.text
yield "status", "printed"
def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
if input_data.value is not None and input_data.key:
updated_dict[input_data.key] = input_data.value
for key, value in input_data.entries.items():
updated_dict[key] = value
yield "updated_dictionary", updated_dict
class AddToListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(
default_factory=list,
advanced=False,
description="The list to add the entry to. If not provided, a new list will be created.",
)
entry: Any = SchemaField(
description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
advanced=False,
default=None,
)
entries: List[Any] = SchemaField(
default_factory=lambda: list(),
description="The entries to add to the list. This is the batch version of the `entry` field.",
advanced=True,
)
position: int | None = SchemaField(
default=None,
description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
)
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(
description="The list with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="aeb08fc1-2fc1-4141-bc8e-f758f183a822",
description="Adds a new entry to a list. The entry can be of any type. If no list is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToListBlock.Input,
output_schema=AddToListBlock.Output,
test_input=[
{
"list": [1, "string", {"existing_key": "existing_value"}],
"entry": {"new_key": "new_value"},
"position": 1,
},
{"entry": "first_entry"},
{"list": ["a", "b", "c"], "entry": "d"},
{
"entry": "e",
"entries": ["f", "g"],
"list": ["a", "b"],
"position": 1,
},
],
test_output=[
(
"updated_list",
[
1,
{"new_key": "new_value"},
"string",
{"existing_key": "existing_value"},
],
),
("updated_list", ["first_entry"]),
("updated_list", ["a", "b", "c", "d"]),
("updated_list", ["a", "f", "g", "e", "b"]),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
entries_added = input_data.entries.copy()
if input_data.entry:
entries_added.append(input_data.entry)
updated_list = input_data.list.copy()
if (pos := input_data.position) is not None:
updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
else:
updated_list += entries_added
yield "updated_list", updated_list
class FindInListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to search in.")
value: Any = SchemaField(description="The value to search for.")
class Output(BlockSchema):
index: int = SchemaField(description="The index of the value in the list.")
found: bool = SchemaField(
description="Whether the value was found in the list."
)
not_found_value: Any = SchemaField(
description="The value that was not found in the list."
)
def __init__(self):
super().__init__(
id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
description="Finds the index of the value in the list.",
categories={BlockCategory.BASIC},
input_schema=FindInListBlock.Input,
output_schema=FindInListBlock.Output,
test_input=[
{"list": [1, 2, 3, 4, 5], "value": 3},
{"list": [1, 2, 3, 4, 5], "value": 6},
],
test_output=[
("index", 2),
("found", True),
("found", False),
("not_found_value", 6),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
yield "index", input_data.list.index(input_data.value)
yield "found", True
except ValueError:
yield "found", False
yield "not_found_value", input_data.value
class NoteBlock(Block):
@@ -139,10 +369,108 @@ class NoteBlock(Block):
block_type=BlockType.NOTE,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "output", input_data.text
class CreateDictionaryBlock(Block):
class Input(BlockSchema):
values: dict[str, Any] = SchemaField(
description="Key-value pairs to create the dictionary with",
placeholder="e.g., {'name': 'Alice', 'age': 25}",
)
class Output(BlockSchema):
dictionary: dict[str, Any] = SchemaField(
description="The created dictionary containing the specified key-value pairs"
)
error: str = SchemaField(
description="Error message if dictionary creation failed"
)
def __init__(self):
super().__init__(
id="b924ddf4-de4f-4b56-9a85-358930dcbc91",
description="Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.",
categories={BlockCategory.DATA},
input_schema=CreateDictionaryBlock.Input,
output_schema=CreateDictionaryBlock.Output,
test_input=[
{
"values": {"name": "Alice", "age": 25, "city": "New York"},
},
{
"values": {"numbers": [1, 2, 3], "active": True, "score": 95.5},
},
],
test_output=[
(
"dictionary",
{"name": "Alice", "age": 25, "city": "New York"},
),
(
"dictionary",
{"numbers": [1, 2, 3], "active": True, "score": 95.5},
),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "dictionary", input_data.values
except Exception as e:
yield "error", f"Failed to create dictionary: {str(e)}"
class CreateListBlock(Block):
class Input(BlockSchema):
values: List[Any] = SchemaField(
description="A list of values to be combined into a new list.",
placeholder="e.g., ['Alice', 25, True]",
)
class Output(BlockSchema):
list: List[Any] = SchemaField(
description="The created list containing the specified values."
)
error: str = SchemaField(description="Error message if list creation failed.")
def __init__(self):
super().__init__(
id="a912d5c7-6e00-4542-b2a9-8034136930e4",
description="Creates a list with the specified values. Use this when you know all the values you want to add upfront.",
categories={BlockCategory.DATA},
input_schema=CreateListBlock.Input,
output_schema=CreateListBlock.Output,
test_input=[
{
"values": ["Alice", 25, True],
},
{
"values": [1, 2, 3, "four", {"key": "value"}],
},
],
test_output=[
(
"list",
["Alice", 25, True],
),
(
"list",
[1, 2, 3, "four", {"key": "value"}],
),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "list", input_data.values
except Exception as e:
yield "error", f"Failed to create list: {str(e)}"
class TypeOptions(enum.Enum):
STRING = "string"
NUMBER = "number"
@@ -160,7 +488,6 @@ class UniversalTypeConverterBlock(Block):
class Output(BlockSchema):
value: Any = SchemaField(description="The converted value.")
error: str = SchemaField(description="Error message if conversion failed.")
def __init__(self):
super().__init__(
@@ -171,7 +498,7 @@ class UniversalTypeConverterBlock(Block):
output_schema=UniversalTypeConverterBlock.Output,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
converted_value = convert(
input_data.value,

View File

@@ -38,7 +38,7 @@ class BlockInstallationBlock(Block):
disabled=True,
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
code = input_data.code
if search := re.search(r"class (\w+)\(Block\):", code):
@@ -64,7 +64,7 @@ class BlockInstallationBlock(Block):
from backend.util.test import execute_block_test
await execute_block_test(block)
execute_block_test(block)
yield "success", "Block installed successfully."
except Exception as e:
os.remove(file_path)

View File

@@ -3,7 +3,6 @@ from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.type import convert
class ComparisonOperator(Enum):
@@ -71,7 +70,7 @@ class ConditionBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
operator = input_data.operator
value1 = input_data.value1
@@ -164,7 +163,7 @@ class IfInputMatchesBlock(Block):
},
{
"input": 10,
"value": "None",
"value": None,
"yes_value": "Yes",
"no_value": "No",
},
@@ -181,24 +180,8 @@ class IfInputMatchesBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
# If input_data.value is not matching input_data.input, convert value to type of input
if (
input_data.input != input_data.value
and input_data.input is not input_data.value
):
try:
# Only attempt conversion if input is not None and value is not None
if input_data.input is not None and input_data.value is not None:
input_type = type(input_data.input)
# Avoid converting if input_type is Any or object
if input_type not in (Any, object):
input_data.value = convert(input_data.value, input_type)
except Exception:
pass # If conversion fails, just leave value as is
if input_data.input == input_data.value:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
if input_data.input == input_data.value or input_data.input is input_data.value:
yield "result", True
yield "yes_output", input_data.yes_value
else:
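
The simplified comparison above checks both == and is. The identity check adds nothing for ordinary values (None == None is already True), but it does catch objects whose __eq__ deliberately returns False even for the same object, NaN being the classic case:

nan = float("nan")
print(nan == nan)   # False: NaN never compares equal, even to itself
print(nan is nan)   # True: the identity check still treats it as a match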

View File

@@ -1,7 +1,7 @@
from enum import Enum
from typing import Literal
from e2b_code_interpreter import AsyncSandbox
from e2b_code_interpreter import Sandbox
from pydantic import SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -123,7 +123,7 @@ class CodeExecutionBlock(Block):
},
)
async def execute_code(
def execute_code(
self,
code: str,
language: ProgrammingLanguage,
@@ -135,21 +135,21 @@ class CodeExecutionBlock(Block):
try:
sandbox = None
if template_id:
sandbox = await AsyncSandbox.create(
sandbox = Sandbox(
template=template_id, api_key=api_key, timeout=timeout
)
else:
sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
sandbox = Sandbox(api_key=api_key, timeout=timeout)
if not sandbox:
raise Exception("Sandbox not created")
# Running setup commands
for cmd in setup_commands:
await sandbox.commands.run(cmd)
sandbox.commands.run(cmd)
# Executing the code
execution = await sandbox.run_code(
execution = sandbox.run_code(
code,
language=language.value,
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
@@ -167,11 +167,11 @@ class CodeExecutionBlock(Block):
except Exception as e:
raise e
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
response, stdout_logs, stderr_logs = await self.execute_code(
response, stdout_logs, stderr_logs = self.execute_code(
input_data.code,
input_data.language,
input_data.setup_commands,
@@ -278,11 +278,11 @@ class InstantiationBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
sandbox_id, response, stdout_logs, stderr_logs = await self.execute_code(
sandbox_id, response, stdout_logs, stderr_logs = self.execute_code(
input_data.setup_code,
input_data.language,
input_data.setup_commands,
@@ -303,7 +303,7 @@ class InstantiationBlock(Block):
except Exception as e:
yield "error", str(e)
async def execute_code(
def execute_code(
self,
code: str,
language: ProgrammingLanguage,
@@ -315,21 +315,21 @@ class InstantiationBlock(Block):
try:
sandbox = None
if template_id:
sandbox = await AsyncSandbox.create(
sandbox = Sandbox(
template=template_id, api_key=api_key, timeout=timeout
)
else:
sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)
sandbox = Sandbox(api_key=api_key, timeout=timeout)
if not sandbox:
raise Exception("Sandbox not created")
# Running setup commands
for cmd in setup_commands:
await sandbox.commands.run(cmd)
sandbox.commands.run(cmd)
# Executing the code
execution = await sandbox.run_code(
execution = sandbox.run_code(
code,
language=language.value,
on_error=lambda e: sandbox.kill(), # Kill the sandbox if there is an error
@@ -409,7 +409,7 @@ class StepExecutionBlock(Block):
},
)
async def execute_step_code(
def execute_step_code(
self,
sandbox_id: str,
code: str,
@@ -417,12 +417,12 @@ class StepExecutionBlock(Block):
api_key: str,
):
try:
sandbox = await AsyncSandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
sandbox = Sandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
if not sandbox:
raise Exception("Sandbox not found")
# Executing the code
execution = await sandbox.run_code(code, language=language.value)
execution = sandbox.run_code(code, language=language.value)
if execution.error:
raise Exception(execution.error)
@@ -436,11 +436,11 @@ class StepExecutionBlock(Block):
except Exception as e:
raise e
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
response, stdout_logs, stderr_logs = await self.execute_step_code(
response, stdout_logs, stderr_logs = self.execute_step_code(
input_data.sandbox_id,
input_data.step_code,
input_data.language,
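
Taken together, the hunks above move the code-executor blocks from the async AsyncSandbox API to the synchronous e2b_code_interpreter Sandbox. A compact sketch of that synchronous flow, using only the calls that appear in the diff (the API key and commands are placeholders):

from e2b_code_interpreter import Sandbox

sandbox = Sandbox(api_key="E2B_API_KEY", timeout=300)    # placeholder key
for cmd in ["pip install requests"]:                     # setup commands, if any
    sandbox.commands.run(cmd)
execution = sandbox.run_code("print(2 + 2)", language="python")
if execution.error:
    raise Exception(execution.error)
sandbox.kill()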

View File

@@ -49,7 +49,7 @@ class CodeExtractionBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
# List of supported programming languages with mapped aliases
language_aliases = {
"html": ["html", "htm"],

View File

@@ -56,5 +56,5 @@ class CompassAITriggerBlock(Block):
# ],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "transcription", input_data.payload.transcription

View File

@@ -30,7 +30,7 @@ class WordCharacterCountBlock(Block):
test_output=[("word_count", 4), ("character_count", 19)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
text = input_data.text
word_count = len(text.split())

View File

@@ -69,7 +69,7 @@ class ReadCsvBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO

View File

@@ -1,683 +0,0 @@
from typing import Any, List
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import loads
from backend.util.mock import MockObject
from backend.util.prompt import estimate_token_count_str
# =============================================================================
# Dictionary Manipulation Blocks
# =============================================================================
class CreateDictionaryBlock(Block):
class Input(BlockSchema):
values: dict[str, Any] = SchemaField(
description="Key-value pairs to create the dictionary with",
placeholder="e.g., {'name': 'Alice', 'age': 25}",
)
class Output(BlockSchema):
dictionary: dict[str, Any] = SchemaField(
description="The created dictionary containing the specified key-value pairs"
)
error: str = SchemaField(
description="Error message if dictionary creation failed"
)
def __init__(self):
super().__init__(
id="b924ddf4-de4f-4b56-9a85-358930dcbc91",
description="Creates a dictionary with the specified key-value pairs. Use this when you know all the values you want to add upfront.",
categories={BlockCategory.DATA},
input_schema=CreateDictionaryBlock.Input,
output_schema=CreateDictionaryBlock.Output,
test_input=[
{
"values": {"name": "Alice", "age": 25, "city": "New York"},
},
{
"values": {"numbers": [1, 2, 3], "active": True, "score": 95.5},
},
],
test_output=[
(
"dictionary",
{"name": "Alice", "age": 25, "city": "New York"},
),
(
"dictionary",
{"numbers": [1, 2, 3], "active": True, "score": 95.5},
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
# The values are already validated by Pydantic schema
yield "dictionary", input_data.values
except Exception as e:
yield "error", f"Failed to create dictionary: {str(e)}"
class AddToDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
default_factory=dict,
description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
)
key: str = SchemaField(
default="",
description="The key for the new entry.",
placeholder="new_key",
advanced=False,
)
value: Any = SchemaField(
default=None,
description="The value for the new entry.",
placeholder="new_value",
advanced=False,
)
entries: dict[Any, Any] = SchemaField(
default_factory=dict,
description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
advanced=True,
)
class Output(BlockSchema):
updated_dictionary: dict = SchemaField(
description="The dictionary with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="31d1064e-7446-4693-a7d4-65e5ca1180d1",
description="Adds a new key-value pair to a dictionary. If no dictionary is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToDictionaryBlock.Input,
output_schema=AddToDictionaryBlock.Output,
test_input=[
{
"dictionary": {"existing_key": "existing_value"},
"key": "new_key",
"value": "new_value",
},
{"key": "first_key", "value": "first_value"},
{
"dictionary": {"existing_key": "existing_value"},
"entries": {"new_key": "new_value", "first_key": "first_value"},
},
],
test_output=[
(
"updated_dictionary",
{"existing_key": "existing_value", "new_key": "new_value"},
),
("updated_dictionary", {"first_key": "first_value"}),
(
"updated_dictionary",
{
"existing_key": "existing_value",
"new_key": "new_value",
"first_key": "first_value",
},
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
if input_data.value is not None and input_data.key:
updated_dict[input_data.key] = input_data.value
for key, value in input_data.entries.items():
updated_dict[key] = value
yield "updated_dictionary", updated_dict
class FindInDictionaryBlock(Block):
class Input(BlockSchema):
input: Any = SchemaField(description="Dictionary to lookup from")
key: str | int = SchemaField(description="Key to lookup in the dictionary")
class Output(BlockSchema):
output: Any = SchemaField(description="Value found for the given key")
missing: Any = SchemaField(
description="Value of the input that missing the key"
)
def __init__(self):
super().__init__(
id="0e50422c-6dee-4145-83d6-3a5a392f65de",
description="Lookup the given key in the input dictionary/object/list and return the value.",
input_schema=FindInDictionaryBlock.Input,
output_schema=FindInDictionaryBlock.Output,
test_input=[
{"input": {"apple": 1, "banana": 2, "cherry": 3}, "key": "banana"},
{"input": {"x": 10, "y": 20, "z": 30}, "key": "w"},
{"input": [1, 2, 3], "key": 1},
{"input": [1, 2, 3], "key": 3},
{"input": MockObject(value="!!", key="key"), "key": "key"},
{"input": [{"k1": "v1"}, {"k2": "v2"}, {"k1": "v3"}], "key": "k1"},
],
test_output=[
("output", 2),
("missing", {"x": 10, "y": 20, "z": 30}),
("output", 2),
("missing", [1, 2, 3]),
("output", "key"),
("output", ["v1", "v3"]),
],
categories={BlockCategory.BASIC},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
obj = input_data.input
key = input_data.key
if isinstance(obj, str):
obj = loads(obj)
if isinstance(obj, dict) and key in obj:
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):
yield "output", obj[key]
elif isinstance(obj, list) and isinstance(key, str):
if len(obj) == 0:
yield "output", []
elif isinstance(obj[0], dict) and key in obj[0]:
yield "output", [item[key] for item in obj if key in item]
else:
yield "output", [getattr(val, key) for val in obj if hasattr(val, key)]
elif isinstance(obj, object) and isinstance(key, str) and hasattr(obj, key):
yield "output", getattr(obj, key)
else:
yield "missing", input_data.input
class RemoveFromDictionaryBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
description="The dictionary to modify."
)
key: str | int = SchemaField(description="Key to remove from the dictionary.")
return_value: bool = SchemaField(
default=False, description="Whether to return the removed value."
)
class Output(BlockSchema):
updated_dictionary: dict[Any, Any] = SchemaField(
description="The dictionary after removal."
)
removed_value: Any = SchemaField(description="The removed value if requested.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="46afe2ea-c613-43f8-95ff-6692c3ef6876",
description="Removes a key-value pair from a dictionary.",
categories={BlockCategory.BASIC},
input_schema=RemoveFromDictionaryBlock.Input,
output_schema=RemoveFromDictionaryBlock.Output,
test_input=[
{
"dictionary": {"a": 1, "b": 2, "c": 3},
"key": "b",
"return_value": True,
},
{"dictionary": {"x": "hello", "y": "world"}, "key": "x"},
],
test_output=[
("updated_dictionary", {"a": 1, "c": 3}),
("removed_value", 2),
("updated_dictionary", {"y": "world"}),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
try:
removed_value = updated_dict.pop(input_data.key)
yield "updated_dictionary", updated_dict
if input_data.return_value:
yield "removed_value", removed_value
except KeyError:
yield "error", f"Key '{input_data.key}' not found in dictionary"
class ReplaceDictionaryValueBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(
description="The dictionary to modify."
)
key: str | int = SchemaField(description="Key to replace the value for.")
value: Any = SchemaField(description="The new value for the given key.")
class Output(BlockSchema):
updated_dictionary: dict[Any, Any] = SchemaField(
description="The dictionary after replacement."
)
old_value: Any = SchemaField(description="The value that was replaced.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="27e31876-18b6-44f3-ab97-f6226d8b3889",
description="Replaces the value for a specified key in a dictionary.",
categories={BlockCategory.BASIC},
input_schema=ReplaceDictionaryValueBlock.Input,
output_schema=ReplaceDictionaryValueBlock.Output,
test_input=[
{"dictionary": {"a": 1, "b": 2, "c": 3}, "key": "b", "value": 99},
{
"dictionary": {"x": "hello", "y": "world"},
"key": "y",
"value": "universe",
},
],
test_output=[
("updated_dictionary", {"a": 1, "b": 99, "c": 3}),
("old_value", 2),
("updated_dictionary", {"x": "hello", "y": "universe"}),
("old_value", "world"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
updated_dict = input_data.dictionary.copy()
try:
old_value = updated_dict[input_data.key]
updated_dict[input_data.key] = input_data.value
yield "updated_dictionary", updated_dict
yield "old_value", old_value
except KeyError:
yield "error", f"Key '{input_data.key}' not found in dictionary"
class DictionaryIsEmptyBlock(Block):
class Input(BlockSchema):
dictionary: dict[Any, Any] = SchemaField(description="The dictionary to check.")
class Output(BlockSchema):
is_empty: bool = SchemaField(description="True if the dictionary is empty.")
def __init__(self):
super().__init__(
id="a3cf3f64-6bb9-4cc6-9900-608a0b3359b0",
description="Checks if a dictionary is empty.",
categories={BlockCategory.BASIC},
input_schema=DictionaryIsEmptyBlock.Input,
output_schema=DictionaryIsEmptyBlock.Output,
test_input=[{"dictionary": {}}, {"dictionary": {"a": 1}}],
test_output=[("is_empty", True), ("is_empty", False)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "is_empty", len(input_data.dictionary) == 0
# =============================================================================
# List Manipulation Blocks
# =============================================================================
class CreateListBlock(Block):
class Input(BlockSchema):
values: List[Any] = SchemaField(
description="A list of values to be combined into a new list.",
placeholder="e.g., ['Alice', 25, True]",
)
max_size: int | None = SchemaField(
default=None,
description="Maximum size of the list. If provided, the list will be yielded in chunks of this size.",
advanced=True,
)
max_tokens: int | None = SchemaField(
default=None,
description="Maximum tokens for the list. If provided, the list will be yielded in chunks that fit within this token limit.",
advanced=True,
)
class Output(BlockSchema):
list: List[Any] = SchemaField(
description="The created list containing the specified values."
)
error: str = SchemaField(description="Error message if list creation failed.")
def __init__(self):
super().__init__(
id="a912d5c7-6e00-4542-b2a9-8034136930e4",
description="Creates a list with the specified values. Use this when you know all the values you want to add upfront. This block can also yield the list in batches based on a maximum size or token limit.",
categories={BlockCategory.DATA},
input_schema=CreateListBlock.Input,
output_schema=CreateListBlock.Output,
test_input=[
{
"values": ["Alice", 25, True],
},
{
"values": [1, 2, 3, "four", {"key": "value"}],
},
],
test_output=[
(
"list",
["Alice", 25, True],
),
(
"list",
[1, 2, 3, "four", {"key": "value"}],
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
chunk = []
cur_tokens, max_tokens = 0, input_data.max_tokens
cur_size, max_size = 0, input_data.max_size
for value in input_data.values:
if max_tokens:
tokens = estimate_token_count_str(value)
else:
tokens = 0
# Check if adding this value would exceed either limit
if (max_tokens and (cur_tokens + tokens > max_tokens)) or (
max_size and (cur_size + 1 > max_size)
):
yield "list", chunk
chunk = [value]
cur_size, cur_tokens = 1, tokens
else:
chunk.append(value)
cur_size, cur_tokens = cur_size + 1, cur_tokens + tokens
# Yield final chunk if any
if chunk or not input_data.values:
yield "list", chunk
class AddToListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(
default_factory=list,
advanced=False,
description="The list to add the entry to. If not provided, a new list will be created.",
)
entry: Any = SchemaField(
description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
advanced=False,
default=None,
)
entries: List[Any] = SchemaField(
default_factory=lambda: list(),
description="The entries to add to the list. This is the batch version of the `entry` field.",
advanced=True,
)
position: int | None = SchemaField(
default=None,
description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
)
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(
description="The list with the new entry added."
)
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="aeb08fc1-2fc1-4141-bc8e-f758f183a822",
description="Adds a new entry to a list. The entry can be of any type. If no list is provided, a new one is created.",
categories={BlockCategory.BASIC},
input_schema=AddToListBlock.Input,
output_schema=AddToListBlock.Output,
test_input=[
{
"list": [1, "string", {"existing_key": "existing_value"}],
"entry": {"new_key": "new_value"},
"position": 1,
},
{"entry": "first_entry"},
{"list": ["a", "b", "c"], "entry": "d"},
{
"entry": "e",
"entries": ["f", "g"],
"list": ["a", "b"],
"position": 1,
},
],
test_output=[
(
"updated_list",
[
1,
{"new_key": "new_value"},
"string",
{"existing_key": "existing_value"},
],
),
("updated_list", ["first_entry"]),
("updated_list", ["a", "b", "c", "d"]),
("updated_list", ["a", "f", "g", "e", "b"]),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
entries_added = input_data.entries.copy()
if input_data.entry:
entries_added.append(input_data.entry)
updated_list = input_data.list.copy()
if (pos := input_data.position) is not None:
updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
else:
updated_list += entries_added
yield "updated_list", updated_list
class FindInListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to search in.")
value: Any = SchemaField(description="The value to search for.")
class Output(BlockSchema):
index: int = SchemaField(description="The index of the value in the list.")
found: bool = SchemaField(
description="Whether the value was found in the list."
)
not_found_value: Any = SchemaField(
description="The value that was not found in the list."
)
def __init__(self):
super().__init__(
id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
description="Finds the index of the value in the list.",
categories={BlockCategory.BASIC},
input_schema=FindInListBlock.Input,
output_schema=FindInListBlock.Output,
test_input=[
{"list": [1, 2, 3, 4, 5], "value": 3},
{"list": [1, 2, 3, 4, 5], "value": 6},
],
test_output=[
("index", 2),
("found", True),
("found", False),
("not_found_value", 6),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
yield "index", input_data.list.index(input_data.value)
yield "found", True
except ValueError:
yield "found", False
yield "not_found_value", input_data.value
class GetListItemBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to get the item from.")
index: int = SchemaField(
description="The 0-based index of the item (supports negative indices)."
)
class Output(BlockSchema):
item: Any = SchemaField(description="The item at the specified index.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="262ca24c-1025-43cf-a578-534e23234e97",
description="Returns the element at the given index.",
categories={BlockCategory.BASIC},
input_schema=GetListItemBlock.Input,
output_schema=GetListItemBlock.Output,
test_input=[
{"list": [1, 2, 3], "index": 1},
{"list": [1, 2, 3], "index": -1},
],
test_output=[
("item", 2),
("item", 3),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
try:
yield "item", input_data.list[input_data.index]
except IndexError:
yield "error", "Index out of range"
class RemoveFromListBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to modify.")
value: Any = SchemaField(
default=None, description="Value to remove from the list."
)
index: int | None = SchemaField(
default=None,
description="Index of the item to pop (supports negative indices).",
)
return_item: bool = SchemaField(
default=False, description="Whether to return the removed item."
)
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(description="The list after removal.")
removed_item: Any = SchemaField(description="The removed item if requested.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="d93c5a93-ac7e-41c1-ae5c-ef67e6e9b826",
description="Removes an item from a list by value or index.",
categories={BlockCategory.BASIC},
input_schema=RemoveFromListBlock.Input,
output_schema=RemoveFromListBlock.Output,
test_input=[
{"list": [1, 2, 3], "index": 1, "return_item": True},
{"list": ["a", "b", "c"], "value": "b"},
],
test_output=[
("updated_list", [1, 3]),
("removed_item", 2),
("updated_list", ["a", "c"]),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
lst = input_data.list.copy()
removed = None
try:
if input_data.index is not None:
removed = lst.pop(input_data.index)
elif input_data.value is not None:
lst.remove(input_data.value)
removed = input_data.value
else:
raise ValueError("No index or value provided for removal")
except (IndexError, ValueError):
yield "error", "Index or value not found"
return
yield "updated_list", lst
if input_data.return_item:
yield "removed_item", removed
class ReplaceListItemBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to modify.")
index: int = SchemaField(
description="Index of the item to replace (supports negative indices)."
)
value: Any = SchemaField(description="The new value for the given index.")
class Output(BlockSchema):
updated_list: List[Any] = SchemaField(description="The list after replacement.")
old_item: Any = SchemaField(description="The item that was replaced.")
error: str = SchemaField(description="Error message if the operation failed.")
def __init__(self):
super().__init__(
id="fbf62922-bea1-4a3d-8bac-23587f810b38",
description="Replaces an item at the specified index.",
categories={BlockCategory.BASIC},
input_schema=ReplaceListItemBlock.Input,
output_schema=ReplaceListItemBlock.Output,
test_input=[
{"list": [1, 2, 3], "index": 1, "value": 99},
{"list": ["a", "b"], "index": -1, "value": "c"},
],
test_output=[
("updated_list", [1, 99, 3]),
("old_item", 2),
("updated_list", ["a", "c"]),
("old_item", "b"),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
lst = input_data.list.copy()
try:
old = lst[input_data.index]
lst[input_data.index] = input_data.value
except IndexError:
yield "error", "Index out of range"
return
yield "updated_list", lst
yield "old_item", old
class ListIsEmptyBlock(Block):
class Input(BlockSchema):
list: List[Any] = SchemaField(description="The list to check.")
class Output(BlockSchema):
is_empty: bool = SchemaField(description="True if the list is empty.")
def __init__(self):
super().__init__(
id="896ed73b-27d0-41be-813c-c1c1dc856c03",
description="Checks if a list is empty.",
categories={BlockCategory.BASIC},
input_schema=ListIsEmptyBlock.Input,
output_schema=ListIsEmptyBlock.Output,
test_input=[{"list": []}, {"list": [1]}],
test_output=[("is_empty", True), ("is_empty", False)],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "is_empty", len(input_data.list) == 0

View File

@@ -34,6 +34,6 @@ This is a "quoted" string.""",
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
decoded_text = codecs.decode(input_data.text, "unicode_escape")
yield "decoded_text", decoded_text

View File

@@ -1,3 +1,4 @@
import asyncio
from typing import Literal
import aiohttp
@@ -73,11 +74,7 @@ class ReadDiscordMessagesBlock(Block):
("username", "test_user"),
],
test_mock={
"run_bot": lambda token: {
"output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
"channel_name": "general",
"username": "test_user",
}
"run_bot": lambda token: asyncio.Future() # Create a Future object for mocking
},
)
@@ -109,24 +106,37 @@ class ReadDiscordMessagesBlock(Block):
if attachment.filename.endswith((".txt", ".py")):
async with aiohttp.ClientSession() as session:
async with session.get(attachment.url) as response:
file_content = response.text()
file_content = await response.text()
self.output_data += f"\n\nFile from user: {attachment.filename}\nContent: {file_content}"
await client.close()
await client.start(token.get_secret_value())
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
async for output_name, output_value in self.__run(input_data, credentials):
yield output_name, output_value
while True:
for output_name, output_value in self.__run(input_data, credentials):
yield output_name, output_value
break
async def __run(
self, input_data: Input, credentials: APIKeyCredentials
) -> BlockOutput:
def __run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
try:
result = await self.run_bot(credentials.api_key)
loop = asyncio.get_event_loop()
future = self.run_bot(credentials.api_key)
# If it's a Future (mock), set the result
if isinstance(future, asyncio.Future):
future.set_result(
{
"output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
"channel_name": "general",
"username": "test_user",
}
)
result = loop.run_until_complete(future)
# For testing purposes, use the mocked result
if isinstance(result, dict):
@@ -180,7 +190,7 @@ class SendDiscordMessageBlock(Block):
},
test_output=[("status", "Message sent")],
test_mock={
"send_message": lambda token, channel_name, message_content: "Message sent"
"send_message": lambda token, channel_name, message_content: asyncio.Future()
},
test_credentials=TEST_CREDENTIALS,
)
@@ -212,16 +222,23 @@ class SendDiscordMessageBlock(Block):
"""Splits a message into chunks not exceeding the Discord limit."""
return [message[i : i + limit] for i in range(0, len(message), limit)]
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self.send_message(
loop = asyncio.get_event_loop()
future = self.send_message(
credentials.api_key.get_secret_value(),
input_data.channel_name,
input_data.message_content,
)
# If it's a Future (mock), set the result
if isinstance(future, asyncio.Future):
future.set_result("Message sent")
result = loop.run_until_complete(future)
# For testing purposes, use the mocked result
if isinstance(result, str):
self.output_data = result
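
The test plumbing above replaces the Discord helpers with mocks that return a bare asyncio.Future, which the block then resolves by hand before driving it through the event loop. A compact sketch of that resolve-then-run pattern on its own:

import asyncio

loop = asyncio.new_event_loop()
future = loop.create_future()            # stands in for the mocked run_bot / send_message result
future.set_result("Message sent")        # resolved manually, as the block does for its mocks
print(loop.run_until_complete(future))   # -> Message sent
loop.close()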

View File

@@ -121,7 +121,7 @@ class SendEmailBlock(Block):
return "Email sent successfully"
async def run(
def run(
self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
) -> BlockOutput:
yield "status", self.send_email(

View File

@@ -9,7 +9,7 @@ from backend.blocks.exa._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
@@ -62,7 +62,7 @@ class ExaContentsBlock(Block):
output_schema=ExaContentsBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
@@ -79,8 +79,10 @@ class ExaContentsBlock(Block):
}
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []

View File

@@ -9,7 +9,7 @@ from backend.blocks.exa._auth import (
from backend.blocks.exa.helpers import ContentSettings
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class ExaSearchBlock(Block):
@@ -78,9 +78,6 @@ class ExaSearchBlock(Block):
description="List of search results",
default_factory=list,
)
error: str = SchemaField(
description="Error message if the request failed",
)
def __init__(self):
super().__init__(
@@ -91,7 +88,7 @@ class ExaSearchBlock(Block):
output_schema=ExaSearchBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/search"
@@ -136,9 +133,11 @@ class ExaSearchBlock(Block):
payload[api_field] = value
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
# Extract just the results array from the response
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []

View File

@@ -8,7 +8,7 @@ from backend.blocks.exa._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
from .helpers import ContentSettings
@@ -67,7 +67,6 @@ class ExaFindSimilarBlock(Block):
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
@@ -78,7 +77,7 @@ class ExaFindSimilarBlock(Block):
output_schema=ExaFindSimilarBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
@@ -120,8 +119,10 @@ class ExaFindSimilarBlock(Block):
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []

View File

@@ -1,8 +1,10 @@
import asyncio
import logging
import time
from enum import Enum
from typing import Any
import httpx
from backend.blocks.fal._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
@@ -12,7 +14,6 @@ from backend.blocks.fal._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import ClientResponseError, Requests
logger = logging.getLogger(__name__)
@@ -20,7 +21,6 @@ logger = logging.getLogger(__name__)
class FalModel(str, Enum):
MOCHI = "fal-ai/mochi-v1"
LUMA = "fal-ai/luma-dream-machine"
VEO3 = "fal-ai/veo3"
class AIVideoGeneratorBlock(Block):
@@ -65,37 +65,35 @@ class AIVideoGeneratorBlock(Block):
)
def _get_headers(self, api_key: str) -> dict[str, str]:
"""Get headers for FAL API Requests."""
"""Get headers for FAL API requests."""
return {
"Authorization": f"Key {api_key}",
"Content-Type": "application/json",
}
async def _submit_request(
def _submit_request(
self, url: str, headers: dict[str, str], data: dict[str, Any]
) -> dict[str, Any]:
"""Submit a request to the FAL API."""
try:
response = await Requests().post(url, headers=headers, json=data)
response = httpx.post(url, headers=headers, json=data)
response.raise_for_status()
return response.json()
except ClientResponseError as e:
except httpx.HTTPError as e:
logger.error(f"FAL API request failed: {str(e)}")
raise RuntimeError(f"Failed to submit request: {str(e)}")
async def _poll_status(
self, status_url: str, headers: dict[str, str]
) -> dict[str, Any]:
def _poll_status(self, status_url: str, headers: dict[str, str]) -> dict[str, Any]:
"""Poll the status endpoint until completion or failure."""
try:
response = await Requests().get(status_url, headers=headers)
response = httpx.get(status_url, headers=headers)
response.raise_for_status()
return response.json()
except ClientResponseError as e:
except httpx.HTTPError as e:
logger.error(f"Failed to get status: {str(e)}")
raise RuntimeError(f"Failed to get status: {str(e)}")
async def generate_video(
self, input_data: Input, credentials: FalCredentials
) -> str:
def generate_video(self, input_data: Input, credentials: FalCredentials) -> str:
"""Generate video using the specified FAL model."""
base_url = "https://queue.fal.run"
api_key = credentials.api_key.get_secret_value()
@@ -104,16 +102,13 @@ class AIVideoGeneratorBlock(Block):
# Submit generation request
submit_url = f"{base_url}/{input_data.model.value}"
submit_data = {"prompt": input_data.prompt}
if input_data.model == FalModel.VEO3:
submit_data["generate_audio"] = True # type: ignore
seen_logs = set()
try:
# Submit request to queue
submit_response = await Requests().post(
submit_url, headers=headers, json=submit_data
)
submit_response = httpx.post(submit_url, headers=headers, json=submit_data)
submit_response.raise_for_status()
request_data = submit_response.json()
# Get request_id and urls from initial response
@@ -124,23 +119,14 @@ class AIVideoGeneratorBlock(Block):
if not all([request_id, status_url, result_url]):
raise ValueError("Missing required data in submission response")
# Ensure status_url is a string
if not isinstance(status_url, str):
raise ValueError("Invalid status URL format")
# Ensure result_url is a string
if not isinstance(result_url, str):
raise ValueError("Invalid result URL format")
# Poll for status with exponential backoff
max_attempts = 30
attempt = 0
base_wait_time = 5
while attempt < max_attempts:
status_response = await Requests().get(
f"{status_url}?logs=1", headers=headers
)
status_response = httpx.get(f"{status_url}?logs=1", headers=headers)
status_response.raise_for_status()
status_data = status_response.json()
# Process new logs only
@@ -163,7 +149,8 @@ class AIVideoGeneratorBlock(Block):
status = status_data.get("status")
if status == "COMPLETED":
# Get the final result
result_response = await Requests().get(result_url, headers=headers)
result_response = httpx.get(result_url, headers=headers)
result_response.raise_for_status()
result_data = result_response.json()
if "video" not in result_data or not isinstance(
@@ -172,8 +159,8 @@ class AIVideoGeneratorBlock(Block):
raise ValueError("Invalid response format - missing video data")
video_url = result_data["video"].get("url")
if not video_url or not isinstance(video_url, str):
raise ValueError("No valid video URL in response")
if not video_url:
raise ValueError("No video URL in response")
return video_url
@@ -193,19 +180,19 @@ class AIVideoGeneratorBlock(Block):
logger.info(f"[FAL Generation] Status: Unknown status: {status}")
wait_time = min(base_wait_time * (2**attempt), 60) # Cap at 60 seconds
await asyncio.sleep(wait_time)
time.sleep(wait_time)
attempt += 1
raise RuntimeError("Maximum polling attempts reached")
except ClientResponseError as e:
except httpx.HTTPError as e:
raise RuntimeError(f"API request failed: {str(e)}")
async def run(
def run(
self, input_data: Input, *, credentials: FalCredentials, **kwargs
) -> BlockOutput:
try:
video_url = await self.generate_video(input_data, credentials)
video_url = self.generate_video(input_data, credentials)
yield "video_url", video_url
except Exception as e:
error_message = str(e)
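Note: the FAL hunk replaces the awaited Requests wrapper with blocking httpx calls and swaps asyncio.sleep for time.sleep inside a polling loop with capped exponential backoff. A condensed sync sketch of that loop (status value taken from the hunk; everything else illustrative):

import time
import httpx

def poll_until_complete(status_url: str, headers: dict[str, str],
                        max_attempts: int = 30, base_wait: float = 5.0) -> dict:
    """Poll a queue status endpoint until completion, backing off between tries."""
    for attempt in range(max_attempts):
        response = httpx.get(status_url, headers=headers)
        response.raise_for_status()
        status_data = response.json()
        if status_data.get("status") == "COMPLETED":
            return status_data
        # 5s, 10s, 20s, 40s, then capped at 60s, mirroring min(base * 2**attempt, 60).
        time.sleep(min(base_wait * (2 ** attempt), 60))
    raise RuntimeError("Maximum polling attempts reached")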

View File

@@ -1,183 +0,0 @@
from enum import Enum
from typing import Literal, Optional
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import MediaFileType, store_media_file
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="replicate",
api_key=SecretStr("mock-replicate-api-key"),
title="Mock Replicate API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
class FluxKontextModelName(str, Enum):
PRO = "Flux Kontext Pro"
MAX = "Flux Kontext Max"
@property
def api_name(self) -> str:
return f"black-forest-labs/flux-kontext-{self.name.lower()}"
class AspectRatio(str, Enum):
MATCH_INPUT_IMAGE = "match_input_image"
ASPECT_1_1 = "1:1"
ASPECT_16_9 = "16:9"
ASPECT_9_16 = "9:16"
ASPECT_4_3 = "4:3"
ASPECT_3_4 = "3:4"
ASPECT_3_2 = "3:2"
ASPECT_2_3 = "2:3"
ASPECT_4_5 = "4:5"
ASPECT_5_4 = "5:4"
ASPECT_21_9 = "21:9"
ASPECT_9_21 = "9:21"
ASPECT_2_1 = "2:1"
ASPECT_1_2 = "1:2"
class AIImageEditorBlock(Block):
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.REPLICATE], Literal["api_key"]
] = CredentialsField(
description="Replicate API key with permissions for Flux Kontext models",
)
prompt: str = SchemaField(
description="Text instruction describing the desired edit",
title="Prompt",
)
input_image: Optional[MediaFileType] = SchemaField(
description="Reference image URI (jpeg, png, gif, webp)",
default=None,
title="Input Image",
)
aspect_ratio: AspectRatio = SchemaField(
description="Aspect ratio of the generated image",
default=AspectRatio.MATCH_INPUT_IMAGE,
title="Aspect Ratio",
advanced=False,
)
seed: Optional[int] = SchemaField(
description="Random seed. Set for reproducible generation",
default=None,
title="Seed",
advanced=True,
)
model: FluxKontextModelName = SchemaField(
description="Model variant to use",
default=FluxKontextModelName.PRO,
title="Model",
)
class Output(BlockSchema):
output_image: MediaFileType = SchemaField(
description="URL of the transformed image"
)
error: str = SchemaField(description="Error message if generation failed")
def __init__(self):
super().__init__(
id="3fd9c73d-4370-4925-a1ff-1b86b99fabfa",
description=(
"Edit images using BlackForest Labs' Flux Kontext models. Provide a prompt "
"and optional reference image to generate a modified image."
),
categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
input_schema=AIImageEditorBlock.Input,
output_schema=AIImageEditorBlock.Output,
test_input={
"prompt": "Add a hat to the cat",
"input_image": "data:image/png;base64,MQ==",
"aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
"seed": None,
"model": FluxKontextModelName.PRO,
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
("output_image", "https://replicate.com/output/edited-image.png"),
],
test_mock={
"run_model": lambda *args, **kwargs: "https://replicate.com/output/edited-image.png",
},
test_credentials=TEST_CREDENTIALS,
)
async def run(
self,
input_data: Input,
*,
credentials: APIKeyCredentials,
graph_exec_id: str,
**kwargs,
) -> BlockOutput:
result = await self.run_model(
api_key=credentials.api_key,
model_name=input_data.model.api_name,
prompt=input_data.prompt,
input_image_b64=(
await store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.input_image,
return_content=True,
)
if input_data.input_image
else None
),
aspect_ratio=input_data.aspect_ratio.value,
seed=input_data.seed,
)
yield "output_image", result
async def run_model(
self,
api_key: SecretStr,
model_name: str,
prompt: str,
input_image_b64: Optional[str],
aspect_ratio: str,
seed: Optional[int],
) -> MediaFileType:
client = ReplicateClient(api_token=api_key.get_secret_value())
input_params = {
"prompt": prompt,
"input_image": input_image_b64,
"aspect_ratio": aspect_ratio,
**({"seed": seed} if seed is not None else {}),
}
output: FileOutput | list[FileOutput] = await client.async_run( # type: ignore
model_name,
input=input_params,
wait=False,
)
if isinstance(output, list) and output:
output = output[0]
if isinstance(output, FileOutput):
return MediaFileType(output.url)
if isinstance(output, str):
return MediaFileType(output)
raise ValueError("No output received")

View File

@@ -46,6 +46,6 @@ class GenericWebhookTriggerBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "constants", input_data.constants
yield "payload", input_data.payload

View File

@@ -1,30 +1,19 @@
from typing import overload
from urllib.parse import urlparse
from backend.blocks.github._auth import (
GithubCredentials,
GithubFineGrainedAPICredentials,
)
from backend.util.request import URL, Requests
from backend.util.request import Requests
@overload
def _convert_to_api_url(url: str) -> str: ...
@overload
def _convert_to_api_url(url: URL) -> URL: ...
def _convert_to_api_url(url: str | URL) -> str | URL:
def _convert_to_api_url(url: str) -> str:
"""
Converts a standard GitHub URL to the corresponding GitHub API URL.
Handles repository URLs, issue URLs, pull request URLs, and more.
"""
if url_as_str := isinstance(url, str):
url = urlparse(url)
path_parts = url.path.strip("/").split("/")
parsed_url = urlparse(url)
path_parts = parsed_url.path.strip("/").split("/")
if len(path_parts) >= 2:
owner, repo = path_parts[0], path_parts[1]
@@ -39,7 +28,7 @@ def _convert_to_api_url(url: str | URL) -> str | URL:
else:
raise ValueError("Invalid GitHub URL format.")
return api_url if url_as_str else urlparse(api_url)
return api_url
def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
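Note: the helper above drops the str | URL overloads and keeps a plain string-to-string conversion built on urlparse. The middle of the mapping is elided by the hunk, but for a bare repository URL the shape is roughly as follows (a sketch assuming the standard api.github.com/repos prefix):

from urllib.parse import urlparse

def convert_repo_url_to_api(url: str) -> str:
    # e.g. https://github.com/owner/repo -> https://api.github.com/repos/owner/repo
    path_parts = urlparse(url).path.strip("/").split("/")
    if len(path_parts) < 2:
        raise ValueError("Invalid GitHub URL format.")
    owner, repo = path_parts[0], path_parts[1]
    return f"https://api.github.com/repos/{owner}/{repo}"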

View File

@@ -129,7 +129,7 @@ class GithubCreateCheckRunBlock(Block):
)
@staticmethod
async def create_check_run(
def create_check_run(
credentials: GithubCredentials,
repo_url: str,
name: str,
@@ -172,7 +172,7 @@ class GithubCreateCheckRunBlock(Block):
data.output = output_data
check_runs_url = f"{repo_url}/check-runs"
response = await api.post(
response = api.post(
check_runs_url, data=data.model_dump_json(exclude_none=True)
)
result = response.json()
@@ -183,7 +183,7 @@ class GithubCreateCheckRunBlock(Block):
"status": result["status"],
}
async def run(
def run(
self,
input_data: Input,
*,
@@ -191,7 +191,7 @@ class GithubCreateCheckRunBlock(Block):
**kwargs,
) -> BlockOutput:
try:
result = await self.create_check_run(
result = self.create_check_run(
credentials=credentials,
repo_url=input_data.repo_url,
name=input_data.name,
@@ -292,7 +292,7 @@ class GithubUpdateCheckRunBlock(Block):
)
@staticmethod
async def update_check_run(
def update_check_run(
credentials: GithubCredentials,
repo_url: str,
check_run_id: int,
@@ -325,7 +325,7 @@ class GithubUpdateCheckRunBlock(Block):
data.output = output_data
check_run_url = f"{repo_url}/check-runs/{check_run_id}"
response = await api.patch(
response = api.patch(
check_run_url, data=data.model_dump_json(exclude_none=True)
)
result = response.json()
@@ -337,7 +337,7 @@ class GithubUpdateCheckRunBlock(Block):
"conclusion": result.get("conclusion"),
}
async def run(
def run(
self,
input_data: Input,
*,
@@ -345,7 +345,7 @@ class GithubUpdateCheckRunBlock(Block):
**kwargs,
) -> BlockOutput:
try:
result = await self.update_check_run(
result = self.update_check_run(
credentials=credentials,
repo_url=input_data.repo_url,
check_run_id=input_data.check_run_id,
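Note: both check-run blocks above serialize a Pydantic model with model_dump_json(exclude_none=True) and send it to the check-runs endpoint. A stripped-down sketch of the create call (check runs normally require a GitHub App or fine-grained token with checks write access; returned keys follow the hunk):

import httpx

def create_check_run(token: str, repo: str, name: str, head_sha: str, status: str = "queued") -> dict:
    # POST /repos/{owner}/{repo}/check-runs with at least "name" and "head_sha".
    url = f"https://api.github.com/repos/{repo}/check-runs"
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/vnd.github+json"}
    response = httpx.post(url, headers=headers,
                          json={"name": name, "head_sha": head_sha, "status": status})
    response.raise_for_status()
    result = response.json()
    return {"id": result["id"], "html_url": result["html_url"], "status": result["status"]}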

View File

@@ -80,7 +80,7 @@ class GithubCommentBlock(Block):
)
@staticmethod
async def post_comment(
def post_comment(
credentials: GithubCredentials, issue_url: str, body_text: str
) -> tuple[int, str]:
api = get_api(credentials)
@@ -88,18 +88,18 @@ class GithubCommentBlock(Block):
if "pull" in issue_url:
issue_url = issue_url.replace("pull", "issues")
comments_url = issue_url + "/comments"
response = await api.post(comments_url, json=data)
response = api.post(comments_url, json=data)
comment = response.json()
return comment["id"], comment["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
id, url = await self.post_comment(
id, url = self.post_comment(
credentials,
input_data.issue_url,
input_data.comment,
@@ -171,7 +171,7 @@ class GithubUpdateCommentBlock(Block):
)
@staticmethod
async def update_comment(
def update_comment(
credentials: GithubCredentials, comment_url: str, body_text: str
) -> tuple[int, str]:
api = get_api(credentials, convert_urls=False)
@@ -179,11 +179,11 @@ class GithubUpdateCommentBlock(Block):
url = convert_comment_url_to_api_endpoint(comment_url)
logger.info(url)
response = await api.patch(url, json=data)
response = api.patch(url, json=data)
comment = response.json()
return comment["id"], comment["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
@@ -209,7 +209,7 @@ class GithubUpdateCommentBlock(Block):
raise ValueError(
"Must provide either comment_url or comment_id and issue_url"
)
id, url = await self.update_comment(
id, url = self.update_comment(
credentials,
input_data.comment_url,
input_data.comment,
@@ -288,7 +288,7 @@ class GithubListCommentsBlock(Block):
)
@staticmethod
async def list_comments(
def list_comments(
credentials: GithubCredentials, issue_url: str
) -> list[Output.CommentItem]:
parsed_url = urlparse(issue_url)
@@ -305,7 +305,7 @@ class GithubListCommentsBlock(Block):
# Set convert_urls=False since we're already providing an API URL
api = get_api(credentials, convert_urls=False)
response = await api.get(api_url)
response = api.get(api_url)
comments = response.json()
parsed_comments: list[GithubListCommentsBlock.Output.CommentItem] = [
{
@@ -318,19 +318,18 @@ class GithubListCommentsBlock(Block):
]
return parsed_comments
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
comments = await self.list_comments(
comments = self.list_comments(
credentials,
input_data.issue_url,
)
for comment in comments:
yield "comment", comment
yield from (("comment", comment) for comment in comments)
yield "comments", comments
@@ -382,24 +381,24 @@ class GithubMakeIssueBlock(Block):
)
@staticmethod
async def create_issue(
def create_issue(
credentials: GithubCredentials, repo_url: str, title: str, body: str
) -> tuple[int, str]:
api = get_api(credentials)
data = {"title": title, "body": body}
issues_url = repo_url + "/issues"
response = await api.post(issues_url, json=data)
response = api.post(issues_url, json=data)
issue = response.json()
return issue["number"], issue["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
number, url = await self.create_issue(
number, url = self.create_issue(
credentials,
input_data.repo_url,
input_data.title,
@@ -452,25 +451,25 @@ class GithubReadIssueBlock(Block):
)
@staticmethod
async def read_issue(
def read_issue(
credentials: GithubCredentials, issue_url: str
) -> tuple[str, str, str]:
api = get_api(credentials)
response = await api.get(issue_url)
response = api.get(issue_url)
data = response.json()
title = data.get("title", "No title found")
body = data.get("body", "No body content found")
user = data.get("user", {}).get("login", "No user found")
return title, body, user
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
title, body, user = await self.read_issue(
title, body, user = self.read_issue(
credentials,
input_data.issue_url,
)
@@ -498,9 +497,6 @@ class GithubListIssuesBlock(Block):
issue: IssueItem = SchemaField(
title="Issue", description="Issues with their title and URL"
)
issues: list[IssueItem] = SchemaField(
description="List of issues with their title and URL"
)
error: str = SchemaField(description="Error message if listing issues failed")
def __init__(self):
@@ -516,22 +512,13 @@ class GithubListIssuesBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"issues",
[
{
"title": "Issue 1",
"url": "https://github.com/owner/repo/issues/1",
}
],
),
(
"issue",
{
"title": "Issue 1",
"url": "https://github.com/owner/repo/issues/1",
},
),
)
],
test_mock={
"list_issues": lambda *args, **kwargs: [
@@ -544,32 +531,30 @@ class GithubListIssuesBlock(Block):
)
@staticmethod
async def list_issues(
def list_issues(
credentials: GithubCredentials, repo_url: str
) -> list[Output.IssueItem]:
api = get_api(credentials)
issues_url = repo_url + "/issues"
response = await api.get(issues_url)
response = api.get(issues_url)
data = response.json()
issues: list[GithubListIssuesBlock.Output.IssueItem] = [
{"title": issue["title"], "url": issue["html_url"]} for issue in data
]
return issues
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
issues = await self.list_issues(
issues = self.list_issues(
credentials,
input_data.repo_url,
)
yield "issues", issues
for issue in issues:
yield "issue", issue
yield from (("issue", issue) for issue in issues)
class GithubAddLabelBlock(Block):
@@ -608,23 +593,21 @@ class GithubAddLabelBlock(Block):
)
@staticmethod
async def add_label(
credentials: GithubCredentials, issue_url: str, label: str
) -> str:
def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
api = get_api(credentials)
data = {"labels": [label]}
labels_url = issue_url + "/labels"
await api.post(labels_url, json=data)
api.post(labels_url, json=data)
return "Label added successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.add_label(
status = self.add_label(
credentials,
input_data.issue_url,
input_data.label,
@@ -670,22 +653,20 @@ class GithubRemoveLabelBlock(Block):
)
@staticmethod
async def remove_label(
credentials: GithubCredentials, issue_url: str, label: str
) -> str:
def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
api = get_api(credentials)
label_url = issue_url + f"/labels/{label}"
await api.delete(label_url)
api.delete(label_url)
return "Label removed successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.remove_label(
status = self.remove_label(
credentials,
input_data.issue_url,
input_data.label,
@@ -733,7 +714,7 @@ class GithubAssignIssueBlock(Block):
)
@staticmethod
async def assign_issue(
def assign_issue(
credentials: GithubCredentials,
issue_url: str,
assignee: str,
@@ -741,17 +722,17 @@ class GithubAssignIssueBlock(Block):
api = get_api(credentials)
assignees_url = issue_url + "/assignees"
data = {"assignees": [assignee]}
await api.post(assignees_url, json=data)
api.post(assignees_url, json=data)
return "Issue assigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.assign_issue(
status = self.assign_issue(
credentials,
input_data.issue_url,
input_data.assignee,
@@ -799,7 +780,7 @@ class GithubUnassignIssueBlock(Block):
)
@staticmethod
async def unassign_issue(
def unassign_issue(
credentials: GithubCredentials,
issue_url: str,
assignee: str,
@@ -807,17 +788,17 @@ class GithubUnassignIssueBlock(Block):
api = get_api(credentials)
assignees_url = issue_url + "/assignees"
data = {"assignees": [assignee]}
await api.delete(assignees_url, json=data)
api.delete(assignees_url, json=data)
return "Issue unassigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.unassign_issue(
status = self.unassign_issue(
credentials,
input_data.issue_url,
input_data.assignee,
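Note: the issue blocks above all follow the same shape: build the API URL from the issue URL, call the REST endpoint, and yield named outputs from run(). A standalone sketch of the comment-posting case, bypassing the repo's get_api() wrapper (token handling and URL rewriting are illustrative):

import httpx

def post_issue_comment(token: str, issue_url: str, body: str) -> tuple[int, str]:
    # PR comment URLs go through the issues endpoint, hence the pull -> issues
    # replacement in the hunk above.
    api_url = issue_url.replace("https://github.com/", "https://api.github.com/repos/")
    api_url = api_url.replace("/pull/", "/issues/") + "/comments"
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/vnd.github+json"}
    response = httpx.post(api_url, headers=headers, json={"body": body})
    response.raise_for_status()
    comment = response.json()
    return comment["id"], comment["html_url"]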

View File

@@ -31,12 +31,7 @@ class GithubListPullRequestsBlock(Block):
pull_request: PRItem = SchemaField(
title="Pull Request", description="PRs with their title and URL"
)
pull_requests: list[PRItem] = SchemaField(
description="List of pull requests with their title and URL"
)
error: str = SchemaField(
description="Error message if listing pull requests failed"
)
error: str = SchemaField(description="Error message if listing issues failed")
def __init__(self):
super().__init__(
@@ -51,22 +46,13 @@ class GithubListPullRequestsBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"pull_requests",
[
{
"title": "Pull request 1",
"url": "https://github.com/owner/repo/pull/1",
}
],
),
(
"pull_request",
{
"title": "Pull request 1",
"url": "https://github.com/owner/repo/pull/1",
},
),
)
],
test_mock={
"list_prs": lambda *args, **kwargs: [
@@ -79,32 +65,28 @@ class GithubListPullRequestsBlock(Block):
)
@staticmethod
async def list_prs(
credentials: GithubCredentials, repo_url: str
) -> list[Output.PRItem]:
def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]:
api = get_api(credentials)
pulls_url = repo_url + "/pulls"
response = await api.get(pulls_url)
response = api.get(pulls_url)
data = response.json()
pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [
{"title": pr["title"], "url": pr["html_url"]} for pr in data
]
return pull_requests
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
pull_requests = await self.list_prs(
pull_requests = self.list_prs(
credentials,
input_data.repo_url,
)
yield "pull_requests", pull_requests
for pr in pull_requests:
yield "pull_request", pr
yield from (("pull_request", pr) for pr in pull_requests)
class GithubMakePullRequestBlock(Block):
@@ -171,7 +153,7 @@ class GithubMakePullRequestBlock(Block):
)
@staticmethod
async def create_pr(
def create_pr(
credentials: GithubCredentials,
repo_url: str,
title: str,
@@ -182,11 +164,11 @@ class GithubMakePullRequestBlock(Block):
api = get_api(credentials)
pulls_url = repo_url + "/pulls"
data = {"title": title, "body": body, "head": head, "base": base}
response = await api.post(pulls_url, json=data)
response = api.post(pulls_url, json=data)
pr_data = response.json()
return pr_data["number"], pr_data["html_url"]
async def run(
def run(
self,
input_data: Input,
*,
@@ -194,7 +176,7 @@ class GithubMakePullRequestBlock(Block):
**kwargs,
) -> BlockOutput:
try:
number, url = await self.create_pr(
number, url = self.create_pr(
credentials,
input_data.repo_url,
input_data.title,
@@ -260,55 +242,39 @@ class GithubReadPullRequestBlock(Block):
)
@staticmethod
async def read_pr(
credentials: GithubCredentials, pr_url: str
) -> tuple[str, str, str]:
def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]:
api = get_api(credentials)
# Adjust the URL to access the issue endpoint for PR metadata
issue_url = pr_url.replace("/pull/", "/issues/")
response = await api.get(issue_url)
response = api.get(issue_url)
data = response.json()
title = data.get("title", "No title found")
body = data.get("body", "No body content found")
author = data.get("user", {}).get("login", "Unknown author")
author = data.get("user", {}).get("login", "No user found")
return title, body, author
@staticmethod
async def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
api = get_api(credentials)
files_url = prepare_pr_api_url(pr_url=pr_url, path="files")
response = await api.get(files_url)
response = api.get(files_url)
files = response.json()
changes = []
for file in files:
status: str = file.get("status", "")
diff: str = file.get("patch", "")
if status != "removed":
is_filename: str = file.get("filename", "")
was_filename: str = (
file.get("previous_filename", is_filename)
if status != "added"
else ""
)
else:
is_filename = ""
was_filename: str = file.get("filename", "")
patch_header = ""
if was_filename:
patch_header += f"--- {was_filename}\n"
if is_filename:
patch_header += f"+++ {is_filename}\n"
changes.append(patch_header + diff)
filename = file.get("filename")
patch = file.get("patch")
if filename and patch:
changes.append(f"File: {filename}\n{patch}")
return "\n\n".join(changes)
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
title, body, author = await self.read_pr(
title, body, author = self.read_pr(
credentials,
input_data.pr_url,
)
@@ -317,7 +283,7 @@ class GithubReadPullRequestBlock(Block):
yield "author", author
if input_data.include_pr_changes:
changes = await self.read_pr_changes(
changes = self.read_pr_changes(
credentials,
input_data.pr_url,
)
@@ -364,16 +330,16 @@ class GithubAssignPRReviewerBlock(Block):
)
@staticmethod
async def assign_reviewer(
def assign_reviewer(
credentials: GithubCredentials, pr_url: str, reviewer: str
) -> str:
api = get_api(credentials)
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
data = {"reviewers": [reviewer]}
await api.post(reviewers_url, json=data)
api.post(reviewers_url, json=data)
return "Reviewer assigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
@@ -381,7 +347,7 @@ class GithubAssignPRReviewerBlock(Block):
**kwargs,
) -> BlockOutput:
try:
status = await self.assign_reviewer(
status = self.assign_reviewer(
credentials,
input_data.pr_url,
input_data.reviewer,
@@ -431,16 +397,16 @@ class GithubUnassignPRReviewerBlock(Block):
)
@staticmethod
async def unassign_reviewer(
def unassign_reviewer(
credentials: GithubCredentials, pr_url: str, reviewer: str
) -> str:
api = get_api(credentials)
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
data = {"reviewers": [reviewer]}
await api.delete(reviewers_url, json=data)
api.delete(reviewers_url, json=data)
return "Reviewer unassigned successfully"
async def run(
def run(
self,
input_data: Input,
*,
@@ -448,7 +414,7 @@ class GithubUnassignPRReviewerBlock(Block):
**kwargs,
) -> BlockOutput:
try:
status = await self.unassign_reviewer(
status = self.unassign_reviewer(
credentials,
input_data.pr_url,
input_data.reviewer,
@@ -475,9 +441,6 @@ class GithubListPRReviewersBlock(Block):
title="Reviewer",
description="Reviewers with their username and profile URL",
)
reviewers: list[ReviewerItem] = SchemaField(
description="List of reviewers with their username and profile URL"
)
error: str = SchemaField(
description="Error message if listing reviewers failed"
)
@@ -495,22 +458,13 @@ class GithubListPRReviewersBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"reviewers",
[
{
"username": "reviewer1",
"url": "https://github.com/reviewer1",
}
],
),
(
"reviewer",
{
"username": "reviewer1",
"url": "https://github.com/reviewer1",
},
),
)
],
test_mock={
"list_reviewers": lambda *args, **kwargs: [
@@ -523,12 +477,12 @@ class GithubListPRReviewersBlock(Block):
)
@staticmethod
async def list_reviewers(
def list_reviewers(
credentials: GithubCredentials, pr_url: str
) -> list[Output.ReviewerItem]:
api = get_api(credentials)
reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
response = await api.get(reviewers_url)
response = api.get(reviewers_url)
data = response.json()
reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [
{"username": reviewer["login"], "url": reviewer["html_url"]}
@@ -536,20 +490,18 @@ class GithubListPRReviewersBlock(Block):
]
return reviewers
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
reviewers = await self.list_reviewers(
reviewers = self.list_reviewers(
credentials,
input_data.pr_url,
)
yield "reviewers", reviewers
for reviewer in reviewers:
yield "reviewer", reviewer
yield from (("reviewer", reviewer) for reviewer in reviewers)
def prepare_pr_api_url(pr_url: str, path: str) -> str:
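Note: GithubReadPullRequestBlock above fetches PR metadata through the issues endpoint and, when include_pr_changes is set, assembles a summary from the pull request's files listing. A sketch of that files call, matching the "File: {filename}\n{patch}" format the hunk settles on (token and URL construction are illustrative):

import httpx

def read_pr_changes(token: str, owner: str, repo: str, number: int) -> str:
    # GET /repos/{owner}/{repo}/pulls/{number}/files returns one entry per changed
    # file, each with a "filename" and, for text files, a "patch".
    url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{number}/files"
    headers = {"Authorization": f"Bearer {token}", "Accept": "application/vnd.github+json"}
    response = httpx.get(url, headers=headers)
    response.raise_for_status()
    changes = []
    for file in response.json():
        filename, patch = file.get("filename"), file.get("patch")
        if filename and patch:
            changes.append(f"File: {filename}\n{patch}")
    return "\n\n".join(changes)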

View File

@@ -31,9 +31,6 @@ class GithubListTagsBlock(Block):
tag: TagItem = SchemaField(
title="Tag", description="Tags with their name and file tree browser URL"
)
tags: list[TagItem] = SchemaField(
description="List of tags with their name and file tree browser URL"
)
error: str = SchemaField(description="Error message if listing tags failed")
def __init__(self):
@@ -49,22 +46,13 @@ class GithubListTagsBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"tags",
[
{
"name": "v1.0.0",
"url": "https://github.com/owner/repo/tree/v1.0.0",
}
],
),
(
"tag",
{
"name": "v1.0.0",
"url": "https://github.com/owner/repo/tree/v1.0.0",
},
),
)
],
test_mock={
"list_tags": lambda *args, **kwargs: [
@@ -77,12 +65,12 @@ class GithubListTagsBlock(Block):
)
@staticmethod
async def list_tags(
def list_tags(
credentials: GithubCredentials, repo_url: str
) -> list[Output.TagItem]:
api = get_api(credentials)
tags_url = repo_url + "/tags"
response = await api.get(tags_url)
response = api.get(tags_url)
data = response.json()
repo_path = repo_url.replace("https://github.com/", "")
tags: list[GithubListTagsBlock.Output.TagItem] = [
@@ -94,20 +82,18 @@ class GithubListTagsBlock(Block):
]
return tags
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
tags = await self.list_tags(
tags = self.list_tags(
credentials,
input_data.repo_url,
)
yield "tags", tags
for tag in tags:
yield "tag", tag
yield from (("tag", tag) for tag in tags)
class GithubListBranchesBlock(Block):
@@ -127,9 +113,6 @@ class GithubListBranchesBlock(Block):
title="Branch",
description="Branches with their name and file tree browser URL",
)
branches: list[BranchItem] = SchemaField(
description="List of branches with their name and file tree browser URL"
)
error: str = SchemaField(description="Error message if listing branches failed")
def __init__(self):
@@ -145,22 +128,13 @@ class GithubListBranchesBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"branches",
[
{
"name": "main",
"url": "https://github.com/owner/repo/tree/main",
}
],
),
(
"branch",
{
"name": "main",
"url": "https://github.com/owner/repo/tree/main",
},
),
)
],
test_mock={
"list_branches": lambda *args, **kwargs: [
@@ -173,12 +147,12 @@ class GithubListBranchesBlock(Block):
)
@staticmethod
async def list_branches(
def list_branches(
credentials: GithubCredentials, repo_url: str
) -> list[Output.BranchItem]:
api = get_api(credentials)
branches_url = repo_url + "/branches"
response = await api.get(branches_url)
response = api.get(branches_url)
data = response.json()
repo_path = repo_url.replace("https://github.com/", "")
branches: list[GithubListBranchesBlock.Output.BranchItem] = [
@@ -190,20 +164,18 @@ class GithubListBranchesBlock(Block):
]
return branches
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
branches = await self.list_branches(
branches = self.list_branches(
credentials,
input_data.repo_url,
)
yield "branches", branches
for branch in branches:
yield "branch", branch
yield from (("branch", branch) for branch in branches)
class GithubListDiscussionsBlock(Block):
@@ -225,9 +197,6 @@ class GithubListDiscussionsBlock(Block):
discussion: DiscussionItem = SchemaField(
title="Discussion", description="Discussions with their title and URL"
)
discussions: list[DiscussionItem] = SchemaField(
description="List of discussions with their title and URL"
)
error: str = SchemaField(
description="Error message if listing discussions failed"
)
@@ -246,22 +215,13 @@ class GithubListDiscussionsBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"discussions",
[
{
"title": "Discussion 1",
"url": "https://github.com/owner/repo/discussions/1",
}
],
),
(
"discussion",
{
"title": "Discussion 1",
"url": "https://github.com/owner/repo/discussions/1",
},
),
)
],
test_mock={
"list_discussions": lambda *args, **kwargs: [
@@ -274,7 +234,7 @@ class GithubListDiscussionsBlock(Block):
)
@staticmethod
async def list_discussions(
def list_discussions(
credentials: GithubCredentials, repo_url: str, num_discussions: int
) -> list[Output.DiscussionItem]:
api = get_api(credentials)
@@ -294,7 +254,7 @@ class GithubListDiscussionsBlock(Block):
}
"""
variables = {"owner": owner, "repo": repo, "num": num_discussions}
response = await api.post(
response = api.post(
"https://api.github.com/graphql",
json={"query": query, "variables": variables},
)
@@ -305,21 +265,17 @@ class GithubListDiscussionsBlock(Block):
]
return discussions
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
discussions = await self.list_discussions(
credentials,
input_data.repo_url,
input_data.num_discussions,
discussions = self.list_discussions(
credentials, input_data.repo_url, input_data.num_discussions
)
yield "discussions", discussions
for discussion in discussions:
yield "discussion", discussion
yield from (("discussion", discussion) for discussion in discussions)
class GithubListReleasesBlock(Block):
@@ -339,9 +295,6 @@ class GithubListReleasesBlock(Block):
title="Release",
description="Releases with their name and file tree browser URL",
)
releases: list[ReleaseItem] = SchemaField(
description="List of releases with their name and file tree browser URL"
)
error: str = SchemaField(description="Error message if listing releases failed")
def __init__(self):
@@ -357,22 +310,13 @@ class GithubListReleasesBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"releases",
[
{
"name": "v1.0.0",
"url": "https://github.com/owner/repo/releases/tag/v1.0.0",
}
],
),
(
"release",
{
"name": "v1.0.0",
"url": "https://github.com/owner/repo/releases/tag/v1.0.0",
},
),
)
],
test_mock={
"list_releases": lambda *args, **kwargs: [
@@ -385,32 +329,30 @@ class GithubListReleasesBlock(Block):
)
@staticmethod
async def list_releases(
def list_releases(
credentials: GithubCredentials, repo_url: str
) -> list[Output.ReleaseItem]:
api = get_api(credentials)
releases_url = repo_url + "/releases"
response = await api.get(releases_url)
response = api.get(releases_url)
data = response.json()
releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [
{"name": release["name"], "url": release["html_url"]} for release in data
]
return releases
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
releases = await self.list_releases(
releases = self.list_releases(
credentials,
input_data.repo_url,
)
yield "releases", releases
for release in releases:
yield "release", release
yield from (("release", release) for release in releases)
class GithubReadFileBlock(Block):
@@ -463,40 +405,40 @@ class GithubReadFileBlock(Block):
)
@staticmethod
async def read_file(
def read_file(
credentials: GithubCredentials, repo_url: str, file_path: str, branch: str
) -> tuple[str, int]:
api = get_api(credentials)
content_url = repo_url + f"/contents/{file_path}?ref={branch}"
response = await api.get(content_url)
data = response.json()
response = api.get(content_url)
content = response.json()
if isinstance(data, list):
if isinstance(content, list):
# Multiple entries of different types exist at this path
if not (file := next((f for f in data if f["type"] == "file"), None)):
if not (file := next((f for f in content if f["type"] == "file"), None)):
raise TypeError("Not a file")
data = file
content = file
if data["type"] != "file":
if content["type"] != "file":
raise TypeError("Not a file")
return data["content"], data["size"]
return content["content"], content["size"]
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
content, size = await self.read_file(
raw_content, size = self.read_file(
credentials,
input_data.repo_url,
input_data.file_path,
input_data.file_path.lstrip("/"),
input_data.branch,
)
yield "raw_content", content
yield "text_content", base64.b64decode(content).decode("utf-8")
yield "raw_content", raw_content
yield "text_content", base64.b64decode(raw_content).decode("utf-8")
yield "size", size
@@ -573,55 +515,52 @@ class GithubReadFolderBlock(Block):
)
@staticmethod
async def read_folder(
def read_folder(
credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str
) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{folder_path}?ref={branch}"
response = await api.get(contents_url)
data = response.json()
response = api.get(contents_url)
content = response.json()
if not isinstance(data, list):
if not isinstance(content, list):
raise TypeError("Not a folder")
files: list[GithubReadFolderBlock.Output.FileEntry] = [
files = [
GithubReadFolderBlock.Output.FileEntry(
name=entry["name"],
path=entry["path"],
size=entry["size"],
)
for entry in data
for entry in content
if entry["type"] == "file"
]
dirs: list[GithubReadFolderBlock.Output.DirEntry] = [
dirs = [
GithubReadFolderBlock.Output.DirEntry(
name=entry["name"],
path=entry["path"],
)
for entry in data
for entry in content
if entry["type"] == "dir"
]
return files, dirs
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
files, dirs = await self.read_folder(
files, dirs = self.read_folder(
credentials,
input_data.repo_url,
input_data.folder_path.lstrip("/"),
input_data.branch,
)
for file in files:
yield "file", file
for dir in dirs:
yield "dir", dir
yield from (("file", file) for file in files)
yield from (("dir", dir) for dir in dirs)
class GithubMakeBranchBlock(Block):
@@ -667,35 +606,32 @@ class GithubMakeBranchBlock(Block):
)
@staticmethod
async def create_branch(
def create_branch(
credentials: GithubCredentials,
repo_url: str,
new_branch: str,
source_branch: str,
) -> str:
api = get_api(credentials)
# Get the SHA of the source branch
ref_url = repo_url + f"/git/refs/heads/{source_branch}"
response = await api.get(ref_url)
data = response.json()
sha = data["object"]["sha"]
response = api.get(ref_url)
sha = response.json()["object"]["sha"]
# Create the new branch
new_ref_url = repo_url + "/git/refs"
data = {
"ref": f"refs/heads/{new_branch}",
"sha": sha,
}
response = await api.post(new_ref_url, json=data)
create_ref_url = repo_url + "/git/refs"
data = {"ref": f"refs/heads/{new_branch}", "sha": sha}
response = api.post(create_ref_url, json=data)
return "Branch created successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.create_branch(
status = self.create_branch(
credentials,
input_data.repo_url,
input_data.new_branch,
@@ -742,22 +678,22 @@ class GithubDeleteBranchBlock(Block):
)
@staticmethod
async def delete_branch(
def delete_branch(
credentials: GithubCredentials, repo_url: str, branch: str
) -> str:
api = get_api(credentials)
ref_url = repo_url + f"/git/refs/heads/{branch}"
await api.delete(ref_url)
api.delete(ref_url)
return "Branch deleted successfully"
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
status = await self.delete_branch(
status = self.delete_branch(
credentials,
input_data.repo_url,
input_data.branch,
@@ -825,7 +761,7 @@ class GithubCreateFileBlock(Block):
)
@staticmethod
async def create_file(
def create_file(
credentials: GithubCredentials,
repo_url: str,
file_path: str,
@@ -834,18 +770,23 @@ class GithubCreateFileBlock(Block):
commit_message: str,
) -> tuple[str, str]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{file_path}"
content_base64 = base64.b64encode(content.encode()).decode()
# Convert content to base64
content_bytes = content.encode("utf-8")
content_base64 = base64.b64encode(content_bytes).decode("utf-8")
# Create the file using the GitHub API
contents_url = f"{repo_url}/contents/{file_path}"
data = {
"message": commit_message,
"content": content_base64,
"branch": branch,
}
response = await api.put(contents_url, json=data)
data = response.json()
return data["content"]["html_url"], data["commit"]["sha"]
response = api.put(contents_url, json=data)
result = response.json()
async def run(
return result["content"]["html_url"], result["commit"]["sha"]
def run(
self,
input_data: Input,
*,
@@ -853,7 +794,7 @@ class GithubCreateFileBlock(Block):
**kwargs,
) -> BlockOutput:
try:
url, sha = await self.create_file(
url, sha = self.create_file(
credentials,
input_data.repo_url,
input_data.file_path,
@@ -925,7 +866,7 @@ class GithubUpdateFileBlock(Block):
)
@staticmethod
async def update_file(
def update_file(
credentials: GithubCredentials,
repo_url: str,
file_path: str,
@@ -934,24 +875,30 @@ class GithubUpdateFileBlock(Block):
commit_message: str,
) -> tuple[str, str]:
api = get_api(credentials)
contents_url = repo_url + f"/contents/{file_path}"
# First get the current file to get its SHA
contents_url = f"{repo_url}/contents/{file_path}"
params = {"ref": branch}
response = await api.get(contents_url, params=params)
data = response.json()
response = api.get(contents_url, params=params)
current_file = response.json()
# Convert new content to base64
content_base64 = base64.b64encode(content.encode()).decode()
content_bytes = content.encode("utf-8")
content_base64 = base64.b64encode(content_bytes).decode("utf-8")
# Update the file
data = {
"message": commit_message,
"content": content_base64,
"sha": data["sha"],
"sha": current_file["sha"],
"branch": branch,
}
response = await api.put(contents_url, json=data)
data = response.json()
return data["content"]["html_url"], data["commit"]["sha"]
response = api.put(contents_url, json=data)
result = response.json()
async def run(
return result["content"]["html_url"], result["commit"]["sha"]
def run(
self,
input_data: Input,
*,
@@ -959,7 +906,7 @@ class GithubUpdateFileBlock(Block):
**kwargs,
) -> BlockOutput:
try:
url, sha = await self.update_file(
url, sha = self.update_file(
credentials,
input_data.repo_url,
input_data.file_path,
@@ -1034,7 +981,7 @@ class GithubCreateRepositoryBlock(Block):
)
@staticmethod
async def create_repository(
def create_repository(
credentials: GithubCredentials,
name: str,
description: str,
@@ -1042,19 +989,24 @@ class GithubCreateRepositoryBlock(Block):
auto_init: bool,
gitignore_template: str,
) -> tuple[str, str]:
api = get_api(credentials)
api = get_api(credentials, convert_urls=False) # Disable URL conversion
data = {
"name": name,
"description": description,
"private": private,
"auto_init": auto_init,
"gitignore_template": gitignore_template,
}
response = await api.post("https://api.github.com/user/repos", json=data)
data = response.json()
return data["html_url"], data["clone_url"]
async def run(
if gitignore_template:
data["gitignore_template"] = gitignore_template
# Create repository using the user endpoint
response = api.post("https://api.github.com/user/repos", json=data)
result = response.json()
return result["html_url"], result["clone_url"]
def run(
self,
input_data: Input,
*,
@@ -1062,7 +1014,7 @@ class GithubCreateRepositoryBlock(Block):
**kwargs,
) -> BlockOutput:
try:
url, clone_url = await self.create_repository(
url, clone_url = self.create_repository(
credentials,
input_data.name,
input_data.description,
@@ -1093,9 +1045,6 @@ class GithubListStargazersBlock(Block):
title="Stargazer",
description="Stargazers with their username and profile URL",
)
stargazers: list[StargazerItem] = SchemaField(
description="List of stargazers with their username and profile URL"
)
error: str = SchemaField(
description="Error message if listing stargazers failed"
)
@@ -1113,22 +1062,13 @@ class GithubListStargazersBlock(Block):
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"stargazers",
[
{
"username": "octocat",
"url": "https://github.com/octocat",
}
],
),
(
"stargazer",
{
"username": "octocat",
"url": "https://github.com/octocat",
},
),
)
],
test_mock={
"list_stargazers": lambda *args, **kwargs: [
@@ -1141,13 +1081,17 @@ class GithubListStargazersBlock(Block):
)
@staticmethod
async def list_stargazers(
def list_stargazers(
credentials: GithubCredentials, repo_url: str
) -> list[Output.StargazerItem]:
api = get_api(credentials)
stargazers_url = repo_url + "/stargazers"
response = await api.get(stargazers_url)
# Add /stargazers to the repo URL to get stargazers endpoint
stargazers_url = f"{repo_url}/stargazers"
# Set accept header to get starred_at timestamp
headers = {"Accept": "application/vnd.github.star+json"}
response = api.get(stargazers_url, headers=headers)
data = response.json()
stargazers: list[GithubListStargazersBlock.Output.StargazerItem] = [
{
"username": stargazer["login"],
@@ -1157,17 +1101,18 @@ class GithubListStargazersBlock(Block):
]
return stargazers
async def run(
def run(
self,
input_data: Input,
*,
credentials: GithubCredentials,
**kwargs,
) -> BlockOutput:
stargazers = await self.list_stargazers(
credentials,
input_data.repo_url,
)
yield "stargazers", stargazers
for stargazer in stargazers:
yield "stargazer", stargazer
try:
stargazers = self.list_stargazers(
credentials,
input_data.repo_url,
)
yield from (("stargazer", stargazer) for stargazer in stargazers)
except Exception as e:
yield "error", str(e)

View File

@@ -115,7 +115,7 @@ class GithubCreateStatusBlock(Block):
)
@staticmethod
async def create_status(
def create_status(
credentials: GithubFineGrainedAPICredentials,
repo_url: str,
sha: str,
@@ -144,9 +144,7 @@ class GithubCreateStatusBlock(Block):
data.description = description
status_url = f"{repo_url}/statuses/{sha}"
response = await api.post(
status_url, data=data.model_dump_json(exclude_none=True)
)
response = api.post(status_url, data=data.model_dump_json(exclude_none=True))
result = response.json()
return {
@@ -160,7 +158,7 @@ class GithubCreateStatusBlock(Block):
"updated_at": result["updated_at"],
}
async def run(
def run(
self,
input_data: Input,
*,
@@ -168,7 +166,7 @@ class GithubCreateStatusBlock(Block):
**kwargs,
) -> BlockOutput:
try:
result = await self.create_status(
result = self.create_status(
credentials=credentials,
repo_url=input_data.repo_url,
sha=input_data.sha,

View File

@@ -53,7 +53,7 @@ class GitHubTriggerBase:
description="Error message if the payload could not be processed"
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "payload", input_data.payload
yield "triggered_by_user", input_data.payload["sender"]
@@ -148,9 +148,8 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
async for name, value in super().run(input_data, **kwargs):
yield name, value
def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
yield from super().run(input_data, **kwargs)
yield "event", input_data.payload["action"]
yield "number", input_data.payload["number"]
yield "pull_request", input_data.payload["pull_request"]

View File

@@ -1,603 +0,0 @@
import asyncio
import enum
import uuid
from datetime import datetime, timedelta, timezone
from typing import Literal
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from pydantic import BaseModel
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.settings import AppEnvironment, Settings
from ._auth import (
GOOGLE_OAUTH_IS_CONFIGURED,
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
GoogleCredentials,
GoogleCredentialsField,
GoogleCredentialsInput,
)
class CalendarEvent(BaseModel):
"""Structured representation of a Google Calendar event."""
id: str
title: str
start_time: str
end_time: str
is_all_day: bool
location: str | None
description: str | None
organizer: str | None
attendees: list[str]
has_video_call: bool
video_link: str | None
calendar_link: str
is_recurring: bool
class GoogleCalendarReadEventsBlock(Block):
class Input(BlockSchema):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/calendar.readonly"]
)
calendar_id: str = SchemaField(
description="Calendar ID (use 'primary' for your main calendar)",
default="primary",
)
max_events: int = SchemaField(
description="Maximum number of events to retrieve", default=10
)
start_time: datetime = SchemaField(
description="Retrieve events starting from this time",
default_factory=lambda: datetime.now(tz=timezone.utc),
)
time_range_days: int = SchemaField(
description="Number of days to look ahead for events", default=30
)
search_term: str | None = SchemaField(
description="Optional search term to filter events by", default=None
)
page_token: str | None = SchemaField(
description="Page token from previous request to get the next batch of events. You can use this if you have lots of events you want to process in a loop",
default=None,
)
include_declined_events: bool = SchemaField(
description="Include events you've declined", default=False
)
class Output(BlockSchema):
events: list[CalendarEvent] = SchemaField(
description="List of calendar events in the requested time range",
default_factory=list,
)
event: CalendarEvent = SchemaField(
description="One of the calendar events in the requested time range"
)
next_page_token: str | None = SchemaField(
description="Token for retrieving the next page of events if more exist",
default=None,
)
error: str = SchemaField(
description="Error message if the request failed",
)
def __init__(self):
settings = Settings()
# Create realistic test data for events
test_now = datetime.now(tz=timezone.utc)
test_tomorrow = test_now + timedelta(days=1)
test_event_dict = {
"id": "event1id",
"title": "Team Meeting",
"start_time": test_tomorrow.strftime("%Y-%m-%d %H:%M"),
"end_time": (test_tomorrow + timedelta(hours=1)).strftime("%Y-%m-%d %H:%M"),
"is_all_day": False,
"location": "Conference Room A",
"description": "Weekly team sync",
"organizer": "manager@example.com",
"attendees": ["colleague1@example.com", "colleague2@example.com"],
"has_video_call": True,
"video_link": "https://meet.google.com/abc-defg-hij",
"calendar_link": "https://calendar.google.com/calendar/event?eid=event1id",
"is_recurring": True,
}
super().__init__(
id="80bc3ed1-e9a4-449e-8163-a8fc86f74f6a",
description="Retrieves upcoming events from a Google Calendar with filtering options",
categories={BlockCategory.PRODUCTIVITY, BlockCategory.DATA},
input_schema=GoogleCalendarReadEventsBlock.Input,
output_schema=GoogleCalendarReadEventsBlock.Output,
disabled=not GOOGLE_OAUTH_IS_CONFIGURED
or settings.config.app_env == AppEnvironment.PRODUCTION,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"calendar_id": "primary",
"max_events": 5,
"start_time": test_now.isoformat(),
"time_range_days": 7,
"search_term": None,
"include_declined_events": False,
"page_token": None,
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("event", test_event_dict),
("events", [test_event_dict]),
],
test_mock={
"_read_calendar": lambda *args, **kwargs: {
"items": [
{
"id": "event1id",
"summary": "Team Meeting",
"start": {
"dateTime": test_tomorrow.isoformat(),
"timeZone": "UTC",
},
"end": {
"dateTime": (
test_tomorrow + timedelta(hours=1)
).isoformat(),
"timeZone": "UTC",
},
"location": "Conference Room A",
"description": "Weekly team sync",
"organizer": {"email": "manager@example.com"},
"attendees": [
{"email": "colleague1@example.com"},
{"email": "colleague2@example.com"},
],
"conferenceData": {
"conferenceUrl": "https://meet.google.com/abc-defg-hij"
},
"htmlLink": "https://calendar.google.com/calendar/event?eid=event1id",
"recurrence": ["RRULE:FREQ=WEEKLY;COUNT=10"],
}
],
"nextPageToken": None,
},
"_format_events": lambda *args, **kwargs: [test_event_dict],
},
)
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
try:
service = self._build_service(credentials, **kwargs)
# Calculate end time based on start time and time range
end_time = input_data.start_time + timedelta(
days=input_data.time_range_days
)
# Call Google Calendar API
result = await asyncio.to_thread(
self._read_calendar,
service=service,
calendarId=input_data.calendar_id,
time_min=input_data.start_time.isoformat(),
time_max=end_time.isoformat(),
max_results=input_data.max_events,
single_events=True,
search_term=input_data.search_term,
show_deleted=False,
show_hidden=input_data.include_declined_events,
page_token=input_data.page_token,
)
# Format events into a user-friendly structure
formatted_events = self._format_events(result.get("items", []))
# Include next page token if available
if next_page_token := result.get("nextPageToken"):
yield "next_page_token", next_page_token
for event in formatted_events:
yield "event", event
yield "events", formatted_events
except Exception as e:
yield "error", str(e)
@staticmethod
def _build_service(credentials: GoogleCredentials, **kwargs):
creds = Credentials(
token=(
credentials.access_token.get_secret_value()
if credentials.access_token
else None
),
refresh_token=(
credentials.refresh_token.get_secret_value()
if credentials.refresh_token
else None
),
token_uri="https://oauth2.googleapis.com/token",
client_id=Settings().secrets.google_client_id,
client_secret=Settings().secrets.google_client_secret,
scopes=credentials.scopes,
)
return build("calendar", "v3", credentials=creds)
def _read_calendar(
self,
service,
calendarId: str,
time_min: str,
time_max: str,
max_results: int,
single_events: bool,
search_term: str | None = None,
show_deleted: bool = False,
show_hidden: bool = False,
page_token: str | None = None,
) -> dict:
"""Read calendar events with optional filtering."""
calendar = service.events()
# Build query parameters
params = {
"calendarId": calendarId,
"timeMin": time_min,
"timeMax": time_max,
"maxResults": max_results,
"singleEvents": single_events,
"orderBy": "startTime",
"showDeleted": show_deleted,
"showHiddenInvitations": show_hidden,
**({"pageToken": page_token} if page_token else {}),
}
# Add search term if provided
if search_term:
params["q"] = search_term
result = calendar.list(**params).execute()
return result
def _format_events(self, events: list[dict]) -> list[CalendarEvent]:
"""Format Google Calendar API events into user-friendly structure."""
formatted_events = []
for event in events:
# Determine if all-day event
is_all_day = "date" in event.get("start", {})
# Format start and end times
if is_all_day:
start_time = event.get("start", {}).get("date", "")
end_time = event.get("end", {}).get("date", "")
else:
# Convert ISO format to more readable format
start_datetime = datetime.fromisoformat(
event.get("start", {}).get("dateTime", "").replace("Z", "+00:00")
)
end_datetime = datetime.fromisoformat(
event.get("end", {}).get("dateTime", "").replace("Z", "+00:00")
)
start_time = start_datetime.strftime("%Y-%m-%d %H:%M")
end_time = end_datetime.strftime("%Y-%m-%d %H:%M")
# Extract attendees
attendees = []
for attendee in event.get("attendees", []):
if email := attendee.get("email"):
attendees.append(email)
# Check for video call link
has_video_call = False
video_link = None
if conf_data := event.get("conferenceData"):
if conf_url := conf_data.get("conferenceUrl"):
has_video_call = True
video_link = conf_url
elif entry_points := conf_data.get("entryPoints", []):
for entry in entry_points:
if entry.get("entryPointType") == "video":
has_video_call = True
video_link = entry.get("uri")
break
# Create formatted event
formatted_event = CalendarEvent(
id=event.get("id", ""),
title=event.get("summary", "Untitled Event"),
start_time=start_time,
end_time=end_time,
is_all_day=is_all_day,
location=event.get("location"),
description=event.get("description"),
organizer=event.get("organizer", {}).get("email"),
attendees=attendees,
has_video_call=has_video_call,
video_link=video_link,
calendar_link=event.get("htmlLink", ""),
is_recurring=bool(event.get("recurrence")),
)
formatted_events.append(formatted_event)
return formatted_events
class ReminderPreset(enum.Enum):
"""Common reminder times before an event."""
TEN_MINUTES = 10
THIRTY_MINUTES = 30
ONE_HOUR = 60
ONE_DAY = 1440 # 24 hours in minutes
class RecurrenceFrequency(enum.Enum):
"""Frequency options for recurring events."""
DAILY = "DAILY"
WEEKLY = "WEEKLY"
MONTHLY = "MONTHLY"
YEARLY = "YEARLY"
class ExactTiming(BaseModel):
"""Model for specifying start and end times."""
discriminator: Literal["exact_timing"]
start_datetime: datetime
end_datetime: datetime
class DurationTiming(BaseModel):
"""Model for specifying start time and duration."""
discriminator: Literal["duration_timing"]
start_datetime: datetime
duration_minutes: int
class OneTimeEvent(BaseModel):
"""Model for a one-time event."""
discriminator: Literal["one_time"]
class RecurringEvent(BaseModel):
"""Model for a recurring event."""
discriminator: Literal["recurring"]
frequency: RecurrenceFrequency
count: int
class GoogleCalendarCreateEventBlock(Block):
class Input(BlockSchema):
credentials: GoogleCredentialsInput = GoogleCredentialsField(
["https://www.googleapis.com/auth/calendar"]
)
# Event Details
event_title: str = SchemaField(description="Title of the event")
location: str | None = SchemaField(
description="Location of the event", default=None
)
description: str | None = SchemaField(
description="Description of the event", default=None
)
# Timing
timing: ExactTiming | DurationTiming = SchemaField(
discriminator="discriminator",
advanced=False,
description="Specify when the event starts and ends",
default_factory=lambda: DurationTiming(
discriminator="duration_timing",
start_datetime=datetime.now().replace(microsecond=0, second=0, minute=0)
+ timedelta(hours=1),
duration_minutes=60,
),
)
# Calendar selection
calendar_id: str = SchemaField(
description="Calendar ID (use 'primary' for your main calendar)",
default="primary",
)
# Guests
guest_emails: list[str] = SchemaField(
description="Email addresses of guests to invite", default_factory=list
)
send_notifications: bool = SchemaField(
description="Send email notifications to guests", default=True
)
# Extras
add_google_meet: bool = SchemaField(
description="Include a Google Meet video conference link", default=False
)
recurrence: OneTimeEvent | RecurringEvent = SchemaField(
discriminator="discriminator",
description="Whether the event repeats",
default_factory=lambda: OneTimeEvent(discriminator="one_time"),
)
reminder_minutes: list[ReminderPreset] = SchemaField(
description="When to send reminders before the event",
default_factory=lambda: [ReminderPreset.TEN_MINUTES],
)
class Output(BlockSchema):
event_id: str = SchemaField(description="ID of the created event")
event_link: str = SchemaField(
description="Link to view the event in Google Calendar"
)
error: str = SchemaField(description="Error message if event creation failed")
def __init__(self):
settings = Settings()
super().__init__(
id="ed2ec950-fbff-4204-94c0-023fb1d625e0",
description="This block creates a new event in Google Calendar with customizable parameters.",
categories={BlockCategory.PRODUCTIVITY},
input_schema=GoogleCalendarCreateEventBlock.Input,
output_schema=GoogleCalendarCreateEventBlock.Output,
disabled=not GOOGLE_OAUTH_IS_CONFIGURED
or settings.config.app_env == AppEnvironment.PRODUCTION,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"event_title": "Team Meeting",
"location": "Conference Room A",
"description": "Weekly team sync-up",
"calendar_id": "primary",
"guest_emails": ["colleague1@example.com", "colleague2@example.com"],
"add_google_meet": True,
"send_notifications": True,
"reminder_minutes": [
ReminderPreset.TEN_MINUTES.value,
ReminderPreset.ONE_HOUR.value,
],
},
test_credentials=TEST_CREDENTIALS,
test_output=[
("event_id", "abc123event_id"),
("event_link", "https://calendar.google.com/calendar/event?eid=abc123"),
],
test_mock={
"_create_event": lambda *args, **kwargs: {
"id": "abc123event_id",
"htmlLink": "https://calendar.google.com/calendar/event?eid=abc123",
}
},
)
async def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
try:
service = self._build_service(credentials, **kwargs)
# Create event body
# Get start and end times based on the timing option
if input_data.timing.discriminator == "exact_timing":
start_datetime = input_data.timing.start_datetime
end_datetime = input_data.timing.end_datetime
else: # duration_timing
start_datetime = input_data.timing.start_datetime
end_datetime = start_datetime + timedelta(
minutes=input_data.timing.duration_minutes
)
# Format datetimes for Google Calendar API
start_time_str = start_datetime.isoformat()
end_time_str = end_datetime.isoformat()
# Build the event body
event_body = {
"summary": input_data.event_title,
"start": {"dateTime": start_time_str},
"end": {"dateTime": end_time_str},
}
# Add optional fields
if input_data.location:
event_body["location"] = input_data.location
if input_data.description:
event_body["description"] = input_data.description
# Add guests
if input_data.guest_emails:
event_body["attendees"] = [
{"email": email} for email in input_data.guest_emails
]
# Add reminders
if input_data.reminder_minutes:
event_body["reminders"] = {
"useDefault": False,
"overrides": [
{"method": "popup", "minutes": reminder.value}
for reminder in input_data.reminder_minutes
],
}
# Add Google Meet
if input_data.add_google_meet:
event_body["conferenceData"] = {
"createRequest": {
"requestId": f"meet-{uuid.uuid4()}",
"conferenceSolutionKey": {"type": "hangoutsMeet"},
}
}
# Add recurrence
if input_data.recurrence.discriminator == "recurring":
rule = f"RRULE:FREQ={input_data.recurrence.frequency.value}"
rule += f";COUNT={input_data.recurrence.count}"
event_body["recurrence"] = [rule]
# Create the event
result = await asyncio.to_thread(
self._create_event,
service=service,
calendar_id=input_data.calendar_id,
event_body=event_body,
send_notifications=input_data.send_notifications,
conference_data_version=1 if input_data.add_google_meet else 0,
)
yield "event_id", result["id"]
yield "event_link", result["htmlLink"]
except Exception as e:
yield "error", str(e)
@staticmethod
def _build_service(credentials: GoogleCredentials, **kwargs):
creds = Credentials(
token=(
credentials.access_token.get_secret_value()
if credentials.access_token
else None
),
refresh_token=(
credentials.refresh_token.get_secret_value()
if credentials.refresh_token
else None
),
token_uri="https://oauth2.googleapis.com/token",
client_id=Settings().secrets.google_client_id,
client_secret=Settings().secrets.google_client_secret,
scopes=credentials.scopes,
)
return build("calendar", "v3", credentials=creds)
def _create_event(
self,
service,
calendar_id: str,
event_body: dict,
send_notifications: bool = False,
conference_data_version: int = 0,
) -> dict:
"""Create a new event in Google Calendar."""
calendar = service.events()
# Make the API call
result = calendar.insert(
calendarId=calendar_id,
body=event_body,
sendNotifications=send_notifications,
conferenceDataVersion=conference_data_version,
).execute()
return result
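# --- Hedged usage sketch (not part of the original module) --------------------
# Illustrates the event body that run() above assembles for a weekly recurring
# event with a Google Meet link. Field names mirror the code above; the literal
# values (times, emails, requestId) are made up for illustration.
example_event_body = {
    "summary": "Team Meeting",
    "start": {"dateTime": "2025-05-01T10:00:00"},
    "end": {"dateTime": "2025-05-01T11:00:00"},
    "attendees": [{"email": "colleague1@example.com"}],
    "reminders": {
        "useDefault": False,
        "overrides": [{"method": "popup", "minutes": 10}],
    },
    "conferenceData": {
        "createRequest": {
            "requestId": "meet-<uuid4>",  # run() uses f"meet-{uuid.uuid4()}"
            "conferenceSolutionKey": {"type": "hangoutsMeet"},
        }
    },
    "recurrence": ["RRULE:FREQ=WEEKLY;COUNT=10"],  # built from frequency + count
}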

View File

@@ -1,4 +1,3 @@
import asyncio
import base64
from email.utils import parseaddr
from typing import List
@@ -129,13 +128,11 @@ class GmailReadBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
messages = await asyncio.to_thread(
self._read_emails, service, input_data.query, input_data.max_results
)
service = self._build_service(credentials, **kwargs)
messages = self._read_emails(service, input_data.query, input_data.max_results)
for email in messages:
yield "email", email
yield "emails", messages
@@ -289,18 +286,14 @@ class GmailSendBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._send_email,
service,
input_data.to,
input_data.subject,
input_data.body,
send_result = self._send_email(
service, input_data.to, input_data.subject, input_data.body
)
yield "result", result
yield "result", send_result
def _send_email(self, service, to: str, subject: str, body: str) -> dict:
if not to or not subject or not body:
@@ -365,12 +358,12 @@ class GmailListLabelsBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(self._list_labels, service)
yield "result", result
labels = self._list_labels(service)
yield "result", labels
def _list_labels(self, service) -> list[dict]:
results = service.users().labels().list(userId="me").execute()
@@ -426,13 +419,11 @@ class GmailAddLabelBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._add_label, service, input_data.message_id, input_data.label_name
)
result = self._add_label(service, input_data.message_id, input_data.label_name)
yield "result", result
def _add_label(self, service, message_id: str, label_name: str) -> dict:
@@ -511,12 +502,12 @@ class GmailRemoveLabelBlock(Block):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
) -> BlockOutput:
service = GmailReadBlock._build_service(credentials, **kwargs)
result = await asyncio.to_thread(
self._remove_label, service, input_data.message_id, input_data.label_name
result = self._remove_label(
service, input_data.message_id, input_data.label_name
)
yield "result", result

File diff suppressed because it is too large

View File

@@ -103,7 +103,7 @@ class GoogleMapsSearchBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
places = self.search_places(

View File

@@ -1,17 +1,14 @@
from typing import Any, Optional
from backend.util.request import Requests
from backend.util.request import requests
class GetRequest:
@classmethod
async def get_request(
def get_request(
cls, url: str, headers: Optional[dict] = None, json: bool = False
) -> Any:
if headers is None:
headers = {}
response = await Requests().get(url, headers=headers)
if json:
return response.json()
else:
return response.text()
response = requests.get(url, headers=headers)
return response.json() if json else response.text
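# --- Hedged usage sketch (not part of the diff) --------------------------------
# How this mixin is consumed by the Jina blocks further down: a block inherits
# GetRequest and calls get_request directly in the synchronous variant (the
# async variant awaits it instead). The class name, URL and header values here
# are illustrative only.
class _ExampleSearchBlock(GetRequest):
    def fetch(self, query: str) -> str:
        return self.get_request(
            f"https://s.jina.ai/{query}",
            headers={"Authorization": "Bearer <jina-api-key>"},
            json=False,
        )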

View File

@@ -1,56 +1,17 @@
import json
import logging
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import Literal
from typing import Any
import aiofiles
from pydantic import SecretStr
from requests.exceptions import HTTPError, RequestException
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
CredentialsField,
CredentialsMetaInput,
HostScopedCredentials,
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.file import (
MediaFileType,
get_exec_file_path,
get_mime_type,
store_media_file,
)
from backend.util.request import Requests
from backend.data.model import SchemaField
from backend.util.request import requests
logger = logging.getLogger(name=__name__)
# Host-scoped credentials for HTTP requests
HttpCredentials = CredentialsMetaInput[
Literal[ProviderName.HTTP], Literal["host_scoped"]
]
TEST_CREDENTIALS = HostScopedCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="http",
host="api.example.com",
headers={
"Authorization": SecretStr("Bearer test-token"),
"X-API-Key": SecretStr("test-api-key"),
},
title="Mock HTTP Host-Scoped Credentials",
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
class HttpMethod(Enum):
GET = "GET"
POST = "POST"
@@ -77,21 +38,13 @@ class SendWebRequestBlock(Block):
)
json_format: bool = SchemaField(
title="JSON format",
description="If true, send the body as JSON (unless files are also present).",
description="Whether to send and receive body as JSON",
default=True,
)
body: dict | None = SchemaField(
description="Form/JSON body payload. If files are supplied, this must be a mapping of formfields.",
body: Any = SchemaField(
description="The body of the request",
default=None,
)
files_name: str = SchemaField(
description="The name of the file field in the form data.",
default="file",
)
files: list[MediaFileType] = SchemaField(
description="Mapping of *form field name* → Image url / path / base64 url.",
default_factory=list,
)
class Output(BlockSchema):
response: object = SchemaField(description="The response from the server")
@@ -102,161 +55,59 @@ class SendWebRequestBlock(Block):
def __init__(self):
super().__init__(
id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
description="Make an HTTP request (JSON / form / multipart).",
description="This block makes an HTTP request to the given URL.",
categories={BlockCategory.OUTPUT},
input_schema=SendWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
)
@staticmethod
async def _prepare_files(
graph_exec_id: str,
files_name: str,
files: list[MediaFileType],
) -> list[tuple[str, tuple[str, BytesIO, str]]]:
"""
Prepare files for the request by storing them and reading their content.
Returns a list of tuples in the format:
(files_name, (filename, BytesIO, mime_type))
"""
files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = []
for media in files:
# Normalise to a list so we can repeat the same key
rel_path = await store_media_file(
graph_exec_id, media, return_content=False
)
abs_path = get_exec_file_path(graph_exec_id, rel_path)
async with aiofiles.open(abs_path, "rb") as f:
content = await f.read()
handle = BytesIO(content)
mime = get_mime_type(abs_path)
files_payload.append((files_name, (Path(abs_path).name, handle, mime)))
return files_payload
async def run(
self, input_data: Input, *, graph_exec_id: str, **kwargs
) -> BlockOutput:
# ─── Parse/normalise body ────────────────────────────────────
def run(self, input_data: Input, **kwargs) -> BlockOutput:
body = input_data.body
if isinstance(body, str):
try:
# Validate JSON string length to prevent DoS attacks
if len(body) > 10_000_000: # 10MB limit
raise ValueError("JSON body too large")
parsed_body = json.loads(body)
# Validate that parsed JSON is safe (basic object/array/primitive types)
if (
isinstance(parsed_body, (dict, list, str, int, float, bool))
or parsed_body is None
):
body = parsed_body
else:
# Unexpected type, treat as plain text
if input_data.json_format:
if isinstance(body, str):
try:
# Try to parse as JSON first
body = json.loads(body)
except json.JSONDecodeError:
# If it's not valid JSON and just plain text,
# we should send it as plain text instead
input_data.json_format = False
except (json.JSONDecodeError, ValueError):
# Invalid JSON or too large; treat as a form-field value instead
input_data.json_format = False
# ─── Prepare files (if any) ──────────────────────────────────
use_files = bool(input_data.files)
files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = []
if use_files:
files_payload = await self._prepare_files(
graph_exec_id, input_data.files_name, input_data.files
try:
response = requests.request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
json=body if input_data.json_format else None,
data=body if not input_data.json_format else None,
)
# Enforce body format rules
if use_files and input_data.json_format:
raise ValueError(
"json_format=True cannot be combined with file uploads; set json_format=False and put form fields in `body`."
)
# ─── Execute request ─────────────────────────────────────────
response = await Requests().request(
input_data.method.value,
input_data.url,
headers=input_data.headers,
files=files_payload if use_files else None,
# * If files → multipart ⇒ pass formfields via data=
data=body if not input_data.json_format else None,
# * Else, choose JSON vs urlencoded based on flag
json=body if (input_data.json_format and not use_files) else None,
)
# Decide how to parse the response
if response.headers.get("content-type", "").startswith("application/json"):
result = None if response.status == 204 else response.json()
else:
result = response.text()
# Yield according to status code bucket
if 200 <= response.status < 300:
result = response.json() if input_data.json_format else response.text
yield "response", result
elif 400 <= response.status < 500:
yield "client_error", result
else:
yield "server_error", result
except HTTPError as e:
# Handle error responses
try:
result = e.response.json() if input_data.json_format else str(e)
except json.JSONDecodeError:
result = str(e)
class SendAuthenticatedWebRequestBlock(SendWebRequestBlock):
class Input(SendWebRequestBlock.Input):
credentials: HttpCredentials = CredentialsField(
description="HTTP host-scoped credentials for automatic header injection",
discriminator="url",
)
if 400 <= e.response.status_code < 500:
yield "client_error", result
elif 500 <= e.response.status_code < 600:
yield "server_error", result
else:
error_msg = (
"Unexpected status code "
f"{e.response.status_code} '{e.response.reason}'"
)
logger.warning(error_msg)
yield "error", error_msg
def __init__(self):
Block.__init__(
self,
id="fff86bcd-e001-4bad-a7f6-2eae4720c8dc",
description="Make an authenticated HTTP request with host-scoped credentials (JSON / form / multipart).",
categories={BlockCategory.OUTPUT},
input_schema=SendAuthenticatedWebRequestBlock.Input,
output_schema=SendWebRequestBlock.Output,
test_credentials=TEST_CREDENTIALS,
)
except RequestException as e:
# Handle other request-related exceptions
yield "error", str(e)
async def run( # type: ignore[override]
self,
input_data: Input,
*,
graph_exec_id: str,
credentials: HostScopedCredentials,
**kwargs,
) -> BlockOutput:
# Create SendWebRequestBlock.Input from our input (removing credentials field)
base_input = SendWebRequestBlock.Input(
url=input_data.url,
method=input_data.method,
headers=input_data.headers,
json_format=input_data.json_format,
body=input_data.body,
files_name=input_data.files_name,
files=input_data.files,
)
# Apply host-scoped credentials to headers
extra_headers = {}
if credentials.matches_url(input_data.url):
logger.debug(
f"Applying host-scoped credentials {credentials.id} for URL {input_data.url}"
)
extra_headers.update(credentials.get_headers_dict())
else:
logger.warning(
f"Host-scoped credentials {credentials.id} do not match URL {input_data.url}"
)
# Merge with user-provided headers (user headers take precedence)
base_input.headers = {**extra_headers, **input_data.headers}
# Use parent class run method
async for output_name, output_data in super().run(
base_input, graph_exec_id=graph_exec_id, **kwargs
):
yield output_name, output_data
except Exception as e:
# Catch any other unexpected exceptions
yield "error", str(e)

View File

@@ -5,7 +5,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class HubSpotCompanyBlock(Block):
@@ -35,7 +35,7 @@ class HubSpotCompanyBlock(Block):
output_schema=HubSpotCompanyBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
@@ -45,7 +45,7 @@ class HubSpotCompanyBlock(Block):
}
if input_data.operation == "create":
response = await Requests().post(
response = requests.post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
@@ -67,16 +67,14 @@ class HubSpotCompanyBlock(Block):
}
]
}
search_response = await Requests().post(
search_url, headers=headers, json=search_data
)
search_result = search_response.json()
yield "search_company", search_result.get("results", [{}])[0]
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = await Requests().post(
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
@@ -93,11 +91,10 @@ class HubSpotCompanyBlock(Block):
]
},
)
search_result = search_response.json()
company_id = search_result.get("results", [{}])[0].get("id")
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = await Requests().patch(
response = requests.patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},

View File

@@ -5,7 +5,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class HubSpotContactBlock(Block):
@@ -35,7 +35,7 @@ class HubSpotContactBlock(Block):
output_schema=HubSpotContactBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
@@ -45,7 +45,7 @@ class HubSpotContactBlock(Block):
}
if input_data.operation == "create":
response = await Requests().post(
response = requests.post(
base_url, headers=headers, json={"properties": input_data.contact_data}
)
result = response.json()
@@ -53,6 +53,7 @@ class HubSpotContactBlock(Block):
yield "status", "created"
elif input_data.operation == "get":
# Search for contact by email
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
@@ -67,15 +68,13 @@ class HubSpotContactBlock(Block):
}
]
}
response = await Requests().post(
search_url, headers=headers, json=search_data
)
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "contact", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
search_response = await Requests().post(
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
@@ -92,11 +91,10 @@ class HubSpotContactBlock(Block):
]
},
)
search_result = search_response.json()
contact_id = search_result.get("results", [{}])[0].get("id")
contact_id = search_response.json().get("results", [{}])[0].get("id")
if contact_id:
response = await Requests().patch(
response = requests.patch(
f"{base_url}/{contact_id}",
headers=headers,
json={"properties": input_data.contact_data},

View File

@@ -7,7 +7,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
@@ -42,7 +42,7 @@ class HubSpotEngagementBlock(Block):
output_schema=HubSpotEngagementBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
@@ -66,9 +66,7 @@ class HubSpotEngagementBlock(Block):
}
}
response = await Requests().post(
email_url, headers=headers, json=email_data
)
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
@@ -82,9 +80,7 @@ class HubSpotEngagementBlock(Block):
params = {"limit": 100, "after": from_date.isoformat()}
response = await Requests().get(
engagement_url, headers=headers, params=params
)
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics

View File

@@ -12,7 +12,7 @@ from backend.data.model import (
SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -196,13 +196,13 @@ class IdeogramModelBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
seed = input_data.seed
# Step 1: Generate the image
result = await self.run_model(
result = self.run_model(
api_key=credentials.api_key,
model_name=input_data.ideogram_model_name.value,
prompt=input_data.prompt,
@@ -217,14 +217,14 @@ class IdeogramModelBlock(Block):
# Step 2: Upscale the image if requested
if input_data.upscale == UpscaleOption.AI_UPSCALE:
result = await self.upscale_image(
result = self.upscale_image(
api_key=credentials.api_key,
image_url=result,
)
yield "result", result
async def run_model(
def run_model(
self,
api_key: SecretStr,
model_name: str,
@@ -267,12 +267,12 @@ class IdeogramModelBlock(Block):
}
try:
response = await Requests().post(url, headers=headers, json=data)
response = requests.post(url, json=data, headers=headers)
return response.json()["data"][0]["url"]
except RequestException as e:
raise Exception(f"Failed to fetch image: {str(e)}")
async def upscale_image(self, api_key: SecretStr, image_url: str):
def upscale_image(self, api_key: SecretStr, image_url: str):
url = "https://api.ideogram.ai/upscale"
headers = {
"Api-Key": api_key.get_secret_value(),
@@ -280,22 +280,21 @@ class IdeogramModelBlock(Block):
try:
# Step 1: Download the image from the provided URL
response = await Requests().get(image_url)
image_content = response.content
image_response = requests.get(image_url)
# Step 2: Send the downloaded image to the upscale API
files = {
"image_file": ("image.png", image_content, "image/png"),
"image_file": ("image.png", image_response.content, "image/png"),
}
response = await Requests().post(
response = requests.post(
url,
headers=headers,
data={"image_request": "{}"},
files=files,
)
return (response.json())["data"][0]["url"]
return response.json()["data"][0]["url"]
except RequestException as e:
raise Exception(f"Failed to upscale image: {str(e)}")

View File

@@ -95,7 +95,7 @@ class AgentInputBlock(Block):
}
)
async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
if input_data.value is not None:
yield "result", input_data.value
@@ -186,7 +186,7 @@ class AgentOutputBlock(Block):
static_output=True,
)
async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
"""
Attempts to format the recorded_value using the fmt_string if provided.
If formatting fails or no fmt_string is given, returns the original recorded_value.
@@ -413,12 +413,6 @@ class AgentFileInputBlock(AgentInputBlock):
advanced=False,
title="Default Value",
)
base_64: bool = SchemaField(
description="Whether produce an output in base64 format (not recommended, you can pass the string path just fine accross blocks).",
default=False,
advanced=True,
title="Produce Base64 Output",
)
class Output(AgentInputBlock.Output):
result: str = SchemaField(description="File reference/path result.")
@@ -442,7 +436,7 @@ class AgentFileInputBlock(AgentInputBlock):
],
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -452,11 +446,12 @@ class AgentFileInputBlock(AgentInputBlock):
if not input_data.value:
return
yield "result", await store_media_file(
file_path = store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.value,
return_content=input_data.base_64,
return_content=False,
)
yield "result", file_path
class AgentDropdownInputBlock(AgentInputBlock):

View File

@@ -53,7 +53,7 @@ class StepThroughItemsBlock(Block):
test_mock={},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue

View File

@@ -5,7 +5,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class JinaChunkingBlock(Block):
@@ -35,7 +35,7 @@ class JinaChunkingBlock(Block):
output_schema=JinaChunkingBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://segment.jina.ai/"
@@ -55,7 +55,7 @@ class JinaChunkingBlock(Block):
"max_chunk_length": str(input_data.max_chunk_length),
}
response = await Requests().post(url, headers=headers, json=data)
response = requests.post(url, headers=headers, json=data)
result = response.json()
all_chunks.extend(result.get("chunks", []))

View File

@@ -5,7 +5,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
class JinaEmbeddingBlock(Block):
@@ -29,7 +29,7 @@ class JinaEmbeddingBlock(Block):
output_schema=JinaEmbeddingBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.jina.ai/v1/embeddings"
@@ -38,6 +38,6 @@ class JinaEmbeddingBlock(Block):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
data = {"input": input_data.texts, "model": input_data.model}
response = await Requests().post(url, headers=headers, json=data)
response = requests.post(url, headers=headers, json=data)
embeddings = [e["embedding"] for e in response.json()["data"]]
yield "embeddings", embeddings

View File

@@ -1,5 +1,7 @@
from urllib.parse import quote
import requests
from backend.blocks.jina._auth import (
JinaCredentials,
JinaCredentialsField,
@@ -7,7 +9,6 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class FactCheckerBlock(Block):
@@ -34,7 +35,7 @@ class FactCheckerBlock(Block):
output_schema=FactCheckerBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
encoded_statement = quote(input_data.statement)
@@ -45,7 +46,8 @@ class FactCheckerBlock(Block):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
response = await Requests().get(url, headers=headers)
response = requests.get(url, headers=headers)
response.raise_for_status()
data = response.json()
if "data" in data:

View File

@@ -39,7 +39,7 @@ class SearchTheWebBlock(Block, GetRequest):
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
async def run(
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
@@ -51,7 +51,7 @@ class SearchTheWebBlock(Block, GetRequest):
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = await self.get_request(jina_search_url, headers=headers, json=False)
results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
@@ -90,7 +90,7 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
async def run(
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
@@ -103,5 +103,5 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
content = await self.get_request(url, json=False, headers=headers)
content = self.get_request(url, json=False, headers=headers)
yield "content", content

View File

@@ -48,7 +48,7 @@ class LinearClient:
raise_for_status=False,
)
async def _execute_graphql_request(
def _execute_graphql_request(
self, query: str, variables: dict | None = None
) -> Any:
"""
@@ -65,18 +65,19 @@ class LinearClient:
if variables:
payload["variables"] = variables
response = await self._requests.post(self.API_URL, json=payload)
response = self._requests.post(self.API_URL, json=payload)
if not response.ok:
try:
error_data = response.json()
error_message = error_data.get("errors", [{}])[0].get("message", "")
except json.JSONDecodeError:
error_message = response.text()
error_message = response.text
raise LinearAPIException(
f"Linear API request failed ({response.status}): {error_message}",
response.status,
f"Linear API request failed ({response.status_code}): {error_message}",
response.status_code,
)
response_data = response.json()
@@ -87,12 +88,12 @@ class LinearClient:
]
raise LinearAPIException(
f"Linear API returned errors: {', '.join(error_messages)}",
response.status,
response.status_code,
)
return response_data["data"]
async def query(self, query: str, variables: Optional[dict] = None) -> dict:
def query(self, query: str, variables: Optional[dict] = None) -> dict:
"""Executes a GraphQL query.
Args:
@@ -102,9 +103,9 @@ class LinearClient:
Returns:
The response data.
"""
return await self._execute_graphql_request(query, variables)
return self._execute_graphql_request(query, variables)
async def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict:
def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict:
"""Executes a GraphQL mutation.
Args:
@@ -114,11 +115,9 @@ class LinearClient:
Returns:
The response data.
"""
return await self._execute_graphql_request(mutation, variables)
return self._execute_graphql_request(mutation, variables)
async def try_create_comment(
self, issue_id: str, comment: str
) -> CreateCommentResponse:
def try_create_comment(self, issue_id: str, comment: str) -> CreateCommentResponse:
try:
mutation = """
mutation CommentCreate($input: CommentCreateInput!) {
@@ -139,13 +138,13 @@ class LinearClient:
}
}
added_comment = await self.mutate(mutation, variables)
added_comment = self.mutate(mutation, variables)
# Select the commentCreate field from the mutation response
return CreateCommentResponse(**added_comment["commentCreate"])
except LinearAPIException as e:
raise e
async def try_get_team_by_name(self, team_name: str) -> str:
def try_get_team_by_name(self, team_name: str) -> str:
try:
query = """
query GetTeamId($searchTerm: String!) {
@@ -168,12 +167,12 @@ class LinearClient:
"searchTerm": team_name,
}
team_id = await self.query(query, variables)
team_id = self.query(query, variables)
return team_id["teams"]["nodes"][0]["id"]
except LinearAPIException as e:
raise e
async def try_create_issue(
def try_create_issue(
self,
team_id: str,
title: str,
@@ -212,12 +211,12 @@ class LinearClient:
if priority:
variables["input"]["priority"] = priority
added_issue = await self.mutate(mutation, variables)
added_issue = self.mutate(mutation, variables)
return CreateIssueResponse(**added_issue["issueCreate"])
except LinearAPIException as e:
raise e
async def try_search_projects(self, term: str) -> list[Project]:
def try_search_projects(self, term: str) -> list[Project]:
try:
query = """
query SearchProjects($term: String!, $includeComments: Boolean!) {
@@ -239,14 +238,14 @@ class LinearClient:
"includeComments": True,
}
projects = await self.query(query, variables)
projects = self.query(query, variables)
return [
Project(**project) for project in projects["searchProjects"]["nodes"]
]
except LinearAPIException as e:
raise e
async def try_search_issues(self, term: str) -> list[Issue]:
def try_search_issues(self, term: str) -> list[Issue]:
try:
query = """
query SearchIssues($term: String!, $includeComments: Boolean!) {
@@ -267,7 +266,7 @@ class LinearClient:
"includeComments": True,
}
issues = await self.query(query, variables)
issues = self.query(query, variables)
return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]]
except LinearAPIException as e:
raise e
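# --- Hedged usage sketch (not part of the diff) ---------------------------------
# Payload shape that _execute_graphql_request posts to LinearClient.API_URL when
# try_search_issues runs; the selection set and search term are illustrative,
# only the operation name and variables follow the code above.
example_payload = {
    "query": """
        query SearchIssues($term: String!, $includeComments: Boolean!) {
            searchIssues(term: $term) { nodes { id title } }
        }
    """,
    "variables": {"term": "login bug", "includeComments": True},
}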

View File

@@ -54,21 +54,21 @@ class LinearCreateCommentBlock(Block):
)
@staticmethod
async def create_comment(
def create_comment(
credentials: LinearCredentials, issue_id: str, comment: str
) -> tuple[str, str]:
client = LinearClient(credentials=credentials)
response: CreateCommentResponse = await client.try_create_comment(
response: CreateCommentResponse = client.try_create_comment(
issue_id=issue_id, comment=comment
)
return response.comment.id, response.comment.body
async def run(
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the comment creation"""
try:
comment_id, comment_body = await self.create_comment(
comment_id, comment_body = self.create_comment(
credentials=credentials,
issue_id=input_data.issue_id,
comment=input_data.comment,

View File

@@ -67,7 +67,7 @@ class LinearCreateIssueBlock(Block):
)
@staticmethod
async def create_issue(
def create_issue(
credentials: LinearCredentials,
team_name: str,
title: str,
@@ -76,15 +76,15 @@ class LinearCreateIssueBlock(Block):
project_name: str | None = None,
) -> tuple[str, str]:
client = LinearClient(credentials=credentials)
team_id = await client.try_get_team_by_name(team_name=team_name)
team_id = client.try_get_team_by_name(team_name=team_name)
project_id: str | None = None
if project_name:
projects = await client.try_search_projects(term=project_name)
projects = client.try_search_projects(term=project_name)
if projects:
project_id = projects[0].id
else:
raise LinearAPIException("Project not found", status_code=404)
response: CreateIssueResponse = await client.try_create_issue(
response: CreateIssueResponse = client.try_create_issue(
team_id=team_id,
title=title,
description=description,
@@ -93,12 +93,12 @@ class LinearCreateIssueBlock(Block):
)
return response.issue.identifier, response.issue.title
async def run(
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the issue creation"""
try:
issue_id, issue_title = await self.create_issue(
issue_id, issue_title = self.create_issue(
credentials=credentials,
team_name=input_data.team_name,
title=input_data.title,
@@ -168,22 +168,20 @@ class LinearSearchIssuesBlock(Block):
)
@staticmethod
async def search_issues(
def search_issues(
credentials: LinearCredentials,
term: str,
) -> list[Issue]:
client = LinearClient(credentials=credentials)
response: list[Issue] = await client.try_search_issues(term=term)
response: list[Issue] = client.try_search_issues(term=term)
return response
async def run(
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the issue search"""
try:
issues = await self.search_issues(
credentials=credentials, term=input_data.term
)
issues = self.search_issues(credentials=credentials, term=input_data.term)
yield "issues", issues
except LinearAPIException as e:
yield "error", str(e)

View File

@@ -69,20 +69,20 @@ class LinearSearchProjectsBlock(Block):
)
@staticmethod
async def search_projects(
def search_projects(
credentials: LinearCredentials,
term: str,
) -> list[Project]:
client = LinearClient(credentials=credentials)
response: list[Project] = await client.try_search_projects(term=term)
response: list[Project] = client.try_search_projects(term=term)
return response
async def run(
def run(
self, input_data: Input, *, credentials: LinearCredentials, **kwargs
) -> BlockOutput:
"""Execute the project search"""
try:
projects = await self.search_projects(
projects = self.search_projects(
credentials=credentials,
term=input_data.term,
)

View File

@@ -3,13 +3,14 @@ import logging
from abc import ABC
from enum import Enum, EnumMeta
from json import JSONDecodeError
from types import MappingProxyType
from typing import Any, Iterable, List, Literal, NamedTuple, Optional
import anthropic
import ollama
import openai
from anthropic.types import ToolParam
from groq import AsyncGroq
from groq import Groq
from pydantic import BaseModel, SecretStr
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -22,26 +23,23 @@ from backend.data.model import (
)
from backend.integrations.providers import ProviderName
from backend.util import json
from backend.util.logging import TruncatedLogger
from backend.util.prompt import compress_prompt, estimate_token_count
from backend.util.settings import BehaveAs, Settings
from backend.util.text import TextFormatter
logger = TruncatedLogger(logging.getLogger(__name__), "[LLM-Block]")
logger = logging.getLogger(__name__)
fmt = TextFormatter()
LLMProviderName = Literal[
ProviderName.AIML_API,
ProviderName.ANTHROPIC,
ProviderName.GROQ,
ProviderName.OLLAMA,
ProviderName.OPENAI,
ProviderName.OPEN_ROUTER,
ProviderName.LLAMA_API,
]
AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]]
TEST_CREDENTIALS = APIKeyCredentials(
id="769f6af7-820b-4d5d-9b7a-ab82bbc165f",
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
provider="openai",
api_key=SecretStr("mock-openai-api-key"),
title="Mock OpenAI API key",
@@ -72,7 +70,20 @@ class ModelMetadata(NamedTuple):
class LlmModelMeta(EnumMeta):
pass
@property
def __members__(self) -> MappingProxyType:
if Settings().config.behave_as == BehaveAs.LOCAL:
members = super().__members__
return MappingProxyType(members)
else:
removed_providers = ["ollama"]
existing_members = super().__members__
members = {
name: member
for name, member in existing_members.items()
if LlmModel[name].provider not in removed_providers
}
return MappingProxyType(members)
class LlmModel(str, Enum, metaclass=LlmModelMeta):
@@ -88,18 +99,10 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
GPT4_TURBO = "gpt-4-turbo"
GPT3_5_TURBO = "gpt-3.5-turbo"
# Anthropic models
CLAUDE_4_OPUS = "claude-opus-4-20250514"
CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest"
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
# AI/ML API models
AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct"
AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo"
# Groq models
GEMMA2_9B = "gemma2-9b-it"
LLAMA3_3_70B = "llama-3.3-70b-versatile"
@@ -127,9 +130,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE = (
"perplexity/llama-3.1-sonar-large-128k-online"
)
PERPLEXITY_SONAR = "perplexity/sonar"
PERPLEXITY_SONAR_PRO = "perplexity/sonar-pro"
PERPLEXITY_SONAR_DEEP_RESEARCH = "perplexity/sonar-deep-research"
QWEN_QWQ_32B_PREVIEW = "qwen/qwq-32b-preview"
NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B = "nousresearch/hermes-3-llama-3.1-405b"
NOUSRESEARCH_HERMES_3_LLAMA_3_1_70B = "nousresearch/hermes-3-llama-3.1-70b"
@@ -140,11 +140,6 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
# Llama API models
LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
@property
def metadata(self) -> ModelMetadata:
@@ -182,12 +177,6 @@ MODEL_METADATA = {
), # gpt-4-turbo-2024-04-09
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125
# https://docs.anthropic.com/en/docs/about-claude/models
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
"anthropic", 200000, 8192
), # claude-4-opus-20250514
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
"anthropic", 200000, 8192
), # claude-4-sonnet-20250514
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
"anthropic", 200000, 8192
), # claude-3-7-sonnet-20250219
@@ -200,12 +189,6 @@ MODEL_METADATA = {
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
"anthropic", 200000, 4096
), # claude-3-haiku-20240307
# https://docs.aimlapi.com/api-overview/model-database/text-models
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000),
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000),
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None),
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000),
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None),
# https://console.groq.com/docs/models
LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, None),
LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768),
@@ -232,13 +215,6 @@ MODEL_METADATA = {
LlmModel.PERPLEXITY_LLAMA_3_1_SONAR_LARGE_128K_ONLINE: ModelMetadata(
"open_router", 127072, 127072
),
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 127000),
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
"open_router",
128000,
128000,
),
LlmModel.QWEN_QWQ_32B_PREVIEW: ModelMetadata("open_router", 32768, 32768),
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
"open_router", 131000, 4096
@@ -253,11 +229,6 @@ MODEL_METADATA = {
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
# Llama API models
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
}
for model in LlmModel:
@@ -283,7 +254,6 @@ class LLMResponse(BaseModel):
tool_calls: Optional[List[ToolContentBlock]] | None
prompt_tokens: int
completion_tokens: int
reasoning: Optional[str] = None
def convert_openai_tool_fmt_to_anthropic(
@@ -318,47 +288,7 @@ def convert_openai_tool_fmt_to_anthropic(
return anthropic_tools
def extract_openai_reasoning(response) -> str | None:
"""Extract reasoning from OpenAI-compatible response if available."""
"""Note: This will likely not working since the reasoning is not present in another Response API"""
reasoning = None
choice = response.choices[0]
if hasattr(choice, "reasoning") and getattr(choice, "reasoning", None):
reasoning = str(getattr(choice, "reasoning"))
elif hasattr(response, "reasoning") and getattr(response, "reasoning", None):
reasoning = str(getattr(response, "reasoning"))
elif hasattr(choice.message, "reasoning") and getattr(
choice.message, "reasoning", None
):
reasoning = str(getattr(choice.message, "reasoning"))
return reasoning
def extract_openai_tool_calls(response) -> list[ToolContentBlock] | None:
"""Extract tool calls from OpenAI-compatible response."""
if response.choices[0].message.tool_calls:
return [
ToolContentBlock(
id=tool.id,
type=tool.type,
function=ToolCall(
name=tool.function.name,
arguments=tool.function.arguments,
),
)
for tool in response.choices[0].message.tool_calls
]
return None
def get_parallel_tool_calls_param(llm_model: LlmModel, parallel_tool_calls):
"""Get the appropriate parallel_tool_calls parameter for OpenAI-compatible APIs."""
if llm_model.startswith("o") or parallel_tool_calls is None:
return openai.NOT_GIVEN
return parallel_tool_calls
async def llm_call(
def llm_call(
credentials: APIKeyCredentials,
llm_model: LlmModel,
prompt: list[dict],
@@ -366,8 +296,7 @@ async def llm_call(
max_tokens: int | None,
tools: list[dict] | None = None,
ollama_host: str = "localhost:11434",
parallel_tool_calls=None,
compress_prompt_to_fit: bool = True,
parallel_tool_calls: bool | None = None,
) -> LLMResponse:
"""
Make a call to a language model.
@@ -390,45 +319,48 @@ async def llm_call(
- completion_tokens: The number of tokens used in the completion.
"""
provider = llm_model.metadata.provider
context_window = llm_model.context_window
if compress_prompt_to_fit:
prompt = compress_prompt(
messages=prompt,
target_tokens=llm_model.context_window // 2,
lossy_ok=True,
)
# Calculate available tokens based on context window and input length
estimated_input_tokens = estimate_token_count(prompt)
model_max_output = llm_model.max_output_tokens or int(2**15)
user_max = max_tokens or model_max_output
available_tokens = max(context_window - estimated_input_tokens, 0)
max_tokens = max(min(available_tokens, model_max_output, user_max), 1)
max_tokens = max_tokens or llm_model.max_output_tokens or 4096
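# Hedged worked example (not part of the diff): with the compress-and-budget
# path above, a 200,000-token context window, an estimated 150,000 input
# tokens, an 8,192-token model output cap and no user override give
#   available_tokens = 200_000 - 150_000 = 50_000
#   max_tokens = max(min(50_000, 8_192, 8_192), 1) = 8_192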
if provider == "openai":
tools_param = tools if tools else openai.NOT_GIVEN
oai_client = openai.AsyncOpenAI(api_key=credentials.api_key.get_secret_value())
oai_client = openai.OpenAI(api_key=credentials.api_key.get_secret_value())
response_format = None
parallel_tool_calls = get_parallel_tool_calls_param(
llm_model, parallel_tool_calls
)
if json_format:
if llm_model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]:
sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
prompt = [
{"role": "user", "content": "\n".join(sys_messages)},
{"role": "user", "content": "\n".join(usr_messages)},
]
elif json_format:
response_format = {"type": "json_object"}
response = await oai_client.chat.completions.create(
response = oai_client.chat.completions.create(
model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
max_completion_tokens=max_tokens,
tools=tools_param, # type: ignore
parallel_tool_calls=parallel_tool_calls,
parallel_tool_calls=(
openai.NOT_GIVEN if parallel_tool_calls is None else parallel_tool_calls
),
)
tool_calls = extract_openai_tool_calls(response)
reasoning = extract_openai_reasoning(response)
if response.choices[0].message.tool_calls:
tool_calls = [
ToolContentBlock(
id=tool.id,
type=tool.type,
function=ToolCall(
name=tool.function.name,
arguments=tool.function.arguments,
),
)
for tool in response.choices[0].message.tool_calls
]
else:
tool_calls = None
return LLMResponse(
raw_response=response.choices[0].message,
@@ -437,7 +369,6 @@ async def llm_call(
tool_calls=tool_calls,
prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
completion_tokens=response.usage.completion_tokens if response.usage else 0,
reasoning=reasoning,
)
elif provider == "anthropic":
@@ -461,11 +392,9 @@ async def llm_call(
messages.append({"role": p["role"], "content": p["content"]})
last_role = p["role"]
client = anthropic.AsyncAnthropic(
api_key=credentials.api_key.get_secret_value()
)
client = anthropic.Anthropic(api_key=credentials.api_key.get_secret_value())
try:
resp = await client.messages.create(
resp = client.messages.create(
model=llm_model.value,
system=sysprompt,
messages=messages,
@@ -496,15 +425,9 @@ async def llm_call(
if not tool_calls and resp.stop_reason == "tool_use":
logger.warning(
f"Tool use stop reason but no tool calls found in content. {resp}"
"Tool use stop reason but no tool calls found in content. %s", resp
)
reasoning = None
for content_block in resp.content:
if hasattr(content_block, "type") and content_block.type == "thinking":
reasoning = content_block.thinking
break
return LLMResponse(
raw_response=resp,
prompt=prompt,
@@ -516,7 +439,6 @@ async def llm_call(
tool_calls=tool_calls,
prompt_tokens=resp.usage.input_tokens,
completion_tokens=resp.usage.output_tokens,
reasoning=reasoning,
)
except anthropic.APIError as e:
error_message = f"Anthropic API error: {str(e)}"
@@ -526,9 +448,9 @@ async def llm_call(
if tools:
raise ValueError("Groq does not support tools.")
client = AsyncGroq(api_key=credentials.api_key.get_secret_value())
client = Groq(api_key=credentials.api_key.get_secret_value())
response_format = {"type": "json_object"} if json_format else None
response = await client.chat.completions.create(
response = client.chat.completions.create(
model=llm_model.value,
messages=prompt, # type: ignore
response_format=response_format, # type: ignore
@@ -541,20 +463,18 @@ async def llm_call(
tool_calls=None,
prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
completion_tokens=response.usage.completion_tokens if response.usage else 0,
reasoning=None,
)
elif provider == "ollama":
if tools:
raise ValueError("Ollama does not support tools.")
client = ollama.AsyncClient(host=ollama_host)
client = ollama.Client(host=ollama_host)
sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
response = await client.generate(
response = client.generate(
model=llm_model.value,
prompt=f"{sys_messages}\n\n{usr_messages}",
stream=False,
options={"num_ctx": max_tokens},
)
return LLMResponse(
raw_response=response.get("response") or "",
@@ -563,20 +483,15 @@ async def llm_call(
tool_calls=None,
prompt_tokens=response.get("prompt_eval_count") or 0,
completion_tokens=response.get("eval_count") or 0,
reasoning=None,
)
elif provider == "open_router":
tools_param = tools if tools else openai.NOT_GIVEN
client = openai.AsyncOpenAI(
client = openai.OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=credentials.api_key.get_secret_value(),
)
parallel_tool_calls_param = get_parallel_tool_calls_param(
llm_model, parallel_tool_calls
)
response = await client.chat.completions.create(
response = client.chat.completions.create(
extra_headers={
"HTTP-Referer": "https://agpt.co",
"X-Title": "AutoGPT",
@@ -585,7 +500,9 @@ async def llm_call(
messages=prompt, # type: ignore
max_tokens=max_tokens,
tools=tools_param, # type: ignore
parallel_tool_calls=parallel_tool_calls_param,
parallel_tool_calls=(
openai.NOT_GIVEN if parallel_tool_calls is None else parallel_tool_calls
),
)
# If there's no response, raise an error
@@ -595,8 +512,19 @@ async def llm_call(
else:
raise ValueError("No response from OpenRouter.")
tool_calls = extract_openai_tool_calls(response)
reasoning = extract_openai_reasoning(response)
if response.choices[0].message.tool_calls:
tool_calls = [
ToolContentBlock(
id=tool.id,
type=tool.type,
function=ToolCall(
name=tool.function.name, arguments=tool.function.arguments
),
)
for tool in response.choices[0].message.tool_calls
]
else:
tool_calls = None
return LLMResponse(
raw_response=response.choices[0].message,
@@ -605,73 +533,6 @@ async def llm_call(
tool_calls=tool_calls,
prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
completion_tokens=response.usage.completion_tokens if response.usage else 0,
reasoning=reasoning,
)
elif provider == "llama_api":
tools_param = tools if tools else openai.NOT_GIVEN
client = openai.AsyncOpenAI(
base_url="https://api.llama.com/compat/v1/",
api_key=credentials.api_key.get_secret_value(),
)
parallel_tool_calls_param = get_parallel_tool_calls_param(
llm_model, parallel_tool_calls
)
response = await client.chat.completions.create(
extra_headers={
"HTTP-Referer": "https://agpt.co",
"X-Title": "AutoGPT",
},
model=llm_model.value,
messages=prompt, # type: ignore
max_tokens=max_tokens,
tools=tools_param, # type: ignore
parallel_tool_calls=parallel_tool_calls_param,
)
# If there's no response, raise an error
if not response.choices:
if response:
raise ValueError(f"Llama API error: {response}")
else:
raise ValueError("No response from Llama API.")
tool_calls = extract_openai_tool_calls(response)
reasoning = extract_openai_reasoning(response)
return LLMResponse(
raw_response=response.choices[0].message,
prompt=prompt,
response=response.choices[0].message.content or "",
tool_calls=tool_calls,
prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
completion_tokens=response.usage.completion_tokens if response.usage else 0,
reasoning=reasoning,
)
elif provider == "aiml_api":
client = openai.OpenAI(
base_url="https://api.aimlapi.com/v2",
api_key=credentials.api_key.get_secret_value(),
default_headers={"X-Project": "AutoGPT"},
)
completion = client.chat.completions.create(
model=llm_model.value,
messages=prompt, # type: ignore
max_tokens=max_tokens,
)
return LLMResponse(
raw_response=completion.choices[0].message,
prompt=prompt,
response=completion.choices[0].message.content or "",
tool_calls=None,
prompt_tokens=completion.usage.prompt_tokens if completion.usage else 0,
completion_tokens=(
completion.usage.completion_tokens if completion.usage else 0
),
reasoning=None,
)
else:
raise ValueError(f"Unsupported LLM provider: {provider}")
@@ -697,11 +558,6 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
description="Expected format of the response. If provided, the response will be validated against this format. "
"The keys should be the expected fields in the response, and the values should be the description of the field.",
)
list_result: bool = SchemaField(
title="List Result",
default=False,
description="Whether the response should be a list of objects in the expected format.",
)
model: LlmModel = SchemaField(
title="LLM Model",
default=LlmModel.GPT4O,
@@ -733,11 +589,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
default=None,
description="The maximum number of tokens to generate in the chat completion.",
)
compress_prompt_to_fit: bool = SchemaField(
advanced=True,
default=True,
description="Whether to compress the prompt to fit within the model's context window.",
)
ollama_host: str = SchemaField(
advanced=True,
default="localhost:11434",
@@ -745,7 +597,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
)
class Output(BlockSchema):
response: dict[str, Any] | list[dict[str, Any]] = SchemaField(
response: dict[str, Any] = SchemaField(
description="The response object generated by the language model."
)
prompt: list = SchemaField(description="The prompt sent to the language model.")
@@ -785,18 +637,16 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
tool_calls=None,
prompt_tokens=0,
completion_tokens=0,
reasoning=None,
)
},
)
async def llm_call(
def llm_call(
self,
credentials: APIKeyCredentials,
llm_model: LlmModel,
prompt: list[dict],
json_format: bool,
compress_prompt_to_fit: bool,
max_tokens: int | None,
tools: list[dict] | None = None,
ollama_host: str = "localhost:11434",
@@ -806,7 +656,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
so that it can be mocked within the block testing framework.
"""
self.prompt = prompt
return await llm_call(
return llm_call(
credentials=credentials,
llm_model=llm_model,
prompt=prompt,
@@ -814,10 +664,9 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
max_tokens=max_tokens,
tools=tools,
ollama_host=ollama_host,
compress_prompt_to_fit=compress_prompt_to_fit,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
logger.debug(f"Calling LLM with input data: {input_data}")
@@ -839,22 +688,13 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
expected_format = [
f'"{k}": "{v}"' for k, v in input_data.expected_format.items()
]
if input_data.list_result:
format_prompt = (
f'"results": [\n {{\n {", ".join(expected_format)}\n }}\n]'
)
else:
format_prompt = "\n ".join(expected_format)
format_prompt = ",\n ".join(expected_format)
sys_prompt = trim_prompt(
f"""
|Reply strictly only in the following JSON format:
|{{
| {format_prompt}
|}}
|
|Ensure the response is valid JSON. Do not include any additional text outside of the JSON.
|If you cannot provide all the keys, provide an empty string for the values you cannot answer.
"""
)
prompt.append({"role": "system", "content": sys_prompt})
@@ -862,28 +702,28 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
if input_data.prompt:
prompt.append({"role": "user", "content": input_data.prompt})
def validate_response(parsed: object) -> str | None:
def parse_response(resp: str) -> tuple[dict[str, Any], str | None]:
try:
parsed = json.loads(resp)
if not isinstance(parsed, dict):
return f"Expected a dictionary, but got {type(parsed)}"
return {}, f"Expected a dictionary, but got {type(parsed)}"
miss_keys = set(input_data.expected_format.keys()) - set(parsed.keys())
if miss_keys:
return f"Missing keys: {miss_keys}"
return None
return parsed, f"Missing keys: {miss_keys}"
return parsed, None
except JSONDecodeError as e:
return f"JSON decode error: {e}"
return {}, f"JSON decode error: {e}"
logger.debug(f"LLM request: {prompt}")
logger.info(f"LLM request: {prompt}")
retry_prompt = ""
llm_model = input_data.model
for retry_count in range(input_data.retry):
try:
llm_response = await self.llm_call(
llm_response = self.llm_call(
credentials=credentials,
llm_model=llm_model,
prompt=prompt,
compress_prompt_to_fit=input_data.compress_prompt_to_fit,
json_format=bool(input_data.expected_format),
ollama_host=input_data.ollama_host,
max_tokens=input_data.max_tokens,
@@ -895,32 +735,21 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
output_token_count=llm_response.completion_tokens,
)
)
logger.debug(f"LLM attempt-{retry_count} response: {response_text}")
logger.info(f"LLM attempt-{retry_count} response: {response_text}")
if input_data.expected_format:
response_obj = json.loads(response_text)
if input_data.list_result and isinstance(response_obj, dict):
if "results" in response_obj:
response_obj = response_obj.get("results", [])
elif len(response_obj) == 1:
response_obj = list(response_obj.values())
response_error = "\n".join(
[
validation_error
for response_item in (
response_obj
if isinstance(response_obj, list)
else [response_obj]
parsed_dict, parsed_error = parse_response(response_text)
if not parsed_error:
yield "response", {
k: (
json.loads(v)
if isinstance(v, str)
and v.startswith("[")
and v.endswith("]")
else (", ".join(v) if isinstance(v, list) else v)
)
if (validation_error := validate_response(response_item))
]
)
if not response_error:
yield "response", response_obj
for k, v in parsed_dict.items()
}
yield "prompt", self.prompt
return
else:
@@ -937,23 +766,13 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
|
|And this is the error:
|--
|{response_error}
|{parsed_error}
|--
"""
)
prompt.append({"role": "user", "content": retry_prompt})
except Exception as e:
logger.exception(f"Error calling LLM: {e}")
if (
"maximum context length" in str(e).lower()
or "token limit" in str(e).lower()
):
if input_data.max_tokens is None:
input_data.max_tokens = llm_model.max_output_tokens or 4096
input_data.max_tokens = int(input_data.max_tokens * 0.85)
logger.debug(
f"Reducing max_tokens to {input_data.max_tokens} for next attempt"
)
retry_prompt = f"Error calling LLM: {e}"
finally:
self.merge_stats(
@@ -1031,17 +850,17 @@ class AITextGeneratorBlock(AIBlockBase):
test_mock={"llm_call": lambda *args, **kwargs: "Response text"},
)
async def llm_call(
def llm_call(
self,
input_data: AIStructuredResponseGeneratorBlock.Input,
credentials: APIKeyCredentials,
) -> dict:
) -> str:
block = AIStructuredResponseGeneratorBlock()
response = await block.run_once(input_data, "response", credentials=credentials)
response = block.run_once(input_data, "response", credentials=credentials)
self.merge_llm_stats(block)
return response["response"]
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
object_input_data = AIStructuredResponseGeneratorBlock.Input(
@@ -1051,8 +870,7 @@ class AITextGeneratorBlock(AIBlockBase):
},
expected_format={},
)
response = await self.llm_call(object_input_data, credentials)
yield "response", response
yield "response", self.llm_call(object_input_data, credentials)
yield "prompt", self.prompt
@@ -1134,27 +952,23 @@ class AITextSummarizerBlock(AIBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
async for output_name, output_data in self._run(input_data, credentials):
yield output_name, output_data
for output in self._run(input_data, credentials):
yield output
async def _run(
self, input_data: Input, credentials: APIKeyCredentials
) -> BlockOutput:
def _run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
chunks = self._split_text(
input_data.text, input_data.max_tokens, input_data.chunk_overlap
)
summaries = []
for chunk in chunks:
chunk_summary = await self._summarize_chunk(chunk, input_data, credentials)
chunk_summary = self._summarize_chunk(chunk, input_data, credentials)
summaries.append(chunk_summary)
final_summary = await self._combine_summaries(
summaries, input_data, credentials
)
final_summary = self._combine_summaries(summaries, input_data, credentials)
yield "summary", final_summary
yield "prompt", self.prompt
@@ -1170,22 +984,22 @@ class AITextSummarizerBlock(AIBlockBase):
return chunks
async def llm_call(
def llm_call(
self,
input_data: AIStructuredResponseGeneratorBlock.Input,
credentials: APIKeyCredentials,
) -> dict:
block = AIStructuredResponseGeneratorBlock()
response = await block.run_once(input_data, "response", credentials=credentials)
response = block.run_once(input_data, "response", credentials=credentials)
self.merge_llm_stats(block)
return response
async def _summarize_chunk(
def _summarize_chunk(
self, chunk: str, input_data: Input, credentials: APIKeyCredentials
) -> str:
prompt = f"Summarize the following text in a {input_data.style} form. Focus your summary on the topic of `{input_data.focus}` if present, otherwise just provide a general summary:\n\n```{chunk}```"
llm_response = await self.llm_call(
llm_response = self.llm_call(
AIStructuredResponseGeneratorBlock.Input(
prompt=prompt,
credentials=input_data.credentials,
@@ -1197,7 +1011,7 @@ class AITextSummarizerBlock(AIBlockBase):
return llm_response["summary"]
async def _combine_summaries(
def _combine_summaries(
self, summaries: list[str], input_data: Input, credentials: APIKeyCredentials
) -> str:
combined_text = "\n\n".join(summaries)
@@ -1205,7 +1019,7 @@ class AITextSummarizerBlock(AIBlockBase):
if len(combined_text.split()) <= input_data.max_tokens:
prompt = f"Provide a final summary of the following section summaries in a {input_data.style} form, focus your summary on the topic of `{input_data.focus}` if present:\n\n ```{combined_text}```\n\n Just respond with the final_summary in the format specified."
llm_response = await self.llm_call(
llm_response = self.llm_call(
AIStructuredResponseGeneratorBlock.Input(
prompt=prompt,
credentials=input_data.credentials,
@@ -1220,8 +1034,7 @@ class AITextSummarizerBlock(AIBlockBase):
return llm_response["final_summary"]
else:
# If combined summaries are still too long, recursively summarize
block = AITextSummarizerBlock()
return await block.run_once(
return self._run(
AITextSummarizerBlock.Input(
text=combined_text,
credentials=input_data.credentials,
@@ -1229,9 +1042,10 @@ class AITextSummarizerBlock(AIBlockBase):
max_tokens=input_data.max_tokens,
chunk_overlap=input_data.chunk_overlap,
),
"summary",
credentials=credentials,
)
).send(None)[
1
] # Get the first yielded value
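For reference, the synchronous fallback above pulls the first yielded (name, value) pair out of the _run generator with .send(None)[1]. A tiny standalone illustration of that idiom (names are made up):

def _run_example():
    # Yields (output_name, output_value) pairs, like the block's _run generator.
    yield "summary", "first summary"
    yield "prompt", ["..."]

# send(None) on a fresh generator advances it to the first yield and returns that pair;
# indexing [1] keeps only the value.
first_value = _run_example().send(None)[1]
assert first_value == "first summary"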
class AIConversationBlock(AIBlockBase):
@@ -1302,20 +1116,20 @@ class AIConversationBlock(AIBlockBase):
},
)
async def llm_call(
def llm_call(
self,
input_data: AIStructuredResponseGeneratorBlock.Input,
credentials: APIKeyCredentials,
) -> dict:
) -> str:
block = AIStructuredResponseGeneratorBlock()
response = await block.run_once(input_data, "response", credentials=credentials)
response = block.run_once(input_data, "response", credentials=credentials)
self.merge_llm_stats(block)
return response
return response["response"]
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
response = await self.llm_call(
response = self.llm_call(
AIStructuredResponseGeneratorBlock.Input(
prompt=input_data.prompt,
credentials=input_data.credentials,
@@ -1327,6 +1141,7 @@ class AIConversationBlock(AIBlockBase):
),
credentials=credentials,
)
yield "response", response
yield "prompt", self.prompt
@@ -1420,15 +1235,13 @@ class AIListGeneratorBlock(AIBlockBase):
},
)
async def llm_call(
def llm_call(
self,
input_data: AIStructuredResponseGeneratorBlock.Input,
credentials: APIKeyCredentials,
) -> dict[str, str]:
llm_block = AIStructuredResponseGeneratorBlock()
response = await llm_block.run_once(
input_data, "response", credentials=credentials
)
response = llm_block.run_once(input_data, "response", credentials=credentials)
self.merge_llm_stats(llm_block)
return response
@@ -1451,7 +1264,7 @@ class AIListGeneratorBlock(AIBlockBase):
logger.error(f"Failed to convert string to list: {e}")
raise ValueError("Invalid list format. Could not convert to list.")
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
logger.debug(f"Starting AIListGeneratorBlock.run with input data: {input_data}")
@@ -1517,7 +1330,7 @@ class AIListGeneratorBlock(AIBlockBase):
for attempt in range(input_data.max_retries):
try:
logger.debug("Calling LLM")
llm_response = await self.llm_call(
llm_response = self.llm_call(
AIStructuredResponseGeneratorBlock.Input(
sys_prompt=sys_prompt,
prompt=prompt,


@@ -52,7 +52,7 @@ class CalculatorBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
operation = input_data.operation
a = input_data.a
b = input_data.b
@@ -107,7 +107,7 @@ class CountItemsBlock(Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
collection = input_data.collection
try:


@@ -39,7 +39,7 @@ class MediaDurationBlock(Block):
output_schema=MediaDurationBlock.Output,
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -47,7 +47,7 @@ class MediaDurationBlock(Block):
**kwargs,
) -> BlockOutput:
# 1) Store the input media locally
local_media_path = await store_media_file(
local_media_path = store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.media_in,
return_content=False,
@@ -105,7 +105,7 @@ class LoopVideoBlock(Block):
output_schema=LoopVideoBlock.Output,
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -114,7 +114,7 @@ class LoopVideoBlock(Block):
**kwargs,
) -> BlockOutput:
# 1) Store the input video locally
local_video_path = await store_media_file(
local_video_path = store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.video_in,
return_content=False,
@@ -146,7 +146,7 @@ class LoopVideoBlock(Block):
looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")
# Return as data URI
video_out = await store_media_file(
video_out = store_media_file(
graph_exec_id=graph_exec_id,
file=output_filename,
return_content=input_data.output_return_type == "data_uri",
@@ -194,7 +194,7 @@ class AddAudioToVideoBlock(Block):
output_schema=AddAudioToVideoBlock.Output,
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -203,12 +203,12 @@ class AddAudioToVideoBlock(Block):
**kwargs,
) -> BlockOutput:
# 1) Store the inputs locally
local_video_path = await store_media_file(
local_video_path = store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.video_in,
return_content=False,
)
local_audio_path = await store_media_file(
local_audio_path = store_media_file(
graph_exec_id=graph_exec_id,
file=input_data.audio_in,
return_content=False,
@@ -236,7 +236,7 @@ class AddAudioToVideoBlock(Block):
final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")
# 5) Return either path or data URI
video_out = await store_media_file(
video_out = store_media_file(
graph_exec_id=graph_exec_id,
file=output_filename,
return_content=input_data.output_return_type == "data_uri",


@@ -13,7 +13,7 @@ from backend.data.model import (
SecretField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from backend.util.request import requests
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
@@ -130,7 +130,7 @@ class PublishToMediumBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def create_post(
def create_post(
self,
api_key: SecretStr,
author_id,
@@ -160,17 +160,18 @@ class PublishToMediumBlock(Block):
"notifyFollowers": notify_followers,
}
response = await Requests().post(
response = requests.post(
f"https://api.medium.com/v1/users/{author_id}/posts",
headers=headers,
json=data,
)
return response.json()
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
response = await self.create_post(
response = self.create_post(
credentials.api_key,
input_data.author_id.get_secret_value(),
input_data.title,


@@ -13,7 +13,7 @@ from backend.data.model import (
from backend.integrations.providers import ProviderName
TEST_CREDENTIALS = APIKeyCredentials(
id="8cc8b2c5-d3e4-4b1c-84ad-e1e9fe2a0122",
id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
provider="mem0",
api_key=SecretStr("mock-mem0-api-key"),
title="Mock Mem0 API key",
@@ -67,19 +67,17 @@ class AddMemoryBlock(Block, Mem0Base):
metadata: dict[str, Any] = SchemaField(
description="Optional metadata for the memory", default_factory=dict
)
limit_memory_to_run: bool = SchemaField(
description="Limit the memory to the run", default=False
)
limit_memory_to_agent: bool = SchemaField(
description="Limit the memory to the agent", default=True
description="Limit the memory to the agent", default=False
)
class Output(BlockSchema):
action: str = SchemaField(description="Action of the operation")
memory: str = SchemaField(description="Memory created")
results: list[dict[str, str]] = SchemaField(
description="List of all results from the operation"
)
error: str = SchemaField(description="Error message if operation fails")
def __init__(self):
@@ -106,19 +104,12 @@ class AddMemoryBlock(Block, Mem0Base):
"credentials": TEST_CREDENTIALS_INPUT,
},
],
test_output=[
("results", [{"event": "CREATED", "memory": "test memory"}]),
("action", "CREATED"),
("memory", "test memory"),
("results", [{"event": "CREATED", "memory": "test memory"}]),
("action", "CREATED"),
("memory", "test memory"),
],
test_output=[("action", "NO_CHANGE"), ("action", "NO_CHANGE")],
test_credentials=TEST_CREDENTIALS,
test_mock={"_get_client": lambda credentials: MockMemoryClient()},
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -126,17 +117,15 @@ class AddMemoryBlock(Block, Mem0Base):
user_id: str,
graph_id: str,
graph_exec_id: str,
**kwargs,
**kwargs
) -> BlockOutput:
try:
client = self._get_client(credentials)
if isinstance(input_data.content, Conversation):
messages = input_data.content.messages
elif isinstance(input_data.content, Content):
messages = [{"role": "user", "content": input_data.content.content}]
else:
messages = [{"role": "user", "content": str(input_data.content)}]
messages = [{"role": "user", "content": input_data.content}]
params = {
"user_id": user_id,
@@ -155,18 +144,15 @@ class AddMemoryBlock(Block, Mem0Base):
**params,
)
results = result.get("results", [])
yield "results", results
if len(results) > 0:
for result in results:
if len(result.get("results", [])) > 0:
for result in result.get("results", []):
yield "action", result["event"]
yield "memory", result["memory"]
else:
yield "action", "NO_CHANGE"
except Exception as e:
yield "error", str(e)
yield "error", str(object=e)
class SearchMemoryBlock(Block, Mem0Base):
@@ -190,10 +176,6 @@ class SearchMemoryBlock(Block, Mem0Base):
default_factory=list,
advanced=True,
)
metadata_filter: Optional[dict[str, Any]] = SchemaField(
description="Optional metadata filters to apply",
default=None,
)
limit_memory_to_run: bool = SchemaField(
description="Limit the memory to the run", default=False
)
@@ -224,7 +206,7 @@ class SearchMemoryBlock(Block, Mem0Base):
test_mock={"_get_client": lambda credentials: MockMemoryClient()},
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -232,7 +214,7 @@ class SearchMemoryBlock(Block, Mem0Base):
user_id: str,
graph_id: str,
graph_exec_id: str,
**kwargs,
**kwargs
) -> BlockOutput:
try:
client = self._get_client(credentials)
@@ -251,8 +233,6 @@ class SearchMemoryBlock(Block, Mem0Base):
filters["AND"].append({"run_id": graph_exec_id})
if input_data.limit_memory_to_agent:
filters["AND"].append({"agent_id": graph_id})
if input_data.metadata_filter:
filters["AND"].append({"metadata": input_data.metadata_filter})
result: list[dict[str, Any]] = client.search(
input_data.query, version="v2", filters=filters
@@ -278,15 +258,11 @@ class GetAllMemoriesBlock(Block, Mem0Base):
categories: Optional[list[str]] = SchemaField(
description="Filter by categories", default=None
)
metadata_filter: Optional[dict[str, Any]] = SchemaField(
description="Optional metadata filters to apply",
default=None,
)
limit_memory_to_run: bool = SchemaField(
description="Limit the memory to the run", default=False
)
limit_memory_to_agent: bool = SchemaField(
description="Limit the memory to the agent", default=True
description="Limit the memory to the agent", default=False
)
class Output(BlockSchema):
@@ -296,11 +272,11 @@ class GetAllMemoriesBlock(Block, Mem0Base):
def __init__(self):
super().__init__(
id="45aee5bf-4767-45d1-a28b-e01c5aae9fc1",
description="Retrieve all memories from Mem0 with optional conversation filtering",
description="Retrieve all memories from Mem0 with pagination",
input_schema=GetAllMemoriesBlock.Input,
output_schema=GetAllMemoriesBlock.Output,
test_input={
"metadata_filter": {"type": "test"},
"user_id": "test_user",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
@@ -310,7 +286,7 @@ class GetAllMemoriesBlock(Block, Mem0Base):
test_mock={"_get_client": lambda credentials: MockMemoryClient()},
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -318,7 +294,7 @@ class GetAllMemoriesBlock(Block, Mem0Base):
user_id: str,
graph_id: str,
graph_exec_id: str,
**kwargs,
**kwargs
) -> BlockOutput:
try:
client = self._get_client(credentials)
@@ -336,8 +312,6 @@ class GetAllMemoriesBlock(Block, Mem0Base):
filters["AND"].append(
{"categories": {"contains": input_data.categories}}
)
if input_data.metadata_filter:
filters["AND"].append({"metadata": input_data.metadata_filter})
memories: list[dict[str, Any]] = client.get_all(
filters=filters,
@@ -350,116 +324,14 @@ class GetAllMemoriesBlock(Block, Mem0Base):
yield "error", str(e)
class GetLatestMemoryBlock(Block, Mem0Base):
"""Block for retrieving the latest memory from Mem0"""
class Input(BlockSchema):
credentials: CredentialsMetaInput[
Literal[ProviderName.MEM0], Literal["api_key"]
] = CredentialsField(description="Mem0 API key credentials")
trigger: bool = SchemaField(
description="An unused field that is used to trigger the block when you have no other inputs",
default=False,
advanced=False,
)
categories: Optional[list[str]] = SchemaField(
description="Filter by categories", default=None
)
conversation_id: Optional[str] = SchemaField(
description="Optional conversation ID to retrieve the latest memory from (uses run_id)",
default=None,
)
metadata_filter: Optional[dict[str, Any]] = SchemaField(
description="Optional metadata filters to apply",
default=None,
)
limit_memory_to_run: bool = SchemaField(
description="Limit the memory to the run", default=False
)
limit_memory_to_agent: bool = SchemaField(
description="Limit the memory to the agent", default=True
)
class Output(BlockSchema):
memory: Optional[dict[str, Any]] = SchemaField(
description="Latest memory if found"
)
found: bool = SchemaField(description="Whether a memory was found")
error: str = SchemaField(description="Error message if operation fails")
def __init__(self):
super().__init__(
id="0f9d81b5-a145-4c23-b87f-01d6bf37b677",
description="Retrieve the latest memory from Mem0 with optional key filtering",
input_schema=GetLatestMemoryBlock.Input,
output_schema=GetLatestMemoryBlock.Output,
test_input={
"metadata_filter": {"type": "test"},
"credentials": TEST_CREDENTIALS_INPUT,
},
test_output=[
("memory", {"id": "test-memory", "content": "test content"}),
("found", True),
],
test_credentials=TEST_CREDENTIALS,
test_mock={"_get_client": lambda credentials: MockMemoryClient()},
)
async def run(
self,
input_data: Input,
*,
credentials: APIKeyCredentials,
user_id: str,
graph_id: str,
graph_exec_id: str,
**kwargs,
) -> BlockOutput:
try:
client = self._get_client(credentials)
filters: Filter = {
"AND": [
{"user_id": user_id},
]
}
if input_data.limit_memory_to_run:
filters["AND"].append({"run_id": graph_exec_id})
if input_data.limit_memory_to_agent:
filters["AND"].append({"agent_id": graph_id})
if input_data.categories:
filters["AND"].append(
{"categories": {"contains": input_data.categories}}
)
if input_data.metadata_filter:
filters["AND"].append({"metadata": input_data.metadata_filter})
memories: list[dict[str, Any]] = client.get_all(
filters=filters,
version="v2",
)
if memories:
# Return the latest memory (first in the list as they're sorted by recency)
latest_memory = memories[0]
yield "memory", latest_memory
yield "found", True
else:
yield "memory", None
yield "found", False
except Exception as e:
yield "error", str(e)
# Mock client for testing
class MockMemoryClient:
"""Mock Mem0 client for testing"""
def add(self, *args, **kwargs):
return {"results": [{"event": "CREATED", "memory": "test memory"}]}
return {"memory_id": "test-memory-id", "status": "success"}
def search(self, *args, **kwargs) -> list[dict[str, Any]]:
def search(self, *args, **kwargs) -> list[dict[str, str]]:
return [{"id": "test-memory", "content": "test content"}]
def get_all(self, *args, **kwargs) -> list[dict[str, str]]:


@@ -5,7 +5,7 @@ from backend.blocks.nvidia._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.request import requests
from backend.util.type import MediaFileType
@@ -40,7 +40,7 @@ class NvidiaDeepfakeDetectBlock(Block):
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
@@ -59,7 +59,8 @@ class NvidiaDeepfakeDetectBlock(Block):
}
try:
response = await Requests().post(url, headers=headers, json=payload)
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
result = data.get("data", [{}])[0]


@@ -1,155 +0,0 @@
import logging
from typing import Any, Literal
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_database_manager_client():
from backend.executor import DatabaseManagerAsyncClient
from backend.util.service import get_service_client
return get_service_client(DatabaseManagerAsyncClient, health_check=False)
StorageScope = Literal["within_agent", "across_agents"]
def get_storage_key(key: str, scope: StorageScope, graph_id: str) -> str:
"""Generate the storage key based on scope"""
if scope == "across_agents":
return f"global#{key}"
else:
return f"agent#{graph_id}#{key}"
class PersistInformationBlock(Block):
"""Block for persisting key-value data for the current user with configurable scope"""
class Input(BlockSchema):
key: str = SchemaField(description="Key to store the information under")
value: Any = SchemaField(description="Value to store")
scope: StorageScope = SchemaField(
description="Scope of persistence: within_agent (shared across all runs of this agent) or across_agents (shared across all agents for this user)",
default="within_agent",
)
class Output(BlockSchema):
value: Any = SchemaField(description="Value that was stored")
def __init__(self):
super().__init__(
id="1d055e55-a2b9-4547-8311-907d05b0304d",
description="Persist key-value information for the current user",
categories={BlockCategory.DATA},
input_schema=PersistInformationBlock.Input,
output_schema=PersistInformationBlock.Output,
test_input={
"key": "user_preference",
"value": {"theme": "dark", "language": "en"},
"scope": "within_agent",
},
test_output=[
("value", {"theme": "dark", "language": "en"}),
],
test_mock={
"_store_data": lambda *args, **kwargs: {
"theme": "dark",
"language": "en",
}
},
)
async def run(
self,
input_data: Input,
*,
user_id: str,
graph_id: str,
node_exec_id: str,
**kwargs,
) -> BlockOutput:
# Determine the storage key based on scope
storage_key = get_storage_key(input_data.key, input_data.scope, graph_id)
# Store the data
yield "value", await self._store_data(
user_id=user_id,
node_exec_id=node_exec_id,
key=storage_key,
data=input_data.value,
)
async def _store_data(
self, user_id: str, node_exec_id: str, key: str, data: Any
) -> Any | None:
return await get_database_manager_client().set_execution_kv_data(
user_id=user_id,
node_exec_id=node_exec_id,
key=key,
data=data,
)
class RetrieveInformationBlock(Block):
"""Block for retrieving key-value data for the current user with configurable scope"""
class Input(BlockSchema):
key: str = SchemaField(description="Key to retrieve the information for")
scope: StorageScope = SchemaField(
description="Scope of persistence: within_agent (shared across all runs of this agent) or across_agents (shared across all agents for this user)",
default="within_agent",
)
default_value: Any = SchemaField(
description="Default value to return if key is not found", default=None
)
class Output(BlockSchema):
value: Any = SchemaField(description="Retrieved value or default value")
def __init__(self):
super().__init__(
id="d8710fc9-6e29-481e-a7d5-165eb16f8471",
description="Retrieve key-value information for the current user",
categories={BlockCategory.DATA},
input_schema=RetrieveInformationBlock.Input,
output_schema=RetrieveInformationBlock.Output,
test_input={
"key": "user_preference",
"scope": "within_agent",
"default_value": {"theme": "light", "language": "en"},
},
test_output=[
("value", {"theme": "light", "language": "en"}),
],
test_mock={"_retrieve_data": lambda *args, **kwargs: None},
static_output=True,
)
async def run(
self, input_data: Input, *, user_id: str, graph_id: str, **kwargs
) -> BlockOutput:
# Determine the storage key based on scope
storage_key = get_storage_key(input_data.key, input_data.scope, graph_id)
# Retrieve the data
stored_value = await self._retrieve_data(
user_id=user_id,
key=storage_key,
)
if stored_value is not None:
yield "value", stored_value
else:
yield "value", input_data.default_value
async def _retrieve_data(self, user_id: str, key: str) -> Any | None:
return await get_database_manager_client().get_execution_kv_data(
user_id=user_id,
key=key,
)


@@ -56,7 +56,7 @@ class PineconeInitBlock(Block):
output_schema=PineconeInitBlock.Output,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
pc = Pinecone(api_key=credentials.api_key.get_secret_value())
@@ -117,7 +117,7 @@ class PineconeQueryBlock(Block):
output_schema=PineconeQueryBlock.Output,
)
async def run(
def run(
self,
input_data: Input,
*,
@@ -195,7 +195,7 @@ class PineconeInsertBlock(Block):
output_schema=PineconeInsertBlock.Output,
)
async def run(
def run(
self,
input_data: Input,
*,


@@ -96,7 +96,6 @@ class GetRedditPostsBlock(Block):
class Output(BlockSchema):
post: RedditPost = SchemaField(description="Reddit post")
posts: list[RedditPost] = SchemaField(description="List of all Reddit posts")
def __init__(self):
super().__init__(
@@ -129,23 +128,6 @@ class GetRedditPostsBlock(Block):
id="id2", subreddit="subreddit", title="title2", body="body2"
),
),
(
"posts",
[
RedditPost(
id="id1",
subreddit="subreddit",
title="title1",
body="body1",
),
RedditPost(
id="id2",
subreddit="subreddit",
title="title2",
body="body2",
),
],
),
],
test_mock={
"get_posts": lambda input_data, credentials: [
@@ -164,11 +146,10 @@ class GetRedditPostsBlock(Block):
subreddit = client.subreddit(input_data.subreddit)
return subreddit.new(limit=input_data.post_limit or 10)
async def run(
def run(
self, input_data: Input, *, credentials: RedditCredentials, **kwargs
) -> BlockOutput:
current_time = datetime.now(tz=timezone.utc)
all_posts = []
for post in self.get_posts(input_data=input_data, credentials=credentials):
if input_data.last_minutes:
post_datetime = datetime.fromtimestamp(
@@ -181,16 +162,12 @@ class GetRedditPostsBlock(Block):
if input_data.last_post and post.id == input_data.last_post:
break
reddit_post = RedditPost(
yield "post", RedditPost(
id=post.id,
subreddit=input_data.subreddit,
title=post.title,
body=post.selftext,
)
all_posts.append(reddit_post)
yield "post", reddit_post
yield "posts", all_posts
class PostRedditCommentBlock(Block):
@@ -230,7 +207,7 @@ class PostRedditCommentBlock(Block):
raise ValueError("Failed to post comment.")
return new_comment.id
async def run(
def run(
self, input_data: Input, *, credentials: RedditCredentials, **kwargs
) -> BlockOutput:
yield "comment_id", self.reply_post(credentials, input_data.data)


@@ -2,8 +2,8 @@ import os
from enum import Enum
from typing import Literal
import replicate
from pydantic import SecretStr
from replicate.client import Client as ReplicateClient
from replicate.helpers import FileOutput
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -159,7 +159,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
# If the seed is not provided, generate a random seed
@@ -168,7 +168,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
seed = int.from_bytes(os.urandom(4), "big")
# Run the model using the provided inputs
result = await self.run_model(
result = self.run_model(
api_key=credentials.api_key,
model_name=input_data.replicate_model_name.api_name,
prompt=input_data.prompt,
@@ -183,7 +183,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
)
yield "result", result
async def run_model(
def run_model(
self,
api_key: SecretStr,
model_name,
@@ -198,10 +198,10 @@ class ReplicateFluxAdvancedModelBlock(Block):
safety_tolerance,
):
# Initialize Replicate client with the API key
client = ReplicateClient(api_token=api_key.get_secret_value())
client = replicate.Client(api_token=api_key.get_secret_value())
# Run the model with additional parameters
output: FileOutput | list[FileOutput] = await client.async_run( # type: ignore This is because they changed the return type, and didn't update the type hint! It should be overloaded depending on the value of `use_file_output` to `FileOutput | list[FileOutput]` but it's `Any | Iterator[Any]`
output: FileOutput | list[FileOutput] = client.run( # type: ignore This is because they changed the return type, and didn't update the type hint! It should be overloaded depending on the value of `use_file_output` to `FileOutput | list[FileOutput]` but it's `Any | Iterator[Any]`
f"{model_name}",
input={
"prompt": prompt,


@@ -1,4 +1,4 @@
import asyncio
import time
from datetime import datetime, timedelta, timezone
from typing import Any
@@ -40,7 +40,6 @@ class ReadRSSFeedBlock(Block):
class Output(BlockSchema):
entry: RSSEntry = SchemaField(description="The RSS item")
entries: list[RSSEntry] = SchemaField(description="List of all RSS entries")
def __init__(self):
super().__init__(
@@ -67,21 +66,6 @@ class ReadRSSFeedBlock(Block):
categories=["Technology", "News"],
),
),
(
"entries",
[
RSSEntry(
title="Example RSS Item",
link="https://example.com/article",
description="This is an example RSS item description.",
pub_date=datetime(
2023, 6, 23, 12, 30, 0, tzinfo=timezone.utc
),
author="John Doe",
categories=["Technology", "News"],
),
],
),
],
test_mock={
"parse_feed": lambda *args, **kwargs: {
@@ -103,7 +87,7 @@ class ReadRSSFeedBlock(Block):
def parse_feed(url: str) -> dict[str, Any]:
return feedparser.parse(url) # type: ignore
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
keep_going = True
start_time = datetime.now(timezone.utc) - timedelta(
minutes=input_data.time_period
@@ -112,22 +96,21 @@ class ReadRSSFeedBlock(Block):
keep_going = input_data.run_continuously
feed = self.parse_feed(input_data.rss_url)
all_entries = []
for entry in feed["entries"]:
pub_date = datetime(*entry["published_parsed"][:6], tzinfo=timezone.utc)
if pub_date > start_time:
rss_entry = RSSEntry(
title=entry["title"],
link=entry["link"],
description=entry.get("summary", ""),
pub_date=pub_date,
author=entry.get("author", ""),
categories=[tag["term"] for tag in entry.get("tags", [])],
yield (
"entry",
RSSEntry(
title=entry["title"],
link=entry["link"],
description=entry.get("summary", ""),
pub_date=pub_date,
author=entry.get("author", ""),
categories=[tag["term"] for tag in entry.get("tags", [])],
),
)
all_entries.append(rss_entry)
yield "entry", rss_entry
yield "entries", all_entries
await asyncio.sleep(input_data.polling_rate)
time.sleep(input_data.polling_rate)
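For reference, the loop above turns feedparser's published_parsed struct into a timezone-aware datetime before comparing against the cutoff. A standalone sketch of the same parsing (the feed URL is a placeholder, and entries without a publication date are skipped here):

from datetime import datetime, timedelta, timezone

import feedparser  # same parser used by ReadRSSFeedBlock

def titles_newer_than(feed_url: str, cutoff: datetime) -> list[str]:
    feed = feedparser.parse(feed_url)
    titles = []
    for entry in feed["entries"]:
        if not entry.get("published_parsed"):
            continue  # some feeds omit publication dates
        # published_parsed is a time.struct_time; its first six fields feed datetime().
        pub_date = datetime(*entry["published_parsed"][:6], tzinfo=timezone.utc)
        if pub_date > cutoff:
            titles.append(entry["title"])
    return titles

# titles_newer_than("https://example.com/rss.xml",
#                   datetime.now(timezone.utc) - timedelta(minutes=60))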


@@ -93,7 +93,7 @@ class DataSamplingBlock(Block):
)
self.accumulated_data = []
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
if input_data.accumulate:
if isinstance(input_data.data, dict):
self.accumulated_data.append(input_data.data)


@@ -105,7 +105,7 @@ class ScreenshotWebPageBlock(Block):
)
@staticmethod
async def take_screenshot(
def take_screenshot(
credentials: APIKeyCredentials,
graph_exec_id: str,
url: str,
@@ -121,10 +121,11 @@ class ScreenshotWebPageBlock(Block):
"""
Takes a screenshot using the ScreenshotOne API
"""
api = Requests()
api = Requests(trusted_origins=["https://api.screenshotone.com"])
# Build API parameters
# Build API URL with parameters
params = {
"access_key": credentials.api_key.get_secret_value(),
"url": url,
"viewport_width": viewport_width,
"viewport_height": viewport_height,
@@ -136,28 +137,19 @@ class ScreenshotWebPageBlock(Block):
"cache": str(cache).lower(),
}
# Make the API request
# Use header-based authentication instead of query parameter
headers = {
"X-Access-Key": credentials.api_key.get_secret_value(),
}
response = await api.get(
"https://api.screenshotone.com/take", params=params, headers=headers
)
content = response.content
response = api.get("https://api.screenshotone.com/take", params=params)
return {
"image": await store_media_file(
"image": store_media_file(
graph_exec_id=graph_exec_id,
file=MediaFileType(
f"data:image/{format.value};base64,{b64encode(content).decode('utf-8')}"
f"data:image/{format.value};base64,{b64encode(response.content).decode('utf-8')}"
),
return_content=True,
)
}
async def run(
def run(
self,
input_data: Input,
*,
@@ -166,7 +158,7 @@ class ScreenshotWebPageBlock(Block):
**kwargs,
) -> BlockOutput:
try:
screenshot_data = await self.take_screenshot(
screenshot_data = self.take_screenshot(
credentials=credentials,
graph_exec_id=graph_exec_id,
url=input_data.url,


@@ -36,10 +36,10 @@ class GetWikipediaSummaryBlock(Block, GetRequest):
test_mock={"get_request": lambda url, json: {"extract": "summary content"}},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
topic = input_data.topic
url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
response = await self.get_request(url, json=True)
response = self.get_request(url, json=True)
if "extract" not in response:
raise RuntimeError(f"Unable to parse Wikipedia response: {response}")
yield "summary", response["extract"]
@@ -113,14 +113,14 @@ class GetWeatherInformationBlock(Block, GetRequest):
test_credentials=TEST_CREDENTIALS,
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
units = "metric" if input_data.use_celsius else "imperial"
api_key = credentials.api_key
location = input_data.location
url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
weather_data = await self.get_request(url, json=True)
weather_data = self.get_request(url, json=True)
if "main" in weather_data and "weather" in weather_data:
yield "temperature", str(weather_data["main"]["temp"])


@@ -1,7 +1,7 @@
from typing import Any, Dict
from backend.data.block import Block
from backend.util.request import Requests
from backend.util.request import requests
from ._api import Color, CustomerDetails, OrderItem, Profile
@@ -14,25 +14,20 @@ class Slant3DBlockBase(Block):
def _get_headers(self, api_key: str) -> Dict[str, str]:
return {"api-key": api_key, "Content-Type": "application/json"}
async def _make_request(
self, method: str, endpoint: str, api_key: str, **kwargs
) -> Dict:
def _make_request(self, method: str, endpoint: str, api_key: str, **kwargs) -> Dict:
url = f"{self.BASE_URL}/{endpoint}"
response = await Requests().request(
response = requests.request(
method=method, url=url, headers=self._get_headers(api_key), **kwargs
)
resp = response.json()
if not response.ok:
error_msg = resp.get("error", "Unknown error")
error_msg = response.json().get("error", "Unknown error")
raise RuntimeError(f"API request failed: {error_msg}")
return resp
return response.json()
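For reference, the awaited variant of _make_request above checks response.ok before returning the parsed body. A condensed sketch using only the Requests wrapper calls visible in this diff (the wrapper's full API is not shown here, so treat this as an approximation):

from backend.util.request import Requests  # internal wrapper, as imported in this diff

async def make_request(method: str, endpoint: str, api_key: str, base_url: str, **kwargs) -> dict:
    # base_url is left as a parameter because the real BASE_URL is not shown in this diff.
    headers = {"api-key": api_key, "Content-Type": "application/json"}
    response = await Requests().request(
        method=method, url=f"{base_url}/{endpoint}", headers=headers, **kwargs
    )
    resp = response.json()
    if not response.ok:
        # Surface the API's error message when the call fails.
        raise RuntimeError(f"API request failed: {resp.get('error', 'Unknown error')}")
    return resp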
async def _check_valid_color(
self, profile: Profile, color: Color, api_key: str
) -> str:
response = await self._make_request(
def _check_valid_color(self, profile: Profile, color: Color, api_key: str) -> str:
response = self._make_request(
"GET",
"filament",
api_key,
@@ -53,12 +48,10 @@ Valid colors for {profile.value} are:
)
return color_tag
async def _convert_to_color(
self, profile: Profile, color: Color, api_key: str
) -> str:
return await self._check_valid_color(profile, color, api_key)
def _convert_to_color(self, profile: Profile, color: Color, api_key: str) -> str:
return self._check_valid_color(profile, color, api_key)
async def _format_order_data(
def _format_order_data(
self,
customer: CustomerDetails,
order_number: str,
@@ -68,7 +61,6 @@ Valid colors for {profile.value} are:
"""Helper function to format order data for API requests"""
orders = []
for item in items:
color_tag = await self._convert_to_color(item.profile, item.color, api_key)
order_data = {
"email": customer.email,
"phone": customer.phone,
@@ -93,7 +85,9 @@ Valid colors for {profile.value} are:
"order_quantity": item.quantity,
"order_image_url": "",
"order_sku": "NOT_USED",
"order_item_color": color_tag,
"order_item_color": self._convert_to_color(
item.profile, item.color, api_key
),
"profile": item.profile.value,
}
orders.append(order_data)


@@ -72,11 +72,11 @@ class Slant3DFilamentBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
result = self._make_request(
"GET", "filament", credentials.api_key.get_secret_value()
)
yield "filaments", result["filaments"]


@@ -1,6 +1,8 @@
import uuid
from typing import List
import requests as baserequests
from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from backend.util import settings
@@ -74,17 +76,17 @@ class Slant3DCreateOrderBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
order_data = await self._format_order_data(
order_data = self._format_order_data(
input_data.customer,
input_data.order_number,
input_data.items,
credentials.api_key.get_secret_value(),
)
result = await self._make_request(
result = self._make_request(
"POST", "order", credentials.api_key.get_secret_value(), json=order_data
)
yield "order_id", result["orderId"]
@@ -160,24 +162,28 @@ class Slant3DEstimateOrderBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
order_data = await self._format_order_data(
order_data = self._format_order_data(
input_data.customer,
input_data.order_number,
input_data.items,
credentials.api_key.get_secret_value(),
)
result = await self._make_request(
"POST",
"order/estimate",
credentials.api_key.get_secret_value(),
json=order_data,
)
yield "total_price", result["totalPrice"]
yield "shipping_cost", result["shippingCost"]
yield "printing_cost", result["printingCost"]
try:
result = self._make_request(
"POST",
"order/estimate",
credentials.api_key.get_secret_value(),
json=order_data,
)
yield "total_price", result["totalPrice"]
yield "shipping_cost", result["shippingCost"]
yield "printing_cost", result["printingCost"]
except baserequests.HTTPError as e:
yield "error", str(f"Error estimating order: {e} {e.response.text}")
raise
class Slant3DEstimateShippingBlock(Slant3DBlockBase):
@@ -240,17 +246,17 @@ class Slant3DEstimateShippingBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
order_data = await self._format_order_data(
order_data = self._format_order_data(
input_data.customer,
input_data.order_number,
input_data.items,
credentials.api_key.get_secret_value(),
)
result = await self._make_request(
result = self._make_request(
"POST",
"order/estimateShipping",
credentials.api_key.get_secret_value(),
@@ -306,11 +312,11 @@ class Slant3DGetOrdersBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
result = self._make_request(
"GET", "order", credentials.api_key.get_secret_value()
)
yield "orders", [str(order["orderId"]) for order in result["ordersData"]]
@@ -353,11 +359,11 @@ class Slant3DTrackingBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
result = self._make_request(
"GET",
f"order/{input_data.order_id}/get-tracking",
credentials.api_key.get_secret_value(),
@@ -397,11 +403,11 @@ class Slant3DCancelOrderBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
result = self._make_request(
"DELETE",
f"order/{input_data.order_id}",
credentials.api_key.get_secret_value(),


@@ -44,11 +44,11 @@ class Slant3DSlicerBlock(Slant3DBlockBase):
},
)
async def run(
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
result = self._make_request(
"POST",
"slicer",
credentials.api_key.get_secret_value(),


@@ -37,7 +37,7 @@ class Slant3DTriggerBase:
description="Error message if payload processing failed"
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "payload", input_data.payload
yield "order_id", input_data.payload["orderId"]
@@ -117,9 +117,8 @@ class Slant3DOrderWebhookBlock(Slant3DTriggerBase, Block):
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
async for name, value in super().run(input_data, **kwargs):
yield name, value
def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
yield from super().run(input_data, **kwargs)
# Extract and normalize values from the payload
yield "status", input_data.payload["status"]


@@ -26,10 +26,10 @@ logger = logging.getLogger(__name__)
@thread_cached
def get_database_manager_client():
from backend.executor import DatabaseManagerAsyncClient
from backend.executor import DatabaseManager
from backend.util.service import get_service_client
return get_service_client(DatabaseManagerAsyncClient, health_check=False)
return get_service_client(DatabaseManager)
def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
@@ -85,7 +85,7 @@ def _get_tool_responses(entry: dict[str, Any]) -> list[str]:
return tool_call_ids
def _create_tool_response(call_id: str, output: Any) -> dict[str, Any]:
def _create_tool_response(call_id: str, output: dict[str, Any]) -> dict[str, Any]:
"""
Create a tool response message for either OpenAI or Anthropics,
based on the tool_id format.
@@ -142,12 +142,6 @@ class SmartDecisionMakerBlock(Block):
advanced=False,
)
credentials: llm.AICredentials = llm.AICredentialsField()
multiple_tool_calls: bool = SchemaField(
title="Multiple Tool Calls",
default=False,
description="Whether to allow multiple tool calls in a single response.",
advanced=True,
)
sys_prompt: str = SchemaField(
title="System Prompt",
default="Thinking carefully step by step decide which function to call. "
@@ -156,7 +150,7 @@ class SmartDecisionMakerBlock(Block):
"matching the required jsonschema signature, no missing argument is allowed. "
"If you have already completed the task objective, you can end the task "
"by providing the end result of your work as a finish message. "
"Function parameters that has no default value and not optional typed has to be provided. ",
"Only provide EXACTLY one function call, multiple tool calls is strictly prohibited.",
description="The system prompt to provide additional context to the model.",
)
conversation_history: list[dict] = SchemaField(
@@ -212,15 +206,6 @@ class SmartDecisionMakerBlock(Block):
"link like the output of `StoreValue` or `AgentInput` block"
)
# Check that both conversation_history and last_tool_output are connected together
if any(link.sink_name == "conversation_history" for link in links) != any(
link.sink_name == "last_tool_output" for link in links
):
raise ValueError(
"Last Tool Output is needed when Conversation History is used, "
"and vice versa. Please connect both inputs together."
)
return missing_links
@classmethod
@@ -231,15 +216,8 @@ class SmartDecisionMakerBlock(Block):
conversation_history = data.get("conversation_history", [])
pending_tool_calls = get_pending_tool_calls(conversation_history)
last_tool_output = data.get("last_tool_output")
# Tool call is pending, wait for the tool output to be provided.
if last_tool_output is None and pending_tool_calls:
if not last_tool_output and pending_tool_calls:
return {"last_tool_output"}
# No tool call is pending, wait for the conversation history to be updated.
if last_tool_output is not None and not pending_tool_calls:
return {"conversation_history"}
return set()
class Output(BlockSchema):
@@ -269,11 +247,7 @@ class SmartDecisionMakerBlock(Block):
)
@staticmethod
def cleanup(s: str):
return re.sub(r"[^a-zA-Z0-9_-]", "_", s).lower()
@staticmethod
async def _create_block_function_signature(
def _create_block_function_signature(
sink_node: "Node", links: list["Link"]
) -> dict[str, Any]:
"""
@@ -292,27 +266,38 @@ class SmartDecisionMakerBlock(Block):
block = sink_node.block
tool_function: dict[str, Any] = {
"name": SmartDecisionMakerBlock.cleanup(block.name),
"name": re.sub(r"[^a-zA-Z0-9_-]", "_", block.name).lower(),
"description": block.description,
}
sink_block_input_schema = block.input_schema
properties = {}
required = []
for link in links:
sink_name = SmartDecisionMakerBlock.cleanup(link.sink_name)
properties[sink_name] = sink_block_input_schema.get_field_schema(
link.sink_name
sink_block_input_schema = block.input_schema
description = (
sink_block_input_schema.model_fields[link.sink_name].description
if link.sink_name in sink_block_input_schema.model_fields
and sink_block_input_schema.model_fields[link.sink_name].description
else f"The {link.sink_name} of the tool"
)
properties[link.sink_name.lower()] = {
"type": "string",
"description": description,
}
tool_function["parameters"] = {
**block.input_schema.jsonschema(),
"type": "object",
"properties": properties,
"required": required,
"additionalProperties": False,
"strict": True,
}
return {"type": "function", "function": tool_function}
@staticmethod
async def _create_agent_function_signature(
def _create_agent_function_signature(
sink_node: "Node", links: list["Link"]
) -> dict[str, Any]:
"""
@@ -334,39 +319,37 @@ class SmartDecisionMakerBlock(Block):
raise ValueError("Graph ID or Graph Version not found in sink node.")
db_client = get_database_manager_client()
sink_graph_meta = await db_client.get_graph_metadata(graph_id, graph_version)
sink_graph_meta = db_client.get_graph_metadata(graph_id, graph_version)
if not sink_graph_meta:
raise ValueError(
f"Sink graph metadata not found: {graph_id} {graph_version}"
)
tool_function: dict[str, Any] = {
"name": SmartDecisionMakerBlock.cleanup(sink_graph_meta.name),
"name": re.sub(r"[^a-zA-Z0-9_-]", "_", sink_graph_meta.name).lower(),
"description": sink_graph_meta.description,
}
properties = {}
required = []
for link in links:
sink_block_input_schema = sink_node.input_default["input_schema"]
sink_block_properties = sink_block_input_schema.get("properties", {}).get(
link.sink_name, {}
)
sink_name = SmartDecisionMakerBlock.cleanup(link.sink_name)
description = (
sink_block_properties["description"]
if "description" in sink_block_properties
sink_block_input_schema["properties"][link.sink_name]["description"]
if "description"
in sink_block_input_schema["properties"][link.sink_name]
else f"The {link.sink_name} of the tool"
)
properties[sink_name] = {
properties[link.sink_name.lower()] = {
"type": "string",
"description": description,
"default": json.dumps(sink_block_properties.get("default", None)),
}
tool_function["parameters"] = {
"type": "object",
"properties": properties,
"required": required,
"additionalProperties": False,
"strict": True,
}
@@ -374,7 +357,7 @@ class SmartDecisionMakerBlock(Block):
return {"type": "function", "function": tool_function}
@staticmethod
async def _create_function_signature(node_id: str) -> list[dict[str, Any]]:
def _create_function_signature(node_id: str) -> list[dict[str, Any]]:
"""
Creates function signatures for tools linked to a specified node within a graph.
@@ -396,13 +379,13 @@ class SmartDecisionMakerBlock(Block):
db_client = get_database_manager_client()
tools = [
(link, node)
for link, node in await db_client.get_connected_output_nodes(node_id)
for link, node in db_client.get_connected_output_nodes(node_id)
if link.source_name.startswith("tools_^_") and link.source_id == node_id
]
if not tools:
raise ValueError("There is no next node to execute.")
return_tool_functions: list[dict[str, Any]] = []
return_tool_functions = []
grouped_tool_links: dict[str, tuple["Node", list["Link"]]] = {}
for link, node in tools:
@@ -417,20 +400,20 @@ class SmartDecisionMakerBlock(Block):
if sink_node.block_id == AgentExecutorBlock().id:
return_tool_functions.append(
await SmartDecisionMakerBlock._create_agent_function_signature(
SmartDecisionMakerBlock._create_agent_function_signature(
sink_node, links
)
)
else:
return_tool_functions.append(
await SmartDecisionMakerBlock._create_block_function_signature(
SmartDecisionMakerBlock._create_block_function_signature(
sink_node, links
)
)
return return_tool_functions
async def run(
def run(
self,
input_data: Input,
*,
@@ -442,43 +425,37 @@ class SmartDecisionMakerBlock(Block):
user_id: str,
**kwargs,
) -> BlockOutput:
tool_functions = await self._create_function_signature(node_id)
yield "tool_functions", json.dumps(tool_functions)
tool_functions = self._create_function_signature(node_id)
input_data.conversation_history = input_data.conversation_history or []
prompt = [json.to_dict(p) for p in input_data.conversation_history if p]
pending_tool_calls = get_pending_tool_calls(input_data.conversation_history)
if pending_tool_calls and input_data.last_tool_output is None:
if pending_tool_calls and not input_data.last_tool_output:
raise ValueError(f"Tool call requires an output for {pending_tool_calls}")
# Only assign the last tool output to the first pending tool call
tool_output = []
if pending_tool_calls and input_data.last_tool_output is not None:
# Get the first pending tool call ID
first_call_id = next(iter(pending_tool_calls.keys()))
tool_output.append(
_create_tool_response(first_call_id, input_data.last_tool_output)
# Prefill all missing tool calls with the last tool output/
# TODO: we need a better way to handle this.
tool_output = [
_create_tool_response(pending_call_id, input_data.last_tool_output)
for pending_call_id, count in pending_tool_calls.items()
for _ in range(count)
]
# If the SDM block only calls 1 tool at a time, this should not happen.
if len(tool_output) > 1:
logger.warning(
f"[SmartDecisionMakerBlock-node_exec_id={node_exec_id}] "
f"Multiple pending tool calls are prefilled using a single output. "
f"Execution may not be accurate."
)
# Add tool output to prompt right away
prompt.extend(tool_output)
# Check if there are still pending tool calls after handling the first one
remaining_pending_calls = get_pending_tool_calls(prompt)
# If there are still pending tool calls, yield the conversation and return early
if remaining_pending_calls:
yield "conversations", prompt
return
# Fall back to adding the tool output to the conversation history as a user prompt.
elif input_data.last_tool_output:
logger.error(
if len(tool_output) == 0 and input_data.last_tool_output:
logger.warning(
f"[SmartDecisionMakerBlock-node_exec_id={node_exec_id}] "
f"No pending tool calls found. This may indicate an issue with the "
f"conversation history, or the tool giving response more than once."
f"This should not happen! Please check the conversation history for any inconsistencies."
f"conversation history, or an LLM calling two tools at the same time."
)
tool_output.append(
{
@@ -486,11 +463,8 @@ class SmartDecisionMakerBlock(Block):
"content": f"Last tool output: {json.dumps(input_data.last_tool_output)}",
}
)
prompt.extend(tool_output)
if input_data.multiple_tool_calls:
input_data.sys_prompt += "\nYou can call a tool (different tools) multiple times in a single response."
else:
input_data.sys_prompt += "\nOnly provide EXACTLY one function call, multiple tool calls is strictly prohibited."
prompt.extend(tool_output)
values = input_data.prompt_values
if values:
@@ -509,7 +483,7 @@ class SmartDecisionMakerBlock(Block):
):
prompt.append({"role": "user", "content": prefix + input_data.prompt})
response = await llm.llm_call(
response = llm.llm_call(
credentials=credentials,
llm_model=input_data.model,
prompt=prompt,
@@ -517,7 +491,7 @@ class SmartDecisionMakerBlock(Block):
max_tokens=input_data.max_tokens,
tools=tool_functions,
ollama_host=input_data.ollama_host,
parallel_tool_calls=input_data.multiple_tool_calls,
parallel_tool_calls=False,
)
if not response.tool_calls:
@@ -528,37 +502,8 @@ class SmartDecisionMakerBlock(Block):
tool_name = tool_call.function.name
tool_args = json.loads(tool_call.function.arguments)
# Find the tool definition to get the expected arguments
tool_def = next(
(
tool
for tool in tool_functions
if tool["function"]["name"] == tool_name
),
None,
)
for arg_name, arg_value in tool_args.items():
yield f"tools_^_{tool_name}_{arg_name}".lower(), arg_value
if (
tool_def
and "function" in tool_def
and "parameters" in tool_def["function"]
):
expected_args = tool_def["function"]["parameters"].get("properties", {})
else:
expected_args = tool_args.keys()
# Yield provided arguments and None for missing ones
for arg_name in expected_args:
if arg_name in tool_args:
yield f"tools_^_{tool_name}_~_{arg_name}", tool_args[arg_name]
else:
yield f"tools_^_{tool_name}_~_{arg_name}", None
# Add reasoning to conversation history if available
if response.reasoning:
prompt.append(
{"role": "assistant", "content": f"[Reasoning]: {response.reasoning}"}
)
prompt.append(response.raw_response)
yield "conversations", prompt
response.prompt.append(response.raw_response)
yield "conversations", response.prompt


@@ -27,11 +27,9 @@ class SmartLeadClient:
def _handle_error(self, e: Exception) -> str:
return e.__str__().replace(self.api_key, "API KEY")
async def create_campaign(
self, request: CreateCampaignRequest
) -> CreateCampaignResponse:
def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
try:
response = await self.requests.post(
response = self.requests.post(
self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
json=request.model_dump(),
)
@@ -42,11 +40,11 @@ class SmartLeadClient:
except Exception as e:
raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")
async def add_leads_to_campaign(
def add_leads_to_campaign(
self, request: AddLeadsRequest
) -> AddLeadsToCampaignResponse:
try:
response = await self.requests.post(
response = self.requests.post(
self._add_auth_to_url(
f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
),
@@ -66,7 +64,7 @@ class SmartLeadClient:
f"Failed to add leads to campaign: {self._handle_error(e)}"
)
async def save_campaign_sequences(
def save_campaign_sequences(
self, campaign_id: int, request: SaveSequencesRequest
) -> SaveSequencesResponse:
"""
@@ -86,13 +84,13 @@ class SmartLeadClient:
- MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
"""
try:
response = await self.requests.post(
response = self.requests.post(
self._add_auth_to_url(
f"{self.API_URL}/campaigns/{campaign_id}/sequences"
),
json=request.model_dump(exclude_none=True),
)
return SaveSequencesResponse(**(response.json()))
return SaveSequencesResponse(**response.json())
except Exception as e:
raise ValueError(
f"Failed to save campaign sequences: {e.__str__().replace(self.api_key, 'API KEY')}"
