Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-12 00:28:31 -05:00)

Compare commits: change-log ... fix/cookie (239 commits)
Commits (SHA1):

8958357343 0b1b29a9bb d5dfc40263 d9f9f80346 a47e1916fb 49b22576b5
f3731afaf2 29395665c3 fc4d0d4bb8 d0beebcbff 93e611d609 c29b5e3f0f
db3d62eaa0 46da6a1c5f 753a2bf200 375777fe3c 1e0a3d3c1b f1471377c3
c4797a5f84 4923318cfe 13e5f6bf8e add32b8449 86361fc1ae 2f11dade70
b477d31641 2269e3593a 97e72cb485 8f1ebfc696 81d3eb7c34 f950f35af8
e05c34e76a 1ff924e260 fc975e9e17 7985da3e8e 34184f7cc0 ade66f3d27
9b221ff931 0955cfb869 bf26b8f14a fb18ddf95d 6e253ecade 36d304f03f
5dafc086fb c109b676b8 a259eac9ff 2ab9cfdf79 82f6687646 796f896042
8028a766b1 1e89bf5c37 2e96da36c2 de83c35c5f 450c1ee668 5385520c53
210d457ecd f9b37d2693 2f16511f24 4a03e5cbaf 7165958feb 014b276552
6771476d01 a3d082a5fa 10efb1772e a5ff8e8f69 f881570325 12972fde77
6df4dd3739 79b38343c2 705be3ec86 692c6defce 08c56a337b 41ebd5fe5d
e8657ed711 3a20c5a4bb 36634b7ba2 781f138c09 2647417e9f f2a04f9845
96df40f7b6 7d10dc4e7b 5d0faab4b1 5b324abc7c b900e86c49 ef6ba3e84a
95137323f7 512ce6d473 da0482b54e d710d14339 47adab575b fa7fcb3dd4
6629052a6b d8cf62c8be 7abe6eb328 4b70e778d2 34009bc749 722c6bcc18
eaf6da02d1 d5d613e014 73a3d980ca bac07b79e9 c8f2c7bc88 0f558876e2
3f6585f763 0ec557b942 453834f475 768c6b1c97 eeb1764779 7c65e53d51
56ddffeaa0 16d6f5377c 85e108a37a 692f32a350 9f2b9d08c9 b91b026164
35a5755958 b244726b20 3471781b98 17e973a8cb 8e2fb2daa4 767d2f2c1e
45578136e3 a51af36296 5518c2e9a2 dc981b52a3 61643e6a47 21b4d272ce
b8ba572629 47deeb53c3 1b81a7c755 793d056d81 8f1b3eb8ba 73ee6e272a
f466b010e4 f8965e530f 5e7b66da90 701d283f69 1bc4a48d53 47c1a64cc2
cf9cf4e7dd 0a79e1c5fd ac532ca4b9 694f701194 aa2c2c1ad2 bd425331f1
0e53c540d4 e48aec921e d754c2349c 870f8265b3 ba91c9f736 e5368f3857
c73c6fe5c3 9bef383df2 2dc038b6c0 cd6deb87c3 1999ba38d9 e8fa996c2f
e22d2c848a 9471fd6b58 c4bbfd5050 08639bb1f0 4d99ae27c9 64ff161323
2b5b93a0f7 79cc08787b b740a6edc0 c5946927ea 30086357bc e090195e57
d2bf0af3cd 4413366ea7 3d05c26f26 c736d401a6 e8bc83445a 8de88395f1
82cf0bcde7 089e7aae88 74e6a6a43a 433b76b539 1ad6c76f9c 104928c614
0726a00fb7 ac8ef9bdb2 519ad94ec9 505320fcd3 6f1578239a 79319ad1a7
afb66f75ec 59ec61ef98 d7077b5161 475c5a5cc3 f5a07f1a35 86d5cfe60b
602f887623 1edde778c5 3526986f98 04c4340ee3 9fa62c03f6 d5dc687484
fb5ce0a16d a1f17ca797 8fdfd75cc4 5b5b2043e8 7d83f1db05 f07696e3c1
96a173a85f 9715ea5313 ef022720d5 4ddb206f86 91f34966c8 11a69170b5
0675a41e42 56ce1a0c1c 7fbe135ec8 eb6a0b34e1 1e3236a041 160a622ba4
e2a226dc49 5047e99fd1 c80d357149 20d39f6d44 d5b82c01e0 69b8d96516
67af77e179 2a92970a5f 9052ee7b95 c783f64b33 055a231aed 417d7732af
f16a398a8e e8bbd945f2 d1730d7b1d 8ea64327a1 3cf30c22fb 05c670eef9
f6a4b036c7 c43924cd4e e3846c22bd 9a7a838418 d61d815208
@@ -27,7 +27,7 @@
 !autogpt_platform/frontend/src/
 !autogpt_platform/frontend/public/
 !autogpt_platform/frontend/package.json
-!autogpt_platform/frontend/yarn.lock
+!autogpt_platform/frontend/pnpm-lock.yaml
 !autogpt_platform/frontend/tsconfig.json
 !autogpt_platform/frontend/README.md
 ## config
.github/dependabot.yml (vendored, 64 lines changed)

@@ -10,17 +10,19 @@ updates:
     commit-message:
       prefix: "chore(libs/deps)"
       prefix-development: "chore(libs/deps-dev)"
+    ignore:
+      - dependency-name: "poetry"
     groups:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"

   # backend (Poetry project)
   - package-ecosystem: "pip"
@@ -32,17 +34,19 @@ updates:
     commit-message:
       prefix: "chore(backend/deps)"
      prefix-development: "chore(backend/deps-dev)"
+    ignore:
+      - dependency-name: "poetry"
     groups:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"

   # frontend (Next.js project)
   - package-ecosystem: "npm"
@@ -58,13 +62,13 @@ updates:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"

   # infra (Terraform)
   - package-ecosystem: "terraform"
@@ -81,14 +85,13 @@ updates:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
-
+          - "minor"
+          - "patch"

   # GitHub Actions
   - package-ecosystem: "github-actions"
@@ -101,14 +104,13 @@ updates:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
-
+          - "minor"
+          - "patch"

   # Docker
   - package-ecosystem: "docker"
@@ -121,16 +123,16 @@ updates:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"

   # Docs
-  - package-ecosystem: 'pip'
+  - package-ecosystem: "pip"
     directory: "docs/"
     schedule:
       interval: "weekly"
@@ -142,10 +144,10 @@ updates:
       production-dependencies:
         dependency-type: "production"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
       development-dependencies:
         dependency-type: "development"
         update-types:
-          - "minor"
-          - "patch"
+          - "minor"
+          - "patch"
.github/labeler.yml (vendored, 5 lines changed)

@@ -24,8 +24,9 @@ platform/frontend:

 platform/backend:
 - changed-files:
-  - any-glob-to-any-file: autogpt_platform/backend/**
-  - all-globs-to-all-files: '!autogpt_platform/backend/backend/blocks/**'
+  - all-globs-to-any-file:
+    - autogpt_platform/backend/**
+    - '!autogpt_platform/backend/backend/blocks/**'

 platform/blocks:
 - changed-files:
.github/workflows/claude.yml (vendored, new file, 47 lines)

@@ -0,0 +1,47 @@
name: Claude Code

on:
  issue_comment:
    types: [created]
  pull_request_review_comment:
    types: [created]
  issues:
    types: [opened, assigned]
  pull_request_review:
    types: [submitted]

jobs:
  claude:
    if: |
      (
        (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
        (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
        (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
        (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
      ) && (
        github.event.comment.author_association == 'OWNER' ||
        github.event.comment.author_association == 'MEMBER' ||
        github.event.comment.author_association == 'COLLABORATOR' ||
        github.event.review.author_association == 'OWNER' ||
        github.event.review.author_association == 'MEMBER' ||
        github.event.review.author_association == 'COLLABORATOR' ||
        github.event.issue.author_association == 'OWNER' ||
        github.event.issue.author_association == 'MEMBER' ||
        github.event.issue.author_association == 'COLLABORATOR'
      )
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
      issues: read
      id-token: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1

      - name: Run Claude Code
        id: claude
        uses: anthropics/claude-code-action@beta
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
.github/workflows/platform-backend-ci.yml (vendored, 6 lines changed)

@@ -32,7 +32,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.10"]
+        python-version: ["3.11"]
     runs-on: ubuntu-latest

     services:
@@ -81,12 +81,12 @@ jobs:
       - name: Install Poetry (Unix)
         run: |
           # Extract Poetry version from backend/poetry.lock
-          HEAD_POETRY_VERSION=$(head -n 1 poetry.lock | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
+          HEAD_POETRY_VERSION=$(python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry)
           echo "Found Poetry version ${HEAD_POETRY_VERSION} in backend/poetry.lock"

           if [ -n "$BASE_REF" ]; then
             BASE_BRANCH=${BASE_REF/refs\/heads\//}
-            BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | head -n 1 | grep -oP '(?<=Poetry )[0-9]+\.[0-9]+\.[0-9]+')
+            BASE_POETRY_VERSION=$((git show "origin/$BASE_BRANCH":./poetry.lock; true) | python ../../.github/workflows/scripts/get_package_version_from_lockfile.py poetry -)
             echo "Found Poetry version ${BASE_POETRY_VERSION} in backend/poetry.lock on ${BASE_REF}"
             POETRY_VERSION=$(printf '%s\n' "$HEAD_POETRY_VERSION" "$BASE_POETRY_VERSION" | sort -V | tail -n1)
           else
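The `sort -V | tail -n1` step above simply picks whichever of the two Poetry versions is newer. A minimal Python sketch of that selection, for reference (the version strings are illustrative, not taken from the diff):

```python
# Sketch of the CI's "use the newest Poetry version" step, done in Python
# instead of `sort -V`. Version values below are illustrative only.
def version_key(v: str) -> tuple[int, ...]:
    # "1.8.3" -> (1, 8, 3); tuples compare numerically, like `sort -V`
    return tuple(int(part) for part in v.split("."))

head_version = "2.1.1"  # e.g. extracted from HEAD's poetry.lock
base_version = "1.8.3"  # e.g. extracted from the base branch's poetry.lock

poetry_version = max(head_version, base_version, key=version_key)
print(poetry_version)  # -> 2.1.1
```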
.github/workflows/platform-dev-deploy-event-dispatcher.yml (vendored, new file, 198 lines)

@@ -0,0 +1,198 @@
name: AutoGPT Platform - Dev Deploy PR Event Dispatcher

on:
  pull_request:
    types: [closed]
  issue_comment:
    types: [created]

permissions:
  issues: write
  pull-requests: write

jobs:
  dispatch:
    runs-on: ubuntu-latest
    steps:
      - name: Check comment permissions and deployment status
        id: check_status
        if: github.event_name == 'issue_comment' && github.event.issue.pull_request
        uses: actions/github-script@v7
        with:
          script: |
            const commentBody = context.payload.comment.body.trim();
            const commentUser = context.payload.comment.user.login;
            const prAuthor = context.payload.issue.user.login;
            const authorAssociation = context.payload.comment.author_association;

            // Check permissions
            const hasPermission = (
              authorAssociation === 'OWNER' ||
              authorAssociation === 'MEMBER' ||
              authorAssociation === 'COLLABORATOR'
            );

            core.setOutput('comment_body', commentBody);
            core.setOutput('has_permission', hasPermission);

            if (!hasPermission && (commentBody === '!deploy' || commentBody === '!undeploy')) {
              core.setOutput('permission_denied', 'true');
              return;
            }

            if (commentBody !== '!deploy' && commentBody !== '!undeploy') {
              return;
            }

            // Process deploy command
            if (commentBody === '!deploy') {
              core.setOutput('should_deploy', 'true');
            }
            // Process undeploy command
            else if (commentBody === '!undeploy') {
              core.setOutput('should_undeploy', 'true');
            }

      - name: Post permission denied comment
        if: steps.check_status.outputs.permission_denied == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `❌ **Permission denied**: Only the repository owners, members, or collaborators can use deployment commands.`
            });

      - name: Get PR details for deployment
        id: pr_details
        if: steps.check_status.outputs.should_deploy == 'true' || steps.check_status.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            const pr = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: context.issue.number
            });
            core.setOutput('pr_number', pr.data.number);
            core.setOutput('pr_title', pr.data.title);
            core.setOutput('pr_state', pr.data.state);

      - name: Dispatch Deploy Event
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "deploy",
              "pr_number": "${{ steps.pr_details.outputs.pr_number }}",
              "pr_title": "${{ steps.pr_details.outputs.pr_title }}",
              "pr_state": "${{ steps.pr_details.outputs.pr_state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post deploy success comment
        if: steps.check_status.outputs.should_deploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🚀 **Deploying PR #${{ steps.pr_details.outputs.pr_number }}** to development environment...`
            });

      - name: Dispatch Undeploy Event (from comment)
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "undeploy",
              "pr_number": "${{ steps.pr_details.outputs.pr_number }}",
              "pr_title": "${{ steps.pr_details.outputs.pr_title }}",
              "pr_state": "${{ steps.pr_details.outputs.pr_state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post undeploy success comment
        if: steps.check_status.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🗑️ **Undeploying PR #${{ steps.pr_details.outputs.pr_number }}** from development environment...`
            });

      - name: Check deployment status on PR close
        id: check_pr_close
        if: github.event_name == 'pull_request' && github.event.action == 'closed'
        uses: actions/github-script@v7
        with:
          script: |
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number
            });

            let lastDeployIndex = -1;
            let lastUndeployIndex = -1;

            comments.data.forEach((comment, index) => {
              if (comment.body.trim() === '!deploy') {
                lastDeployIndex = index;
              } else if (comment.body.trim() === '!undeploy') {
                lastUndeployIndex = index;
              }
            });

            // Should undeploy if there's a !deploy without a subsequent !undeploy
            const shouldUndeploy = lastDeployIndex !== -1 && lastDeployIndex > lastUndeployIndex;
            core.setOutput('should_undeploy', shouldUndeploy);

      - name: Dispatch Undeploy Event (PR closed with active deployment)
        if: >-
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DISPATCH_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: pr-event
          client-payload: |
            {
              "action": "undeploy",
              "pr_number": "${{ github.event.pull_request.number }}",
              "pr_title": "${{ github.event.pull_request.title }}",
              "pr_state": "${{ github.event.pull_request.state }}",
              "repo": "${{ github.repository }}"
            }

      - name: Post PR close undeploy comment
        if: >-
          github.event_name == 'pull_request' &&
          github.event.action == 'closed' &&
          steps.check_pr_close.outputs.should_undeploy == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `🧹 **Auto-undeploying**: PR closed with active deployment. Cleaning up development environment for PR #${{ github.event.pull_request.number }}.`
            });
.github/workflows/platform-frontend-ci.yml (vendored, 44 lines changed)

@@ -29,13 +29,14 @@ jobs:
         with:
           node-version: "21"

+      - name: Enable corepack
+        run: corepack enable
+
       - name: Install dependencies
-        run: |
-          yarn install --frozen-lockfile
+        run: pnpm install --frozen-lockfile

       - name: Run lint
-        run: |
-          yarn lint
+        run: pnpm lint

   type-check:
     runs-on: ubuntu-latest
@@ -48,13 +49,14 @@ jobs:
         with:
           node-version: "21"

+      - name: Enable corepack
+        run: corepack enable
+
       - name: Install dependencies
-        run: |
-          yarn install --frozen-lockfile
+        run: pnpm install --frozen-lockfile

       - name: Run tsc check
-        run: |
-          yarn type-check
+        run: pnpm type-check

   test:
     runs-on: ubuntu-latest
@@ -74,6 +76,9 @@ jobs:
         with:
           node-version: "21"

+      - name: Enable corepack
+        run: corepack enable
+
       - name: Free Disk Space (Ubuntu)
         uses: jlumbroso/free-disk-space@main
         with:
@@ -93,25 +98,24 @@ jobs:
           docker compose -f ../docker-compose.yml up -d

       - name: Install dependencies
-        run: |
-          yarn install --frozen-lockfile
+        run: pnpm install --frozen-lockfile

-      - name: Setup Builder .env
-        run: |
-          cp .env.example .env
+      - name: Setup .env
+        run: cp .env.example .env

+      - name: Build frontend
+        run: pnpm build --turbo
+        # uses Turbopack, much faster and safe enough for a test pipeline

       - name: Install Browser '${{ matrix.browser }}'
-        run: yarn playwright install --with-deps ${{ matrix.browser }}
+        run: pnpm playwright install --with-deps ${{ matrix.browser }}

-      - name: Run tests
-        timeout-minutes: 20
-        run: |
-          yarn test --project=${{ matrix.browser }}
+      - name: Run Playwright tests
+        run: pnpm test:no-build --project=${{ matrix.browser }}

       - name: Print Final Docker Compose logs
         if: always()
-        run: |
-          docker compose -f ../docker-compose.yml logs
+        run: docker compose -f ../docker-compose.yml logs

       - uses: actions/upload-artifact@v4
         if: ${{ !cancelled() }}
@@ -16,7 +16,7 @@ jobs:
       # operations-per-run: 5000
       stale-issue-message: >
         This issue has automatically been marked as _stale_ because it has not had
-        any activity in the last 50 days. You can _unstale_ it by commenting or
+        any activity in the last 170 days. You can _unstale_ it by commenting or
         removing the label. Otherwise, this issue will be closed in 10 days.
       stale-pr-message: >
         This pull request has automatically been marked as _stale_ because it has
@@ -25,7 +25,7 @@ jobs:
       close-issue-message: >
         This issue was closed automatically because it has been stale for 10 days
         with no activity.
-      days-before-stale: 100
+      days-before-stale: 170
       days-before-close: 10
       # Do not touch meta issues:
       exempt-issue-labels: meta,fridge,project management
.github/workflows/scripts/get_package_version_from_lockfile.py (vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
import sys

if sys.version_info < (3, 11):
    print("Python version 3.11 or higher required")
    sys.exit(1)

import tomllib


def get_package_version(package_name: str, lockfile_path: str) -> str | None:
    """Extract package version from poetry.lock file."""
    try:
        if lockfile_path == "-":
            data = tomllib.load(sys.stdin.buffer)
        else:
            with open(lockfile_path, "rb") as f:
                data = tomllib.load(f)
    except FileNotFoundError:
        print(f"Error: File '{lockfile_path}' not found", file=sys.stderr)
        sys.exit(1)
    except tomllib.TOMLDecodeError as e:
        print(f"Error parsing TOML file: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error reading file: {e}", file=sys.stderr)
        sys.exit(1)

    # Look for the package in the packages list
    packages = data.get("package", [])
    for package in packages:
        if package.get("name", "").lower() == package_name.lower():
            return package.get("version")

    return None


def main():
    if len(sys.argv) not in (2, 3):
        print(
            "Usages: python get_package_version_from_lockfile.py <package name> [poetry.lock path]\n"
            "        cat poetry.lock | python get_package_version_from_lockfile.py <package name> -",
            file=sys.stderr,
        )
        sys.exit(1)

    package_name = sys.argv[1]
    lockfile_path = sys.argv[2] if len(sys.argv) == 3 else "poetry.lock"

    version = get_package_version(package_name, lockfile_path)

    if version:
        print(version)
    else:
        print(f"Package '{package_name}' not found in {lockfile_path}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
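A usage sketch for the script above, based on the invocation forms in its own usage message (requires Python 3.11+ for `tomllib`; run from a directory containing a `poetry.lock`):

```python
# Usage sketch for get_package_version_from_lockfile.py; both invocation
# forms are taken from the script's usage message.
import subprocess

# File mode: reads ./poetry.lock by default (a path may be passed as arg 2)
result = subprocess.run(
    ["python", "get_package_version_from_lockfile.py", "poetry"],
    capture_output=True, text=True, check=True,
)
print(result.stdout.strip())  # e.g. "2.1.1" (illustrative)

# Stdin mode: pass "-" and pipe the lockfile in, as the backend CI does
with open("poetry.lock", "rb") as lockfile:
    result = subprocess.run(
        ["python", "get_package_version_from_lockfile.py", "poetry", "-"],
        stdin=lockfile, capture_output=True, text=True, check=True,
    )
print(result.stdout.strip())
```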
.gitignore (vendored, 1 line changed)

@@ -176,3 +176,4 @@ autogpt_platform/backend/settings.py

 *.ign.*
 .test-contents
+.claude/settings.local.json
@@ -17,7 +17,7 @@ repos:
         name: Detect secrets
         description: Detects high entropy strings that are likely to be passwords.
         files: ^autogpt_platform/
-        stages: [push]
+        stages: [pre-push]

   - repo: local
     # For proper type checking, all dependencies need to be up-to-date.
@@ -235,44 +235,44 @@ repos:
     hooks:
       - id: tsc
        name: Typecheck - AutoGPT Platform - Frontend
-        entry: bash -c 'cd autogpt_platform/frontend && npm run type-check'
+        entry: bash -c 'cd autogpt_platform/frontend && pnpm type-check'
         files: ^autogpt_platform/frontend/
         types: [file]
         language: system
         pass_filenames: false

-  - repo: local
-    hooks:
-      - id: pytest
-        name: Run tests - AutoGPT Platform - Backend
-        alias: pytest-platform-backend
-        entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
-        # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
-        files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
-        language: system
-        pass_filenames: false
+  # - repo: local
+  #   hooks:
+  #     - id: pytest
+  #       name: Run tests - AutoGPT Platform - Backend
+  #       alias: pytest-platform-backend
+  #       entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
+  #       # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
+  #       files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
+  #       language: system
+  #       pass_filenames: false

-      - id: pytest
-        name: Run tests - Classic - AutoGPT (excl. slow tests)
-        alias: pytest-classic-autogpt
-        entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
-        # include forge source (since it's a path dependency) but exclude *_test.py files:
-        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
-        language: system
-        pass_filenames: false
+  #     - id: pytest
+  #       name: Run tests - Classic - AutoGPT (excl. slow tests)
+  #       alias: pytest-classic-autogpt
+  #       entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
+  #       # include forge source (since it's a path dependency) but exclude *_test.py files:
+  #       files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
+  #       language: system
+  #       pass_filenames: false

-      - id: pytest
-        name: Run tests - Classic - Forge (excl. slow tests)
-        alias: pytest-classic-forge
-        entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
-        files: ^classic/forge/(forge/|tests/|poetry\.lock$)
-        language: system
-        pass_filenames: false
+  #     - id: pytest
+  #       name: Run tests - Classic - Forge (excl. slow tests)
+  #       alias: pytest-classic-forge
+  #       entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
+  #       files: ^classic/forge/(forge/|tests/|poetry\.lock$)
+  #       language: system
+  #       pass_filenames: false

-      - id: pytest
-        name: Run tests - Classic - Benchmark
-        alias: pytest-classic-benchmark
-        entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
-        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
-        language: system
-        pass_filenames: false
+  #     - id: pytest
+  #       name: Run tests - Classic - Benchmark
+  #       alias: pytest-classic-benchmark
+  #       entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
+  #       files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
+  #       language: system
+  #       pass_filenames: false
.vscode/launch.json (vendored, 6 lines changed)

@@ -32,9 +32,9 @@
       "type": "debugpy",
       "request": "launch",
       "module": "backend.app",
-      // "env": {
-      //   "ENV": "dev"
-      // },
+      "env": {
+        "OBJC_DISABLE_INITIALIZE_FORK_SAFETY": "YES"
+      },
       "envFile": "${workspaceFolder}/backend/.env",
       "justMyCode": false,
       "cwd": "${workspaceFolder}/autogpt_platform/backend"
AGENTS.md (new file, 53 lines)

@@ -0,0 +1,53 @@
# AutoGPT Platform Contribution Guide

This guide provides context for Codex when updating the **autogpt_platform** folder.

## Directory overview

- `autogpt_platform/backend` – FastAPI based backend service.
- `autogpt_platform/autogpt_libs` – Shared Python libraries.
- `autogpt_platform/frontend` – Next.js + Typescript frontend.
- `autogpt_platform/docker-compose.yml` – development stack.

See `docs/content/platform/getting-started.md` for setup instructions.

## Code style

- Format Python code with `poetry run format`.
- Format frontend code using `pnpm format`.

## Testing

- Backend: `poetry run test` (runs pytest with a docker based postgres + prisma).
- Frontend: `pnpm test` or `pnpm test-ui` for Playwright tests. See `docs/content/platform/contributing/tests.md` for tips.

Always run the relevant linters and tests before committing.
Use conventional commit messages for all commits (e.g. `feat(backend): add API`).
Types:
- feat
- fix
- refactor
- ci
- dx (developer experience)
Scopes:
- platform
- platform/library
- platform/marketplace
- backend
- backend/executor
- frontend
- frontend/library
- frontend/marketplace
- blocks

## Pull requests

- Use the template in `.github/PULL_REQUEST_TEMPLATE.md`.
- Rely on the pre-commit checks for linting and formatting.
- Fill out the **Changes** section and the checklist.
- Use conventional commit titles with a scope (e.g. `feat(frontend): add feature`).
- Keep out-of-scope changes under 20% of the PR.
- Ensure PR descriptions are complete.
- For changes touching `data/*.py`, validate user ID checks or explain why not needed.
- If adding protected frontend routes, update `frontend/lib/supabase/middleware.ts`.
- Use the Linear ticket branch structure if given, e.g. codex/open-1668-resume-dropped-runs
README.md (31 lines changed)

@@ -15,8 +15,35 @@
 > Setting up and hosting the AutoGPT Platform yourself is a technical process.
 > If you'd rather something that just works, we recommend [joining the waitlist](https://bit.ly/3ZDijAI) for the cloud-hosted beta.

+### System Requirements
+
+Before proceeding with the installation, ensure your system meets the following requirements:
+
+#### Hardware Requirements
+- CPU: 4+ cores recommended
+- RAM: Minimum 8GB, 16GB recommended
+- Storage: At least 10GB of free space
+
+#### Software Requirements
+- Operating Systems:
+  - Linux (Ubuntu 20.04 or newer recommended)
+  - macOS (10.15 or newer)
+  - Windows 10/11 with WSL2
+- Required Software (with minimum versions):
+  - Docker Engine (20.10.0 or newer)
+  - Docker Compose (2.0.0 or newer)
+  - Git (2.30 or newer)
+  - Node.js (16.x or newer)
+  - npm (8.x or newer)
+  - VSCode (1.60 or newer) or any modern code editor
+
+#### Network Requirements
+- Stable internet connection
+- Access to required ports (will be configured in Docker)
+- Ability to make outbound HTTPS connections
+
 ### Updated Setup Instructions:
-We’ve moved to a fully maintained and regularly updated documentation site.
+We've moved to a fully maintained and regularly updated documentation site.

 👉 [Follow the official self-hosting guide here](https://docs.agpt.co/platform/getting-started/)

@@ -152,7 +179,7 @@

 [](https://discord.gg/autogpt)

-To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic.
+To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn't created an issue for the same topic.

 ## 🤝 Sister projects
autogpt_platform/CLAUDE.md (new file, 132 lines)

@@ -0,0 +1,132 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Repository Overview

AutoGPT Platform is a monorepo containing:
- **Backend** (`/backend`): Python FastAPI server with async support
- **Frontend** (`/frontend`): Next.js React application
- **Shared Libraries** (`/autogpt_libs`): Common Python utilities

## Essential Commands

### Backend Development

```bash
# Install dependencies
cd backend && poetry install

# Run database migrations
poetry run prisma migrate dev

# Start all services (database, redis, rabbitmq)
docker compose up -d

# Run the backend server
poetry run serve

# Run tests
poetry run test

# Run specific test
poetry run pytest path/to/test_file.py::test_function_name

# Lint and format
poetry run format  # Black + isort
poetry run lint    # ruff
```

More details can be found in TESTING.md

#### Creating/Updating Snapshots

When you first write a test or when the expected output changes:

```bash
poetry run pytest path/to/test.py --snapshot-update
```

⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.

### Frontend Development

```bash
# Install dependencies
cd frontend && npm install

# Start development server
npm run dev

# Run E2E tests
npm run test

# Run Storybook for component development
npm run storybook

# Build production
npm run build

# Type checking
npm run type-check
```

## Architecture Overview

### Backend Architecture
- **API Layer**: FastAPI with REST and WebSocket endpoints
- **Database**: PostgreSQL with Prisma ORM, includes pgvector for embeddings
- **Queue System**: RabbitMQ for async task processing
- **Execution Engine**: Separate executor service processes agent workflows
- **Authentication**: JWT-based with Supabase integration

### Frontend Architecture
- **Framework**: Next.js App Router with React Server Components
- **State Management**: React hooks + Supabase client for real-time updates
- **Workflow Builder**: Visual graph editor using @xyflow/react
- **UI Components**: Radix UI primitives with Tailwind CSS styling
- **Feature Flags**: LaunchDarkly integration

### Key Concepts
1. **Agent Graphs**: Workflow definitions stored as JSON, executed by the backend
2. **Blocks**: Reusable components in `/backend/blocks/` that perform specific tasks
3. **Integrations**: OAuth and API connections stored per user
4. **Store**: Marketplace for sharing agent templates

### Testing Approach
- Backend uses pytest with snapshot testing for API responses
- Test files are colocated with source files (`*_test.py`)
- Frontend uses Playwright for E2E tests
- Component testing via Storybook

### Database Schema
Key models (defined in `/backend/schema.prisma`):
- `User`: Authentication and profile data
- `AgentGraph`: Workflow definitions with version control
- `AgentGraphExecution`: Execution history and results
- `AgentNode`: Individual nodes in a workflow
- `StoreListing`: Marketplace listings for sharing agents

### Environment Configuration
- Backend: `.env` file in `/backend`
- Frontend: `.env.local` file in `/frontend`
- Both require Supabase credentials and API keys for various services

### Common Development Tasks

**Adding a new block:**
1. Create new file in `/backend/backend/blocks/`
2. Inherit from `Block` base class
3. Define input/output schemas
4. Implement `run` method
5. Register in block registry

**Modifying the API:**
1. Update route in `/backend/backend/server/routers/`
2. Add/update Pydantic models in same directory
3. Write tests alongside the route file
4. Run `poetry run test` to verify

**Frontend feature development:**
1. Components go in `/frontend/src/components/`
2. Use existing UI components from `/frontend/src/components/ui/`
3. Add Storybook stories for new components
4. Test with Playwright if user-facing
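The "Adding a new block" steps above translate roughly to a skeleton like the following. This is a rough sketch only: the base-class and schema names (`Block`, `BlockSchema`, `BlockOutput`), the import path, and the `run` signature are assumptions inferred from the steps listed here, not confirmed by this diff; check the actual base class in `/backend/backend/blocks/` before relying on it.

```python
# Hypothetical block skeleton following the steps above. Names and the
# import path are assumptions; consult the real base class in the repo.
from backend.data.block import Block, BlockOutput, BlockSchema  # assumed path


class EchoBlock(Block):
    """Illustrative block that echoes its input text."""

    class Input(BlockSchema):
        text: str

    class Output(BlockSchema):
        result: str

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
            description="Echoes the input text back as output",
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Blocks yield (output_field_name, value) pairs
        yield "result", input_data.text
```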
@@ -15,44 +15,57 @@ Welcome to the AutoGPT Platform - a powerful system for creating and running AI
 To run the AutoGPT Platform, follow these steps:

 1. Clone this repository to your local machine and navigate to the `autogpt_platform` directory within the repository:

    ```
    git clone <https://github.com/Significant-Gravitas/AutoGPT.git | git@github.com:Significant-Gravitas/AutoGPT.git>
    cd AutoGPT/autogpt_platform
    ```

 2. Run the following command:

    ```
    cp .env.example .env
    ```

    This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.

 3. Run the following command:

    ```
    docker compose up -d
    ```

    This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.

 4. Navigate to `frontend` within the `autogpt_platform` directory:

    ```
    cd frontend
    ```

    You will need to run your frontend application separately on your local machine.

-5. Run the following command:
+5. Run the following command:

    ```
    cp .env.example .env.local
    ```

    This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.

 6. Run the following command:

+   Enable corepack and install dependencies by running:
+
    ```
-   npm install
-   npm run dev
+   corepack enable
+   pnpm i
    ```
-   This command will install the necessary dependencies and start the frontend application in development mode.
-   If you are using Yarn, you can run the following commands instead:

+   Then start the frontend application in development mode:
+
    ```
-   yarn install && yarn dev
+   pnpm dev
    ```

 7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
@@ -68,43 +81,52 @@ Here are some useful Docker Compose commands for managing your AutoGPT Platform:
 - `docker compose down`: Stop and remove containers, networks, and volumes.
 - `docker compose watch`: Watch for changes in your services and automatically update them.

 ### Sample Scenarios

 Here are some common scenarios where you might use multiple Docker Compose commands:

 1. Updating and restarting a specific service:

    ```
    docker compose build api_srv
    docker compose up -d --no-deps api_srv
    ```

    This rebuilds the `api_srv` service and restarts it without affecting other services.

 2. Viewing logs for troubleshooting:

    ```
    docker compose logs -f api_srv ws_srv
    ```

    This shows and follows the logs for both `api_srv` and `ws_srv` services.

 3. Scaling a service for increased load:

    ```
    docker compose up -d --scale executor=3
    ```

    This scales the `executor` service to 3 instances to handle increased load.

 4. Stopping the entire system for maintenance:

    ```
    docker compose stop
    docker compose rm -f
    docker compose pull
    docker compose up -d
    ```

    This stops all services, removes containers, pulls the latest images, and restarts the system.

 5. Developing with live updates:

    ```
    docker compose watch
    ```

    This watches for changes in your code and automatically updates the relevant services.

 6. Checking the status of services:

@@ -115,7 +137,6 @@
 These scenarios demonstrate how to use Docker Compose commands in combination to manage your AutoGPT Platform effectively.

-
 ### Persisting Data

 To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml` file to add volumes. Here's how:
@@ -1,3 +1,3 @@
 # AutoGPT Libs

-This is a new project to store shared functionality across different services in NextGen AutoGPT (e.g. authentication)
+This is a new project to store shared functionality across different services in the AutoGPT Platform (e.g. authentication)
@@ -31,4 +31,5 @@ class APIKeyManager:
         """Verify if a provided API key matches the stored hash."""
         if not provided_key.startswith(self.PREFIX):
             return False
-        return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
+        provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
+        return secrets.compare_digest(provided_hash, stored_hash)
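The switch from `==` to `secrets.compare_digest` makes the hash comparison constant-time, so response timing cannot leak how many leading characters of the stored hash a guess matched. A minimal stdlib-only illustration of the same pattern (key values are illustrative):

```python
# Constant-time comparison of SHA-256 hex digests, as in the change above.
# Stdlib only; the key values are illustrative.
import hashlib
import secrets

stored_hash = hashlib.sha256(b"agpt_example_key").hexdigest()

def verify(provided_key: bytes) -> bool:
    provided_hash = hashlib.sha256(provided_key).hexdigest()
    # Unlike `==`, compare_digest does not short-circuit at the first
    # mismatching byte, so timing reveals nothing about stored_hash.
    return secrets.compare_digest(provided_hash, stored_hash)

assert verify(b"agpt_example_key")
assert not verify(b"wrong-key")
```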
@@ -1,5 +1,6 @@
 import inspect
 import logging
+import secrets
 from typing import Any, Callable, Optional

 from fastapi import HTTPException, Request, Security
@@ -16,7 +17,7 @@ logger = logging.getLogger(__name__)
 async def auth_middleware(request: Request):
     if not settings.ENABLE_AUTH:
         # If authentication is disabled, allow the request to proceed
-        logger.warn("Auth disabled")
+        logger.warning("Auth disabled")
         return {}

     security = HTTPBearer()
@@ -93,7 +94,11 @@ class APIKeyValidator:
         self.error_message = error_message

     async def default_validator(self, api_key: str) -> bool:
-        return api_key == self.expected_token
+        if not self.expected_token:
+            raise ValueError(
+                "Expected token is required to be set when using APIKeyValidator default validation"
+            )
+        return secrets.compare_digest(api_key, self.expected_token)

     async def __call__(
         self, request: Request, api_key: str = Security(APIKeyHeader)
@@ -1,6 +1,6 @@
 import inspect
 import threading
-from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast, overload
+from typing import Awaitable, Callable, ParamSpec, TypeVar, cast, overload

 P = ParamSpec("P")
 R = TypeVar("R")
@@ -19,41 +19,41 @@ def thread_cached(
 ) -> Callable[P, R] | Callable[P, Awaitable[R]]:
     thread_local = threading.local()

+    def _clear():
+        if hasattr(thread_local, "cache"):
+            del thread_local.cache
+
     if inspect.iscoroutinefunction(func):

         async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
             cache = getattr(thread_local, "cache", None)
             if cache is None:
                 cache = thread_local.cache = {}
-            key = (func, args, tuple(sorted(kwargs.items())))
+            key = (args, tuple(sorted(kwargs.items())))
             if key not in cache:
                 cache[key] = await cast(Callable[P, Awaitable[R]], func)(
                     *args, **kwargs
                 )
             return cache[key]

+        setattr(async_wrapper, "clear_cache", _clear)
         return async_wrapper

     else:

         def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
             cache = getattr(thread_local, "cache", None)
             if cache is None:
                 cache = thread_local.cache = {}
-            # Include function in the key to prevent collisions between different functions
-            key = (func, args, tuple(sorted(kwargs.items())))
+            key = (args, tuple(sorted(kwargs.items())))
             if key not in cache:
                 cache[key] = func(*args, **kwargs)
             return cache[key]

+        setattr(sync_wrapper, "clear_cache", _clear)
         return sync_wrapper


-def clear_thread_cache(func: Callable[..., Any]) -> None:
-    """Clear the cache for a thread-cached function."""
-    thread_local = threading.local()
-    cache = getattr(thread_local, "cache", None)
-    if cache is not None:
-        # Clear all entries that match the function
-        for key in list(cache.keys()):
-            if key and len(key) > 0 and key[0] == func:
-                del cache[key]
+def clear_thread_cache(func: Callable) -> None:
+    if clear := getattr(func, "clear_cache", None):
+        clear()
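A usage sketch for the reworked decorator: each thread keeps its own per-wrapper cache, and clearing now goes through the `clear_cache` attribute the wrapper exposes. The import path below is an assumption based on the file shown above.

```python
# Usage sketch for thread_cached / clear_thread_cache (import path assumed).
from autogpt_libs.utils.cache import clear_thread_cache, thread_cached

@thread_cached
def expensive(x: int) -> int:
    print("computing...")
    return x * x

expensive(4)  # prints "computing...", returns 16
expensive(4)  # cache hit in this thread: no print, returns 16

clear_thread_cache(expensive)  # invokes expensive.clear_cache()
expensive(4)  # recomputes after the cache was dropped
```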
@@ -1,15 +1,15 @@
-from contextlib import contextmanager
-from threading import Lock
+import asyncio
+from contextlib import asynccontextmanager
 from typing import TYPE_CHECKING, Any

 from expiringdict import ExpiringDict

 if TYPE_CHECKING:
-    from redis import Redis
-    from redis.lock import Lock as RedisLock
+    from redis.asyncio import Redis as AsyncRedis
+    from redis.asyncio.lock import Lock as AsyncRedisLock


-class RedisKeyedMutex:
+class AsyncRedisKeyedMutex:
     """
     This class provides a mutex that can be locked and unlocked by a specific key,
     using Redis as a distributed locking provider.
@@ -17,41 +17,45 @@ class RedisKeyedMutex:
     in case the key is not unlocked for a specified duration, to prevent memory leaks.
     """

-    def __init__(self, redis: "Redis", timeout: int | None = 60):
+    def __init__(self, redis: "AsyncRedis", timeout: int | None = 60):
         self.redis = redis
         self.timeout = timeout
-        self.locks: dict[Any, "RedisLock"] = ExpiringDict(
+        self.locks: dict[Any, "AsyncRedisLock"] = ExpiringDict(
             max_len=6000, max_age_seconds=self.timeout
         )
-        self.locks_lock = Lock()
+        self.locks_lock = asyncio.Lock()

-    @contextmanager
-    def locked(self, key: Any):
-        lock = self.acquire(key)
+    @asynccontextmanager
+    async def locked(self, key: Any):
+        lock = await self.acquire(key)
         try:
             yield
         finally:
-            if lock.locked():
-                lock.release()
+            if (await lock.locked()) and (await lock.owned()):
+                await lock.release()

-    def acquire(self, key: Any) -> "RedisLock":
+    async def acquire(self, key: Any) -> "AsyncRedisLock":
         """Acquires and returns a lock with the given key"""
-        with self.locks_lock:
+        async with self.locks_lock:
             if key not in self.locks:
                 self.locks[key] = self.redis.lock(
                     str(key), self.timeout, thread_local=False
                 )
             lock = self.locks[key]
-        lock.acquire()
+        await lock.acquire()
         return lock

-    def release(self, key: Any):
-        if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
-            lock.release()
+    async def release(self, key: Any):
+        if (
+            (lock := self.locks.get(key))
+            and (await lock.locked())
+            and (await lock.owned())
+        ):
+            await lock.release()

-    def release_all_locks(self):
+    async def release_all_locks(self):
         """Call this on process termination to ensure all locks are released"""
-        self.locks_lock.acquire(blocking=False)
-        for lock in self.locks.values():
-            if lock.locked() and lock.owned():
-                lock.release()
+        async with self.locks_lock:
+            for lock in self.locks.values():
+                if (await lock.locked()) and (await lock.owned()):
+                    await lock.release()
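A usage sketch for the now-async mutex: per-key locks are taken with `async with` over a `redis.asyncio` client. Connection details are illustrative, and the class's import path is an assumption.

```python
# Usage sketch for AsyncRedisKeyedMutex (connection details illustrative;
# the import path for the class is an assumption).
import asyncio

from redis.asyncio import Redis

from autogpt_libs.utils.synchronize import AsyncRedisKeyedMutex  # assumed path

async def main():
    redis = Redis(host="localhost", port=6379)
    mutex = AsyncRedisKeyedMutex(redis, timeout=60)

    # Only one coroutine/process holding this key's lock runs the block;
    # the Redis-backed lock auto-expires after `timeout` seconds.
    async with mutex.locked("user:123"):
        ...  # critical section guarded across processes via Redis

    await mutex.release_all_locks()  # e.g. on shutdown
    await redis.aclose()

asyncio.run(main())
```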
440
autogpt_platform/autogpt_libs/poetry.lock
generated
440
autogpt_platform/autogpt_libs/poetry.lock
generated
@@ -323,6 +323,21 @@ files = [
|
||||
{file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "click"
|
||||
version = "8.2.1"
|
||||
description = "Composable command line interface toolkit"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"},
|
||||
{file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
colorama = {version = "*", markers = "platform_system == \"Windows\""}
|
||||
|
||||
[[package]]
|
||||
name = "colorama"
|
||||
version = "0.4.6"
|
||||
@@ -399,6 +414,27 @@ files = [
|
||||
[package.extras]
|
||||
tests = ["coverage", "coveralls", "dill", "mock", "nose"]
|
||||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.115.12"
|
||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d"},
|
||||
{file = "fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
|
||||
starlette = ">=0.40.0,<0.47.0"
|
||||
typing-extensions = ">=4.8.0"
|
||||
|
||||
[package.extras]
|
||||
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
|
||||
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "frozenlist"
|
||||
version = "1.4.1"
|
||||
@@ -562,19 +598,19 @@ protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4
|
||||
|
||||
[[package]]
|
||||
name = "google-cloud-audit-log"
|
||||
version = "0.3.0"
|
||||
version = "0.3.2"
|
||||
description = "Google Cloud Audit Protos"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "google_cloud_audit_log-0.3.0-py2.py3-none-any.whl", hash = "sha256:8340793120a1d5aa143605def8704ecdcead15106f754ef1381ae3bab533722f"},
|
||||
{file = "google_cloud_audit_log-0.3.0.tar.gz", hash = "sha256:901428b257020d8c1d1133e0fa004164a555e5a395c7ca3cdbb8486513df3a65"},
|
||||
{file = "google_cloud_audit_log-0.3.2-py3-none-any.whl", hash = "sha256:daaedfb947a0d77f524e1bd2b560242ab4836fe1afd6b06b92f152b9658554ed"},
|
||||
{file = "google_cloud_audit_log-0.3.2.tar.gz", hash = "sha256:2598f1533a7d7cdd6c7bf448c12e5519c1d53162d78784e10bcdd1df67791bc3"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
googleapis-common-protos = ">=1.56.2,<2.0dev"
|
||||
protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
|
||||
googleapis-common-protos = ">=1.56.2,<2.0.0"
|
||||
protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
|
||||
|
||||
[[package]]
|
||||
name = "google-cloud-core"
|
||||
@@ -597,30 +633,30 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]

 [[package]]
 name = "google-cloud-logging"
-version = "3.11.4"
+version = "3.12.1"
 description = "Stackdriver Logging API client library"
 optional = false
 python-versions = ">=3.7"
 groups = ["main"]
 files = [
-    {file = "google_cloud_logging-3.11.4-py2.py3-none-any.whl", hash = "sha256:1d465ac62df29fb94bba4d6b4891035e57d573d84541dd8a40eebbc74422b2f0"},
-    {file = "google_cloud_logging-3.11.4.tar.gz", hash = "sha256:32305d989323f3c58603044e2ac5d9cf23e9465ede511bbe90b4309270d3195c"},
+    {file = "google_cloud_logging-3.12.1-py2.py3-none-any.whl", hash = "sha256:6817878af76ec4e7568976772839ab2c43ddfd18fbbf2ce32b13ef549cd5a862"},
+    {file = "google_cloud_logging-3.12.1.tar.gz", hash = "sha256:36efc823985055b203904e83e1c8f9f999b3c64270bcda39d57386ca4effd678"},
 ]

 [package.dependencies]
-google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]}
-google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev"
-google-cloud-appengine-logging = ">=0.1.3,<2.0.0dev"
-google-cloud-audit-log = ">=0.2.4,<1.0.0dev"
-google-cloud-core = ">=2.0.0,<3.0.0dev"
-grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev"
+google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0", extras = ["grpc"]}
+google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0"
+google-cloud-appengine-logging = ">=0.1.3,<2.0.0"
+google-cloud-audit-log = ">=0.3.1,<1.0.0"
+google-cloud-core = ">=2.0.0,<3.0.0"
+grpc-google-iam-v1 = ">=0.12.4,<1.0.0"
 opentelemetry-api = ">=1.9.0"
 proto-plus = [
-    {version = ">=1.25.0,<2.0.0dev", markers = "python_version >= \"3.13\""},
-    {version = ">=1.22.2,<2.0.0dev", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
-    {version = ">=1.22.0,<2.0.0dev", markers = "python_version < \"3.11\""},
+    {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
+    {version = ">=1.22.2,<2.0.0", markers = "python_version >= \"3.11\" and python_version < \"3.13\""},
+    {version = ">=1.22.0,<2.0.0", markers = "python_version < \"3.11\""},
 ]
-protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
+protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"

 [[package]]
 name = "googleapis-common-protos"
@@ -895,6 +931,47 @@ files = [
     {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
 ]

+[[package]]
+name = "launchdarkly-eventsource"
+version = "1.2.4"
+description = "LaunchDarkly SSE Client"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "launchdarkly_eventsource-1.2.4-py3-none-any.whl", hash = "sha256:048ef8c4440d0d8219778661ee4d4b5e12aa6ed2c29a3004417ede44c2386e8c"},
+    {file = "launchdarkly_eventsource-1.2.4.tar.gz", hash = "sha256:b8b9342681f55e1d35c56243431cbbaca4eb9812d6785f8de204af322104e066"},
+]
+
+[package.dependencies]
+urllib3 = ">=1.26.0,<3"
+
+[[package]]
+name = "launchdarkly-server-sdk"
+version = "9.11.1"
+description = "LaunchDarkly SDK for Python"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "launchdarkly_server_sdk-9.11.1-py3-none-any.whl", hash = "sha256:128569cebf666dd115cc0ba03c48ff75f6acc9788301a7e2c3a54d06107e445a"},
+    {file = "launchdarkly_server_sdk-9.11.1.tar.gz", hash = "sha256:150e29656cb8c506d1967f3c59e62b69310d345ec27217640a6146dd1db5d250"},
+]
+
+[package.dependencies]
+certifi = ">=2018.4.16"
+expiringdict = ">=1.1.4"
+launchdarkly-eventsource = ">=1.2.4,<2.0.0"
+pyRFC3339 = ">=1.0"
+semver = ">=2.10.2"
+urllib3 = ">=1.26.0,<3"
+
+[package.extras]
+consul = ["python-consul (>=1.0.1)"]
+dynamodb = ["boto3 (>=1.9.71)"]
+redis = ["redis (>=2.10.5)"]
+test-filesource = ["pyyaml (>=5.3.1)", "watchdog (>=3.0.0)"]
+
 [[package]]
 name = "multidict"
 version = "6.1.0"
@@ -1238,19 +1315,19 @@ pyasn1 = ">=0.4.6,<0.7.0"

 [[package]]
 name = "pydantic"
-version = "2.11.1"
+version = "2.11.4"
 description = "Data validation using Python type hints"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "pydantic-2.11.1-py3-none-any.whl", hash = "sha256:5b6c415eee9f8123a14d859be0c84363fec6b1feb6b688d6435801230b56e0b8"},
-    {file = "pydantic-2.11.1.tar.gz", hash = "sha256:442557d2910e75c991c39f4b4ab18963d57b9b55122c8b2a9cd176d8c29ce968"},
+    {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
+    {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
 ]

 [package.dependencies]
 annotated-types = ">=0.6.0"
-pydantic-core = "2.33.0"
+pydantic-core = "2.33.2"
 typing-extensions = ">=4.12.2"
 typing-inspection = ">=0.4.0"

@@ -1260,111 +1337,111 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows

 [[package]]
 name = "pydantic-core"
-version = "2.33.0"
+version = "2.33.2"
 description = "Core functionality for Pydantic validation and serialization"
 optional = false
 python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "pydantic_core-2.33.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71dffba8fe9ddff628c68f3abd845e91b028361d43c5f8e7b3f8b91d7d85413e"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:abaeec1be6ed535a5d7ffc2e6c390083c425832b20efd621562fbb5bff6dc518"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759871f00e26ad3709efc773ac37b4d571de065f9dfb1778012908bcc36b3a73"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dcfebee69cd5e1c0b76a17e17e347c84b00acebb8dd8edb22d4a03e88e82a207"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b1262b912435a501fa04cd213720609e2cefa723a07c92017d18693e69bf00b"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4726f1f3f42d6a25678c67da3f0b10f148f5655813c5aca54b0d1742ba821b8f"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e790954b5093dff1e3a9a2523fddc4e79722d6f07993b4cd5547825c3cbf97b5"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34e7fb3abe375b5c4e64fab75733d605dda0f59827752debc99c17cb2d5f3276"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ecb158fb9b9091b515213bed3061eb7deb1d3b4e02327c27a0ea714ff46b0760"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:4d9149e7528af8bbd76cc055967e6e04617dcb2a2afdaa3dea899406c5521faa"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e81a295adccf73477220e15ff79235ca9dcbcee4be459eb9d4ce9a2763b8386c"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-win32.whl", hash = "sha256:f22dab23cdbce2005f26a8f0c71698457861f97fc6318c75814a50c75e87d025"},
-    {file = "pydantic_core-2.33.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cb2390355ba084c1ad49485d18449b4242da344dea3e0fe10babd1f0db7dcfc"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a608a75846804271cf9c83e40bbb4dab2ac614d33c6fd5b0c6187f53f5c593ef"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e1c69aa459f5609dec2fa0652d495353accf3eda5bdb18782bc5a2ae45c9273a"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9ec80eb5a5f45a2211793f1c4aeddff0c3761d1c70d684965c1807e923a588b"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e925819a98318d17251776bd3d6aa9f3ff77b965762155bdad15d1a9265c4cfd"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bf68bb859799e9cec3d9dd8323c40c00a254aabb56fe08f907e437005932f2b"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1b2ea72dea0825949a045fa4071f6d5b3d7620d2a208335207793cf29c5a182d"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1583539533160186ac546b49f5cde9ffc928062c96920f58bd95de32ffd7bffd"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:23c3e77bf8a7317612e5c26a3b084c7edeb9552d645742a54a5867635b4f2453"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a7a7f2a3f628d2f7ef11cb6188bcf0b9e1558151d511b974dfea10a49afe192b"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:f1fb026c575e16f673c61c7b86144517705865173f3d0907040ac30c4f9f5915"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:635702b2fed997e0ac256b2cfbdb4dd0bf7c56b5d8fba8ef03489c03b3eb40e2"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-win32.whl", hash = "sha256:07b4ced28fccae3f00626eaa0c4001aa9ec140a29501770a88dbbb0966019a86"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-win_amd64.whl", hash = "sha256:4927564be53239a87770a5f86bdc272b8d1fbb87ab7783ad70255b4ab01aa25b"},
-    {file = "pydantic_core-2.33.0-cp311-cp311-win_arm64.whl", hash = "sha256:69297418ad644d521ea3e1aa2e14a2a422726167e9ad22b89e8f1130d68e1e9a"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6c32a40712e3662bebe524abe8abb757f2fa2000028d64cc5a1006016c06af43"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ec86b5baa36f0a0bfb37db86c7d52652f8e8aa076ab745ef7725784183c3fdd"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4deac83a8cc1d09e40683be0bc6d1fa4cde8df0a9bf0cda5693f9b0569ac01b6"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:175ab598fb457a9aee63206a1993874badf3ed9a456e0654273e56f00747bbd6"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f36afd0d56a6c42cf4e8465b6441cf546ed69d3a4ec92724cc9c8c61bd6ecf4"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a98257451164666afafc7cbf5fb00d613e33f7e7ebb322fbcd99345695a9a61"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecc6d02d69b54a2eb83ebcc6f29df04957f734bcf309d346b4f83354d8376862"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a69b7596c6603afd049ce7f3835bcf57dd3892fc7279f0ddf987bebed8caa5a"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea30239c148b6ef41364c6f51d103c2988965b643d62e10b233b5efdca8c0099"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:abfa44cf2f7f7d7a199be6c6ec141c9024063205545aa09304349781b9a125e6"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20d4275f3c4659d92048c70797e5fdc396c6e4446caf517ba5cad2db60cd39d3"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-win32.whl", hash = "sha256:918f2013d7eadea1d88d1a35fd4a1e16aaf90343eb446f91cb091ce7f9b431a2"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-win_amd64.whl", hash = "sha256:aec79acc183865bad120b0190afac467c20b15289050648b876b07777e67ea48"},
-    {file = "pydantic_core-2.33.0-cp312-cp312-win_arm64.whl", hash = "sha256:5461934e895968655225dfa8b3be79e7e927e95d4bd6c2d40edd2fa7052e71b6"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f00e8b59e1fc8f09d05594aa7d2b726f1b277ca6155fc84c0396db1b373c4555"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a73be93ecef45786d7d95b0c5e9b294faf35629d03d5b145b09b81258c7cd6d"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff48a55be9da6930254565ff5238d71d5e9cd8c5487a191cb85df3bdb8c77365"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a4ea04195638dcd8c53dadb545d70badba51735b1594810e9768c2c0b4a5da"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41d698dcbe12b60661f0632b543dbb119e6ba088103b364ff65e951610cb7ce0"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae62032ef513fe6281ef0009e30838a01057b832dc265da32c10469622613885"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f225f3a3995dbbc26affc191d0443c6c4aa71b83358fd4c2b7d63e2f6f0336f9"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5bdd36b362f419c78d09630cbaebc64913f66f62bda6d42d5fbb08da8cc4f181"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2a0147c0bef783fd9abc9f016d66edb6cac466dc54a17ec5f5ada08ff65caf5d"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:c860773a0f205926172c6644c394e02c25421dc9a456deff16f64c0e299487d3"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:138d31e3f90087f42aa6286fb640f3c7a8eb7bdae829418265e7e7474bd2574b"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-win32.whl", hash = "sha256:d20cbb9d3e95114325780f3cfe990f3ecae24de7a2d75f978783878cce2ad585"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-win_amd64.whl", hash = "sha256:ca1103d70306489e3d006b0f79db8ca5dd3c977f6f13b2c59ff745249431a606"},
-    {file = "pydantic_core-2.33.0-cp313-cp313-win_arm64.whl", hash = "sha256:6291797cad239285275558e0a27872da735b05c75d5237bbade8736f80e4c225"},
-    {file = "pydantic_core-2.33.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7b79af799630af263eca9ec87db519426d8c9b3be35016eddad1832bac812d87"},
-    {file = "pydantic_core-2.33.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eabf946a4739b5237f4f56d77fa6668263bc466d06a8036c055587c130a46f7b"},
-    {file = "pydantic_core-2.33.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8a1d581e8cdbb857b0e0e81df98603376c1a5c34dc5e54039dcc00f043df81e7"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:7c9c84749f5787781c1c45bb99f433402e484e515b40675a5d121ea14711cf61"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64672fa888595a959cfeff957a654e947e65bbe1d7d82f550417cbd6898a1d6b"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26bc7367c0961dec292244ef2549afa396e72e28cc24706210bd44d947582c59"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce72d46eb201ca43994303025bd54d8a35a3fc2a3495fac653d6eb7205ce04f4"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14229c1504287533dbf6b1fc56f752ce2b4e9694022ae7509631ce346158de11"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:085d8985b1c1e48ef271e98a658f562f29d89bda98bf120502283efbc87313eb"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31860fbda80d8f6828e84b4a4d129fd9c4535996b8249cfb8c720dc2a1a00bb8"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f200b2f20856b5a6c3a35f0d4e344019f805e363416e609e9b47c552d35fd5ea"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f72914cfd1d0176e58ddc05c7a47674ef4222c8253bf70322923e73e14a4ac3"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:91301a0980a1d4530d4ba7e6a739ca1a6b31341252cb709948e0aca0860ce0ae"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7419241e17c7fbe5074ba79143d5523270e04f86f1b3a0dff8df490f84c8273a"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-win32.whl", hash = "sha256:7a25493320203005d2a4dac76d1b7d953cb49bce6d459d9ae38e30dd9f29bc9c"},
-    {file = "pydantic_core-2.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:82a4eba92b7ca8af1b7d5ef5f3d9647eee94d1f74d21ca7c21e3a2b92e008358"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2762c568596332fdab56b07060c8ab8362c56cf2a339ee54e491cd503612c50"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bf637300ff35d4f59c006fff201c510b2b5e745b07125458a5389af3c0dff8c"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c151ce3d59ed56ebd7ce9ce5986a409a85db697d25fc232f8e81f195aa39a1"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee65f0cc652261744fd07f2c6e6901c914aa6c5ff4dcfaf1136bc394d0dd26b"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:024d136ae44d233e6322027bbf356712b3940bee816e6c948ce4b90f18471b3d"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e37f10f6d4bc67c58fbd727108ae1d8b92b397355e68519f1e4a7babb1473442"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:502ed542e0d958bd12e7c3e9a015bce57deaf50eaa8c2e1c439b512cb9db1e3a"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:715c62af74c236bf386825c0fdfa08d092ab0f191eb5b4580d11c3189af9d330"},
-    {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bccc06fa0372151f37f6b69834181aa9eb57cf8665ed36405fb45fbf6cac3bae"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d8dc9f63a26f7259b57f46a7aab5af86b2ad6fbe48487500bb1f4b27e051e4c"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:30369e54d6d0113d2aa5aee7a90d17f225c13d87902ace8fcd7bbf99b19124db"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3eb479354c62067afa62f53bb387827bee2f75c9c79ef25eef6ab84d4b1ae3b"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0310524c833d91403c960b8a3cf9f46c282eadd6afd276c8c5edc617bd705dc9"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eddb18a00bbb855325db27b4c2a89a4ba491cd6a0bd6d852b225172a1f54b36c"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ade5dbcf8d9ef8f4b28e682d0b29f3008df9842bb5ac48ac2c17bc55771cc976"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2c0afd34f928383e3fd25740f2050dbac9d077e7ba5adbaa2227f4d4f3c8da5c"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7da333f21cd9df51d5731513a6d39319892947604924ddf2e24a4612975fb936"},
-    {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4b6d77c75a57f041c5ee915ff0b0bb58eabb78728b69ed967bc5b780e8f701b8"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba95691cf25f63df53c1d342413b41bd7762d9acb425df8858d7efa616c0870e"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f1ab031feb8676f6bd7c85abec86e2935850bf19b84432c64e3e239bffeb1ec"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c1151827eef98b83d49b6ca6065575876a02d2211f259fb1a6b7757bd24dd8"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a66d931ea2c1464b738ace44b7334ab32a2fd50be023d863935eb00f42be1778"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0bcf0bab28995d483f6c8d7db25e0d05c3efa5cebfd7f56474359e7137f39856"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:89670d7a0045acb52be0566df5bc8b114ac967c662c06cf5e0c606e4aadc964b"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:b716294e721d8060908dbebe32639b01bfe61b15f9f57bcc18ca9a0e00d9520b"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fc53e05c16697ff0c1c7c2b98e45e131d4bfb78068fffff92a82d169cbb4c7b7"},
-    {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:68504959253303d3ae9406b634997a2123a0b0c1da86459abbd0ffc921695eac"},
-    {file = "pydantic_core-2.33.0.tar.gz", hash = "sha256:40eb8af662ba409c3cbf4a8150ad32ae73514cd7cb1f1a2113af39763dd616b3"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
+    {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
+    {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
+    {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
+    {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
+    {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
+    {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
+    {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
+    {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
+    {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
+    {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
+    {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
+    {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
 ]

 [package.dependencies]
@@ -1372,22 +1449,25 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

 [[package]]
 name = "pydantic-settings"
-version = "2.8.1"
+version = "2.9.1"
 description = "Settings management using Pydantic"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 groups = ["main"]
 files = [
-    {file = "pydantic_settings-2.8.1-py3-none-any.whl", hash = "sha256:81942d5ac3d905f7f3ee1a70df5dfb62d5569c12f51a5a647defc1c3d9ee2e9c"},
-    {file = "pydantic_settings-2.8.1.tar.gz", hash = "sha256:d5c663dfbe9db9d5e1c646b2e161da12f0d734d422ee56f567d0ea2cee4e8585"},
+    {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"},
+    {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"},
 ]

 [package.dependencies]
 pydantic = ">=2.7.0"
 python-dotenv = ">=0.21.0"
+typing-inspection = ">=0.4.0"

 [package.extras]
+aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"]
 azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"]
+gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"]
 toml = ["tomli (>=2.0.1)"]
 yaml = ["pyyaml (>=6.0.1)"]

@@ -1409,6 +1489,18 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte
 docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
 tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]

+[[package]]
+name = "pyrfc3339"
+version = "2.0.1"
+description = "Generate and parse RFC 3339 timestamps"
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+    {file = "pyRFC3339-2.0.1-py3-none-any.whl", hash = "sha256:30b70a366acac3df7386b558c21af871522560ed7f3f73cf344b8c2cbb8b0c9d"},
+    {file = "pyrfc3339-2.0.1.tar.gz", hash = "sha256:e47843379ea35c1296c3b6c67a948a1a490ae0584edfcbdea0eaffb5dd29960b"},
+]
+
 [[package]]
 name = "pytest"
 version = "8.3.3"
@@ -1575,30 +1667,42 @@ pyasn1 = ">=0.1.3"

 [[package]]
 name = "ruff"
-version = "0.11.2"
+version = "0.11.10"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-    {file = "ruff-0.11.2-py3-none-linux_armv6l.whl", hash = "sha256:c69e20ea49e973f3afec2c06376eb56045709f0212615c1adb0eda35e8a4e477"},
-    {file = "ruff-0.11.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:2c5424cc1c4eb1d8ecabe6d4f1b70470b4f24a0c0171356290b1953ad8f0e272"},
-    {file = "ruff-0.11.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:ecf20854cc73f42171eedb66f006a43d0a21bfb98a2523a809931cda569552d9"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c543bf65d5d27240321604cee0633a70c6c25c9a2f2492efa9f6d4b8e4199bb"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20967168cc21195db5830b9224be0e964cc9c8ecf3b5a9e3ce19876e8d3a96e3"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:955a9ce63483999d9f0b8f0b4a3ad669e53484232853054cc8b9d51ab4c5de74"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:86b3a27c38b8fce73bcd262b0de32e9a6801b76d52cdb3ae4c914515f0cef608"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3b66a03b248c9fcd9d64d445bafdf1589326bee6fc5c8e92d7562e58883e30f"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0397c2672db015be5aa3d4dac54c69aa012429097ff219392c018e21f5085147"},
-    {file = "ruff-0.11.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869bcf3f9abf6457fbe39b5a37333aa4eecc52a3b99c98827ccc371a8e5b6f1b"},
-    {file = "ruff-0.11.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2a2b50ca35457ba785cd8c93ebbe529467594087b527a08d487cf0ee7b3087e9"},
-    {file = "ruff-0.11.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7c69c74bf53ddcfbc22e6eb2f31211df7f65054bfc1f72288fc71e5f82db3eab"},
-    {file = "ruff-0.11.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6e8fb75e14560f7cf53b15bbc55baf5ecbe373dd5f3aab96ff7aa7777edd7630"},
-    {file = "ruff-0.11.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:842a472d7b4d6f5924e9297aa38149e5dcb1e628773b70e6387ae2c97a63c58f"},
-    {file = "ruff-0.11.2-py3-none-win32.whl", hash = "sha256:aca01ccd0eb5eb7156b324cfaa088586f06a86d9e5314b0eb330cb48415097cc"},
-    {file = "ruff-0.11.2-py3-none-win_amd64.whl", hash = "sha256:3170150172a8f994136c0c66f494edf199a0bbea7a409f649e4bc8f4d7084080"},
-    {file = "ruff-0.11.2-py3-none-win_arm64.whl", hash = "sha256:52933095158ff328f4c77af3d74f0379e34fd52f175144cefc1b192e7ccd32b4"},
-    {file = "ruff-0.11.2.tar.gz", hash = "sha256:ec47591497d5a1050175bdf4e1a4e6272cddff7da88a2ad595e1e326041d8d94"},
+    {file = "ruff-0.11.10-py3-none-linux_armv6l.whl", hash = "sha256:859a7bfa7bc8888abbea31ef8a2b411714e6a80f0d173c2a82f9041ed6b50f58"},
+    {file = "ruff-0.11.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:968220a57e09ea5e4fd48ed1c646419961a0570727c7e069842edd018ee8afed"},
+    {file = "ruff-0.11.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1067245bad978e7aa7b22f67113ecc6eb241dca0d9b696144256c3a879663bca"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4854fd09c7aed5b1590e996a81aeff0c9ff51378b084eb5a0b9cd9518e6cff2"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b4564e9f99168c0f9195a0fd5fa5928004b33b377137f978055e40008a082c5"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b6a9cc5b62c03cc1fea0044ed8576379dbaf751d5503d718c973d5418483641"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:607ecbb6f03e44c9e0a93aedacb17b4eb4f3563d00e8b474298a201622677947"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b3a522fa389402cd2137df9ddefe848f727250535c70dafa840badffb56b7a4"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f071b0deed7e9245d5820dac235cbdd4ef99d7b12ff04c330a241ad3534319f"},
+    {file = "ruff-0.11.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a60e3a0a617eafba1f2e4186d827759d65348fa53708ca547e384db28406a0b"},
+    {file = "ruff-0.11.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:da8ec977eaa4b7bf75470fb575bea2cb41a0e07c7ea9d5a0a97d13dbca697bf2"},
+    {file = "ruff-0.11.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ddf8967e08227d1bd95cc0851ef80d2ad9c7c0c5aab1eba31db49cf0a7b99523"},
+    {file = "ruff-0.11.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5a94acf798a82db188f6f36575d80609072b032105d114b0f98661e1679c9125"},
+    {file = "ruff-0.11.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3afead355f1d16d95630df28d4ba17fb2cb9c8dfac8d21ced14984121f639bad"},
+    {file = "ruff-0.11.10-py3-none-win32.whl", hash = "sha256:dc061a98d32a97211af7e7f3fa1d4ca2fcf919fb96c28f39551f35fc55bdbc19"},
+    {file = "ruff-0.11.10-py3-none-win_amd64.whl", hash = "sha256:5cc725fbb4d25b0f185cb42df07ab6b76c4489b4bfb740a175f3a59c70e8a224"},
+    {file = "ruff-0.11.10-py3-none-win_arm64.whl", hash = "sha256:ef69637b35fb8b210743926778d0e45e1bffa850a7c61e428c6b971549b5f5d1"},
+    {file = "ruff-0.11.10.tar.gz", hash = "sha256:d522fb204b4959909ecac47da02830daec102eeb100fb50ea9554818d47a5fa6"},
 ]

+[[package]]
+name = "semver"
+version = "3.0.4"
+description = "Python helper for Semantic Versioning (https://semver.org)"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+    {file = "semver-3.0.4-py3-none-any.whl", hash = "sha256:9c824d87ba7f7ab4a1890799cec8596f15c1241cb473404ea1cb0c55e4b04746"},
+    {file = "semver-3.0.4.tar.gz", hash = "sha256:afc7d8c584a5ed0a11033af086e8af226a9c0b206f313e0301f8dd7b6b589602"},
+]
+
 [[package]]
@@ -1625,6 +1729,24 @@ files = [
     {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
 ]

+[[package]]
+name = "starlette"
+version = "0.46.2"
+description = "The little ASGI library that shines."
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"},
+    {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"},
+]
+
+[package.dependencies]
+anyio = ">=3.6.2,<5"
+
+[package.extras]
+full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"]
+
 [[package]]
 name = "storage3"
 version = "0.11.0"
@@ -1660,14 +1782,14 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]

 [[package]]
 name = "supabase"
-version = "2.15.0"
+version = "2.15.1"
 description = "Supabase client for Python."
 optional = false
 python-versions = "<4.0,>=3.9"
 groups = ["main"]
 files = [
-    {file = "supabase-2.15.0-py3-none-any.whl", hash = "sha256:a665c7ab6c8ad1d80609ab62ad657f66fdaf38070ec9e0db5c7887fd72b109c0"},
-    {file = "supabase-2.15.0.tar.gz", hash = "sha256:2e66289ad74ae9c4cb04a69f9de00cd2ce880cd890de23269a40ac5b69151d26"},
+    {file = "supabase-2.15.1-py3-none-any.whl", hash = "sha256:749299cdd74ecf528f52045c1e60d9dba81cc2054656f754c0ca7fba0dd34827"},
+    {file = "supabase-2.15.1.tar.gz", hash = "sha256:66e847dab9346062aa6a25b4e81ac786b972c5d4299827c57d1d5bd6a0346070"},
 ]

 [package.dependencies]
@@ -1752,6 +1874,26 @@ h2 = ["h2 (>=4,<5)"]
 socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
 zstd = ["zstandard (>=0.18.0)"]

+[[package]]
+name = "uvicorn"
+version = "0.34.3"
+description = "The lightning-fast ASGI server."
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "uvicorn-0.34.3-py3-none-any.whl", hash = "sha256:16246631db62bdfbf069b0645177d6e8a77ba950cfedbfd093acef9444e4d885"},
+    {file = "uvicorn-0.34.3.tar.gz", hash = "sha256:35919a9a979d7a59334b6b10e05d77c1d0d574c50e0fc98b8b1a0f165708b55a"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+h11 = ">=0.8"
+typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"]
+
 [[package]]
 name = "websockets"
 version = "12.0"
@@ -2034,4 +2176,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<4.0"
-content-hash = "c8e23c0609cae0717447f575849b658bee9203b784ec7270b62629cddbbbd9ca"
+content-hash = "d92143928a88ca3a56ac200c335910eafac938940022fed8bd0d17c95040b54f"

@@ -7,20 +7,23 @@ readme = "README.md"
 packages = [{ include = "autogpt_libs" }]

 [tool.poetry.dependencies]
-python = ">=3.10,<4.0"
 colorama = "^0.4.6"
 expiringdict = "^1.2.2"
-google-cloud-logging = "^3.11.4"
-pydantic = "^2.11.1"
-pydantic-settings = "^2.8.1"
+google-cloud-logging = "^3.12.1"
+pydantic = "^2.11.4"
+pydantic-settings = "^2.9.1"
 pyjwt = "^2.10.1"
 pytest-asyncio = "^0.26.0"
 pytest-mock = "^3.14.0"
+python = ">=3.10,<4.0"
-supabase = "^2.15.0"
+supabase = "^2.15.1"
+launchdarkly-server-sdk = "^9.11.1"
+fastapi = "^0.115.12"
+uvicorn = "^0.34.3"

 [tool.poetry.group.dev.dependencies]
 redis = "^5.2.1"
-ruff = "^0.11.0"
+ruff = "^0.11.10"

 [build-system]
 requires = ["poetry-core"]

@@ -13,7 +13,6 @@ PRISMA_SCHEMA="postgres/schema.prisma"

 # EXECUTOR
 NUM_GRAPH_WORKERS=10
-NUM_NODE_WORKERS=3

 BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

@@ -66,6 +65,13 @@ MEDIA_GCS_BUCKET_NAME=
 ## and tunnel it to your locally running backend.
 PLATFORM_BASE_URL=http://localhost:3000

+## Cloudflare Turnstile (CAPTCHA) Configuration
+## Get these from the Cloudflare Turnstile dashboard: https://dash.cloudflare.com/?to=/:account/turnstile
+## This is the backend secret key
+TURNSTILE_SECRET_KEY=
+## This is the verify URL
+TURNSTILE_VERIFY_URL=https://challenges.cloudflare.com/turnstile/v0/siteverify
+
 ## == INTEGRATION CREDENTIALS == ##
 # Each set of server side credentials is required for the corresponding 3rd party
 # integration to work.
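For context on the Turnstile settings added above: a minimal sketch of how a backend might call the configured verify URL. The `secret`/`response` form fields and the JSON `success` flag follow Cloudflare's published siteverify contract, but the helper name and the use of `httpx` are illustrative assumptions, not code from this repository.

```python
# Hypothetical sketch, not from this repo: verify a Turnstile token server-side.
import os

import httpx


async def verify_turnstile_token(token: str) -> bool:
    async with httpx.AsyncClient() as client:
        resp = await client.post(
            os.environ["TURNSTILE_VERIFY_URL"],  # siteverify endpoint configured above
            data={
                "secret": os.environ["TURNSTILE_SECRET_KEY"],  # backend secret key
                "response": token,  # token produced by the Turnstile widget client-side
            },
        )
    # Cloudflare returns JSON with a boolean "success" field
    return resp.json().get("success", False)
```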
@@ -120,8 +126,10 @@ TODOIST_CLIENT_SECRET=
 # LLM
 OPENAI_API_KEY=
 ANTHROPIC_API_KEY=
+AIML_API_KEY=
 GROQ_API_KEY=
 OPEN_ROUTER_API_KEY=
+LLAMA_API_KEY=

 # Reddit
 # Go to https://www.reddit.com/prefs/apps and create a new app
autogpt_platform/backend/TESTING.md (new file, 237 lines)
@@ -0,0 +1,237 @@
# Backend Testing Guide

This guide covers testing practices for the AutoGPT Platform backend, with a focus on snapshot testing for API endpoints.

## Table of Contents
- [Overview](#overview)
- [Running Tests](#running-tests)
- [Snapshot Testing](#snapshot-testing)
- [Writing Tests for API Routes](#writing-tests-for-api-routes)
- [Best Practices](#best-practices)

## Overview

The backend uses pytest for testing with the following key libraries:
- `pytest` - Test framework
- `pytest-asyncio` - Async test support
- `pytest-mock` - Mocking support
- `pytest-snapshot` - Snapshot testing for API responses

## Running Tests

### Run all tests
```bash
poetry run test
```

### Run a specific test file
```bash
poetry run pytest path/to/test_file.py
```

### Run with verbose output
```bash
poetry run pytest -v
```

### Run with coverage
```bash
poetry run pytest --cov=backend
```

## Snapshot Testing

Snapshot testing captures the output of your code and compares it against previously saved snapshots. This is particularly useful for testing API responses.

### How Snapshot Testing Works

1. First run: creates snapshot files in `snapshots/` directories
2. Subsequent runs: compares output against the saved snapshots
3. Changes detected: the test fails if output differs from its snapshot

### Creating/Updating Snapshots

When you first write a test, or when the expected output changes:

```bash
poetry run pytest path/to/test.py --snapshot-update
```

⚠️ **Important**: Always review snapshot changes before committing! Use `git diff` to verify the changes are expected.

### Snapshot Test Example

```python
import json
from pytest_snapshot.plugin import Snapshot


def test_api_endpoint(snapshot: Snapshot):
    response = client.get("/api/endpoint")

    # Snapshot the response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response.json(), indent=2, sort_keys=True),
        "endpoint_response"
    )
```

### Best Practices for Snapshots

1. **Use descriptive names**: `"user_list_response"` not `"response1"`
2. **Sort JSON keys**: Ensures consistent snapshots
3. **Format JSON**: Use `indent=2` for readable diffs
4. **Exclude dynamic data**: Remove timestamps, IDs, etc. that change between runs

Example of excluding dynamic data:
```python
response_data = response.json()
# Remove dynamic fields for snapshot
response_data.pop("created_at", None)
response_data.pop("id", None)

snapshot.snapshot_dir = "snapshots"
snapshot.assert_match(
    json.dumps(response_data, indent=2, sort_keys=True),
    "static_response_data"
)
```

## Writing Tests for API Routes

### Basic Structure

```python
import json

import fastapi
import fastapi.testclient
import pytest
from pytest_snapshot.plugin import Snapshot

from backend.server.v2.myroute import router

app = fastapi.FastAPI()
app.include_router(router)
client = fastapi.testclient.TestClient(app)


def test_endpoint_success(snapshot: Snapshot):
    response = client.get("/endpoint")
    assert response.status_code == 200

    # Test specific fields
    data = response.json()
    assert data["status"] == "success"

    # Snapshot the full response
    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(data, indent=2, sort_keys=True),
        "endpoint_success_response"
    )
```

### Testing with Authentication

```python
def override_auth_middleware():
    return {"sub": "test-user-id"}


def override_get_user_id():
    return "test-user-id"


app.dependency_overrides[auth_middleware] = override_auth_middleware
app.dependency_overrides[get_user_id] = override_get_user_id
```
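FastAPI checks `app.dependency_overrides` before resolving a dependency's real implementation, so the stubs above run in place of the actual auth layer during tests. Note that `auth_middleware` and `get_user_id` here stand in for whichever dependency callables the router under test actually imports.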

### Mocking External Services

```python
def test_external_api_call(mocker, snapshot):
    # Mock the external service
    mock_response = {"external": "data"}
    mocker.patch(
        "backend.services.external_api.call",
        return_value=mock_response
    )

    response = client.post("/api/process")
    assert response.status_code == 200

    snapshot.snapshot_dir = "snapshots"
    snapshot.assert_match(
        json.dumps(response.json(), indent=2, sort_keys=True),
        "process_with_external_response"
    )
```

## Best Practices

### 1. Test Organization
- Place tests next to the code: `routes.py` → `routes_test.py`
- Use descriptive test names: `test_create_user_with_invalid_email`
- Group related tests in classes when appropriate

### 2. Test Coverage
- Test the happy path and error cases (see the parametrized sketch below)
- Test edge cases (empty data, invalid formats)
- Test authentication and authorization
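
A small parametrized sketch that exercises the happy path and validation errors in one test; the endpoint and payloads are hypothetical, and `client` is the module-level TestClient from the examples above:

```python
import pytest


@pytest.mark.parametrize(
    "payload,expected_status",
    [
        ({"email": "test@example.com", "name": "Test User"}, 200),  # happy path
        ({"email": "not-an-email"}, 422),  # validation error
        ({}, 422),  # missing required fields
    ],
)
def test_create_user_status_codes(payload, expected_status):
    response = client.post("/users", json=payload)
    assert response.status_code == expected_status
```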

### 3. Snapshot Testing Guidelines
- Review all snapshot changes carefully
- Don't snapshot sensitive data
- Keep snapshots focused and minimal
- Update snapshots intentionally, not accidentally

### 4. Async Testing
- Use regular `def` for FastAPI TestClient tests
- Use `async def` with `@pytest.mark.asyncio` for testing async functions directly (sketched below)
|
||||
|
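
A minimal sketch of the two styles, assuming a hypothetical async helper `fetch_user` exists in the module under test:

```python
import pytest

from backend.server.v2.myroute import fetch_user  # hypothetical async helper

def test_endpoint_sync():
    # TestClient drives the async app from a regular sync test
    response = client.get("/endpoint")
    assert response.status_code == 200

@pytest.mark.asyncio
async def test_fetch_user_directly():
    # Async functions are awaited directly and need the asyncio marker
    user = await fetch_user("test-user-id")
    assert user is not None
```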

### 5. Fixtures

Create reusable fixtures for common test data:

```python
@pytest.fixture
def sample_user():
    return {
        "email": "test@example.com",
        "name": "Test User"
    }

def test_create_user(sample_user, snapshot):
    response = client.post("/users", json=sample_user)
    # ... test implementation
```

## CI/CD Integration

The GitHub Actions workflow automatically runs tests on:

- Pull requests
- Pushes to the main branch

Snapshot tests work in CI by:

1. Committing snapshot files to the repository
2. Comparing test output against the committed snapshots in CI
3. Failing the run if the snapshots don't match

## Troubleshooting

### Snapshot Mismatches

- Review the diff carefully
- If changes are expected: `poetry run pytest --snapshot-update`
- If changes are unexpected: fix the code causing the difference

### Async Test Issues

- Ensure async functions use `@pytest.mark.asyncio`
- Use `AsyncMock` for mocking async functions (see the sketch below)
- FastAPI TestClient handles async automatically
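
A minimal sketch of mocking an async function with `AsyncMock`, assuming a hypothetical async service call at `backend.services.external_api.call`:

```python
from unittest.mock import AsyncMock

def test_process_with_async_mock(mocker):
    # AsyncMock returns an awaitable, so the patched call can be awaited
    mocker.patch(
        "backend.services.external_api.call",  # hypothetical target
        new_callable=AsyncMock,
        return_value={"external": "data"},
    )
    response = client.post("/api/process")
    assert response.status_code == 200
```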

### Import Errors

- Check that all dependencies are in `pyproject.toml`
- Run `poetry install` to ensure dependencies are installed
- Verify import paths are correct

## Summary

Snapshot testing provides a powerful way to ensure API responses remain consistent. Combined with traditional assertions, it creates a robust test suite that catches regressions while remaining maintainable.

Remember: good tests are as important as good code!

@@ -1,3 +1,4 @@
+import functools
 import importlib
 import os
 import re
@@ -10,17 +11,11 @@ if TYPE_CHECKING:
 T = TypeVar("T")


-_AVAILABLE_BLOCKS: dict[str, type["Block"]] = {}


+@functools.cache
 def load_all_blocks() -> dict[str, type["Block"]]:
     from backend.data.block import Block

-    if _AVAILABLE_BLOCKS:
-        return _AVAILABLE_BLOCKS

     # Dynamically load all modules under backend.blocks
     AVAILABLE_MODULES = []
     current_dir = Path(__file__).parent
     modules = [
         str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
@@ -35,9 +30,9 @@ def load_all_blocks() -> dict[str, type["Block"]]:
     )

         importlib.import_module(f".{module}", package=__name__)
         AVAILABLE_MODULES.append(module)

     # Load all Block instances from the available modules
+    available_blocks: dict[str, type["Block"]] = {}
     for block_cls in all_subclasses(Block):
         class_name = block_cls.__name__

@@ -58,7 +53,7 @@ def load_all_blocks() -> dict[str, type["Block"]]:
                 f"Block ID {block.name} error: {block.id} is not a valid UUID"
             )

-        if block.id in _AVAILABLE_BLOCKS:
+        if block.id in available_blocks:
             raise ValueError(
                 f"Block ID {block.name} error: {block.id} is already in use"
             )
@@ -89,9 +84,9 @@ def load_all_blocks() -> dict[str, type["Block"]]:
                 f"{block.name} has a boolean field with no default value"
             )

-        _AVAILABLE_BLOCKS[block.id] = block_cls
+        available_blocks[block.id] = block_cls

-    return _AVAILABLE_BLOCKS
+    return available_blocks


 __all__ = ["load_all_blocks"]

@@ -1,5 +1,6 @@
+import asyncio
 import logging
-from typing import Any
+from typing import Any, Optional

 from backend.data.block import (
     Block,
@@ -11,7 +12,7 @@ from backend.data.block import (
     get_block,
 )
 from backend.data.execution import ExecutionStatus
-from backend.data.model import SchemaField
+from backend.data.model import CredentialsMetaInput, SchemaField
 from backend.util import json

 logger = logging.getLogger(__name__)
@@ -23,17 +24,21 @@ class AgentExecutorBlock(Block):
         graph_id: str = SchemaField(description="Graph ID")
         graph_version: int = SchemaField(description="Graph Version")

-        data: BlockInput = SchemaField(description="Input data for the graph")
+        inputs: BlockInput = SchemaField(description="Input data for the graph")
         input_schema: dict = SchemaField(description="Input schema for the graph")
         output_schema: dict = SchemaField(description="Output schema for the graph")

+        node_credentials_input_map: Optional[
+            dict[str, dict[str, CredentialsMetaInput]]
+        ] = SchemaField(default=None, hidden=True)
+
         @classmethod
         def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
             return data.get("input_schema", {})

         @classmethod
         def get_input_defaults(cls, data: BlockInput) -> BlockInput:
-            return data.get("data", {})
+            return data.get("inputs", {})

         @classmethod
         def get_missing_input(cls, data: BlockInput) -> set[str]:
@@ -57,38 +62,80 @@ class AgentExecutorBlock(Block):
             categories={BlockCategory.AGENT},
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        from backend.data.execution import ExecutionEventType
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:

         from backend.executor import utils as execution_utils

-        event_bus = execution_utils.get_execution_event_bus()

-        graph_exec = execution_utils.add_graph_execution(
+        graph_exec = await execution_utils.add_graph_execution(
             graph_id=input_data.graph_id,
             graph_version=input_data.graph_version,
             user_id=input_data.user_id,
-            data=input_data.data,
+            inputs=input_data.inputs,
+            node_credentials_input_map=input_data.node_credentials_input_map,
+            use_db_query=False,
         )
-        log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"

+        try:
+            async for name, data in self._run(
+                graph_id=input_data.graph_id,
+                graph_version=input_data.graph_version,
+                graph_exec_id=graph_exec.id,
+                user_id=input_data.user_id,
+            ):
+                yield name, data
+        except asyncio.CancelledError:
+            logger.warning(
+                f"Execution of graph {input_data.graph_id} version {input_data.graph_version} was cancelled."
+            )
+            await execution_utils.stop_graph_execution(
+                graph_exec.id, use_db_query=False
+            )
+        except Exception as e:
+            logger.error(
+                f"Execution of graph {input_data.graph_id} version {input_data.graph_version} failed: {e}, stopping execution."
+            )
+            await execution_utils.stop_graph_execution(
+                graph_exec.id, use_db_query=False
+            )
+            raise

+    async def _run(
+        self,
+        graph_id: str,
+        graph_version: int,
+        graph_exec_id: str,
+        user_id: str,
+    ) -> BlockOutput:

+        from backend.data.execution import ExecutionEventType
+        from backend.executor import utils as execution_utils

+        event_bus = execution_utils.get_async_execution_event_bus()

+        log_id = f"Graph #{graph_id}-V{graph_version}, exec-id: {graph_exec_id}"
         logger.info(f"Starting execution of {log_id}")

-        for event in event_bus.listen(
-            user_id=graph_exec.user_id,
-            graph_id=graph_exec.graph_id,
-            graph_exec_id=graph_exec.graph_exec_id,
+        async for event in event_bus.listen(
+            user_id=user_id,
+            graph_id=graph_id,
+            graph_exec_id=graph_exec_id,
         ):
-            if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
-                if event.status in [
-                    ExecutionStatus.COMPLETED,
-                    ExecutionStatus.TERMINATED,
-                    ExecutionStatus.FAILED,
-                ]:
-                    logger.info(f"Execution {log_id} ended with status {event.status}")
-                    break
-                else:
-                    continue
+            if event.status not in [
+                ExecutionStatus.COMPLETED,
+                ExecutionStatus.TERMINATED,
+                ExecutionStatus.FAILED,
+            ]:
+                logger.debug(
+                    f"Execution {log_id} received event {event.event_type} with status {event.status}"
+                )
+                continue

-            logger.info(
+            if event.event_type == ExecutionEventType.GRAPH_EXEC_UPDATE:
+                # If the graph execution is COMPLETED, TERMINATED, or FAILED,
+                # we can stop listening for further events.
+                break

+            logger.debug(
                 f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
             )
@@ -106,5 +153,7 @@ class AgentExecutorBlock(Block):
                 continue

             for output_data in event.output_data.get("output", []):
-                logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
+                logger.debug(
+                    f"Execution {log_id} produced {output_name}: {output_data}"
+                )
                 yield output_name, output_data

@@ -1,8 +1,8 @@
 from enum import Enum
 from typing import Literal

-import replicate
 from pydantic import SecretStr
+from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput

 from backend.data.block import Block, BlockCategory, BlockSchema
@@ -165,15 +165,15 @@ class AIImageGeneratorBlock(Block):
         },
     )

-    def _run_client(
+    async def _run_client(
         self, credentials: APIKeyCredentials, model_name: str, input_params: dict
     ):
         try:
             # Initialize Replicate client
-            client = replicate.Client(api_token=credentials.api_key.get_secret_value())
+            client = ReplicateClient(api_token=credentials.api_key.get_secret_value())

             # Run the model with input parameters
-            output = client.run(model_name, input=input_params, wait=False)
+            output = await client.async_run(model_name, input=input_params, wait=False)

             # Process output
             if isinstance(output, list) and len(output) > 0:
@@ -195,7 +195,7 @@ class AIImageGeneratorBlock(Block):
         except Exception as e:
             raise RuntimeError(f"Unexpected error during model execution: {e}")

-    def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
+    async def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
         try:
             # Handle style-based prompt modification for models without native style support
             modified_prompt = input_data.prompt
@@ -213,7 +213,7 @@ class AIImageGeneratorBlock(Block):
                     "steps": 40,
                     "cfg_scale": 7.0,
                 }
-                output = self._run_client(
+                output = await self._run_client(
                     credentials,
                     "stability-ai/stable-diffusion-3.5-medium",
                     input_params,
@@ -231,7 +231,7 @@ class AIImageGeneratorBlock(Block):
                     "output_format": "jpg",  # Set to jpg for Flux models
                     "output_quality": 90,
                 }
-                output = self._run_client(
+                output = await self._run_client(
                     credentials, "black-forest-labs/flux-1.1-pro", input_params
                 )
                 return output
@@ -246,7 +246,7 @@ class AIImageGeneratorBlock(Block):
                     "output_format": "jpg",
                     "output_quality": 90,
                 }
-                output = self._run_client(
+                output = await self._run_client(
                     credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
                 )
                 return output
@@ -257,7 +257,7 @@ class AIImageGeneratorBlock(Block):
                     "size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
                     "style": input_data.style.value,
                 }
-                output = self._run_client(
+                output = await self._run_client(
                     credentials, "recraft-ai/recraft-v3", input_params
                 )
                 return output
@@ -296,9 +296,9 @@ class AIImageGeneratorBlock(Block):
         style_text = style_map.get(style, "")
         return f"{style_text} of" if style_text else ""

-    def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
+    async def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
         try:
-            url = self.generate_image(input_data, credentials)
+            url = await self.generate_image(input_data, credentials)
             if url:
                 yield "image_url", url
             else:

@@ -1,10 +1,10 @@
+import asyncio
 import logging
-import time
 from enum import Enum
 from typing import Literal

-import replicate
 from pydantic import SecretStr
+from replicate.client import Client as ReplicateClient

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import (
@@ -142,7 +142,7 @@ class AIMusicGeneratorBlock(Block):
         test_credentials=TEST_CREDENTIALS,
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         max_retries = 3
@@ -154,7 +154,7 @@ class AIMusicGeneratorBlock(Block):
                 logger.debug(
                     f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
                 )
-                result = self.run_model(
+                result = await self.run_model(
                     api_key=credentials.api_key,
                     music_gen_model_version=input_data.music_gen_model_version,
                     prompt=input_data.prompt,
@@ -176,13 +176,13 @@ class AIMusicGeneratorBlock(Block):
                 last_error = f"Unexpected error: {str(e)}"
                 logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
                 if attempt < max_retries - 1:
-                    time.sleep(retry_delay)
+                    await asyncio.sleep(retry_delay)
                     continue

         # If we've exhausted all retries, yield the error
         yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"

-    def run_model(
+    async def run_model(
         self,
         api_key: SecretStr,
         music_gen_model_version: MusicGenModelVersion,
@@ -196,10 +196,10 @@ class AIMusicGeneratorBlock(Block):
         normalization_strategy: NormalizationStrategy,
     ):
         # Initialize Replicate client with the API key
-        client = replicate.Client(api_token=api_key.get_secret_value())
+        client = ReplicateClient(api_token=api_key.get_secret_value())

         # Run the model with parameters
-        output = client.run(
+        output = await client.async_run(
             "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
             input={
                 "prompt": prompt,

@@ -1,3 +1,4 @@
+import asyncio
 import logging
 import time
 from enum import Enum
@@ -13,7 +14,7 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.request import requests
+from backend.util.request import Requests

 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -216,29 +217,29 @@ class AIShortformVideoCreatorBlock(Block):
         test_credentials=TEST_CREDENTIALS,
     )

-    def create_webhook(self):
+    async def create_webhook(self):
         url = "https://webhook.site/token"
         headers = {"Accept": "application/json", "Content-Type": "application/json"}
-        response = requests.post(url, headers=headers)
+        response = await Requests().post(url, headers=headers)
         webhook_data = response.json()
         return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"

-    def create_video(self, api_key: SecretStr, payload: dict) -> dict:
+    async def create_video(self, api_key: SecretStr, payload: dict) -> dict:
         url = "https://www.revid.ai/api/public/v2/render"
         headers = {"key": api_key.get_secret_value()}
-        response = requests.post(url, json=payload, headers=headers)
+        response = await Requests().post(url, json=payload, headers=headers)
         logger.debug(
-            f"API Response Status Code: {response.status_code}, Content: {response.text}"
+            f"API Response Status Code: {response.status}, Content: {response.text}"
         )
         return response.json()

-    def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
+    async def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
         url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
         headers = {"key": api_key.get_secret_value()}
-        response = requests.get(url, headers=headers)
+        response = await Requests().get(url, headers=headers)
         return response.json()

-    def wait_for_video(
+    async def wait_for_video(
         self,
         api_key: SecretStr,
         pid: str,
@@ -247,7 +248,7 @@ class AIShortformVideoCreatorBlock(Block):
     ) -> str:
         start_time = time.time()
         while time.time() - start_time < max_wait_time:
-            status = self.check_video_status(api_key, pid)
+            status = await self.check_video_status(api_key, pid)
             logger.debug(f"Video status: {status}")

             if status.get("status") == "ready" and "videoUrl" in status:
@@ -260,16 +261,16 @@ class AIShortformVideoCreatorBlock(Block):
                 logger.error(f"Video creation failed: {status.get('message')}")
                 raise ValueError(f"Video creation failed: {status.get('message')}")

-            time.sleep(10)
+            await asyncio.sleep(10)

         logger.error("Video creation timed out")
         raise TimeoutError("Video creation timed out")

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         # Create a new Webhook.site URL
-        webhook_token, webhook_url = self.create_webhook()
+        webhook_token, webhook_url = await self.create_webhook()
         logger.debug(f"Webhook URL: {webhook_url}")

         audio_url = input_data.background_music.audio_url
@@ -306,7 +307,7 @@ class AIShortformVideoCreatorBlock(Block):
         }

         logger.debug("Creating video...")
-        response = self.create_video(credentials.api_key, payload)
+        response = await self.create_video(credentials.api_key, payload)
         pid = response.get("pid")

         if not pid:
@@ -318,6 +319,8 @@ class AIShortformVideoCreatorBlock(Block):
             logger.debug(
                 f"Video created with project ID: {pid}. Waiting for completion..."
             )
-            video_url = self.wait_for_video(credentials.api_key, pid, webhook_token)
+            video_url = await self.wait_for_video(
+                credentials.api_key, pid, webhook_token
+            )
             logger.debug(f"Video ready: {video_url}")
             yield "video_url", video_url

@@ -27,14 +27,15 @@ class ApolloClient:
     def _get_headers(self) -> dict[str, str]:
         return {"x-api-key": self.credentials.api_key.get_secret_value()}

-    def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
+    async def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
         """Search for people in Apollo"""
-        response = self.requests.get(
+        response = await self.requests.get(
            f"{self.API_URL}/mixed_people/search",
            headers=self._get_headers(),
            params=query.model_dump(exclude={"credentials", "max_results"}),
        )
-        parsed_response = SearchPeopleResponse(**response.json())
+        data = response.json()
+        parsed_response = SearchPeopleResponse(**data)
         if parsed_response.pagination.total_entries == 0:
             return []

@@ -52,27 +53,29 @@ class ApolloClient:
             and len(parsed_response.people) > 0
         ):
             query.page += 1
-            response = self.requests.get(
+            response = await self.requests.get(
                 f"{self.API_URL}/mixed_people/search",
                 headers=self._get_headers(),
                 params=query.model_dump(exclude={"credentials", "max_results"}),
             )
-            parsed_response = SearchPeopleResponse(**response.json())
+            data = response.json()
+            parsed_response = SearchPeopleResponse(**data)
             people.extend(parsed_response.people[: query.max_results - len(people)])

         logger.info(f"Found {len(people)} people")
         return people[: query.max_results] if query.max_results else people

-    def search_organizations(
+    async def search_organizations(
         self, query: SearchOrganizationsRequest
     ) -> List[Organization]:
         """Search for organizations in Apollo"""
-        response = self.requests.get(
+        response = await self.requests.get(
             f"{self.API_URL}/mixed_companies/search",
             headers=self._get_headers(),
             params=query.model_dump(exclude={"credentials", "max_results"}),
         )
-        parsed_response = SearchOrganizationsResponse(**response.json())
+        data = response.json()
+        parsed_response = SearchOrganizationsResponse(**data)
         if parsed_response.pagination.total_entries == 0:
             return []

@@ -90,12 +93,13 @@ class ApolloClient:
             and len(parsed_response.organizations) > 0
         ):
             query.page += 1
-            response = self.requests.get(
+            response = await self.requests.get(
                 f"{self.API_URL}/mixed_companies/search",
                 headers=self._get_headers(),
                 params=query.model_dump(exclude={"credentials", "max_results"}),
             )
-            parsed_response = SearchOrganizationsResponse(**response.json())
+            data = response.json()
+            parsed_response = SearchOrganizationsResponse(**data)
             organizations.extend(
                 parsed_response.organizations[
                     : query.max_results - len(organizations)

@@ -201,19 +201,19 @@ To find IDs, identify the values for organization_id when you call this endpoint
     )

     @staticmethod
-    def search_organizations(
+    async def search_organizations(
         query: SearchOrganizationsRequest, credentials: ApolloCredentials
     ) -> list[Organization]:
         client = ApolloClient(credentials)
-        return client.search_organizations(query)
+        return await client.search_organizations(query)

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: ApolloCredentials, **kwargs
     ) -> BlockOutput:
         query = SearchOrganizationsRequest(
             **input_data.model_dump(exclude={"credentials"})
         )
-        organizations = self.search_organizations(query, credentials)
+        organizations = await self.search_organizations(query, credentials)
         for organization in organizations:
             yield "organization", organization
         yield "organizations", organizations
@@ -373,13 +373,13 @@ class SearchPeopleBlock(Block):
     )

     @staticmethod
-    def search_people(
+    async def search_people(
         query: SearchPeopleRequest, credentials: ApolloCredentials
     ) -> list[Contact]:
         client = ApolloClient(credentials)
-        return client.search_people(query)
+        return await client.search_people(query)

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -388,7 +388,7 @@ class SearchPeopleBlock(Block):
     ) -> BlockOutput:

         query = SearchPeopleRequest(**input_data.model_dump(exclude={"credentials"}))
-        people = self.search_people(query, credentials)
+        people = await self.search_people(query, credentials)
         for person in people:
             yield "person", person
         yield "people", people

@@ -30,14 +30,14 @@ class FileStoreBlock(Block):
         static_output=True,
     )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         graph_exec_id: str,
         **kwargs,
     ) -> BlockOutput:
-        file_path = store_media_file(
+        file_path = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=input_data.file_in,
             return_content=False,
@@ -84,10 +84,37 @@ class StoreValueBlock(Block):
             static_output=True,
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "output", input_data.data or input_data.input


+class PrintToConsoleBlock(Block):
+    class Input(BlockSchema):
+        text: Any = SchemaField(description="The data to print to the console.")
+
+    class Output(BlockSchema):
+        output: Any = SchemaField(description="The data printed to the console.")
+        status: str = SchemaField(description="The status of the print operation.")
+
+    def __init__(self):
+        super().__init__(
+            id="f3b1c1b2-4c4f-4f0d-8d2f-4c4f0d8d2f4c",
+            description="Print the given text to the console, this is used for a debugging purpose.",
+            categories={BlockCategory.BASIC},
+            input_schema=PrintToConsoleBlock.Input,
+            output_schema=PrintToConsoleBlock.Output,
+            test_input={"text": "Hello, World!"},
+            test_output=[
+                ("output", "Hello, World!"),
+                ("status", "printed"),
+            ],
+        )
+
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        yield "output", input_data.text
+        yield "status", "printed"
+
+
 class FindInDictionaryBlock(Block):
     class Input(BlockSchema):
         input: Any = SchemaField(description="Dictionary to lookup from")
@@ -124,7 +151,7 @@ class FindInDictionaryBlock(Block):
             categories={BlockCategory.BASIC},
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         obj = input_data.input
         key = input_data.key

@@ -214,7 +241,7 @@ class AddToDictionaryBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         updated_dict = input_data.dictionary.copy()

         if input_data.value is not None and input_data.key:
@@ -292,7 +319,7 @@ class AddToListBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         entries_added = input_data.entries.copy()
         if input_data.entry:
             entries_added.append(input_data.entry)
@@ -339,7 +366,7 @@ class FindInListBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         try:
             yield "index", input_data.list.index(input_data.value)
             yield "found", True
@@ -369,7 +396,7 @@ class NoteBlock(Block):
             block_type=BlockType.NOTE,
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "output", input_data.text


@@ -415,7 +442,7 @@ class CreateDictionaryBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         try:
             # The values are already validated by Pydantic schema
             yield "dictionary", input_data.values
@@ -463,7 +490,7 @@ class CreateListBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         try:
             # The values are already validated by Pydantic schema
             yield "list", input_data.values
@@ -498,7 +525,7 @@ class UniversalTypeConverterBlock(Block):
             output_schema=UniversalTypeConverterBlock.Output,
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         try:
             converted_value = convert(
                 input_data.value,

@@ -38,7 +38,7 @@ class BlockInstallationBlock(Block):
         disabled=True,
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         code = input_data.code

         if search := re.search(r"class (\w+)\(Block\):", code):
@@ -64,7 +64,7 @@ class BlockInstallationBlock(Block):

             from backend.util.test import execute_block_test

-            execute_block_test(block)
+            await execute_block_test(block)
             yield "success", "Block installed successfully."
         except Exception as e:
             os.remove(file_path)

@@ -70,7 +70,7 @@ class ConditionBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         operator = input_data.operator

         value1 = input_data.value1
@@ -180,7 +180,7 @@ class IfInputMatchesBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         if input_data.input == input_data.value or input_data.input is input_data.value:
             yield "result", True
             yield "yes_output", input_data.yes_value

@@ -1,7 +1,7 @@
 from enum import Enum
 from typing import Literal

-from e2b_code_interpreter import Sandbox
+from e2b_code_interpreter import AsyncSandbox
 from pydantic import SecretStr

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -123,7 +123,7 @@ class CodeExecutionBlock(Block):
         },
     )

-    def execute_code(
+    async def execute_code(
         self,
         code: str,
         language: ProgrammingLanguage,
@@ -135,21 +135,21 @@ class CodeExecutionBlock(Block):
         try:
             sandbox = None
             if template_id:
-                sandbox = Sandbox(
+                sandbox = await AsyncSandbox.create(
                     template=template_id, api_key=api_key, timeout=timeout
                 )
             else:
-                sandbox = Sandbox(api_key=api_key, timeout=timeout)
+                sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)

             if not sandbox:
                 raise Exception("Sandbox not created")

             # Running setup commands
             for cmd in setup_commands:
-                sandbox.commands.run(cmd)
+                await sandbox.commands.run(cmd)

             # Executing the code
-            execution = sandbox.run_code(
+            execution = await sandbox.run_code(
                 code,
                 language=language.value,
                 on_error=lambda e: sandbox.kill(),  # Kill the sandbox if there is an error
@@ -167,11 +167,11 @@ class CodeExecutionBlock(Block):
         except Exception as e:
             raise e

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            response, stdout_logs, stderr_logs = self.execute_code(
+            response, stdout_logs, stderr_logs = await self.execute_code(
                 input_data.code,
                 input_data.language,
                 input_data.setup_commands,
@@ -278,11 +278,11 @@ class InstantiationBlock(Block):
             },
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            sandbox_id, response, stdout_logs, stderr_logs = self.execute_code(
+            sandbox_id, response, stdout_logs, stderr_logs = await self.execute_code(
                 input_data.setup_code,
                 input_data.language,
                 input_data.setup_commands,
@@ -303,7 +303,7 @@ class InstantiationBlock(Block):
         except Exception as e:
             yield "error", str(e)

-    def execute_code(
+    async def execute_code(
         self,
         code: str,
         language: ProgrammingLanguage,
@@ -315,21 +315,21 @@ class InstantiationBlock(Block):
         try:
             sandbox = None
             if template_id:
-                sandbox = Sandbox(
+                sandbox = await AsyncSandbox.create(
                     template=template_id, api_key=api_key, timeout=timeout
                 )
             else:
-                sandbox = Sandbox(api_key=api_key, timeout=timeout)
+                sandbox = await AsyncSandbox.create(api_key=api_key, timeout=timeout)

             if not sandbox:
                 raise Exception("Sandbox not created")

             # Running setup commands
             for cmd in setup_commands:
-                sandbox.commands.run(cmd)
+                await sandbox.commands.run(cmd)

             # Executing the code
-            execution = sandbox.run_code(
+            execution = await sandbox.run_code(
                 code,
                 language=language.value,
                 on_error=lambda e: sandbox.kill(),  # Kill the sandbox if there is an error
@@ -409,7 +409,7 @@ class StepExecutionBlock(Block):
         },
     )

-    def execute_step_code(
+    async def execute_step_code(
         self,
         sandbox_id: str,
         code: str,
@@ -417,12 +417,12 @@ class StepExecutionBlock(Block):
         api_key: str,
     ):
         try:
-            sandbox = Sandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
+            sandbox = await AsyncSandbox.connect(sandbox_id=sandbox_id, api_key=api_key)
             if not sandbox:
                 raise Exception("Sandbox not found")

             # Executing the code
-            execution = sandbox.run_code(code, language=language.value)
+            execution = await sandbox.run_code(code, language=language.value)

             if execution.error:
                 raise Exception(execution.error)
@@ -436,11 +436,11 @@ class StepExecutionBlock(Block):
         except Exception as e:
             raise e

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            response, stdout_logs, stderr_logs = self.execute_step_code(
+            response, stdout_logs, stderr_logs = await self.execute_step_code(
                 input_data.sandbox_id,
                 input_data.step_code,
                 input_data.language,

@@ -49,7 +49,7 @@ class CodeExtractionBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         # List of supported programming languages with mapped aliases
         language_aliases = {
             "html": ["html", "htm"],

@@ -56,5 +56,5 @@ class CompassAITriggerBlock(Block):
             # ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "transcription", input_data.payload.transcription

@@ -30,7 +30,7 @@ class WordCharacterCountBlock(Block):
         test_output=[("word_count", 4), ("character_count", 19)],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         try:
             text = input_data.text
             word_count = len(text.split())

@@ -69,7 +69,7 @@ class ReadCsvBlock(Block):
             ],
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         import csv
         from io import StringIO

@@ -34,6 +34,6 @@ This is a "quoted" string.""",
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         decoded_text = codecs.decode(input_data.text, "unicode_escape")
         yield "decoded_text", decoded_text

@@ -1,4 +1,3 @@
-import asyncio
 from typing import Literal

 import aiohttp
@@ -74,7 +73,11 @@ class ReadDiscordMessagesBlock(Block):
             ("username", "test_user"),
         ],
         test_mock={
-            "run_bot": lambda token: asyncio.Future()  # Create a Future object for mocking
+            "run_bot": lambda token: {
+                "output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
+                "channel_name": "general",
+                "username": "test_user",
+            }
         },
     )

@@ -106,37 +109,24 @@ class ReadDiscordMessagesBlock(Block):
                     if attachment.filename.endswith((".txt", ".py")):
                         async with aiohttp.ClientSession() as session:
                             async with session.get(attachment.url) as response:
-                                file_content = await response.text()
+                                file_content = response.text()
                                 self.output_data += f"\n\nFile from user: {attachment.filename}\nContent: {file_content}"

             await client.close()

         await client.start(token.get_secret_value())

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-        while True:
-            for output_name, output_value in self.__run(input_data, credentials):
-                yield output_name, output_value
-            break
+        async for output_name, output_value in self.__run(input_data, credentials):
+            yield output_name, output_value

-    def __run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
+    async def __run(
+        self, input_data: Input, credentials: APIKeyCredentials
+    ) -> BlockOutput:
         try:
-            loop = asyncio.get_event_loop()
-            future = self.run_bot(credentials.api_key)
-
-            # If it's a Future (mock), set the result
-            if isinstance(future, asyncio.Future):
-                future.set_result(
-                    {
-                        "output_data": "Hello!\n\nFile from user: example.txt\nContent: This is the content of the file.",
-                        "channel_name": "general",
-                        "username": "test_user",
-                    }
-                )
-
-            result = loop.run_until_complete(future)
+            result = await self.run_bot(credentials.api_key)

             # For testing purposes, use the mocked result
             if isinstance(result, dict):
@@ -190,7 +180,7 @@ class SendDiscordMessageBlock(Block):
         },
         test_output=[("status", "Message sent")],
         test_mock={
-            "send_message": lambda token, channel_name, message_content: asyncio.Future()
+            "send_message": lambda token, channel_name, message_content: "Message sent"
         },
         test_credentials=TEST_CREDENTIALS,
     )
@@ -222,23 +212,16 @@ class SendDiscordMessageBlock(Block):
         """Splits a message into chunks not exceeding the Discord limit."""
         return [message[i : i + limit] for i in range(0, len(message), limit)]

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            loop = asyncio.get_event_loop()
-            future = self.send_message(
+            result = await self.send_message(
                 credentials.api_key.get_secret_value(),
                 input_data.channel_name,
                 input_data.message_content,
             )

-            # If it's a Future (mock), set the result
-            if isinstance(future, asyncio.Future):
-                future.set_result("Message sent")
-
-            result = loop.run_until_complete(future)
-
             # For testing purposes, use the mocked result
             if isinstance(result, str):
                 self.output_data = result

@@ -121,7 +121,7 @@ class SendEmailBlock(Block):

         return "Email sent successfully"

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: SMTPCredentials, **kwargs
     ) -> BlockOutput:
         yield "status", self.send_email(

@@ -9,7 +9,7 @@ from backend.blocks.exa._auth import (
 )
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
-from backend.util.request import requests
+from backend.util.request import Requests


 class ContentRetrievalSettings(BaseModel):
@@ -62,7 +62,7 @@ class ExaContentsBlock(Block):
             output_schema=ExaContentsBlock.Output,
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: ExaCredentials, **kwargs
     ) -> BlockOutput:
         url = "https://api.exa.ai/contents"
@@ -79,10 +79,8 @@ class ExaContentsBlock(Block):
         }

         try:
-            response = requests.post(url, headers=headers, json=payload)
-            response.raise_for_status()
+            response = await Requests().post(url, headers=headers, json=payload)
             data = response.json()
             yield "results", data.get("results", [])
         except Exception as e:
             yield "error", str(e)
-            yield "results", []

@@ -9,7 +9,7 @@ from backend.blocks.exa._auth import (
 from backend.blocks.exa.helpers import ContentSettings
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
-from backend.util.request import requests
+from backend.util.request import Requests


 class ExaSearchBlock(Block):
@@ -78,6 +78,9 @@ class ExaSearchBlock(Block):
             description="List of search results",
             default_factory=list,
         )
+        error: str = SchemaField(
+            description="Error message if the request failed",
+        )

     def __init__(self):
         super().__init__(
@@ -88,7 +91,7 @@ class ExaSearchBlock(Block):
             output_schema=ExaSearchBlock.Output,
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: ExaCredentials, **kwargs
     ) -> BlockOutput:
         url = "https://api.exa.ai/search"
@@ -133,11 +136,9 @@ class ExaSearchBlock(Block):
                 payload[api_field] = value

         try:
-            response = requests.post(url, headers=headers, json=payload)
-            response.raise_for_status()
+            response = await Requests().post(url, headers=headers, json=payload)
             data = response.json()
             # Extract just the results array from the response
             yield "results", data.get("results", [])
         except Exception as e:
             yield "error", str(e)
-            yield "results", []

@@ -8,7 +8,7 @@ from backend.blocks.exa._auth import (
 )
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
-from backend.util.request import requests
+from backend.util.request import Requests

 from .helpers import ContentSettings

@@ -67,6 +67,7 @@ class ExaFindSimilarBlock(Block):
             description="List of similar documents with title, URL, published date, author, and score",
             default_factory=list,
         )
+        error: str = SchemaField(description="Error message if the request failed")

     def __init__(self):
         super().__init__(
@@ -77,7 +78,7 @@ class ExaFindSimilarBlock(Block):
             output_schema=ExaFindSimilarBlock.Output,
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: ExaCredentials, **kwargs
     ) -> BlockOutput:
         url = "https://api.exa.ai/findSimilar"
@@ -119,10 +120,8 @@ class ExaFindSimilarBlock(Block):
                 payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")

         try:
-            response = requests.post(url, headers=headers, json=payload)
-            response.raise_for_status()
+            response = await Requests().post(url, headers=headers, json=payload)
             data = response.json()
             yield "results", data.get("results", [])
         except Exception as e:
             yield "error", str(e)
-            yield "results", []

@@ -1,10 +1,8 @@
+import asyncio
 import logging
-import time
 from enum import Enum
 from typing import Any

-import httpx
-
 from backend.blocks.fal._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -14,6 +12,7 @@ from backend.blocks.fal._auth import (
 )
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
+from backend.util.request import ClientResponseError, Requests

 logger = logging.getLogger(__name__)

@@ -21,6 +20,7 @@ logger = logging.getLogger(__name__)
 class FalModel(str, Enum):
     MOCHI = "fal-ai/mochi-v1"
     LUMA = "fal-ai/luma-dream-machine"
+    VEO3 = "fal-ai/veo3"


 class AIVideoGeneratorBlock(Block):
@@ -65,35 +65,37 @@ class AIVideoGeneratorBlock(Block):
         )

     def _get_headers(self, api_key: str) -> dict[str, str]:
-        """Get headers for FAL API requests."""
+        """Get headers for FAL API Requests."""
         return {
             "Authorization": f"Key {api_key}",
             "Content-Type": "application/json",
         }

-    def _submit_request(
+    async def _submit_request(
         self, url: str, headers: dict[str, str], data: dict[str, Any]
     ) -> dict[str, Any]:
         """Submit a request to the FAL API."""
         try:
-            response = httpx.post(url, headers=headers, json=data)
-            response.raise_for_status()
+            response = await Requests().post(url, headers=headers, json=data)
             return response.json()
-        except httpx.HTTPError as e:
+        except ClientResponseError as e:
             logger.error(f"FAL API request failed: {str(e)}")
             raise RuntimeError(f"Failed to submit request: {str(e)}")

-    def _poll_status(self, status_url: str, headers: dict[str, str]) -> dict[str, Any]:
+    async def _poll_status(
+        self, status_url: str, headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Poll the status endpoint until completion or failure."""
         try:
-            response = httpx.get(status_url, headers=headers)
-            response.raise_for_status()
+            response = await Requests().get(status_url, headers=headers)
             return response.json()
-        except httpx.HTTPError as e:
+        except ClientResponseError as e:
             logger.error(f"Failed to get status: {str(e)}")
             raise RuntimeError(f"Failed to get status: {str(e)}")

-    def generate_video(self, input_data: Input, credentials: FalCredentials) -> str:
+    async def generate_video(
+        self, input_data: Input, credentials: FalCredentials
+    ) -> str:
         """Generate video using the specified FAL model."""
         base_url = "https://queue.fal.run"
         api_key = credentials.api_key.get_secret_value()
@@ -102,13 +104,16 @@ class AIVideoGeneratorBlock(Block):
         # Submit generation request
         submit_url = f"{base_url}/{input_data.model.value}"
         submit_data = {"prompt": input_data.prompt}
+        if input_data.model == FalModel.VEO3:
+            submit_data["generate_audio"] = True  # type: ignore

         seen_logs = set()

         try:
             # Submit request to queue
-            submit_response = httpx.post(submit_url, headers=headers, json=submit_data)
-            submit_response.raise_for_status()
+            submit_response = await Requests().post(
+                submit_url, headers=headers, json=submit_data
+            )
             request_data = submit_response.json()

             # Get request_id and urls from initial response
@@ -119,14 +124,23 @@ class AIVideoGeneratorBlock(Block):
             if not all([request_id, status_url, result_url]):
                 raise ValueError("Missing required data in submission response")

+            # Ensure status_url is a string
+            if not isinstance(status_url, str):
+                raise ValueError("Invalid status URL format")
+
+            # Ensure result_url is a string
+            if not isinstance(result_url, str):
+                raise ValueError("Invalid result URL format")
+
             # Poll for status with exponential backoff
             max_attempts = 30
             attempt = 0
             base_wait_time = 5

             while attempt < max_attempts:
-                status_response = httpx.get(f"{status_url}?logs=1", headers=headers)
-                status_response.raise_for_status()
+                status_response = await Requests().get(
+                    f"{status_url}?logs=1", headers=headers
+                )
                 status_data = status_response.json()

                 # Process new logs only
@@ -149,8 +163,7 @@ class AIVideoGeneratorBlock(Block):
                 status = status_data.get("status")
                 if status == "COMPLETED":
                     # Get the final result
-                    result_response = httpx.get(result_url, headers=headers)
-                    result_response.raise_for_status()
+                    result_response = await Requests().get(result_url, headers=headers)
                     result_data = result_response.json()

                     if "video" not in result_data or not isinstance(
@@ -159,8 +172,8 @@ class AIVideoGeneratorBlock(Block):
                         raise ValueError("Invalid response format - missing video data")

                     video_url = result_data["video"].get("url")
-                    if not video_url:
-                        raise ValueError("No video URL in response")
+                    if not video_url or not isinstance(video_url, str):
+                        raise ValueError("No valid video URL in response")

                     return video_url

@@ -180,19 +193,19 @@ class AIVideoGeneratorBlock(Block):
                     logger.info(f"[FAL Generation] Status: Unknown status: {status}")

                 wait_time = min(base_wait_time * (2**attempt), 60)  # Cap at 60 seconds
-                time.sleep(wait_time)
+                await asyncio.sleep(wait_time)
                 attempt += 1

             raise RuntimeError("Maximum polling attempts reached")

-        except httpx.HTTPError as e:
+        except ClientResponseError as e:
             raise RuntimeError(f"API request failed: {str(e)}")

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: FalCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            video_url = self.generate_video(input_data, credentials)
+            video_url = await self.generate_video(input_data, credentials)
             yield "video_url", video_url
         except Exception as e:
             error_message = str(e)

autogpt_platform/backend/backend/blocks/flux_kontext.py (new file, 174 lines)
@@ -0,0 +1,174 @@
+from enum import Enum
+from typing import Literal, Optional
+
+from pydantic import SecretStr
+from replicate.client import Client as ReplicateClient
+from replicate.helpers import FileOutput
+
+from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)
+from backend.integrations.providers import ProviderName
+from backend.util.file import MediaFileType
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="replicate",
+    api_key=SecretStr("mock-replicate-api-key"),
+    title="Mock Replicate API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.type,
+}
+
+
+class FluxKontextModelName(str, Enum):
+    PRO = "Flux Kontext Pro"
+    MAX = "Flux Kontext Max"
+
+    @property
+    def api_name(self) -> str:
+        return f"black-forest-labs/flux-kontext-{self.name.lower()}"
+
+
+class AspectRatio(str, Enum):
+    MATCH_INPUT_IMAGE = "match_input_image"
+    ASPECT_1_1 = "1:1"
+    ASPECT_16_9 = "16:9"
+    ASPECT_9_16 = "9:16"
+    ASPECT_4_3 = "4:3"
+    ASPECT_3_4 = "3:4"
+    ASPECT_3_2 = "3:2"
+    ASPECT_2_3 = "2:3"
+    ASPECT_4_5 = "4:5"
+    ASPECT_5_4 = "5:4"
+    ASPECT_21_9 = "21:9"
+    ASPECT_9_21 = "9:21"
+    ASPECT_2_1 = "2:1"
+    ASPECT_1_2 = "1:2"
+
+
+class AIImageEditorBlock(Block):
+    class Input(BlockSchema):
+        credentials: CredentialsMetaInput[
+            Literal[ProviderName.REPLICATE], Literal["api_key"]
+        ] = CredentialsField(
+            description="Replicate API key with permissions for Flux Kontext models",
+        )
+        prompt: str = SchemaField(
+            description="Text instruction describing the desired edit",
+            title="Prompt",
+        )
+        input_image: Optional[MediaFileType] = SchemaField(
+            description="Reference image URI (jpeg, png, gif, webp)",
+            default=None,
+            title="Input Image",
+        )
+        aspect_ratio: AspectRatio = SchemaField(
+            description="Aspect ratio of the generated image",
+            default=AspectRatio.MATCH_INPUT_IMAGE,
+            title="Aspect Ratio",
+            advanced=False,
+        )
+        seed: Optional[int] = SchemaField(
+            description="Random seed. Set for reproducible generation",
+            default=None,
+            title="Seed",
+            advanced=True,
+        )
+        model: FluxKontextModelName = SchemaField(
+            description="Model variant to use",
+            default=FluxKontextModelName.PRO,
+            title="Model",
+        )
+
+    class Output(BlockSchema):
+        output_image: MediaFileType = SchemaField(
+            description="URL of the transformed image"
+        )
+        error: str = SchemaField(description="Error message if generation failed")
+
+    def __init__(self):
+        super().__init__(
+            id="3fd9c73d-4370-4925-a1ff-1b86b99fabfa",
+            description=(
+                "Edit images using BlackForest Labs' Flux Kontext models. Provide a prompt "
+                "and optional reference image to generate a modified image."
+            ),
+            categories={BlockCategory.AI, BlockCategory.MULTIMEDIA},
+            input_schema=AIImageEditorBlock.Input,
+            output_schema=AIImageEditorBlock.Output,
+            test_input={
+                "prompt": "Add a hat to the cat",
+                "input_image": "https://example.com/cat.png",
+                "aspect_ratio": AspectRatio.MATCH_INPUT_IMAGE,
+                "seed": None,
+                "model": FluxKontextModelName.PRO,
+                "credentials": TEST_CREDENTIALS_INPUT,
+            },
+            test_output=[
+                ("output_image", "https://replicate.com/output/edited-image.png"),
+            ],
+            test_mock={
+                "run_model": lambda *args, **kwargs: "https://replicate.com/output/edited-image.png",
+            },
+            test_credentials=TEST_CREDENTIALS,
+        )
+
+    async def run(
+        self,
+        input_data: Input,
+        *,
+        credentials: APIKeyCredentials,
+        **kwargs,
+    ) -> BlockOutput:
+        result = await self.run_model(
+            api_key=credentials.api_key,
+            model_name=input_data.model.api_name,
+            prompt=input_data.prompt,
+            input_image=input_data.input_image,
+            aspect_ratio=input_data.aspect_ratio.value,
+            seed=input_data.seed,
+        )
+        yield "output_image", result
+
+    async def run_model(
+        self,
+        api_key: SecretStr,
+        model_name: str,
+        prompt: str,
+        input_image: Optional[MediaFileType],
+        aspect_ratio: str,
+        seed: Optional[int],
+    ) -> MediaFileType:
+        client = ReplicateClient(api_token=api_key.get_secret_value())
+        input_params = {
+            "prompt": prompt,
+            "input_image": input_image,
+            "aspect_ratio": aspect_ratio,
+            **({"seed": seed} if seed is not None else {}),
+        }
+
+        output: FileOutput | list[FileOutput] = await client.async_run(  # type: ignore
+            model_name,
+            input=input_params,
+            wait=False,
+        )
+
+        if isinstance(output, list) and output:
+            output = output[0]
+
+        if isinstance(output, FileOutput):
+            return MediaFileType(output.url)
+        if isinstance(output, str):
+            return MediaFileType(output)
+
+        raise ValueError("No output received")

@@ -46,6 +46,6 @@ class GenericWebhookTriggerBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "constants", input_data.constants
         yield "payload", input_data.payload
@@ -1,19 +1,30 @@
+from typing import overload
 from urllib.parse import urlparse

 from backend.blocks.github._auth import (
     GithubCredentials,
     GithubFineGrainedAPICredentials,
 )
-from backend.util.request import Requests
+from backend.util.request import URL, Requests


-def _convert_to_api_url(url: str) -> str:
+@overload
+def _convert_to_api_url(url: str) -> str: ...
+
+
+@overload
+def _convert_to_api_url(url: URL) -> URL: ...
+
+
+def _convert_to_api_url(url: str | URL) -> str | URL:
     """
     Converts a standard GitHub URL to the corresponding GitHub API URL.
     Handles repository URLs, issue URLs, pull request URLs, and more.
     """
-    parsed_url = urlparse(url)
-    path_parts = parsed_url.path.strip("/").split("/")
+    if url_as_str := isinstance(url, str):
+        url = urlparse(url)
+
+    path_parts = url.path.strip("/").split("/")

     if len(path_parts) >= 2:
         owner, repo = path_parts[0], path_parts[1]
@@ -28,7 +39,7 @@ def _convert_to_api_url(url: str) -> str:
     else:
         raise ValueError("Invalid GitHub URL format.")

-    return api_url
+    return api_url if url_as_str else urlparse(api_url)


 def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
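The `@overload` pair keeps the original str-in/str-out contract for existing callers while letting request-layer code pass an already-parsed URL through and get one back. Assuming `URL` is the parse-result type exported by `backend.util.request`, a type checker now sees:

from urllib.parse import urlparse

api_1 = _convert_to_api_url("https://github.com/owner/repo")  # inferred as str
api_2 = _convert_to_api_url(urlparse("https://github.com/owner/repo"))  # inferred as URL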
@@ -129,7 +129,7 @@ class GithubCreateCheckRunBlock(Block):
         )

     @staticmethod
-    def create_check_run(
+    async def create_check_run(
         credentials: GithubCredentials,
         repo_url: str,
         name: str,
@@ -172,7 +172,7 @@ class GithubCreateCheckRunBlock(Block):
         data.output = output_data

         check_runs_url = f"{repo_url}/check-runs"
-        response = api.post(
+        response = await api.post(
             check_runs_url, data=data.model_dump_json(exclude_none=True)
         )
         result = response.json()
@@ -183,7 +183,7 @@ class GithubCreateCheckRunBlock(Block):
             "status": result["status"],
         }

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -191,7 +191,7 @@ class GithubCreateCheckRunBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            result = self.create_check_run(
+            result = await self.create_check_run(
                 credentials=credentials,
                 repo_url=input_data.repo_url,
                 name=input_data.name,
@@ -292,7 +292,7 @@ class GithubUpdateCheckRunBlock(Block):
         )

     @staticmethod
-    def update_check_run(
+    async def update_check_run(
         credentials: GithubCredentials,
         repo_url: str,
         check_run_id: int,
@@ -325,7 +325,7 @@ class GithubUpdateCheckRunBlock(Block):
         data.output = output_data

         check_run_url = f"{repo_url}/check-runs/{check_run_id}"
-        response = api.patch(
+        response = await api.patch(
             check_run_url, data=data.model_dump_json(exclude_none=True)
         )
         result = response.json()
@@ -337,7 +337,7 @@ class GithubUpdateCheckRunBlock(Block):
             "conclusion": result.get("conclusion"),
         }

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -345,7 +345,7 @@ class GithubUpdateCheckRunBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            result = self.update_check_run(
+            result = await self.update_check_run(
                 credentials=credentials,
                 repo_url=input_data.repo_url,
                 check_run_id=input_data.check_run_id,
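Both check-run helpers serialize their request payload with Pydantic's `model_dump_json(exclude_none=True)` so optional fields that were never set are dropped from the JSON sent to GitHub. In miniature (the model here is hypothetical, not the project's):

from pydantic import BaseModel

class CheckRunPayload(BaseModel):
    name: str
    head_sha: str
    conclusion: str | None = None  # omitted from the JSON when left as None

payload = CheckRunPayload(name="ci", head_sha="abc123")
print(payload.model_dump_json(exclude_none=True))
# {"name":"ci","head_sha":"abc123"}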
@@ -80,7 +80,7 @@ class GithubCommentBlock(Block):
         )

     @staticmethod
-    def post_comment(
+    async def post_comment(
         credentials: GithubCredentials, issue_url: str, body_text: str
     ) -> tuple[int, str]:
         api = get_api(credentials)
@@ -88,18 +88,18 @@ class GithubCommentBlock(Block):
         if "pull" in issue_url:
             issue_url = issue_url.replace("pull", "issues")
         comments_url = issue_url + "/comments"
-        response = api.post(comments_url, json=data)
+        response = await api.post(comments_url, json=data)
         comment = response.json()
         return comment["id"], comment["html_url"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        id, url = self.post_comment(
+        id, url = await self.post_comment(
             credentials,
             input_data.issue_url,
             input_data.comment,
@@ -171,7 +171,7 @@ class GithubUpdateCommentBlock(Block):
         )

     @staticmethod
-    def update_comment(
+    async def update_comment(
         credentials: GithubCredentials, comment_url: str, body_text: str
     ) -> tuple[int, str]:
         api = get_api(credentials, convert_urls=False)
@@ -179,11 +179,11 @@ class GithubUpdateCommentBlock(Block):
         url = convert_comment_url_to_api_endpoint(comment_url)

         logger.info(url)
-        response = api.patch(url, json=data)
+        response = await api.patch(url, json=data)
         comment = response.json()
         return comment["id"], comment["html_url"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -209,7 +209,7 @@ class GithubUpdateCommentBlock(Block):
             raise ValueError(
                 "Must provide either comment_url or comment_id and issue_url"
             )
-        id, url = self.update_comment(
+        id, url = await self.update_comment(
             credentials,
             input_data.comment_url,
             input_data.comment,
@@ -288,7 +288,7 @@ class GithubListCommentsBlock(Block):
         )

     @staticmethod
-    def list_comments(
+    async def list_comments(
         credentials: GithubCredentials, issue_url: str
     ) -> list[Output.CommentItem]:
         parsed_url = urlparse(issue_url)
@@ -305,7 +305,7 @@ class GithubListCommentsBlock(Block):

         # Set convert_urls=False since we're already providing an API URL
         api = get_api(credentials, convert_urls=False)
-        response = api.get(api_url)
+        response = await api.get(api_url)
         comments = response.json()
         parsed_comments: list[GithubListCommentsBlock.Output.CommentItem] = [
             {
@@ -318,18 +318,19 @@ class GithubListCommentsBlock(Block):
         ]
         return parsed_comments

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        comments = self.list_comments(
+        comments = await self.list_comments(
             credentials,
             input_data.issue_url,
         )
-        yield from (("comment", comment) for comment in comments)
+        for comment in comments:
+            yield "comment", comment
+        yield "comments", comments
@@ -381,24 +382,24 @@ class GithubMakeIssueBlock(Block):
         )

     @staticmethod
-    def create_issue(
+    async def create_issue(
         credentials: GithubCredentials, repo_url: str, title: str, body: str
     ) -> tuple[int, str]:
         api = get_api(credentials)
         data = {"title": title, "body": body}
         issues_url = repo_url + "/issues"
-        response = api.post(issues_url, json=data)
+        response = await api.post(issues_url, json=data)
         issue = response.json()
         return issue["number"], issue["html_url"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        number, url = self.create_issue(
+        number, url = await self.create_issue(
             credentials,
             input_data.repo_url,
             input_data.title,
@@ -451,25 +452,25 @@ class GithubReadIssueBlock(Block):
         )

     @staticmethod
-    def read_issue(
+    async def read_issue(
         credentials: GithubCredentials, issue_url: str
     ) -> tuple[str, str, str]:
         api = get_api(credentials)
-        response = api.get(issue_url)
+        response = await api.get(issue_url)
         data = response.json()
         title = data.get("title", "No title found")
         body = data.get("body", "No body content found")
         user = data.get("user", {}).get("login", "No user found")
         return title, body, user

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        title, body, user = self.read_issue(
+        title, body, user = await self.read_issue(
             credentials,
             input_data.issue_url,
         )
@@ -531,30 +532,30 @@ class GithubListIssuesBlock(Block):
         )

     @staticmethod
-    def list_issues(
+    async def list_issues(
         credentials: GithubCredentials, repo_url: str
     ) -> list[Output.IssueItem]:
         api = get_api(credentials)
         issues_url = repo_url + "/issues"
-        response = api.get(issues_url)
+        response = await api.get(issues_url)
         data = response.json()
         issues: list[GithubListIssuesBlock.Output.IssueItem] = [
             {"title": issue["title"], "url": issue["html_url"]} for issue in data
         ]
         return issues

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        issues = self.list_issues(
+        for issue in await self.list_issues(
             credentials,
             input_data.repo_url,
-        )
-        yield from (("issue", issue) for issue in issues)
+        ):
+            yield "issue", issue


 class GithubAddLabelBlock(Block):
@@ -593,21 +594,23 @@ class GithubAddLabelBlock(Block):
         )

     @staticmethod
-    def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
+    async def add_label(
+        credentials: GithubCredentials, issue_url: str, label: str
+    ) -> str:
         api = get_api(credentials)
         data = {"labels": [label]}
         labels_url = issue_url + "/labels"
-        api.post(labels_url, json=data)
+        await api.post(labels_url, json=data)
         return "Label added successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        status = self.add_label(
+        status = await self.add_label(
             credentials,
             input_data.issue_url,
             input_data.label,
@@ -653,20 +656,22 @@ class GithubRemoveLabelBlock(Block):
         )

     @staticmethod
-    def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
+    async def remove_label(
+        credentials: GithubCredentials, issue_url: str, label: str
+    ) -> str:
         api = get_api(credentials)
         label_url = issue_url + f"/labels/{label}"
-        api.delete(label_url)
+        await api.delete(label_url)
         return "Label removed successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        status = self.remove_label(
+        status = await self.remove_label(
             credentials,
             input_data.issue_url,
             input_data.label,
@@ -714,7 +719,7 @@ class GithubAssignIssueBlock(Block):
         )

     @staticmethod
-    def assign_issue(
+    async def assign_issue(
         credentials: GithubCredentials,
         issue_url: str,
         assignee: str,
@@ -722,17 +727,17 @@ class GithubAssignIssueBlock(Block):
         api = get_api(credentials)
         assignees_url = issue_url + "/assignees"
         data = {"assignees": [assignee]}
-        api.post(assignees_url, json=data)
+        await api.post(assignees_url, json=data)
         return "Issue assigned successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        status = self.assign_issue(
+        status = await self.assign_issue(
             credentials,
             input_data.issue_url,
             input_data.assignee,
@@ -780,7 +785,7 @@ class GithubUnassignIssueBlock(Block):
         )

     @staticmethod
-    def unassign_issue(
+    async def unassign_issue(
         credentials: GithubCredentials,
         issue_url: str,
         assignee: str,
@@ -788,17 +793,17 @@ class GithubUnassignIssueBlock(Block):
         api = get_api(credentials)
         assignees_url = issue_url + "/assignees"
         data = {"assignees": [assignee]}
-        api.delete(assignees_url, json=data)
+        await api.delete(assignees_url, json=data)
         return "Issue unassigned successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        status = self.unassign_issue(
+        status = await self.unassign_issue(
             credentials,
             input_data.issue_url,
             input_data.assignee,
@@ -65,28 +65,31 @@ class GithubListPullRequestsBlock(Block):
         )

     @staticmethod
-    def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]:
+    async def list_prs(
+        credentials: GithubCredentials, repo_url: str
+    ) -> list[Output.PRItem]:
         api = get_api(credentials)
         pulls_url = repo_url + "/pulls"
-        response = api.get(pulls_url)
+        response = await api.get(pulls_url)
         data = response.json()
         pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [
             {"title": pr["title"], "url": pr["html_url"]} for pr in data
         ]
         return pull_requests

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        pull_requests = self.list_prs(
+        pull_requests = await self.list_prs(
             credentials,
             input_data.repo_url,
         )
-        yield from (("pull_request", pr) for pr in pull_requests)
+        for pr in pull_requests:
+            yield "pull_request", pr


 class GithubMakePullRequestBlock(Block):
@@ -153,7 +156,7 @@ class GithubMakePullRequestBlock(Block):
         )

     @staticmethod
-    def create_pr(
+    async def create_pr(
         credentials: GithubCredentials,
         repo_url: str,
         title: str,
@@ -164,11 +167,11 @@ class GithubMakePullRequestBlock(Block):
         api = get_api(credentials)
         pulls_url = repo_url + "/pulls"
         data = {"title": title, "body": body, "head": head, "base": base}
-        response = api.post(pulls_url, json=data)
+        response = await api.post(pulls_url, json=data)
         pr_data = response.json()
         return pr_data["number"], pr_data["html_url"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -176,7 +179,7 @@ class GithubMakePullRequestBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            number, url = self.create_pr(
+            number, url = await self.create_pr(
                 credentials,
                 input_data.repo_url,
                 input_data.title,
@@ -242,39 +245,39 @@ class GithubReadPullRequestBlock(Block):
         )

     @staticmethod
-    def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]:
+    async def read_pr(
+        credentials: GithubCredentials, pr_url: str
+    ) -> tuple[str, str, str]:
         api = get_api(credentials)
         # Adjust the URL to access the issue endpoint for PR metadata
         issue_url = pr_url.replace("/pull/", "/issues/")
-        response = api.get(issue_url)
+        response = await api.get(issue_url)
         data = response.json()
         title = data.get("title", "No title found")
         body = data.get("body", "No body content found")
-        author = data.get("user", {}).get("login", "No user found")
+        author = data.get("user", {}).get("login", "Unknown author")
         return title, body, author

     @staticmethod
-    def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
+    async def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
         api = get_api(credentials)
         files_url = prepare_pr_api_url(pr_url=pr_url, path="files")
-        response = api.get(files_url)
+        response = await api.get(files_url)
         files = response.json()
         changes = []
         for file in files:
-            filename = file.get("filename")
-            patch = file.get("patch")
-            if filename and patch:
-                changes.append(f"File: {filename}\n{patch}")
-        return "\n\n".join(changes)
+            filename = file.get("filename", "")
+            status = file.get("status", "")
+            changes.append(f"{filename}: {status}")
+        return "\n".join(changes)

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        title, body, author = self.read_pr(
+        title, body, author = await self.read_pr(
             credentials,
             input_data.pr_url,
         )
@@ -283,7 +286,7 @@ class GithubReadPullRequestBlock(Block):
         yield "author", author

         if input_data.include_pr_changes:
-            changes = self.read_pr_changes(
+            changes = await self.read_pr_changes(
                 credentials,
                 input_data.pr_url,
             )
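Beyond the async conversion, `read_pr_changes` changes behavior: it previously returned each file's diff patch, and now returns one `filename: status` line per changed file (GitHub's files endpoint reports statuses such as added, modified, removed, renamed). For a PR that edits one file and adds another, the new output looks like:

README.md: modified
main.py: added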
@@ -330,16 +333,16 @@ class GithubAssignPRReviewerBlock(Block):
         )

     @staticmethod
-    def assign_reviewer(
+    async def assign_reviewer(
         credentials: GithubCredentials, pr_url: str, reviewer: str
     ) -> str:
         api = get_api(credentials)
         reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
         data = {"reviewers": [reviewer]}
-        api.post(reviewers_url, json=data)
+        await api.post(reviewers_url, json=data)
         return "Reviewer assigned successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -347,7 +350,7 @@ class GithubAssignPRReviewerBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            status = self.assign_reviewer(
+            status = await self.assign_reviewer(
                 credentials,
                 input_data.pr_url,
                 input_data.reviewer,
@@ -397,16 +400,16 @@ class GithubUnassignPRReviewerBlock(Block):
         )

     @staticmethod
-    def unassign_reviewer(
+    async def unassign_reviewer(
         credentials: GithubCredentials, pr_url: str, reviewer: str
     ) -> str:
         api = get_api(credentials)
         reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
         data = {"reviewers": [reviewer]}
-        api.delete(reviewers_url, json=data)
+        await api.delete(reviewers_url, json=data)
         return "Reviewer unassigned successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -414,7 +417,7 @@ class GithubUnassignPRReviewerBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            status = self.unassign_reviewer(
+            status = await self.unassign_reviewer(
                 credentials,
                 input_data.pr_url,
                 input_data.reviewer,
@@ -477,12 +480,12 @@ class GithubListPRReviewersBlock(Block):
         )

     @staticmethod
-    def list_reviewers(
+    async def list_reviewers(
         credentials: GithubCredentials, pr_url: str
     ) -> list[Output.ReviewerItem]:
         api = get_api(credentials)
         reviewers_url = prepare_pr_api_url(pr_url=pr_url, path="requested_reviewers")
-        response = api.get(reviewers_url)
+        response = await api.get(reviewers_url)
         data = response.json()
         reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [
             {"username": reviewer["login"], "url": reviewer["html_url"]}
@@ -490,18 +493,18 @@ class GithubListPRReviewersBlock(Block):
         ]
         return reviewers

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        reviewers = self.list_reviewers(
+        for reviewer in await self.list_reviewers(
             credentials,
             input_data.pr_url,
-        )
-        yield from (("reviewer", reviewer) for reviewer in reviewers)
+        ):
+            yield "reviewer", reviewer


 def prepare_pr_api_url(pr_url: str, path: str) -> str:
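All three reviewer blocks lean on `prepare_pr_api_url`, defined just below this hunk, to turn a PR's web URL into the matching REST endpoint. For reference, GitHub's mapping is `https://github.com/{owner}/{repo}/pull/{n}` to `https://api.github.com/repos/{owner}/{repo}/pulls/{n}/{path}`; a hedged sketch of that transformation (the real helper may differ in details):

from urllib.parse import urlparse

def pr_api_url(pr_url: str, path: str) -> str:
    # "https://github.com/owner/repo/pull/123" -> ["owner", "repo", "pull", "123"]
    owner, repo, _, number = urlparse(pr_url).path.strip("/").split("/")[:4]
    return f"https://api.github.com/repos/{owner}/{repo}/pulls/{number}/{path}"

# pr_api_url("https://github.com/owner/repo/pull/123", "files")
# -> "https://api.github.com/repos/owner/repo/pulls/123/files"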
@@ -65,12 +65,12 @@ class GithubListTagsBlock(Block):
         )

     @staticmethod
-    def list_tags(
+    async def list_tags(
         credentials: GithubCredentials, repo_url: str
     ) -> list[Output.TagItem]:
         api = get_api(credentials)
         tags_url = repo_url + "/tags"
-        response = api.get(tags_url)
+        response = await api.get(tags_url)
         data = response.json()
         repo_path = repo_url.replace("https://github.com/", "")
         tags: list[GithubListTagsBlock.Output.TagItem] = [
@@ -82,18 +82,19 @@ class GithubListTagsBlock(Block):
         ]
         return tags

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        tags = self.list_tags(
+        tags = await self.list_tags(
             credentials,
             input_data.repo_url,
         )
-        yield from (("tag", tag) for tag in tags)
+        for tag in tags:
+            yield "tag", tag


 class GithubListBranchesBlock(Block):
@@ -147,12 +148,12 @@ class GithubListBranchesBlock(Block):
         )

     @staticmethod
-    def list_branches(
+    async def list_branches(
         credentials: GithubCredentials, repo_url: str
     ) -> list[Output.BranchItem]:
         api = get_api(credentials)
         branches_url = repo_url + "/branches"
-        response = api.get(branches_url)
+        response = await api.get(branches_url)
         data = response.json()
         repo_path = repo_url.replace("https://github.com/", "")
         branches: list[GithubListBranchesBlock.Output.BranchItem] = [
@@ -164,18 +165,19 @@ class GithubListBranchesBlock(Block):
         ]
         return branches

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        branches = self.list_branches(
+        branches = await self.list_branches(
             credentials,
             input_data.repo_url,
         )
-        yield from (("branch", branch) for branch in branches)
+        for branch in branches:
+            yield "branch", branch


 class GithubListDiscussionsBlock(Block):
@@ -234,7 +236,7 @@ class GithubListDiscussionsBlock(Block):
         )

     @staticmethod
-    def list_discussions(
+    async def list_discussions(
         credentials: GithubCredentials, repo_url: str, num_discussions: int
     ) -> list[Output.DiscussionItem]:
         api = get_api(credentials)
@@ -254,7 +256,7 @@ class GithubListDiscussionsBlock(Block):
             }
         """
         variables = {"owner": owner, "repo": repo, "num": num_discussions}
-        response = api.post(
+        response = await api.post(
             "https://api.github.com/graphql",
             json={"query": query, "variables": variables},
         )
@@ -265,17 +267,20 @@ class GithubListDiscussionsBlock(Block):
         ]
         return discussions

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        discussions = self.list_discussions(
-            credentials, input_data.repo_url, input_data.num_discussions
+        discussions = await self.list_discussions(
+            credentials,
+            input_data.repo_url,
+            input_data.num_discussions,
         )
-        yield from (("discussion", discussion) for discussion in discussions)
+        for discussion in discussions:
+            yield "discussion", discussion


 class GithubListReleasesBlock(Block):
@@ -329,30 +334,31 @@ class GithubListReleasesBlock(Block):
         )

     @staticmethod
-    def list_releases(
+    async def list_releases(
         credentials: GithubCredentials, repo_url: str
     ) -> list[Output.ReleaseItem]:
         api = get_api(credentials)
         releases_url = repo_url + "/releases"
-        response = api.get(releases_url)
+        response = await api.get(releases_url)
         data = response.json()
         releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [
             {"name": release["name"], "url": release["html_url"]} for release in data
         ]
         return releases

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        releases = self.list_releases(
+        releases = await self.list_releases(
             credentials,
             input_data.repo_url,
         )
-        yield from (("release", release) for release in releases)
+        for release in releases:
+            yield "release", release


 class GithubReadFileBlock(Block):
@@ -405,40 +411,40 @@ class GithubReadFileBlock(Block):
         )

     @staticmethod
-    def read_file(
+    async def read_file(
         credentials: GithubCredentials, repo_url: str, file_path: str, branch: str
     ) -> tuple[str, int]:
         api = get_api(credentials)
         content_url = repo_url + f"/contents/{file_path}?ref={branch}"
-        response = api.get(content_url)
-        content = response.json()
+        response = await api.get(content_url)
+        data = response.json()

-        if isinstance(content, list):
+        if isinstance(data, list):
             # Multiple entries of different types exist at this path
-            if not (file := next((f for f in content if f["type"] == "file"), None)):
+            if not (file := next((f for f in data if f["type"] == "file"), None)):
                 raise TypeError("Not a file")
-            content = file
+            data = file

-        if content["type"] != "file":
+        if data["type"] != "file":
             raise TypeError("Not a file")

-        return content["content"], content["size"]
+        return data["content"], data["size"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        raw_content, size = self.read_file(
+        content, size = await self.read_file(
             credentials,
             input_data.repo_url,
-            input_data.file_path.lstrip("/"),
+            input_data.file_path,
             input_data.branch,
         )
-        yield "raw_content", raw_content
-        yield "text_content", base64.b64decode(raw_content).decode("utf-8")
+        yield "raw_content", content
+        yield "text_content", base64.b64decode(content).decode("utf-8")
         yield "size", size


@@ -515,52 +521,55 @@ class GithubReadFolderBlock(Block):
         )

     @staticmethod
-    def read_folder(
+    async def read_folder(
         credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str
     ) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]:
         api = get_api(credentials)
         contents_url = repo_url + f"/contents/{folder_path}?ref={branch}"
-        response = api.get(contents_url)
-        content = response.json()
+        response = await api.get(contents_url)
+        data = response.json()

-        if not isinstance(content, list):
+        if not isinstance(data, list):
             raise TypeError("Not a folder")

-        files = [
+        files: list[GithubReadFolderBlock.Output.FileEntry] = [
             GithubReadFolderBlock.Output.FileEntry(
                 name=entry["name"],
                 path=entry["path"],
                 size=entry["size"],
             )
-            for entry in content
+            for entry in data
             if entry["type"] == "file"
         ]
-        dirs = [
+
+        dirs: list[GithubReadFolderBlock.Output.DirEntry] = [
             GithubReadFolderBlock.Output.DirEntry(
                 name=entry["name"],
                 path=entry["path"],
             )
-            for entry in content
+            for entry in data
             if entry["type"] == "dir"
         ]

         return files, dirs

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        files, dirs = self.read_folder(
+        files, dirs = await self.read_folder(
             credentials,
             input_data.repo_url,
             input_data.folder_path.lstrip("/"),
             input_data.branch,
         )
-        yield from (("file", file) for file in files)
-        yield from (("dir", dir) for dir in dirs)
+        for file in files:
+            yield "file", file
+        for dir in dirs:
+            yield "dir", dir


 class GithubMakeBranchBlock(Block):
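`GithubReadFileBlock` yields both the raw and decoded payloads because the GitHub contents API returns file bodies base64-encoded, typically with embedded newlines, which `base64.b64decode` discards by default. For example:

import base64

api_content = "aGVsbG8g\nd29ybGQ=\n"  # shape of a /contents/{path} "content" field
print(base64.b64decode(api_content).decode("utf-8"))  # hello world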
@@ -606,32 +615,35 @@ class GithubMakeBranchBlock(Block):
         )

     @staticmethod
-    def create_branch(
+    async def create_branch(
         credentials: GithubCredentials,
         repo_url: str,
         new_branch: str,
         source_branch: str,
     ) -> str:
         api = get_api(credentials)
         # Get the SHA of the source branch
         ref_url = repo_url + f"/git/refs/heads/{source_branch}"
-        response = api.get(ref_url)
-        sha = response.json()["object"]["sha"]
+        response = await api.get(ref_url)
+        data = response.json()
+        sha = data["object"]["sha"]

         # Create the new branch
-        create_ref_url = repo_url + "/git/refs"
-        data = {"ref": f"refs/heads/{new_branch}", "sha": sha}
-        response = api.post(create_ref_url, json=data)
+        new_ref_url = repo_url + "/git/refs"
+        data = {
+            "ref": f"refs/heads/{new_branch}",
+            "sha": sha,
+        }
+        response = await api.post(new_ref_url, json=data)
         return "Branch created successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        status = self.create_branch(
+        status = await self.create_branch(
             credentials,
             input_data.repo_url,
             input_data.new_branch,
@@ -678,22 +690,22 @@ class GithubDeleteBranchBlock(Block):
         )

     @staticmethod
-    def delete_branch(
+    async def delete_branch(
         credentials: GithubCredentials, repo_url: str, branch: str
     ) -> str:
         api = get_api(credentials)
         ref_url = repo_url + f"/git/refs/heads/{branch}"
-        api.delete(ref_url)
+        await api.delete(ref_url)
         return "Branch deleted successfully"

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        status = self.delete_branch(
+        status = await self.delete_branch(
             credentials,
             input_data.repo_url,
             input_data.branch,
@@ -761,7 +773,7 @@ class GithubCreateFileBlock(Block):
         )

     @staticmethod
-    def create_file(
+    async def create_file(
         credentials: GithubCredentials,
         repo_url: str,
         file_path: str,
@@ -770,23 +782,18 @@ class GithubCreateFileBlock(Block):
         commit_message: str,
     ) -> tuple[str, str]:
         api = get_api(credentials)
-        # Convert content to base64
-        content_bytes = content.encode("utf-8")
-        content_base64 = base64.b64encode(content_bytes).decode("utf-8")
-
-        # Create the file using the GitHub API
-        contents_url = f"{repo_url}/contents/{file_path}"
+        contents_url = repo_url + f"/contents/{file_path}"
+        content_base64 = base64.b64encode(content.encode()).decode()
         data = {
             "message": commit_message,
             "content": content_base64,
             "branch": branch,
         }
-        response = api.put(contents_url, json=data)
-        result = response.json()
-
-        return result["content"]["html_url"], result["commit"]["sha"]
+        response = await api.put(contents_url, json=data)
+        data = response.json()
+        return data["content"]["html_url"], data["commit"]["sha"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -794,7 +801,7 @@ class GithubCreateFileBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            url, sha = self.create_file(
+            url, sha = await self.create_file(
                 credentials,
                 input_data.repo_url,
                 input_data.file_path,
@@ -866,7 +873,7 @@ class GithubUpdateFileBlock(Block):
         )

     @staticmethod
-    def update_file(
+    async def update_file(
         credentials: GithubCredentials,
         repo_url: str,
         file_path: str,
@@ -875,30 +882,24 @@ class GithubUpdateFileBlock(Block):
         commit_message: str,
     ) -> tuple[str, str]:
         api = get_api(credentials)

         # First get the current file to get its SHA
-        contents_url = f"{repo_url}/contents/{file_path}"
+        contents_url = repo_url + f"/contents/{file_path}"
         params = {"ref": branch}
-        response = api.get(contents_url, params=params)
-        current_file = response.json()
+        response = await api.get(contents_url, params=params)
+        data = response.json()

-        # Convert new content to base64
-        content_bytes = content.encode("utf-8")
-        content_base64 = base64.b64encode(content_bytes).decode("utf-8")
-
-        # Update the file
+        content_base64 = base64.b64encode(content.encode()).decode()
         data = {
             "message": commit_message,
             "content": content_base64,
-            "sha": current_file["sha"],
+            "sha": data["sha"],
             "branch": branch,
         }
-        response = api.put(contents_url, json=data)
-        result = response.json()
-
-        return result["content"]["html_url"], result["commit"]["sha"]
+        response = await api.put(contents_url, json=data)
+        data = response.json()
+        return data["content"]["html_url"], data["commit"]["sha"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -906,7 +907,7 @@ class GithubUpdateFileBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            url, sha = self.update_file(
+            url, sha = await self.update_file(
                 credentials,
                 input_data.repo_url,
                 input_data.file_path,
@@ -981,7 +982,7 @@ class GithubCreateRepositoryBlock(Block):
         )

     @staticmethod
-    def create_repository(
+    async def create_repository(
         credentials: GithubCredentials,
         name: str,
         description: str,
@@ -989,24 +990,19 @@ class GithubCreateRepositoryBlock(Block):
         auto_init: bool,
         gitignore_template: str,
     ) -> tuple[str, str]:
-        api = get_api(credentials, convert_urls=False)  # Disable URL conversion
+        api = get_api(credentials)
         data = {
             "name": name,
             "description": description,
             "private": private,
             "auto_init": auto_init,
+            "gitignore_template": gitignore_template,
         }
-
-        if gitignore_template:
-            data["gitignore_template"] = gitignore_template
-
-        # Create repository using the user endpoint
-        response = api.post("https://api.github.com/user/repos", json=data)
-        result = response.json()
-
-        return result["html_url"], result["clone_url"]
+        response = await api.post("https://api.github.com/user/repos", json=data)
+        data = response.json()
+        return data["html_url"], data["clone_url"]

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -1014,7 +1010,7 @@ class GithubCreateRepositoryBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            url, clone_url = self.create_repository(
+            url, clone_url = await self.create_repository(
                 credentials,
                 input_data.name,
                 input_data.description,
@@ -1081,17 +1077,13 @@ class GithubListStargazersBlock(Block):
         )

     @staticmethod
-    def list_stargazers(
+    async def list_stargazers(
         credentials: GithubCredentials, repo_url: str
     ) -> list[Output.StargazerItem]:
         api = get_api(credentials)
-        # Add /stargazers to the repo URL to get stargazers endpoint
-        stargazers_url = f"{repo_url}/stargazers"
-        # Set accept header to get starred_at timestamp
-        headers = {"Accept": "application/vnd.github.star+json"}
-        response = api.get(stargazers_url, headers=headers)
+        stargazers_url = repo_url + "/stargazers"
+        response = await api.get(stargazers_url)
         data = response.json()

         stargazers: list[GithubListStargazersBlock.Output.StargazerItem] = [
             {
                 "username": stargazer["login"],
@@ -1101,18 +1093,16 @@ class GithubListStargazersBlock(Block):
         ]
         return stargazers

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: GithubCredentials,
         **kwargs,
     ) -> BlockOutput:
-        try:
-            stargazers = self.list_stargazers(
-                credentials,
-                input_data.repo_url,
-            )
-            yield from (("stargazer", stargazer) for stargazer in stargazers)
-        except Exception as e:
-            yield "error", str(e)
+        stargazers = await self.list_stargazers(
+            credentials,
+            input_data.repo_url,
+        )
+        for stargazer in stargazers:
+            yield "stargazer", stargazer
@@ -115,7 +115,7 @@ class GithubCreateStatusBlock(Block):
         )

     @staticmethod
-    def create_status(
+    async def create_status(
         credentials: GithubFineGrainedAPICredentials,
         repo_url: str,
         sha: str,
@@ -144,7 +144,9 @@ class GithubCreateStatusBlock(Block):
         data.description = description

         status_url = f"{repo_url}/statuses/{sha}"
-        response = api.post(status_url, data=data.model_dump_json(exclude_none=True))
+        response = await api.post(
+            status_url, data=data.model_dump_json(exclude_none=True)
+        )
         result = response.json()

         return {
@@ -158,7 +160,7 @@ class GithubCreateStatusBlock(Block):
             "updated_at": result["updated_at"],
         }

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -166,7 +168,7 @@ class GithubCreateStatusBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            result = self.create_status(
+            result = await self.create_status(
                 credentials=credentials,
                 repo_url=input_data.repo_url,
                 sha=input_data.sha,

@@ -53,7 +53,7 @@ class GitHubTriggerBase:
             description="Error message if the payload could not be processed"
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "payload", input_data.payload
         yield "triggered_by_user", input_data.payload["sender"]

@@ -148,8 +148,9 @@ class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
-        yield from super().run(input_data, **kwargs)
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
+        async for name, value in super().run(input_data, **kwargs):
+            yield name, value
         yield "event", input_data.payload["action"]
         yield "number", input_data.payload["number"]
         yield "pull_request", input_data.payload["pull_request"]
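The trigger block's delegation has to become an explicit loop because Python has no async counterpart to `yield from`: inside an `async def` generator, `yield from` is a syntax error, so each delegated item is re-yielded by hand. In miniature:

async def base():
    yield "payload", {"action": "opened"}

async def derived():
    # `yield from base()` would be a SyntaxError in an async generator
    async for name, value in base():
        yield name, value
    yield "event", "opened"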
autogpt_platform/backend/backend/blocks/google/calendar.py (new file, 603 lines)
@@ -0,0 +1,603 @@
import asyncio
import enum
import uuid
from datetime import datetime, timedelta, timezone
from typing import Literal

from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.settings import AppEnvironment, Settings

from ._auth import (
    GOOGLE_OAUTH_IS_CONFIGURED,
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GoogleCredentials,
    GoogleCredentialsField,
    GoogleCredentialsInput,
)


class CalendarEvent(BaseModel):
    """Structured representation of a Google Calendar event."""

    id: str
    title: str
    start_time: str
    end_time: str
    is_all_day: bool
    location: str | None
    description: str | None
    organizer: str | None
    attendees: list[str]
    has_video_call: bool
    video_link: str | None
    calendar_link: str
    is_recurring: bool

class GoogleCalendarReadEventsBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/calendar.readonly"]
        )
        calendar_id: str = SchemaField(
            description="Calendar ID (use 'primary' for your main calendar)",
            default="primary",
        )
        max_events: int = SchemaField(
            description="Maximum number of events to retrieve", default=10
        )
        start_time: datetime = SchemaField(
            description="Retrieve events starting from this time",
            default_factory=lambda: datetime.now(tz=timezone.utc),
        )
        time_range_days: int = SchemaField(
            description="Number of days to look ahead for events", default=30
        )
        search_term: str | None = SchemaField(
            description="Optional search term to filter events by", default=None
        )
        page_token: str | None = SchemaField(
            description="Page token from previous request to get the next batch of events. You can use this if you have lots of events you want to process in a loop",
            default=None,
        )
        include_declined_events: bool = SchemaField(
            description="Include events you've declined", default=False
        )

    class Output(BlockSchema):
        events: list[CalendarEvent] = SchemaField(
            description="List of calendar events in the requested time range",
            default_factory=list,
        )
        event: CalendarEvent = SchemaField(
            description="One of the calendar events in the requested time range"
        )
        next_page_token: str | None = SchemaField(
            description="Token for retrieving the next page of events if more exist",
            default=None,
        )
        error: str = SchemaField(
            description="Error message if the request failed",
        )

    def __init__(self):
        settings = Settings()

        # Create realistic test data for events
        test_now = datetime.now(tz=timezone.utc)
        test_tomorrow = test_now + timedelta(days=1)

        test_event_dict = {
            "id": "event1id",
            "title": "Team Meeting",
            "start_time": test_tomorrow.strftime("%Y-%m-%d %H:%M"),
            "end_time": (test_tomorrow + timedelta(hours=1)).strftime("%Y-%m-%d %H:%M"),
            "is_all_day": False,
            "location": "Conference Room A",
            "description": "Weekly team sync",
            "organizer": "manager@example.com",
            "attendees": ["colleague1@example.com", "colleague2@example.com"],
            "has_video_call": True,
            "video_link": "https://meet.google.com/abc-defg-hij",
            "calendar_link": "https://calendar.google.com/calendar/event?eid=event1id",
            "is_recurring": True,
        }

        super().__init__(
            id="80bc3ed1-e9a4-449e-8163-a8fc86f74f6a",
            description="Retrieves upcoming events from a Google Calendar with filtering options",
            categories={BlockCategory.PRODUCTIVITY, BlockCategory.DATA},
            input_schema=GoogleCalendarReadEventsBlock.Input,
            output_schema=GoogleCalendarReadEventsBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED
            or settings.config.app_env == AppEnvironment.PRODUCTION,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "calendar_id": "primary",
                "max_events": 5,
                "start_time": test_now.isoformat(),
                "time_range_days": 7,
                "search_term": None,
                "include_declined_events": False,
                "page_token": None,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("event", test_event_dict),
                ("events", [test_event_dict]),
            ],
            test_mock={
                "_read_calendar": lambda *args, **kwargs: {
                    "items": [
                        {
                            "id": "event1id",
                            "summary": "Team Meeting",
                            "start": {
                                "dateTime": test_tomorrow.isoformat(),
                                "timeZone": "UTC",
                            },
                            "end": {
                                "dateTime": (
                                    test_tomorrow + timedelta(hours=1)
                                ).isoformat(),
                                "timeZone": "UTC",
                            },
                            "location": "Conference Room A",
                            "description": "Weekly team sync",
                            "organizer": {"email": "manager@example.com"},
                            "attendees": [
                                {"email": "colleague1@example.com"},
                                {"email": "colleague2@example.com"},
                            ],
                            "conferenceData": {
                                "conferenceUrl": "https://meet.google.com/abc-defg-hij"
                            },
                            "htmlLink": "https://calendar.google.com/calendar/event?eid=event1id",
                            "recurrence": ["RRULE:FREQ=WEEKLY;COUNT=10"],
                        }
                    ],
                    "nextPageToken": None,
                },
                "_format_events": lambda *args, **kwargs: [test_event_dict],
            },
        )

    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        try:
            service = self._build_service(credentials, **kwargs)

            # Calculate end time based on start time and time range
            end_time = input_data.start_time + timedelta(
                days=input_data.time_range_days
            )

            # Call Google Calendar API
            result = await asyncio.to_thread(
                self._read_calendar,
                service=service,
                calendarId=input_data.calendar_id,
                time_min=input_data.start_time.isoformat(),
                time_max=end_time.isoformat(),
                max_results=input_data.max_events,
                single_events=True,
                search_term=input_data.search_term,
                show_deleted=False,
                show_hidden=input_data.include_declined_events,
                page_token=input_data.page_token,
            )

            # Format events into a user-friendly structure
            formatted_events = self._format_events(result.get("items", []))

            # Include next page token if available
            if next_page_token := result.get("nextPageToken"):
                yield "next_page_token", next_page_token

            for event in formatted_events:
                yield "event", event

            yield "events", formatted_events

        except Exception as e:
            yield "error", str(e)

    @staticmethod
    def _build_service(credentials: GoogleCredentials, **kwargs):
        creds = Credentials(
            token=(
                credentials.access_token.get_secret_value()
                if credentials.access_token
                else None
            ),
            refresh_token=(
                credentials.refresh_token.get_secret_value()
                if credentials.refresh_token
                else None
            ),
            token_uri="https://oauth2.googleapis.com/token",
            client_id=Settings().secrets.google_client_id,
            client_secret=Settings().secrets.google_client_secret,
            scopes=credentials.scopes,
        )
        return build("calendar", "v3", credentials=creds)

    def _read_calendar(
        self,
        service,
        calendarId: str,
        time_min: str,
        time_max: str,
        max_results: int,
        single_events: bool,
        search_term: str | None = None,
        show_deleted: bool = False,
        show_hidden: bool = False,
        page_token: str | None = None,
    ) -> dict:
        """Read calendar events with optional filtering."""
        calendar = service.events()

        # Build query parameters
        params = {
            "calendarId": calendarId,
            "timeMin": time_min,
            "timeMax": time_max,
            "maxResults": max_results,
            "singleEvents": single_events,
            "orderBy": "startTime",
            "showDeleted": show_deleted,
            "showHiddenInvitations": show_hidden,
            **({"pageToken": page_token} if page_token else {}),
        }

        # Add search term if provided
        if search_term:
            params["q"] = search_term

        result = calendar.list(**params).execute()
        return result

    def _format_events(self, events: list[dict]) -> list[CalendarEvent]:
        """Format Google Calendar API events into user-friendly structure."""
        formatted_events = []

        for event in events:
            # Determine if all-day event
            is_all_day = "date" in event.get("start", {})

            # Format start and end times
            if is_all_day:
                start_time = event.get("start", {}).get("date", "")
                end_time = event.get("end", {}).get("date", "")
            else:
                # Convert ISO format to more readable format
                start_datetime = datetime.fromisoformat(
                    event.get("start", {}).get("dateTime", "").replace("Z", "+00:00")
                )
                end_datetime = datetime.fromisoformat(
                    event.get("end", {}).get("dateTime", "").replace("Z", "+00:00")
                )
                start_time = start_datetime.strftime("%Y-%m-%d %H:%M")
                end_time = end_datetime.strftime("%Y-%m-%d %H:%M")

            # Extract attendees
            attendees = []
            for attendee in event.get("attendees", []):
                if email := attendee.get("email"):
                    attendees.append(email)

            # Check for video call link
            has_video_call = False
            video_link = None
            if conf_data := event.get("conferenceData"):
                if conf_url := conf_data.get("conferenceUrl"):
                    has_video_call = True
                    video_link = conf_url
                elif entry_points := conf_data.get("entryPoints", []):
                    for entry in entry_points:
                        if entry.get("entryPointType") == "video":
                            has_video_call = True
                            video_link = entry.get("uri")
                            break

            # Create formatted event
            formatted_event = CalendarEvent(
                id=event.get("id", ""),
                title=event.get("summary", "Untitled Event"),
                start_time=start_time,
                end_time=end_time,
                is_all_day=is_all_day,
                location=event.get("location"),
                description=event.get("description"),
                organizer=event.get("organizer", {}).get("email"),
                attendees=attendees,
                has_video_call=has_video_call,
                video_link=video_link,
                calendar_link=event.get("htmlLink", ""),
                is_recurring=bool(event.get("recurrence")),
            )

            formatted_events.append(formatted_event)

        return formatted_events

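`_read_calendar` stays synchronous because `googleapiclient` is a blocking library; the async `run` above pushes it onto a worker thread with `asyncio.to_thread` so the event loop is never blocked. The same pattern in miniature:

import asyncio
import time

def blocking_call(query: str) -> dict:
    time.sleep(1)  # stand-in for a synchronous HTTP round trip
    return {"items": [query]}

async def main():
    # Runs on a thread from the default executor; the loop stays responsive.
    result = await asyncio.to_thread(blocking_call, "team meeting")
    print(result)

asyncio.run(main())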
class ReminderPreset(enum.Enum):
    """Common reminder times before an event."""

    TEN_MINUTES = 10
    THIRTY_MINUTES = 30
    ONE_HOUR = 60
    ONE_DAY = 1440  # 24 hours in minutes


class RecurrenceFrequency(enum.Enum):
    """Frequency options for recurring events."""

    DAILY = "DAILY"
    WEEKLY = "WEEKLY"
    MONTHLY = "MONTHLY"
    YEARLY = "YEARLY"


class ExactTiming(BaseModel):
    """Model for specifying start and end times."""

    discriminator: Literal["exact_timing"]
    start_datetime: datetime
    end_datetime: datetime


class DurationTiming(BaseModel):
    """Model for specifying start time and duration."""

    discriminator: Literal["duration_timing"]
    start_datetime: datetime
    duration_minutes: int


class OneTimeEvent(BaseModel):
    """Model for a one-time event."""

    discriminator: Literal["one_time"]


class RecurringEvent(BaseModel):
    """Model for a recurring event."""

    discriminator: Literal["recurring"]
    frequency: RecurrenceFrequency
    count: int

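The `discriminator` literals on these models let Pydantic pick the right variant of the `timing` and `recurrence` unions straight from JSON. A self-contained sketch of how such a tagged union validates (the `TimingInput` wrapper is hypothetical; the two timing models mirror the ones above):

from datetime import datetime
from typing import Literal, Union

from pydantic import BaseModel, Field

class ExactTiming(BaseModel):
    discriminator: Literal["exact_timing"]
    start_datetime: datetime
    end_datetime: datetime

class DurationTiming(BaseModel):
    discriminator: Literal["duration_timing"]
    start_datetime: datetime
    duration_minutes: int

class TimingInput(BaseModel):  # hypothetical wrapper for illustration
    timing: Union[ExactTiming, DurationTiming] = Field(discriminator="discriminator")

parsed = TimingInput.model_validate(
    {
        "timing": {
            "discriminator": "duration_timing",
            "start_datetime": "2025-01-01T10:00:00",
            "duration_minutes": 60,
        }
    }
)
print(type(parsed.timing).__name__)  # DurationTiming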
class GoogleCalendarCreateEventBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/calendar"]
        )
        # Event Details
        event_title: str = SchemaField(description="Title of the event")
        location: str | None = SchemaField(
            description="Location of the event", default=None
        )
        description: str | None = SchemaField(
            description="Description of the event", default=None
        )

        # Timing
        timing: ExactTiming | DurationTiming = SchemaField(
            discriminator="discriminator",
            advanced=False,
            description="Specify when the event starts and ends",
            default_factory=lambda: DurationTiming(
                discriminator="duration_timing",
                start_datetime=datetime.now().replace(microsecond=0, second=0, minute=0)
                + timedelta(hours=1),
                duration_minutes=60,
            ),
        )

        # Calendar selection
        calendar_id: str = SchemaField(
            description="Calendar ID (use 'primary' for your main calendar)",
            default="primary",
        )

        # Guests
        guest_emails: list[str] = SchemaField(
            description="Email addresses of guests to invite", default_factory=list
        )
        send_notifications: bool = SchemaField(
            description="Send email notifications to guests", default=True
        )

        # Extras
        add_google_meet: bool = SchemaField(
            description="Include a Google Meet video conference link", default=False
        )
        recurrence: OneTimeEvent | RecurringEvent = SchemaField(
            discriminator="discriminator",
            description="Whether the event repeats",
            default_factory=lambda: OneTimeEvent(discriminator="one_time"),
        )
        reminder_minutes: list[ReminderPreset] = SchemaField(
            description="When to send reminders before the event",
            default_factory=lambda: [ReminderPreset.TEN_MINUTES],
        )

    class Output(BlockSchema):
        event_id: str = SchemaField(description="ID of the created event")
        event_link: str = SchemaField(
            description="Link to view the event in Google Calendar"
        )
        error: str = SchemaField(description="Error message if event creation failed")

    def __init__(self):
        settings = Settings()

        super().__init__(
            id="ed2ec950-fbff-4204-94c0-023fb1d625e0",
            description="This block creates a new event in Google Calendar with customizable parameters.",
            categories={BlockCategory.PRODUCTIVITY},
            input_schema=GoogleCalendarCreateEventBlock.Input,
            output_schema=GoogleCalendarCreateEventBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED
            or settings.config.app_env == AppEnvironment.PRODUCTION,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "event_title": "Team Meeting",
                "location": "Conference Room A",
                "description": "Weekly team sync-up",
                "calendar_id": "primary",
                "guest_emails": ["colleague1@example.com", "colleague2@example.com"],
                "add_google_meet": True,
                "send_notifications": True,
                "reminder_minutes": [
                    ReminderPreset.TEN_MINUTES.value,
                    ReminderPreset.ONE_HOUR.value,
                ],
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("event_id", "abc123event_id"),
                ("event_link", "https://calendar.google.com/calendar/event?eid=abc123"),
            ],
            test_mock={
                "_create_event": lambda *args, **kwargs: {
                    "id": "abc123event_id",
                    "htmlLink": "https://calendar.google.com/calendar/event?eid=abc123",
                }
            },
        )

    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        try:
            service = self._build_service(credentials, **kwargs)

            # Create event body
            # Get start and end times based on the timing option
            if input_data.timing.discriminator == "exact_timing":
                start_datetime = input_data.timing.start_datetime
                end_datetime = input_data.timing.end_datetime
            else:  # duration_timing
                start_datetime = input_data.timing.start_datetime
                end_datetime = start_datetime + timedelta(
                    minutes=input_data.timing.duration_minutes
                )

            # Format datetimes for Google Calendar API
            start_time_str = start_datetime.isoformat()
            end_time_str = end_datetime.isoformat()

            # Build the event body
            event_body = {
                "summary": input_data.event_title,
                "start": {"dateTime": start_time_str},
                "end": {"dateTime": end_time_str},
            }

            # Add optional fields
            if input_data.location:
                event_body["location"] = input_data.location

            if input_data.description:
                event_body["description"] = input_data.description

            # Add guests
            if input_data.guest_emails:
                event_body["attendees"] = [
                    {"email": email} for email in input_data.guest_emails
                ]

            # Add reminders
            if input_data.reminder_minutes:
                event_body["reminders"] = {
                    "useDefault": False,
                    "overrides": [
                        {"method": "popup", "minutes": reminder.value}
                        for reminder in input_data.reminder_minutes
                    ],
                }

            # Add Google Meet
            if input_data.add_google_meet:
                event_body["conferenceData"] = {
                    "createRequest": {
                        "requestId": f"meet-{uuid.uuid4()}",
                        "conferenceSolutionKey": {"type": "hangoutsMeet"},
                    }
                }

            # Add recurrence
            if input_data.recurrence.discriminator == "recurring":
                rule = f"RRULE:FREQ={input_data.recurrence.frequency.value}"
                rule += f";COUNT={input_data.recurrence.count}"
                event_body["recurrence"] = [rule]

            # Create the event
            result = await asyncio.to_thread(
                self._create_event,
                service=service,
                calendar_id=input_data.calendar_id,
                event_body=event_body,
                send_notifications=input_data.send_notifications,
                conference_data_version=1 if input_data.add_google_meet else 0,
            )

            yield "event_id", result["id"]
            yield "event_link", result["htmlLink"]

        except Exception as e:
            yield "error", str(e)

    @staticmethod
    def _build_service(credentials: GoogleCredentials, **kwargs):
        creds = Credentials(
            token=(
                credentials.access_token.get_secret_value()
                if credentials.access_token
                else None
            ),
            refresh_token=(
                credentials.refresh_token.get_secret_value()
                if credentials.refresh_token
                else None
            ),
            token_uri="https://oauth2.googleapis.com/token",
            client_id=Settings().secrets.google_client_id,
            client_secret=Settings().secrets.google_client_secret,
            scopes=credentials.scopes,
        )
        return build("calendar", "v3", credentials=creds)

    def _create_event(
        self,
        service,
        calendar_id: str,
        event_body: dict,
        send_notifications: bool = False,
        conference_data_version: int = 0,
    ) -> dict:
        """Create a new event in Google Calendar."""
        calendar = service.events()

        # Make the API call
        result = calendar.insert(
|
||||
calendarId=calendar_id,
|
||||
body=event_body,
|
||||
sendNotifications=send_notifications,
|
||||
conferenceDataVersion=conference_data_version,
|
||||
).execute()
|
||||
|
||||
return result
|
||||
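
Note: `timing` above is a Pydantic-style discriminated union, selected by the literal `discriminator` field. A minimal, self-contained sketch of how the two timing variants could be modelled and resolved to a concrete start/end pair (the model names and fields mirror the schema above; the real base classes in the codebase may differ):

from datetime import datetime, timedelta
from typing import Literal

from pydantic import BaseModel


class ExactTiming(BaseModel):
    discriminator: Literal["exact_timing"]
    start_datetime: datetime
    end_datetime: datetime


class DurationTiming(BaseModel):
    discriminator: Literal["duration_timing"]
    start_datetime: datetime
    duration_minutes: int


# Either variant resolves to a (start, end) pair, exactly as run() does:
timing = DurationTiming(
    discriminator="duration_timing",
    start_datetime=datetime(2025, 1, 1, 9, 0),
    duration_minutes=60,
)
end = timing.start_datetime + timedelta(minutes=timing.duration_minutes)
assert end == datetime(2025, 1, 1, 10, 0)
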
@@ -1,3 +1,4 @@
import asyncio
import base64
from email.utils import parseaddr
from typing import List
@@ -128,11 +129,13 @@ class GmailReadBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = self._build_service(credentials, **kwargs)
        messages = self._read_emails(service, input_data.query, input_data.max_results)
        service = GmailReadBlock._build_service(credentials, **kwargs)
        messages = await asyncio.to_thread(
            self._read_emails, service, input_data.query, input_data.max_results
        )
        for email in messages:
            yield "email", email
        yield "emails", messages
@@ -286,14 +289,18 @@ class GmailSendBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        send_result = self._send_email(
            service, input_data.to, input_data.subject, input_data.body
        result = await asyncio.to_thread(
            self._send_email,
            service,
            input_data.to,
            input_data.subject,
            input_data.body,
        )
        yield "result", send_result
        yield "result", result

    def _send_email(self, service, to: str, subject: str, body: str) -> dict:
        if not to or not subject or not body:
@@ -358,12 +365,12 @@ class GmailListLabelsBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        labels = self._list_labels(service)
        yield "result", labels
        result = await asyncio.to_thread(self._list_labels, service)
        yield "result", result

    def _list_labels(self, service) -> list[dict]:
        results = service.users().labels().list(userId="me").execute()
@@ -419,11 +426,13 @@ class GmailAddLabelBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        result = self._add_label(service, input_data.message_id, input_data.label_name)
        result = await asyncio.to_thread(
            self._add_label, service, input_data.message_id, input_data.label_name
        )
        yield "result", result

    def _add_label(self, service, message_id: str, label_name: str) -> dict:
@@ -502,12 +511,12 @@ class GmailRemoveLabelBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        result = self._remove_label(
            service, input_data.message_id, input_data.label_name
        result = await asyncio.to_thread(
            self._remove_label, service, input_data.message_id, input_data.label_name
        )
        yield "result", result
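
Note: the pattern across these Gmail hunks is uniform. Each synchronous `run` generator becomes `async def run`, and every blocking `googleapiclient` call moves onto a worker thread via `asyncio.to_thread`, which keeps the event loop free. A runnable sketch of the idea, with a hypothetical `slow_fetch` standing in for the real Gmail helpers:

import asyncio


def slow_fetch(query: str) -> list[str]:
    # Stand-in for a blocking API call such as _read_emails().
    return [f"message matching {query!r}"]


async def run(query: str):
    # The blocking call runs on a thread; the coroutine awaits its result.
    messages = await asyncio.to_thread(slow_fetch, query)
    for message in messages:
        yield "email", message


async def main():
    async for name, value in run("is:unread"):
        print(name, value)


asyncio.run(main())
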
@@ -1,9 +1,11 @@
import asyncio

from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.settings import Settings
from backend.util.settings import AppEnvironment, Settings

from ._auth import (
    GOOGLE_OAUTH_IS_CONFIGURED,
@@ -36,13 +38,15 @@ class GoogleSheetsReadBlock(Block):
    )

    def __init__(self):
        settings = Settings()
        super().__init__(
            id="5724e902-3635-47e9-a108-aaa0263a4988",
            description="This block reads data from a Google Sheets spreadsheet.",
            categories={BlockCategory.DATA},
            input_schema=GoogleSheetsReadBlock.Input,
            output_schema=GoogleSheetsReadBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED
            or settings.config.app_env == AppEnvironment.PRODUCTION,
            test_input={
                "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
                "range": "Sheet1!A1:B2",
@@ -66,11 +70,13 @@ class GoogleSheetsReadBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = self._build_service(credentials, **kwargs)
        data = self._read_sheet(service, input_data.spreadsheet_id, input_data.range)
        data = await asyncio.to_thread(
            self._read_sheet, service, input_data.spreadsheet_id, input_data.range
        )
        yield "result", data

    @staticmethod
@@ -155,11 +161,12 @@ class GoogleSheetsWriteBlock(Block):
            },
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GoogleSheetsReadBlock._build_service(credentials, **kwargs)
        result = self._write_sheet(
        result = await asyncio.to_thread(
            self._write_sheet,
            service,
            input_data.spreadsheet_id,
            input_data.range,

@@ -103,7 +103,7 @@ class GoogleMapsSearchBlock(Block):
            test_credentials=TEST_CREDENTIALS,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        places = self.search_places(
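
Note: the new `disabled=` expression in `GoogleSheetsReadBlock.__init__` gates the block on two independent conditions. A small truth-table sketch (the helper below is hypothetical, written only to show the boolean):

def is_enabled(oauth_configured: bool, is_production: bool) -> bool:
    # disabled = not oauth_configured or is_production, so:
    return oauth_configured and not is_production


assert is_enabled(True, False) is True    # OAuth set up, non-prod: enabled
assert is_enabled(True, True) is False    # production: force-disabled
assert is_enabled(False, False) is False  # OAuth missing: disabled
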
@@ -1,14 +1,17 @@
from typing import Any, Optional

from backend.util.request import requests
from backend.util.request import Requests


class GetRequest:
    @classmethod
    def get_request(
    async def get_request(
        cls, url: str, headers: Optional[dict] = None, json: bool = False
    ) -> Any:
        if headers is None:
            headers = {}
        response = requests.get(url, headers=headers)
        return response.json() if json else response.text
        response = await Requests().get(url, headers=headers)
        if json:
            return response.json()
        else:
            return response.text()
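
Note the calling-convention change hiding in this hunk: with the synchronous wrapper, `response.text` was an attribute, while the async `Requests` wrapper exposes `text()` as a method, and `get_request` itself must now be awaited. Assuming the wrapper behaves as this diff implies:

# Old, synchronous style:
#     response = requests.get(url, headers=headers)
#     body = response.text            # attribute access
#
# New, asynchronous style:
#     response = await Requests().get(url, headers=headers)
#     body = response.text()          # method call, inside an async function
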
@@ -1,13 +1,20 @@
import json
import logging
from enum import Enum
from typing import Any
from io import BytesIO
from pathlib import Path

from requests.exceptions import HTTPError, RequestException
import aiofiles

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.file import (
    MediaFileType,
    get_exec_file_path,
    get_mime_type,
    store_media_file,
)
from backend.util.request import Requests

logger = logging.getLogger(name=__name__)

@@ -38,13 +45,21 @@ class SendWebRequestBlock(Block):
        )
        json_format: bool = SchemaField(
            title="JSON format",
            description="Whether to send and receive body as JSON",
            description="If true, send the body as JSON (unless files are also present).",
            default=True,
        )
        body: Any = SchemaField(
            description="The body of the request",
        body: dict | None = SchemaField(
            description="Form/JSON body payload. If files are supplied, this must be a mapping of form‑fields.",
            default=None,
        )
        files_name: str = SchemaField(
            description="The name of the file field in the form data.",
            default="file",
        )
        files: list[MediaFileType] = SchemaField(
            description="Mapping of *form field name* → Image url / path / base64 url.",
            default_factory=list,
        )

    class Output(BlockSchema):
        response: object = SchemaField(description="The response from the server")
@@ -55,59 +70,102 @@ class SendWebRequestBlock(Block):
    def __init__(self):
        super().__init__(
            id="6595ae1f-b924-42cb-9a41-551a0611c4b4",
            description="This block makes an HTTP request to the given URL.",
            description="Make an HTTP request (JSON / form / multipart).",
            categories={BlockCategory.OUTPUT},
            input_schema=SendWebRequestBlock.Input,
            output_schema=SendWebRequestBlock.Output,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        body = input_data.body
    @staticmethod
    async def _prepare_files(
        graph_exec_id: str,
        files_name: str,
        files: list[MediaFileType],
    ) -> list[tuple[str, tuple[str, BytesIO, str]]]:
        """
        Prepare files for the request by storing them and reading their content.
        Returns a list of tuples in the format:
        (files_name, (filename, BytesIO, mime_type))
        """
        files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = []

        if input_data.json_format:
            if isinstance(body, str):
                try:
                    # Try to parse as JSON first
                    body = json.loads(body)
                except json.JSONDecodeError:
                    # If it's not valid JSON and just plain text,
                    # we should send it as plain text instead
        for media in files:
            # Normalise to a list so we can repeat the same key
            rel_path = await store_media_file(
                graph_exec_id, media, return_content=False
            )
            abs_path = get_exec_file_path(graph_exec_id, rel_path)
            async with aiofiles.open(abs_path, "rb") as f:
                content = await f.read()
            handle = BytesIO(content)
            mime = get_mime_type(abs_path)
            files_payload.append((files_name, (Path(abs_path).name, handle, mime)))

        return files_payload

    async def run(
        self, input_data: Input, *, graph_exec_id: str, **kwargs
    ) -> BlockOutput:
        # ─── Parse/normalise body ────────────────────────────────────
        body = input_data.body
        if isinstance(body, str):
            try:
                # Validate JSON string length to prevent DoS attacks
                if len(body) > 10_000_000:  # 10MB limit
                    raise ValueError("JSON body too large")

                parsed_body = json.loads(body)

                # Validate that parsed JSON is safe (basic object/array/primitive types)
                if (
                    isinstance(parsed_body, (dict, list, str, int, float, bool))
                    or parsed_body is None
                ):
                    body = parsed_body
                else:
                    # Unexpected type, treat as plain text
                    input_data.json_format = False

        try:
            response = requests.request(
                input_data.method.value,
                input_data.url,
                headers=input_data.headers,
                json=body if input_data.json_format else None,
                data=body if not input_data.json_format else None,
            except (json.JSONDecodeError, ValueError):
                # Invalid JSON or too large – treat as form‑field value instead
                input_data.json_format = False

        # ─── Prepare files (if any) ──────────────────────────────────
        use_files = bool(input_data.files)
        files_payload: list[tuple[str, tuple[str, BytesIO, str]]] = []
        if use_files:
            files_payload = await self._prepare_files(
                graph_exec_id, input_data.files_name, input_data.files
            )
            result = response.json() if input_data.json_format else response.text

        # Enforce body format rules
        if use_files and input_data.json_format:
            raise ValueError(
                "json_format=True cannot be combined with file uploads; set json_format=False and put form fields in `body`."
            )

        # ─── Execute request ─────────────────────────────────────────
        response = await Requests().request(
            input_data.method.value,
            input_data.url,
            headers=input_data.headers,
            files=files_payload if use_files else None,
            # * If files → multipart ⇒ pass form‑fields via data=
            data=body if not input_data.json_format else None,
            # * Else, choose JSON vs url‑encoded based on flag
            json=body if (input_data.json_format and not use_files) else None,
        )

        # Decide how to parse the response
        if response.headers.get("content-type", "").startswith("application/json"):
            result = None if response.status == 204 else response.json()
        else:
            result = response.text()

        # Yield according to status code bucket
        if 200 <= response.status < 300:
            yield "response", result

        except HTTPError as e:
            # Handle error responses
            try:
                result = e.response.json() if input_data.json_format else str(e)
            except json.JSONDecodeError:
                result = str(e)

            if 400 <= e.response.status_code < 500:
                yield "client_error", result
            elif 500 <= e.response.status_code < 600:
                yield "server_error", result
            else:
                error_msg = (
                    "Unexpected status code "
                    f"{e.response.status_code} '{e.response.reason}'"
                )
                logger.warning(error_msg)
                yield "error", error_msg

        except RequestException as e:
            # Handle other request-related exceptions
            yield "error", str(e)

        except Exception as e:
            # Catch any other unexpected exceptions
            yield "error", str(e)
        elif 400 <= response.status < 500:
            yield "client_error", result
        else:
            yield "server_error", result
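
Note: `_prepare_files` returns the tuple shape that multipart form encoders commonly accept, repeating one field name for several files. An illustration of that shape (file names, bytes, and MIME types here are placeholders):

from io import BytesIO

files_name = "file"
files_payload = [
    # (form field name, (filename, file-like handle, MIME type))
    (files_name, ("report.pdf", BytesIO(b"%PDF-1.4 ..."), "application/pdf")),
    (files_name, ("photo.png", BytesIO(b"fake png bytes"), "image/png")),
]
# Repeating the same key uploads several files under a single form field.
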
@@ -5,7 +5,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.request import Requests


class HubSpotCompanyBlock(Block):
@@ -35,7 +35,7 @@ class HubSpotCompanyBlock(Block):
            output_schema=HubSpotCompanyBlock.Output,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com/crm/v3/objects/companies"
@@ -45,7 +45,7 @@ class HubSpotCompanyBlock(Block):
        }

        if input_data.operation == "create":
            response = requests.post(
            response = await Requests().post(
                base_url, headers=headers, json={"properties": input_data.company_data}
            )
            result = response.json()
@@ -67,14 +67,16 @@ class HubSpotCompanyBlock(Block):
                        }
                    ]
                }
            response = requests.post(search_url, headers=headers, json=search_data)
            result = response.json()
            yield "company", result.get("results", [{}])[0]
            search_response = await Requests().post(
                search_url, headers=headers, json=search_data
            )
            search_result = search_response.json()
            yield "search_company", search_result.get("results", [{}])[0]
            yield "status", "retrieved"

        elif input_data.operation == "update":
            # First get company ID by domain
            search_response = requests.post(
            search_response = await Requests().post(
                f"{base_url}/search",
                headers=headers,
                json={
@@ -91,10 +93,11 @@ class HubSpotCompanyBlock(Block):
                    ]
                },
            )
            company_id = search_response.json().get("results", [{}])[0].get("id")
            search_result = search_response.json()
            company_id = search_result.get("results", [{}])[0].get("id")

            if company_id:
                response = requests.patch(
                response = await Requests().patch(
                    f"{base_url}/{company_id}",
                    headers=headers,
                    json={"properties": input_data.company_data},
@@ -5,7 +5,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.request import Requests


class HubSpotContactBlock(Block):
@@ -35,7 +35,7 @@ class HubSpotContactBlock(Block):
            output_schema=HubSpotContactBlock.Output,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
@@ -45,7 +45,7 @@ class HubSpotContactBlock(Block):
        }

        if input_data.operation == "create":
            response = requests.post(
            response = await Requests().post(
                base_url, headers=headers, json={"properties": input_data.contact_data}
            )
            result = response.json()
@@ -53,7 +53,6 @@ class HubSpotContactBlock(Block):
            yield "status", "created"

        elif input_data.operation == "get":
            # Search for contact by email
            search_url = f"{base_url}/search"
            search_data = {
                "filterGroups": [
@@ -68,13 +67,15 @@ class HubSpotContactBlock(Block):
                    }
                ]
            }
            response = requests.post(search_url, headers=headers, json=search_data)
            response = await Requests().post(
                search_url, headers=headers, json=search_data
            )
            result = response.json()
            yield "contact", result.get("results", [{}])[0]
            yield "status", "retrieved"

        elif input_data.operation == "update":
            search_response = requests.post(
            search_response = await Requests().post(
                f"{base_url}/search",
                headers=headers,
                json={
@@ -91,10 +92,11 @@ class HubSpotContactBlock(Block):
                    ]
                },
            )
            contact_id = search_response.json().get("results", [{}])[0].get("id")
            search_result = search_response.json()
            contact_id = search_result.get("results", [{}])[0].get("id")

            if contact_id:
                response = requests.patch(
                response = await Requests().patch(
                    f"{base_url}/{contact_id}",
                    headers=headers,
                    json={"properties": input_data.contact_data},
@@ -7,7 +7,7 @@ from backend.blocks.hubspot._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.request import Requests


class HubSpotEngagementBlock(Block):
@@ -42,7 +42,7 @@ class HubSpotEngagementBlock(Block):
            output_schema=HubSpotEngagementBlock.Output,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com"
@@ -66,7 +66,9 @@ class HubSpotEngagementBlock(Block):
                }
            }

            response = requests.post(email_url, headers=headers, json=email_data)
            response = await Requests().post(
                email_url, headers=headers, json=email_data
            )
            result = response.json()
            yield "result", result
            yield "status", "email_sent"
@@ -80,7 +82,9 @@ class HubSpotEngagementBlock(Block):

            params = {"limit": 100, "after": from_date.isoformat()}

            response = requests.get(engagement_url, headers=headers, params=params)
            response = await Requests().get(
                engagement_url, headers=headers, params=params
            )
            engagements = response.json()

            # Process engagement metrics
@@ -12,7 +12,7 @@ from backend.data.model import (
    SchemaField,
)
from backend.integrations.providers import ProviderName
from backend.util.request import requests
from backend.util.request import Requests

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
@@ -196,13 +196,13 @@ class IdeogramModelBlock(Block):
            test_credentials=TEST_CREDENTIALS,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        seed = input_data.seed

        # Step 1: Generate the image
        result = self.run_model(
        result = await self.run_model(
            api_key=credentials.api_key,
            model_name=input_data.ideogram_model_name.value,
            prompt=input_data.prompt,
@@ -217,14 +217,14 @@ class IdeogramModelBlock(Block):

        # Step 2: Upscale the image if requested
        if input_data.upscale == UpscaleOption.AI_UPSCALE:
            result = self.upscale_image(
            result = await self.upscale_image(
                api_key=credentials.api_key,
                image_url=result,
            )

        yield "result", result

    def run_model(
    async def run_model(
        self,
        api_key: SecretStr,
        model_name: str,
@@ -267,12 +267,12 @@ class IdeogramModelBlock(Block):
        }

        try:
            response = requests.post(url, json=data, headers=headers)
            response = await Requests().post(url, headers=headers, json=data)
            return response.json()["data"][0]["url"]
        except RequestException as e:
            raise Exception(f"Failed to fetch image: {str(e)}")

    def upscale_image(self, api_key: SecretStr, image_url: str):
    async def upscale_image(self, api_key: SecretStr, image_url: str):
        url = "https://api.ideogram.ai/upscale"
        headers = {
            "Api-Key": api_key.get_secret_value(),
@@ -280,21 +280,22 @@ class IdeogramModelBlock(Block):

        try:
            # Step 1: Download the image from the provided URL
            image_response = requests.get(image_url)
            response = await Requests().get(image_url)
            image_content = response.content

            # Step 2: Send the downloaded image to the upscale API
            files = {
                "image_file": ("image.png", image_response.content, "image/png"),
                "image_file": ("image.png", image_content, "image/png"),
            }

            response = requests.post(
            response = await Requests().post(
                url,
                headers=headers,
                data={"image_request": "{}"},
                files=files,
            )

            return response.json()["data"][0]["url"]
            return (response.json())["data"][0]["url"]

        except RequestException as e:
            raise Exception(f"Failed to upscale image: {str(e)}")
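
Note: `IdeogramModelBlock.run` is now a two-step async pipeline: generate, then optionally upscale. A compressed, runnable sketch of that control flow with stand-in coroutines (the URLs are fabricated; the real calls hit the Ideogram API):

import asyncio


async def run_model(prompt: str) -> str:
    # Stand-in for the generate call; returns an image URL.
    return f"https://example.invalid/{prompt}.png"


async def upscale_image(image_url: str) -> str:
    # Stand-in for the upscale call.
    return image_url.replace(".png", "_2x.png")


async def run(prompt: str, upscale: bool) -> str:
    result = await run_model(prompt)
    if upscale:
        result = await upscale_image(result)
    return result


print(asyncio.run(run("sunset", upscale=True)))
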
@@ -95,7 +95,7 @@ class AgentInputBlock(Block):
            }
        )

    def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
    async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
        if input_data.value is not None:
            yield "result", input_data.value

@@ -186,7 +186,7 @@ class AgentOutputBlock(Block):
            static_output=True,
        )

    def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
    async def run(self, input_data: Input, *args, **kwargs) -> BlockOutput:
        """
        Attempts to format the recorded_value using the fmt_string if provided.
        If formatting fails or no fmt_string is given, returns the original recorded_value.
@@ -436,7 +436,7 @@ class AgentFileInputBlock(AgentInputBlock):
        ],
    )

    def run(
    async def run(
        self,
        input_data: Input,
        *,
@@ -446,7 +446,7 @@ class AgentFileInputBlock(AgentInputBlock):
        if not input_data.value:
            return

        file_path = store_media_file(
        file_path = await store_media_file(
            graph_exec_id=graph_exec_id,
            file=input_data.value,
            return_content=False,

@@ -53,7 +53,7 @@ class StepThroughItemsBlock(Block):
            test_mock={},
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
        for data in [input_data.items, input_data.items_object, input_data.items_str]:
            if not data:
                continue
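
Note: blocks like `StepThroughItemsBlock` become async generators purely through the `async def` + `yield` combination; no `await` is required in the body for the change to be valid. A minimal illustration of producing and consuming one (values are made up):

import asyncio


async def run(items: list[int]):
    # A valid async generator even though the body never awaits.
    for item in items:
        yield "item", item


async def main():
    outputs = [pair async for pair in run([1, 2, 3])]
    print(outputs)  # [('item', 1), ('item', 2), ('item', 3)]


asyncio.run(main())
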
@@ -5,7 +5,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.request import Requests


class JinaChunkingBlock(Block):
@@ -35,7 +35,7 @@ class JinaChunkingBlock(Block):
            output_schema=JinaChunkingBlock.Output,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        url = "https://segment.jina.ai/"
@@ -55,7 +55,7 @@ class JinaChunkingBlock(Block):
                "max_chunk_length": str(input_data.max_chunk_length),
            }

            response = requests.post(url, headers=headers, json=data)
            response = await Requests().post(url, headers=headers, json=data)
            result = response.json()

            all_chunks.extend(result.get("chunks", []))

@@ -5,7 +5,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.request import Requests


class JinaEmbeddingBlock(Block):
@@ -29,7 +29,7 @@ class JinaEmbeddingBlock(Block):
            output_schema=JinaEmbeddingBlock.Output,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        url = "https://api.jina.ai/v1/embeddings"
@@ -38,6 +38,6 @@ class JinaEmbeddingBlock(Block):
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }
        data = {"input": input_data.texts, "model": input_data.model}
        response = requests.post(url, headers=headers, json=data)
        response = await Requests().post(url, headers=headers, json=data)
        embeddings = [e["embedding"] for e in response.json()["data"]]
        yield "embeddings", embeddings

@@ -1,7 +1,5 @@
from urllib.parse import quote

import requests

from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
@@ -9,6 +7,7 @@ from backend.blocks.jina._auth import (
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests


class FactCheckerBlock(Block):
@@ -35,7 +34,7 @@ class FactCheckerBlock(Block):
            output_schema=FactCheckerBlock.Output,
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        encoded_statement = quote(input_data.statement)
@@ -46,8 +45,7 @@ class FactCheckerBlock(Block):
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }

        response = requests.get(url, headers=headers)
        response.raise_for_status()
        response = await Requests().get(url, headers=headers)
        data = response.json()

        if "data" in data:

@@ -39,7 +39,7 @@ class SearchTheWebBlock(Block, GetRequest):
            test_mock={"get_request": lambda *args, **kwargs: "search content"},
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        # Encode the search query
@@ -51,7 +51,7 @@ class SearchTheWebBlock(Block, GetRequest):

        # Prepend the Jina Search URL to the encoded query
        jina_search_url = f"https://s.jina.ai/{encoded_query}"
        results = self.get_request(jina_search_url, headers=headers, json=False)
        results = await self.get_request(jina_search_url, headers=headers, json=False)

        # Output the search results
        yield "results", results
@@ -90,7 +90,7 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
            test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
        )

    def run(
    async def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        if input_data.raw_content:
@@ -103,5 +103,5 @@ class ExtractWebsiteContentBlock(Block, GetRequest):
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }

        content = self.get_request(url, json=False, headers=headers)
        content = await self.get_request(url, json=False, headers=headers)
        yield "content", content
@@ -48,7 +48,7 @@ class LinearClient:
            raise_for_status=False,
        )

    def _execute_graphql_request(
    async def _execute_graphql_request(
        self, query: str, variables: dict | None = None
    ) -> Any:
        """
@@ -65,19 +65,18 @@ class LinearClient:
        if variables:
            payload["variables"] = variables

        response = self._requests.post(self.API_URL, json=payload)
        response = await self._requests.post(self.API_URL, json=payload)

        if not response.ok:

            try:
                error_data = response.json()
                error_message = error_data.get("errors", [{}])[0].get("message", "")
            except json.JSONDecodeError:
                error_message = response.text
                error_message = response.text()

            raise LinearAPIException(
                f"Linear API request failed ({response.status_code}): {error_message}",
                response.status_code,
                f"Linear API request failed ({response.status}): {error_message}",
                response.status,
            )

        response_data = response.json()
@@ -88,12 +87,12 @@ class LinearClient:
            ]
            raise LinearAPIException(
                f"Linear API returned errors: {', '.join(error_messages)}",
                response.status_code,
                response.status,
            )

        return response_data["data"]

    def query(self, query: str, variables: Optional[dict] = None) -> dict:
    async def query(self, query: str, variables: Optional[dict] = None) -> dict:
        """Executes a GraphQL query.

        Args:
@@ -103,9 +102,9 @@ class LinearClient:
        Returns:
            The response data.
        """
        return self._execute_graphql_request(query, variables)
        return await self._execute_graphql_request(query, variables)

    def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict:
    async def mutate(self, mutation: str, variables: Optional[dict] = None) -> dict:
        """Executes a GraphQL mutation.

        Args:
@@ -115,9 +114,11 @@ class LinearClient:
        Returns:
            The response data.
        """
        return self._execute_graphql_request(mutation, variables)
        return await self._execute_graphql_request(mutation, variables)

    def try_create_comment(self, issue_id: str, comment: str) -> CreateCommentResponse:
    async def try_create_comment(
        self, issue_id: str, comment: str
    ) -> CreateCommentResponse:
        try:
            mutation = """
                mutation CommentCreate($input: CommentCreateInput!) {
@@ -138,13 +139,13 @@ class LinearClient:
                }
            }

            added_comment = self.mutate(mutation, variables)
            added_comment = await self.mutate(mutation, variables)
            # Select the commentCreate field from the mutation response
            return CreateCommentResponse(**added_comment["commentCreate"])
        except LinearAPIException as e:
            raise e

    def try_get_team_by_name(self, team_name: str) -> str:
    async def try_get_team_by_name(self, team_name: str) -> str:
        try:
            query = """
                query GetTeamId($searchTerm: String!) {
@@ -167,12 +168,12 @@ class LinearClient:
                "searchTerm": team_name,
            }

            team_id = self.query(query, variables)
            team_id = await self.query(query, variables)
            return team_id["teams"]["nodes"][0]["id"]
        except LinearAPIException as e:
            raise e

    def try_create_issue(
    async def try_create_issue(
        self,
        team_id: str,
        title: str,
@@ -211,12 +212,12 @@ class LinearClient:
            if priority:
                variables["input"]["priority"] = priority

            added_issue = self.mutate(mutation, variables)
            added_issue = await self.mutate(mutation, variables)
            return CreateIssueResponse(**added_issue["issueCreate"])
        except LinearAPIException as e:
            raise e

    def try_search_projects(self, term: str) -> list[Project]:
    async def try_search_projects(self, term: str) -> list[Project]:
        try:
            query = """
                query SearchProjects($term: String!, $includeComments: Boolean!) {
@@ -238,14 +239,14 @@ class LinearClient:
                "includeComments": True,
            }

            projects = self.query(query, variables)
            projects = await self.query(query, variables)
            return [
                Project(**project) for project in projects["searchProjects"]["nodes"]
            ]
        except LinearAPIException as e:
            raise e

    def try_search_issues(self, term: str) -> list[Issue]:
    async def try_search_issues(self, term: str) -> list[Issue]:
        try:
            query = """
                query SearchIssues($term: String!, $includeComments: Boolean!) {
@@ -266,7 +267,7 @@ class LinearClient:
                "includeComments": True,
            }

            issues = self.query(query, variables)
            issues = await self.query(query, variables)
            return [Issue(**issue) for issue in issues["searchIssues"]["nodes"]]
        except LinearAPIException as e:
            raise e
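
Note: once `_execute_graphql_request` is async, every layer above it needs an `await`, which is exactly what the rest of this diff threads through. A hedged end-to-end sketch (team name and issue title are placeholders; `credentials` is a LinearCredentials instance):

import asyncio


async def demo(credentials):
    client = LinearClient(credentials=credentials)
    # query()/mutate() are thin wrappers over _execute_graphql_request(),
    # so each call in the chain is awaited:
    team_id = await client.try_get_team_by_name("Platform")
    issue = await client.try_create_issue(team_id=team_id, title="Flaky test")
    print(issue.issue.identifier)

# asyncio.run(demo(credentials))
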
@@ -54,21 +54,21 @@ class LinearCreateCommentBlock(Block):
        )

    @staticmethod
    def create_comment(
    async def create_comment(
        credentials: LinearCredentials, issue_id: str, comment: str
    ) -> tuple[str, str]:
        client = LinearClient(credentials=credentials)
        response: CreateCommentResponse = client.try_create_comment(
        response: CreateCommentResponse = await client.try_create_comment(
            issue_id=issue_id, comment=comment
        )
        return response.comment.id, response.comment.body

    def run(
    async def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the comment creation"""
        try:
            comment_id, comment_body = self.create_comment(
            comment_id, comment_body = await self.create_comment(
                credentials=credentials,
                issue_id=input_data.issue_id,
                comment=input_data.comment,

@@ -67,7 +67,7 @@ class LinearCreateIssueBlock(Block):
        )

    @staticmethod
    def create_issue(
    async def create_issue(
        credentials: LinearCredentials,
        team_name: str,
        title: str,
@@ -76,15 +76,15 @@ class LinearCreateIssueBlock(Block):
        project_name: str | None = None,
    ) -> tuple[str, str]:
        client = LinearClient(credentials=credentials)
        team_id = client.try_get_team_by_name(team_name=team_name)
        team_id = await client.try_get_team_by_name(team_name=team_name)
        project_id: str | None = None
        if project_name:
            projects = client.try_search_projects(term=project_name)
            projects = await client.try_search_projects(term=project_name)
            if projects:
                project_id = projects[0].id
            else:
                raise LinearAPIException("Project not found", status_code=404)
        response: CreateIssueResponse = client.try_create_issue(
        response: CreateIssueResponse = await client.try_create_issue(
            team_id=team_id,
            title=title,
            description=description,
@@ -93,12 +93,12 @@ class LinearCreateIssueBlock(Block):
        )
        return response.issue.identifier, response.issue.title

    def run(
    async def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the issue creation"""
        try:
            issue_id, issue_title = self.create_issue(
            issue_id, issue_title = await self.create_issue(
                credentials=credentials,
                team_name=input_data.team_name,
                title=input_data.title,
@@ -168,20 +168,22 @@ class LinearSearchIssuesBlock(Block):
        )

    @staticmethod
    def search_issues(
    async def search_issues(
        credentials: LinearCredentials,
        term: str,
    ) -> list[Issue]:
        client = LinearClient(credentials=credentials)
        response: list[Issue] = client.try_search_issues(term=term)
        response: list[Issue] = await client.try_search_issues(term=term)
        return response

    def run(
    async def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the issue search"""
        try:
            issues = self.search_issues(credentials=credentials, term=input_data.term)
            issues = await self.search_issues(
                credentials=credentials, term=input_data.term
            )
            yield "issues", issues
        except LinearAPIException as e:
            yield "error", str(e)

@@ -69,20 +69,20 @@ class LinearSearchProjectsBlock(Block):
        )

    @staticmethod
    def search_projects(
    async def search_projects(
        credentials: LinearCredentials,
        term: str,
    ) -> list[Project]:
        client = LinearClient(credentials=credentials)
        response: list[Project] = client.try_search_projects(term=term)
        response: list[Project] = await client.try_search_projects(term=term)
        return response

    def run(
    async def run(
        self, input_data: Input, *, credentials: LinearCredentials, **kwargs
    ) -> BlockOutput:
        """Execute the project search"""
        try:
            projects = self.search_projects(
            projects = await self.search_projects(
                credentials=credentials,
                term=input_data.term,
            )
@@ -3,15 +3,13 @@ import logging
|
||||
from abc import ABC
|
||||
from enum import Enum, EnumMeta
|
||||
from json import JSONDecodeError
|
||||
from types import MappingProxyType
|
||||
from typing import Any, Iterable, List, Literal, NamedTuple, Optional
|
||||
|
||||
import anthropic
|
||||
import ollama
|
||||
import openai
|
||||
from anthropic import NotGiven
|
||||
from anthropic.types import ToolParam
|
||||
from groq import Groq
|
||||
from groq import AsyncGroq
|
||||
from pydantic import BaseModel, SecretStr
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
@@ -24,18 +22,20 @@ from backend.data.model import (
|
||||
)
|
||||
from backend.integrations.providers import ProviderName
|
||||
from backend.util import json
|
||||
from backend.util.settings import BehaveAs, Settings
|
||||
from backend.util.logging import TruncatedLogger
|
||||
from backend.util.text import TextFormatter
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger = TruncatedLogger(logging.getLogger(__name__), "[LLM-Block]")
|
||||
fmt = TextFormatter()
|
||||
|
||||
LLMProviderName = Literal[
|
||||
ProviderName.AIML_API,
|
||||
ProviderName.ANTHROPIC,
|
||||
ProviderName.GROQ,
|
||||
ProviderName.OLLAMA,
|
||||
ProviderName.OPENAI,
|
||||
ProviderName.OPEN_ROUTER,
|
||||
ProviderName.LLAMA_API,
|
||||
]
|
||||
AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]]
|
||||
|
||||
@@ -71,36 +71,34 @@ class ModelMetadata(NamedTuple):
|
||||
|
||||
|
||||
class LlmModelMeta(EnumMeta):
|
||||
@property
|
||||
def __members__(self) -> MappingProxyType:
|
||||
if Settings().config.behave_as == BehaveAs.LOCAL:
|
||||
members = super().__members__
|
||||
return MappingProxyType(members)
|
||||
else:
|
||||
removed_providers = ["ollama"]
|
||||
existing_members = super().__members__
|
||||
members = {
|
||||
name: member
|
||||
for name, member in existing_members.items()
|
||||
if LlmModel[name].provider not in removed_providers
|
||||
}
|
||||
return MappingProxyType(members)
|
||||
pass
|
||||
|
||||
|
||||
class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
# OpenAI models
|
||||
O3_MINI = "o3-mini"
|
||||
O3 = "o3-2025-04-16"
|
||||
O1 = "o1"
|
||||
O1_PREVIEW = "o1-preview"
|
||||
O1_MINI = "o1-mini"
|
||||
GPT41 = "gpt-4.1-2025-04-14"
|
||||
GPT4O_MINI = "gpt-4o-mini"
|
||||
GPT4O = "gpt-4o"
|
||||
GPT4_TURBO = "gpt-4-turbo"
|
||||
GPT3_5_TURBO = "gpt-3.5-turbo"
|
||||
# Anthropic models
|
||||
CLAUDE_4_OPUS = "claude-opus-4-20250514"
|
||||
CLAUDE_4_SONNET = "claude-sonnet-4-20250514"
|
||||
CLAUDE_3_7_SONNET = "claude-3-7-sonnet-20250219"
|
||||
CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
|
||||
CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest"
|
||||
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
||||
# AI/ML API models
|
||||
AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo"
|
||||
AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct"
|
||||
AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo"
|
||||
AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo"
|
||||
AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo"
|
||||
# Groq models
|
||||
GEMMA2_9B = "gemma2-9b-it"
|
||||
LLAMA3_3_70B = "llama-3.3-70b-versatile"
|
||||
@@ -118,6 +116,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
OLLAMA_DOLPHIN = "dolphin-mistral:latest"
|
||||
# OpenRouter models
|
||||
GEMINI_FLASH_1_5 = "google/gemini-flash-1.5"
|
||||
GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
|
||||
GROK_BETA = "x-ai/grok-beta"
|
||||
MISTRAL_NEMO = "mistralai/mistral-nemo"
|
||||
COHERE_COMMAND_R_08_2024 = "cohere/command-r-08-2024"
|
||||
@@ -137,6 +136,11 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
GRYPHE_MYTHOMAX_L2_13B = "gryphe/mythomax-l2-13b"
|
||||
META_LLAMA_4_SCOUT = "meta-llama/llama-4-scout"
|
||||
META_LLAMA_4_MAVERICK = "meta-llama/llama-4-maverick"
|
||||
# Llama API models
|
||||
LLAMA_API_LLAMA_4_SCOUT = "Llama-4-Scout-17B-16E-Instruct-FP8"
|
||||
LLAMA_API_LLAMA4_MAVERICK = "Llama-4-Maverick-17B-128E-Instruct-FP8"
|
||||
LLAMA_API_LLAMA3_3_8B = "Llama-3.3-8B-Instruct"
|
||||
LLAMA_API_LLAMA3_3_70B = "Llama-3.3-70B-Instruct"
|
||||
|
||||
@property
|
||||
def metadata(self) -> ModelMetadata:
|
||||
@@ -157,12 +161,14 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
|
||||
|
||||
MODEL_METADATA = {
|
||||
# https://platform.openai.com/docs/models
|
||||
LlmModel.O3: ModelMetadata("openai", 200000, 100000),
|
||||
LlmModel.O3_MINI: ModelMetadata("openai", 200000, 100000), # o3-mini-2025-01-31
|
||||
LlmModel.O1: ModelMetadata("openai", 200000, 100000), # o1-2024-12-17
|
||||
LlmModel.O1_PREVIEW: ModelMetadata(
|
||||
"openai", 128000, 32768
|
||||
), # o1-preview-2024-09-12
|
||||
LlmModel.O1_MINI: ModelMetadata("openai", 128000, 65536), # o1-mini-2024-09-12
|
||||
LlmModel.GPT41: ModelMetadata("openai", 1047576, 32768),
|
||||
LlmModel.GPT4O_MINI: ModelMetadata(
|
||||
"openai", 128000, 16384
|
||||
), # gpt-4o-mini-2024-07-18
|
||||
@@ -172,6 +178,15 @@ MODEL_METADATA = {
|
||||
), # gpt-4-turbo-2024-04-09
|
||||
LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, 4096), # gpt-3.5-turbo-0125
|
||||
# https://docs.anthropic.com/en/docs/about-claude/models
|
||||
LlmModel.CLAUDE_4_OPUS: ModelMetadata(
|
||||
"anthropic", 200000, 8192
|
||||
), # claude-4-opus-20250514
|
||||
LlmModel.CLAUDE_4_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 8192
|
||||
), # claude-4-sonnet-20250514
|
||||
LlmModel.CLAUDE_3_7_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 8192
|
||||
), # claude-3-7-sonnet-20250219
|
||||
LlmModel.CLAUDE_3_5_SONNET: ModelMetadata(
|
||||
"anthropic", 200000, 8192
|
||||
), # claude-3-5-sonnet-20241022
|
||||
@@ -181,6 +196,12 @@ MODEL_METADATA = {
|
||||
LlmModel.CLAUDE_3_HAIKU: ModelMetadata(
|
||||
"anthropic", 200000, 4096
|
||||
), # claude-3-haiku-20240307
|
||||
# https://docs.aimlapi.com/api-overview/model-database/text-models
|
||||
LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000, 8000),
|
||||
LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000, 40000),
|
||||
LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000, None),
|
||||
LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000, 2000),
|
||||
LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000, None),
|
||||
# https://console.groq.com/docs/models
|
||||
LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, None),
|
||||
LlmModel.LLAMA3_3_70B: ModelMetadata("groq", 128000, 32768),
|
||||
@@ -197,6 +218,7 @@ MODEL_METADATA = {
|
||||
LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
|
||||
# https://openrouter.ai/models
|
||||
LlmModel.GEMINI_FLASH_1_5: ModelMetadata("open_router", 1000000, 8192),
|
||||
LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
|
||||
LlmModel.GROK_BETA: ModelMetadata("open_router", 131072, 131072),
|
||||
LlmModel.MISTRAL_NEMO: ModelMetadata("open_router", 128000, 4096),
|
||||
LlmModel.COHERE_COMMAND_R_08_2024: ModelMetadata("open_router", 128000, 4096),
|
||||
@@ -220,6 +242,11 @@ MODEL_METADATA = {
|
||||
LlmModel.GRYPHE_MYTHOMAX_L2_13B: ModelMetadata("open_router", 4096, 4096),
|
||||
LlmModel.META_LLAMA_4_SCOUT: ModelMetadata("open_router", 131072, 131072),
|
||||
LlmModel.META_LLAMA_4_MAVERICK: ModelMetadata("open_router", 1048576, 1000000),
|
||||
# Llama API models
|
||||
LlmModel.LLAMA_API_LLAMA_4_SCOUT: ModelMetadata("llama_api", 128000, 4028),
|
||||
LlmModel.LLAMA_API_LLAMA4_MAVERICK: ModelMetadata("llama_api", 128000, 4028),
|
||||
LlmModel.LLAMA_API_LLAMA3_3_8B: ModelMetadata("llama_api", 128000, 4028),
|
||||
LlmModel.LLAMA_API_LLAMA3_3_70B: ModelMetadata("llama_api", 128000, 4028),
|
||||
}
|
||||
|
||||
for model in LlmModel:
|
||||
@@ -249,7 +276,7 @@ class LLMResponse(BaseModel):
|
||||
|
||||
def convert_openai_tool_fmt_to_anthropic(
|
||||
openai_tools: list[dict] | None = None,
|
||||
) -> Iterable[ToolParam] | NotGiven:
|
||||
) -> Iterable[ToolParam] | anthropic.NotGiven:
|
||||
"""
|
||||
Convert OpenAI tool format to Anthropic tool format.
|
||||
"""
|
||||
@@ -279,7 +306,14 @@ def convert_openai_tool_fmt_to_anthropic(
|
||||
return anthropic_tools
|
||||
|
||||
|
||||
def llm_call(
|
||||
def estimate_token_count(prompt_messages: list[dict]) -> int:
|
||||
char_count = sum(len(str(msg.get("content", ""))) for msg in prompt_messages)
|
||||
message_overhead = len(prompt_messages) * 4
|
||||
estimated_tokens = (char_count // 4) + message_overhead
|
||||
return int(estimated_tokens * 1.2)
|
||||
|
||||
|
||||
async def llm_call(
|
||||
credentials: APIKeyCredentials,
|
||||
llm_model: LlmModel,
|
||||
prompt: list[dict],
|
||||
@@ -287,6 +321,7 @@ def llm_call(
|
||||
max_tokens: int | None,
|
||||
tools: list[dict] | None = None,
|
||||
ollama_host: str = "localhost:11434",
|
||||
parallel_tool_calls: bool | None = None,
|
||||
) -> LLMResponse:
|
||||
"""
|
||||
Make a call to a language model.
|
||||
@@ -309,11 +344,18 @@ def llm_call(
|
||||
- completion_tokens: The number of tokens used in the completion.
|
||||
"""
|
||||
provider = llm_model.metadata.provider
|
||||
max_tokens = max_tokens or llm_model.max_output_tokens or 4096
|
||||
|
||||
# Calculate available tokens based on context window and input length
|
||||
estimated_input_tokens = estimate_token_count(prompt)
|
||||
context_window = llm_model.context_window
|
||||
model_max_output = llm_model.max_output_tokens or 4096
|
||||
user_max = max_tokens or model_max_output
|
||||
available_tokens = max(context_window - estimated_input_tokens, 0)
|
||||
max_tokens = max(min(available_tokens, model_max_output, user_max), 0)
|
||||
|
||||
if provider == "openai":
|
||||
tools_param = tools if tools else openai.NOT_GIVEN
|
||||
oai_client = openai.OpenAI(api_key=credentials.api_key.get_secret_value())
|
||||
oai_client = openai.AsyncOpenAI(api_key=credentials.api_key.get_secret_value())
|
||||
response_format = None
|
||||
|
||||
if llm_model in [LlmModel.O1_MINI, LlmModel.O1_PREVIEW]:
|
||||
@@ -326,12 +368,15 @@ def llm_call(
|
||||
elif json_format:
|
||||
response_format = {"type": "json_object"}
|
||||
|
||||
response = oai_client.chat.completions.create(
|
||||
response = await oai_client.chat.completions.create(
|
||||
model=llm_model.value,
|
||||
messages=prompt, # type: ignore
|
||||
response_format=response_format, # type: ignore
|
||||
max_completion_tokens=max_tokens,
|
||||
tools=tools_param, # type: ignore
|
||||
parallel_tool_calls=(
|
||||
openai.NOT_GIVEN if parallel_tool_calls is None else parallel_tool_calls
|
||||
),
|
||||
)
|
||||
|
||||
if response.choices[0].message.tool_calls:
|
||||
@@ -379,9 +424,11 @@ def llm_call(
                 messages.append({"role": p["role"], "content": p["content"]})
             last_role = p["role"]

-        client = anthropic.Anthropic(api_key=credentials.api_key.get_secret_value())
+        client = anthropic.AsyncAnthropic(
+            api_key=credentials.api_key.get_secret_value()
+        )
         try:
-            resp = client.messages.create(
+            resp = await client.messages.create(
                 model=llm_model.value,
                 system=sysprompt,
                 messages=messages,
@@ -412,7 +459,7 @@ def llm_call(

         if not tool_calls and resp.stop_reason == "tool_use":
             logger.warning(
-                "Tool use stop reason but no tool calls found in content. %s", resp
+                f"Tool use stop reason but no tool calls found in content. {resp}"
             )

         return LLMResponse(
@@ -435,9 +482,9 @@ def llm_call(
         if tools:
             raise ValueError("Groq does not support tools.")

-        client = Groq(api_key=credentials.api_key.get_secret_value())
+        client = AsyncGroq(api_key=credentials.api_key.get_secret_value())
         response_format = {"type": "json_object"} if json_format else None
-        response = client.chat.completions.create(
+        response = await client.chat.completions.create(
            model=llm_model.value,
            messages=prompt,  # type: ignore
            response_format=response_format,  # type: ignore
@@ -455,13 +502,14 @@ def llm_call(
         if tools:
             raise ValueError("Ollama does not support tools.")

-        client = ollama.Client(host=ollama_host)
+        client = ollama.AsyncClient(host=ollama_host)
         sys_messages = [p["content"] for p in prompt if p["role"] == "system"]
         usr_messages = [p["content"] for p in prompt if p["role"] != "system"]
-        response = client.generate(
+        response = await client.generate(
             model=llm_model.value,
             prompt=f"{sys_messages}\n\n{usr_messages}",
             stream=False,
             options={"num_ctx": max_tokens},
         )
         return LLMResponse(
             raw_response=response.get("response") or "",
@@ -473,12 +521,12 @@ def llm_call(
         )
     elif provider == "open_router":
         tools_param = tools if tools else openai.NOT_GIVEN
-        client = openai.OpenAI(
+        client = openai.AsyncOpenAI(
             base_url="https://openrouter.ai/api/v1",
             api_key=credentials.api_key.get_secret_value(),
         )

-        response = client.chat.completions.create(
+        response = await client.chat.completions.create(
             extra_headers={
                 "HTTP-Referer": "https://agpt.co",
                 "X-Title": "AutoGPT",
@@ -518,6 +566,79 @@ def llm_call(
             prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
             completion_tokens=response.usage.completion_tokens if response.usage else 0,
         )
+    elif provider == "llama_api":
+        tools_param = tools if tools else openai.NOT_GIVEN
+        client = openai.AsyncOpenAI(
+            base_url="https://api.llama.com/compat/v1/",
+            api_key=credentials.api_key.get_secret_value(),
+        )
+
+        response = await client.chat.completions.create(
+            extra_headers={
+                "HTTP-Referer": "https://agpt.co",
+                "X-Title": "AutoGPT",
+            },
+            model=llm_model.value,
+            messages=prompt,  # type: ignore
+            max_tokens=max_tokens,
+            tools=tools_param,  # type: ignore
+            parallel_tool_calls=(
+                openai.NOT_GIVEN if parallel_tool_calls is None else parallel_tool_calls
+            ),
+        )
+
+        # If there's no response, raise an error
+        if not response.choices:
+            if response:
+                raise ValueError(f"Llama API error: {response}")
+            else:
+                raise ValueError("No response from Llama API.")
+
+        if response.choices[0].message.tool_calls:
+            tool_calls = [
+                ToolContentBlock(
+                    id=tool.id,
+                    type=tool.type,
+                    function=ToolCall(
+                        name=tool.function.name, arguments=tool.function.arguments
+                    ),
+                )
+                for tool in response.choices[0].message.tool_calls
+            ]
+        else:
+            tool_calls = None
+
+        return LLMResponse(
+            raw_response=response.choices[0].message,
+            prompt=prompt,
+            response=response.choices[0].message.content or "",
+            tool_calls=tool_calls,
+            prompt_tokens=response.usage.prompt_tokens if response.usage else 0,
+            completion_tokens=response.usage.completion_tokens if response.usage else 0,
+        )
+    elif provider == "aiml_api":
+        client = openai.OpenAI(
+            base_url="https://api.aimlapi.com/v2",
+            api_key=credentials.api_key.get_secret_value(),
+            default_headers={"X-Project": "AutoGPT"},
+        )
+
+        completion = client.chat.completions.create(
+            model=llm_model.value,
+            messages=prompt,  # type: ignore
+            max_tokens=max_tokens,
+        )
+
+        return LLMResponse(
+            raw_response=completion.choices[0].message,
+            prompt=prompt,
+            response=completion.choices[0].message.content or "",
+            tool_calls=None,
+            prompt_tokens=completion.usage.prompt_tokens if completion.usage else 0,
+            completion_tokens=(
+                completion.usage.completion_tokens if completion.usage else 0
+            ),
+        )
     else:
         raise ValueError(f"Unsupported LLM provider: {provider}")
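The Anthropic branch above swaps the blocking client for its async counterpart and awaits the request, so the executor's event loop is free while the HTTP call is in flight. A minimal self-contained sketch of the same pattern, assuming the official `anthropic` package (the model name is illustrative, not taken from this diff):

    import asyncio
    import os

    import anthropic


    async def call_claude(user_message: str) -> str:
        client = anthropic.AsyncAnthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
        # `await` yields control to the event loop while the request is in flight,
        # so other block executions can run concurrently.
        resp = await client.messages.create(
            model="claude-3-5-sonnet-latest",  # illustrative model name
            max_tokens=1024,
            messages=[{"role": "user", "content": user_message}],
        )
        return resp.content[0].text


    if __name__ == "__main__":
        print(asyncio.run(call_claude("Hello!")))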
@@ -625,7 +746,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
             },
         )

-    def llm_call(
+    async def llm_call(
         self,
         credentials: APIKeyCredentials,
         llm_model: LlmModel,
@@ -640,7 +761,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
         so that it can be mocked withing the block testing framework.
         """
         self.prompt = prompt
-        return llm_call(
+        return await llm_call(
             credentials=credentials,
             llm_model=llm_model,
             prompt=prompt,
@@ -650,7 +771,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
             ollama_host=ollama_host,
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         logger.debug(f"Calling LLM with input data: {input_data}")
@@ -704,7 +825,7 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):

         for retry_count in range(input_data.retry):
             try:
-                llm_response = self.llm_call(
+                llm_response = await self.llm_call(
                     credentials=credentials,
                     llm_model=llm_model,
                     prompt=prompt,
@@ -757,6 +878,16 @@ class AIStructuredResponseGeneratorBlock(AIBlockBase):
                 prompt.append({"role": "user", "content": retry_prompt})
             except Exception as e:
                 logger.exception(f"Error calling LLM: {e}")
+                if (
+                    "maximum context length" in str(e).lower()
+                    or "token limit" in str(e).lower()
+                ):
+                    if input_data.max_tokens is None:
+                        input_data.max_tokens = llm_model.max_output_tokens or 4096
+                    input_data.max_tokens = int(input_data.max_tokens * 0.85)
+                    logger.debug(
+                        f"Reducing max_tokens to {input_data.max_tokens} for next attempt"
+                    )
                 retry_prompt = f"Error calling LLM: {e}"
             finally:
                 self.merge_stats(
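The new except-branch shrinks the output budget whenever the provider reports a context overflow, then retries. A small standalone sketch of that backoff arithmetic (names are illustrative):

    # On a context-length error, shrink max_tokens by 15% and try again.
    def next_max_tokens(current: int | None, model_limit: int = 4096) -> int:
        if current is None:
            current = model_limit
        return int(current * 0.85)


    # e.g. 4096 -> 3481 -> 2958 -> ... the budget decays geometrically,
    # so a handful of retries is enough to get under most context limits.
    assert next_max_tokens(None) == 3481
    assert next_max_tokens(4096) == 3481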
@@ -834,17 +965,17 @@ class AITextGeneratorBlock(AIBlockBase):
         test_mock={"llm_call": lambda *args, **kwargs: "Response text"},
     )

-    def llm_call(
+    async def llm_call(
         self,
         input_data: AIStructuredResponseGeneratorBlock.Input,
         credentials: APIKeyCredentials,
-    ) -> str:
+    ) -> dict:
         block = AIStructuredResponseGeneratorBlock()
-        response = block.run_once(input_data, "response", credentials=credentials)
+        response = await block.run_once(input_data, "response", credentials=credentials)
         self.merge_llm_stats(block)
         return response["response"]

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         object_input_data = AIStructuredResponseGeneratorBlock.Input(
@@ -854,7 +985,8 @@ class AITextGeneratorBlock(AIBlockBase):
             },
             expected_format={},
         )
-        yield "response", self.llm_call(object_input_data, credentials)
+        response = await self.llm_call(object_input_data, credentials)
+        yield "response", response
         yield "prompt", self.prompt
@@ -936,23 +1068,27 @@ class AITextSummarizerBlock(AIBlockBase):
             },
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-        for output in self._run(input_data, credentials):
-            yield output
+        async for output_name, output_data in self._run(input_data, credentials):
+            yield output_name, output_data

-    def _run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
+    async def _run(
+        self, input_data: Input, credentials: APIKeyCredentials
+    ) -> BlockOutput:
         chunks = self._split_text(
             input_data.text, input_data.max_tokens, input_data.chunk_overlap
         )
         summaries = []

         for chunk in chunks:
-            chunk_summary = self._summarize_chunk(chunk, input_data, credentials)
+            chunk_summary = await self._summarize_chunk(chunk, input_data, credentials)
             summaries.append(chunk_summary)

-        final_summary = self._combine_summaries(summaries, input_data, credentials)
+        final_summary = await self._combine_summaries(
+            summaries, input_data, credentials
+        )
         yield "summary", final_summary
         yield "prompt", self.prompt

@@ -968,22 +1104,22 @@ class AITextSummarizerBlock(AIBlockBase):

         return chunks

-    def llm_call(
+    async def llm_call(
         self,
         input_data: AIStructuredResponseGeneratorBlock.Input,
         credentials: APIKeyCredentials,
     ) -> dict:
         block = AIStructuredResponseGeneratorBlock()
-        response = block.run_once(input_data, "response", credentials=credentials)
+        response = await block.run_once(input_data, "response", credentials=credentials)
         self.merge_llm_stats(block)
         return response

-    def _summarize_chunk(
+    async def _summarize_chunk(
         self, chunk: str, input_data: Input, credentials: APIKeyCredentials
     ) -> str:
         prompt = f"Summarize the following text in a {input_data.style} form. Focus your summary on the topic of `{input_data.focus}` if present, otherwise just provide a general summary:\n\n```{chunk}```"

-        llm_response = self.llm_call(
+        llm_response = await self.llm_call(
             AIStructuredResponseGeneratorBlock.Input(
                 prompt=prompt,
                 credentials=input_data.credentials,
@@ -995,7 +1131,7 @@ class AITextSummarizerBlock(AIBlockBase):

         return llm_response["summary"]

-    def _combine_summaries(
+    async def _combine_summaries(
         self, summaries: list[str], input_data: Input, credentials: APIKeyCredentials
     ) -> str:
         combined_text = "\n\n".join(summaries)
@@ -1003,7 +1139,7 @@ class AITextSummarizerBlock(AIBlockBase):

         if len(combined_text.split()) <= input_data.max_tokens:
             prompt = f"Provide a final summary of the following section summaries in a {input_data.style} form, focus your summary on the topic of `{input_data.focus}` if present:\n\n ```{combined_text}```\n\n Just respond with the final_summary in the format specified."

-            llm_response = self.llm_call(
+            llm_response = await self.llm_call(
                 AIStructuredResponseGeneratorBlock.Input(
                     prompt=prompt,
                     credentials=input_data.credentials,
@@ -1018,7 +1154,8 @@ class AITextSummarizerBlock(AIBlockBase):

             return llm_response["final_summary"]
         else:
             # If combined summaries are still too long, recursively summarize
-            return self._run(
+            block = AITextSummarizerBlock()
+            return await block.run_once(
                 AITextSummarizerBlock.Input(
                     text=combined_text,
                     credentials=input_data.credentials,
@@ -1026,10 +1163,9 @@ class AITextSummarizerBlock(AIBlockBase):
                     max_tokens=input_data.max_tokens,
                     chunk_overlap=input_data.chunk_overlap,
                 ),
-            ).send(None)[
-                1
-            ]  # Get the first yielded value
+                "summary",
+                credentials=credentials,
+            )


 class AIConversationBlock(AIBlockBase):
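The recursive branch no longer drives the generator by hand with `.send(None)`. A hedged sketch of why: manual `.send()` only surfaces the first yielded pair and cannot drive an async generator at all, whereas a `run_once`-style helper (its exact semantics in this codebase are assumed here) iterates until the requested output name appears:

    from typing import Any, AsyncGenerator


    async def run_once(
        gen: AsyncGenerator[tuple[str, Any], None], wanted: str
    ) -> Any:
        # Iterate the async generator until the named output shows up,
        # instead of assuming it is the first thing yielded.
        async for name, value in gen:
            if name == wanted:
                return value
        raise RuntimeError(f"output {wanted!r} was never yielded")


    async def demo() -> AsyncGenerator[tuple[str, Any], None]:
        yield "prompt", "..."
        yield "summary", "the recursive summary"

    # asyncio.run(run_once(demo(), "summary")) -> "the recursive summary"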
@@ -1100,20 +1236,20 @@ class AIConversationBlock(AIBlockBase):
             },
         )

-    def llm_call(
+    async def llm_call(
         self,
         input_data: AIStructuredResponseGeneratorBlock.Input,
         credentials: APIKeyCredentials,
-    ) -> str:
+    ) -> dict:
         block = AIStructuredResponseGeneratorBlock()
-        response = block.run_once(input_data, "response", credentials=credentials)
+        response = await block.run_once(input_data, "response", credentials=credentials)
         self.merge_llm_stats(block)
-        return response["response"]
+        return response

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-        response = self.llm_call(
+        response = await self.llm_call(
             AIStructuredResponseGeneratorBlock.Input(
                 prompt=input_data.prompt,
                 credentials=input_data.credentials,
@@ -1125,7 +1261,6 @@ class AIConversationBlock(AIBlockBase):
             ),
             credentials=credentials,
         )

         yield "response", response
         yield "prompt", self.prompt
@@ -1219,13 +1354,15 @@ class AIListGeneratorBlock(AIBlockBase):
             },
         )

-    def llm_call(
+    async def llm_call(
         self,
         input_data: AIStructuredResponseGeneratorBlock.Input,
         credentials: APIKeyCredentials,
     ) -> dict[str, str]:
         llm_block = AIStructuredResponseGeneratorBlock()
-        response = llm_block.run_once(input_data, "response", credentials=credentials)
+        response = await llm_block.run_once(
+            input_data, "response", credentials=credentials
+        )
         self.merge_llm_stats(llm_block)
         return response

@@ -1248,7 +1385,7 @@ class AIListGeneratorBlock(AIBlockBase):
             logger.error(f"Failed to convert string to list: {e}")
             raise ValueError("Invalid list format. Could not convert to list.")

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         logger.debug(f"Starting AIListGeneratorBlock.run with input data: {input_data}")
@@ -1314,7 +1451,7 @@ class AIListGeneratorBlock(AIBlockBase):
         for attempt in range(input_data.max_retries):
             try:
                 logger.debug("Calling LLM")
-                llm_response = self.llm_call(
+                llm_response = await self.llm_call(
                     AIStructuredResponseGeneratorBlock.Input(
                         sys_prompt=sys_prompt,
                         prompt=prompt,
@@ -52,7 +52,7 @@ class CalculatorBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         operation = input_data.operation
         a = input_data.a
         b = input_data.b
@@ -107,7 +107,7 @@ class CountItemsBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         collection = input_data.collection

         try:
@@ -39,7 +39,7 @@ class MediaDurationBlock(Block):
             output_schema=MediaDurationBlock.Output,
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -47,7 +47,7 @@ class MediaDurationBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         # 1) Store the input media locally
-        local_media_path = store_media_file(
+        local_media_path = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=input_data.media_in,
             return_content=False,
@@ -105,7 +105,7 @@ class LoopVideoBlock(Block):
             output_schema=LoopVideoBlock.Output,
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -114,7 +114,7 @@ class LoopVideoBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         # 1) Store the input video locally
-        local_video_path = store_media_file(
+        local_video_path = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=input_data.video_in,
             return_content=False,
@@ -146,7 +146,7 @@ class LoopVideoBlock(Block):
         looped_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

         # Return as data URI
-        video_out = store_media_file(
+        video_out = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=output_filename,
             return_content=input_data.output_return_type == "data_uri",
@@ -194,7 +194,7 @@ class AddAudioToVideoBlock(Block):
             output_schema=AddAudioToVideoBlock.Output,
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -203,12 +203,12 @@ class AddAudioToVideoBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         # 1) Store the inputs locally
-        local_video_path = store_media_file(
+        local_video_path = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=input_data.video_in,
             return_content=False,
         )
-        local_audio_path = store_media_file(
+        local_audio_path = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=input_data.audio_in,
             return_content=False,
@@ -236,7 +236,7 @@ class AddAudioToVideoBlock(Block):
         final_clip.write_videofile(output_abspath, codec="libx264", audio_codec="aac")

         # 5) Return either path or data URI
-        video_out = store_media_file(
+        video_out = await store_media_file(
             graph_exec_id=graph_exec_id,
             file=output_filename,
             return_content=input_data.output_return_type == "data_uri",
@@ -13,7 +13,7 @@ from backend.data.model import (
     SecretField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.request import requests
+from backend.util.request import Requests

 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -130,7 +130,7 @@ class PublishToMediumBlock(Block):
         test_credentials=TEST_CREDENTIALS,
     )

-    def create_post(
+    async def create_post(
         self,
         api_key: SecretStr,
         author_id,
@@ -160,18 +160,17 @@ class PublishToMediumBlock(Block):
             "notifyFollowers": notify_followers,
         }

-        response = requests.post(
+        response = await Requests().post(
             f"https://api.medium.com/v1/users/{author_id}/posts",
             headers=headers,
             json=data,
         )

         return response.json()

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-        response = self.create_post(
+        response = await self.create_post(
             credentials.api_key,
             input_data.author_id.get_secret_value(),
             input_data.title,
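Throughout this diff the module-level `requests` helper is replaced by an awaitable `Requests()` client. Its internals are not shown here, so this sketch approximates the awaited call shape with aiohttp (URL mirrors the Medium call above; the token handling is illustrative):

    import aiohttp


    async def create_post(author_id: str, token: str, data: dict) -> dict:
        # One session per call keeps the sketch self-contained; a real client
        # would reuse a session across requests.
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"https://api.medium.com/v1/users/{author_id}/posts",
                headers={"Authorization": f"Bearer {token}"},
                json=data,
            ) as response:
                return await response.json()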
@@ -109,7 +109,7 @@ class AddMemoryBlock(Block, Mem0Base):
         test_mock={"_get_client": lambda credentials: MockMemoryClient()},
     )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -124,8 +124,10 @@ class AddMemoryBlock(Block, Mem0Base):

         if isinstance(input_data.content, Conversation):
             messages = input_data.content.messages
+        elif isinstance(input_data.content, Content):
+            messages = [{"role": "user", "content": input_data.content.content}]
         else:
-            messages = [{"role": "user", "content": input_data.content}]
+            messages = [{"role": "user", "content": str(input_data.content)}]

         params = {
             "user_id": user_id,
@@ -152,7 +154,7 @@ class AddMemoryBlock(Block, Mem0Base):
                 yield "action", "NO_CHANGE"

         except Exception as e:
-            yield "error", str(object=e)
+            yield "error", str(e)


 class SearchMemoryBlock(Block, Mem0Base):
@@ -206,7 +208,7 @@ class SearchMemoryBlock(Block, Mem0Base):
         test_mock={"_get_client": lambda credentials: MockMemoryClient()},
     )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -286,7 +288,7 @@ class GetAllMemoriesBlock(Block, Mem0Base):
         test_mock={"_get_client": lambda credentials: MockMemoryClient()},
     )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -5,7 +5,7 @@ from backend.blocks.nvidia._auth import (
 )
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
-from backend.util.request import requests
+from backend.util.request import Requests
 from backend.util.type import MediaFileType


@@ -40,7 +40,7 @@ class NvidiaDeepfakeDetectBlock(Block):
             output_schema=NvidiaDeepfakeDetectBlock.Output,
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
     ) -> BlockOutput:
         url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
@@ -59,8 +59,7 @@ class NvidiaDeepfakeDetectBlock(Block):
         }

         try:
-            response = requests.post(url, headers=headers, json=payload)
-            response.raise_for_status()
+            response = await Requests().post(url, headers=headers, json=payload)
             data = response.json()

             result = data.get("data", [{}])[0]
@@ -56,7 +56,7 @@ class PineconeInitBlock(Block):
             output_schema=PineconeInitBlock.Output,
         )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         pc = Pinecone(api_key=credentials.api_key.get_secret_value())
@@ -117,7 +117,7 @@ class PineconeQueryBlock(Block):
             output_schema=PineconeQueryBlock.Output,
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -195,7 +195,7 @@ class PineconeInsertBlock(Block):
             output_schema=PineconeInsertBlock.Output,
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -146,7 +146,7 @@ class GetRedditPostsBlock(Block):
         subreddit = client.subreddit(input_data.subreddit)
         return subreddit.new(limit=input_data.post_limit or 10)

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: RedditCredentials, **kwargs
     ) -> BlockOutput:
         current_time = datetime.now(tz=timezone.utc)
@@ -207,7 +207,7 @@ class PostRedditCommentBlock(Block):
             raise ValueError("Failed to post comment.")
         return new_comment.id

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: RedditCredentials, **kwargs
     ) -> BlockOutput:
         yield "comment_id", self.reply_post(credentials, input_data.data)
@@ -2,8 +2,8 @@ import os
 from enum import Enum
 from typing import Literal

-import replicate
 from pydantic import SecretStr
+from replicate.client import Client as ReplicateClient
 from replicate.helpers import FileOutput

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
@@ -159,7 +159,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
         test_credentials=TEST_CREDENTIALS,
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         # If the seed is not provided, generate a random seed
@@ -168,7 +168,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
             seed = int.from_bytes(os.urandom(4), "big")

         # Run the model using the provided inputs
-        result = self.run_model(
+        result = await self.run_model(
             api_key=credentials.api_key,
             model_name=input_data.replicate_model_name.api_name,
             prompt=input_data.prompt,
@@ -183,7 +183,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
         )
         yield "result", result

-    def run_model(
+    async def run_model(
         self,
         api_key: SecretStr,
         model_name,
@@ -198,10 +198,10 @@ class ReplicateFluxAdvancedModelBlock(Block):
         safety_tolerance,
     ):
         # Initialize Replicate client with the API key
-        client = replicate.Client(api_token=api_key.get_secret_value())
+        client = ReplicateClient(api_token=api_key.get_secret_value())

         # Run the model with additional parameters
-        output: FileOutput | list[FileOutput] = client.run(  # type: ignore This is because they changed the return type, and didn't update the type hint! It should be overloaded depending on the value of `use_file_output` to `FileOutput | list[FileOutput]` but it's `Any | Iterator[Any]`
+        output: FileOutput | list[FileOutput] = await client.async_run(  # type: ignore This is because they changed the return type, and didn't update the type hint! It should be overloaded depending on the value of `use_file_output` to `FileOutput | list[FileOutput]` but it's `Any | Iterator[Any]`
             f"{model_name}",
             input={
                 "prompt": prompt,
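A sketch of the Replicate change, assuming the `replicate` package: `async_run` awaits the prediction instead of blocking the worker's event loop. The model slug and input below are illustrative, not taken from this diff:

    from replicate.client import Client as ReplicateClient


    async def generate(api_token: str, prompt: str):
        client = ReplicateClient(api_token=api_token)
        # async_run awaits the prediction; the sync client.run would block
        # the whole event loop for the duration of the render.
        return await client.async_run(
            "black-forest-labs/flux-1.1-pro",  # illustrative model slug
            input={"prompt": prompt},
        )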
@@ -1,4 +1,4 @@
-import time
+import asyncio
 from datetime import datetime, timedelta, timezone
 from typing import Any

@@ -87,7 +87,7 @@ class ReadRSSFeedBlock(Block):
     def parse_feed(url: str) -> dict[str, Any]:
         return feedparser.parse(url)  # type: ignore

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         keep_going = True
         start_time = datetime.now(timezone.utc) - timedelta(
             minutes=input_data.time_period
@@ -113,4 +113,4 @@ class ReadRSSFeedBlock(Block):
                 ),
             )

-        time.sleep(input_data.polling_rate)
+        await asyncio.sleep(input_data.polling_rate)
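`time.sleep` would stall every coroutine sharing the executor's event loop; `asyncio.sleep` suspends only the polling task. A minimal sketch of the difference (the `fetch` callable is illustrative):

    import asyncio


    async def poll(fetch, interval_seconds: float):
        while True:
            fetch()
            # Suspends this task only; other coroutines keep running.
            # time.sleep(interval_seconds) here would freeze them all.
            await asyncio.sleep(interval_seconds)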
@@ -93,7 +93,7 @@ class DataSamplingBlock(Block):
         )
         self.accumulated_data = []

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         if input_data.accumulate:
             if isinstance(input_data.data, dict):
                 self.accumulated_data.append(input_data.data)
@@ -105,7 +105,7 @@ class ScreenshotWebPageBlock(Block):
         )

     @staticmethod
-    def take_screenshot(
+    async def take_screenshot(
         credentials: APIKeyCredentials,
         graph_exec_id: str,
         url: str,
@@ -121,11 +121,10 @@ class ScreenshotWebPageBlock(Block):
         """
         Takes a screenshot using the ScreenshotOne API
         """
-        api = Requests(trusted_origins=["https://api.screenshotone.com"])
+        api = Requests()

-        # Build API URL with parameters
+        # Build API parameters
         params = {
-            "access_key": credentials.api_key.get_secret_value(),
             "url": url,
             "viewport_width": viewport_width,
             "viewport_height": viewport_height,
@@ -137,19 +136,28 @@ class ScreenshotWebPageBlock(Block):
             "cache": str(cache).lower(),
         }

-        response = api.get("https://api.screenshotone.com/take", params=params)
+        # Make the API request
+        # Use header-based authentication instead of query parameter
+        headers = {
+            "X-Access-Key": credentials.api_key.get_secret_value(),
+        }
+
+        response = await api.get(
+            "https://api.screenshotone.com/take", params=params, headers=headers
+        )
+        content = response.content

         return {
-            "image": store_media_file(
+            "image": await store_media_file(
                 graph_exec_id=graph_exec_id,
                 file=MediaFileType(
-                    f"data:image/{format.value};base64,{b64encode(response.content).decode('utf-8')}"
+                    f"data:image/{format.value};base64,{b64encode(content).decode('utf-8')}"
                 ),
                 return_content=True,
             )
         }

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -158,7 +166,7 @@ class ScreenshotWebPageBlock(Block):
         **kwargs,
     ) -> BlockOutput:
         try:
-            screenshot_data = self.take_screenshot(
+            screenshot_data = await self.take_screenshot(
                 credentials=credentials,
                 graph_exec_id=graph_exec_id,
                 url=input_data.url,
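Moving the ScreenshotOne access key from a query parameter to an `X-Access-Key` header keeps the secret out of URLs, which tend to end up in access logs and referrer headers. A hedged sketch of the same call shape, using aiohttp in place of the project's Requests wrapper:

    import aiohttp


    async def take_screenshot(access_key: str, url: str) -> bytes:
        # The key travels in a header, not in the query string, so it does
        # not leak through URL logging.
        headers = {"X-Access-Key": access_key}
        params = {"url": url, "format": "png"}
        async with aiohttp.ClientSession() as session:
            async with session.get(
                "https://api.screenshotone.com/take", params=params, headers=headers
            ) as response:
                return await response.read()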
@@ -36,10 +36,10 @@ class GetWikipediaSummaryBlock(Block, GetRequest):
         test_mock={"get_request": lambda url, json: {"extract": "summary content"}},
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         topic = input_data.topic
         url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
-        response = self.get_request(url, json=True)
+        response = await self.get_request(url, json=True)
         if "extract" not in response:
             raise RuntimeError(f"Unable to parse Wikipedia response: {response}")
         yield "summary", response["extract"]
@@ -113,14 +113,14 @@ class GetWeatherInformationBlock(Block, GetRequest):
         test_credentials=TEST_CREDENTIALS,
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         units = "metric" if input_data.use_celsius else "imperial"
         api_key = credentials.api_key
         location = input_data.location
         url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
-        weather_data = self.get_request(url, json=True)
+        weather_data = await self.get_request(url, json=True)

         if "main" in weather_data and "weather" in weather_data:
             yield "temperature", str(weather_data["main"]["temp"])
@@ -1,7 +1,7 @@
 from typing import Any, Dict

 from backend.data.block import Block
-from backend.util.request import requests
+from backend.util.request import Requests

 from ._api import Color, CustomerDetails, OrderItem, Profile

@@ -14,20 +14,25 @@ class Slant3DBlockBase(Block):
     def _get_headers(self, api_key: str) -> Dict[str, str]:
         return {"api-key": api_key, "Content-Type": "application/json"}

-    def _make_request(self, method: str, endpoint: str, api_key: str, **kwargs) -> Dict:
+    async def _make_request(
+        self, method: str, endpoint: str, api_key: str, **kwargs
+    ) -> Dict:
         url = f"{self.BASE_URL}/{endpoint}"
-        response = requests.request(
+        response = await Requests().request(
             method=method, url=url, headers=self._get_headers(api_key), **kwargs
         )
+        resp = response.json()

         if not response.ok:
-            error_msg = response.json().get("error", "Unknown error")
+            error_msg = resp.get("error", "Unknown error")
             raise RuntimeError(f"API request failed: {error_msg}")

-        return response.json()
+        return resp

-    def _check_valid_color(self, profile: Profile, color: Color, api_key: str) -> str:
-        response = self._make_request(
+    async def _check_valid_color(
+        self, profile: Profile, color: Color, api_key: str
+    ) -> str:
+        response = await self._make_request(
             "GET",
             "filament",
             api_key,
@@ -48,10 +53,12 @@ Valid colors for {profile.value} are:
         )
         return color_tag

-    def _convert_to_color(self, profile: Profile, color: Color, api_key: str) -> str:
-        return self._check_valid_color(profile, color, api_key)
+    async def _convert_to_color(
+        self, profile: Profile, color: Color, api_key: str
+    ) -> str:
+        return await self._check_valid_color(profile, color, api_key)

-    def _format_order_data(
+    async def _format_order_data(
         self,
         customer: CustomerDetails,
         order_number: str,
@@ -61,6 +68,7 @@ Valid colors for {profile.value} are:
         """Helper function to format order data for API requests"""
         orders = []
         for item in items:
+            color_tag = await self._convert_to_color(item.profile, item.color, api_key)
             order_data = {
                 "email": customer.email,
                 "phone": customer.phone,
@@ -85,9 +93,7 @@ Valid colors for {profile.value} are:
                 "order_quantity": item.quantity,
                 "order_image_url": "",
                 "order_sku": "NOT_USED",
-                "order_item_color": self._convert_to_color(
-                    item.profile, item.color, api_key
-                ),
+                "order_item_color": color_tag,
                 "profile": item.profile.value,
             }
             orders.append(order_data)
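`_make_request` now parses the response body once into `resp` and reuses it for both the error branch and the return value, rather than decoding `response.json()` twice. A standalone sketch of that single-parse pattern (the `ok`/`body` split is illustrative of whatever the project's Requests wrapper returns):

    from typing import Any


    def extract_payload(ok: bool, body: dict[str, Any]) -> dict[str, Any]:
        # Parse once, then branch: the same dict feeds the error message
        # and the success return value, avoiding a second decode of the body.
        if not ok:
            raise RuntimeError(
                f"API request failed: {body.get('error', 'Unknown error')}"
            )
        return body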
@@ -72,11 +72,11 @@ class Slant3DFilamentBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            result = self._make_request(
+            result = await self._make_request(
                 "GET", "filament", credentials.api_key.get_secret_value()
             )
             yield "filaments", result["filaments"]
@@ -1,8 +1,6 @@
 import uuid
 from typing import List

-import requests as baserequests
-
 from backend.data.block import BlockOutput, BlockSchema
 from backend.data.model import APIKeyCredentials, SchemaField
 from backend.util import settings
@@ -76,17 +74,17 @@ class Slant3DCreateOrderBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            order_data = self._format_order_data(
+            order_data = await self._format_order_data(
                 input_data.customer,
                 input_data.order_number,
                 input_data.items,
                 credentials.api_key.get_secret_value(),
             )
-            result = self._make_request(
+            result = await self._make_request(
                 "POST", "order", credentials.api_key.get_secret_value(), json=order_data
             )
             yield "order_id", result["orderId"]
@@ -162,28 +160,24 @@ class Slant3DEstimateOrderBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
-        order_data = self._format_order_data(
+        order_data = await self._format_order_data(
             input_data.customer,
             input_data.order_number,
             input_data.items,
             credentials.api_key.get_secret_value(),
         )
-        try:
-            result = self._make_request(
-                "POST",
-                "order/estimate",
-                credentials.api_key.get_secret_value(),
-                json=order_data,
-            )
-            yield "total_price", result["totalPrice"]
-            yield "shipping_cost", result["shippingCost"]
-            yield "printing_cost", result["printingCost"]
-        except baserequests.HTTPError as e:
-            yield "error", str(f"Error estimating order: {e} {e.response.text}")
-            raise
+        result = await self._make_request(
+            "POST",
+            "order/estimate",
+            credentials.api_key.get_secret_value(),
+            json=order_data,
+        )
+        yield "total_price", result["totalPrice"]
+        yield "shipping_cost", result["shippingCost"]
+        yield "printing_cost", result["printingCost"]


 class Slant3DEstimateShippingBlock(Slant3DBlockBase):
@@ -246,17 +240,17 @@ class Slant3DEstimateShippingBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            order_data = self._format_order_data(
+            order_data = await self._format_order_data(
                 input_data.customer,
                 input_data.order_number,
                 input_data.items,
                 credentials.api_key.get_secret_value(),
             )
-            result = self._make_request(
+            result = await self._make_request(
                 "POST",
                 "order/estimateShipping",
                 credentials.api_key.get_secret_value(),
@@ -312,11 +306,11 @@ class Slant3DGetOrdersBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            result = self._make_request(
+            result = await self._make_request(
                 "GET", "order", credentials.api_key.get_secret_value()
             )
             yield "orders", [str(order["orderId"]) for order in result["ordersData"]]
@@ -359,11 +353,11 @@ class Slant3DTrackingBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            result = self._make_request(
+            result = await self._make_request(
                 "GET",
                 f"order/{input_data.order_id}/get-tracking",
                 credentials.api_key.get_secret_value(),
@@ -403,11 +397,11 @@ class Slant3DCancelOrderBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            result = self._make_request(
+            result = await self._make_request(
                 "DELETE",
                 f"order/{input_data.order_id}",
                 credentials.api_key.get_secret_value(),

@@ -44,11 +44,11 @@ class Slant3DSlicerBlock(Slant3DBlockBase):
         },
     )

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         try:
-            result = self._make_request(
+            result = await self._make_request(
                 "POST",
                 "slicer",
                 credentials.api_key.get_secret_value(),
@@ -37,7 +37,7 @@ class Slant3DTriggerBase:
         description="Error message if payload processing failed"
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "payload", input_data.payload
         yield "order_id", input_data.payload["orderId"]

@@ -117,8 +117,9 @@ class Slant3DOrderWebhookBlock(Slant3DTriggerBase, Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
-        yield from super().run(input_data, **kwargs)
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
+        async for name, value in super().run(input_data, **kwargs):
+            yield name, value

         # Extract and normalize values from the payload
         yield "status", input_data.payload["status"]
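`yield from` is a syntax error inside an async generator, so the webhook override delegates to the parent with an explicit `async for`. A self-contained sketch of the pattern (class and payload shapes are illustrative):

    from typing import Any, AsyncGenerator


    class BaseTrigger:
        async def run(self, payload: dict) -> AsyncGenerator[tuple[str, Any], None]:
            yield "payload", payload
            yield "order_id", payload["orderId"]


    class OrderWebhook(BaseTrigger):
        async def run(self, payload: dict) -> AsyncGenerator[tuple[str, Any], None]:
            # Explicit delegation replaces `yield from super().run(...)`.
            async for name, value in super().run(payload):
                yield name, value
            yield "status", payload["status"]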
@@ -26,10 +26,10 @@ logger = logging.getLogger(__name__)

 @thread_cached
 def get_database_manager_client():
-    from backend.executor import DatabaseManager
+    from backend.executor import DatabaseManagerClient
     from backend.util.service import get_service_client

-    return get_service_client(DatabaseManager)
+    return get_service_client(DatabaseManagerClient)


 def _get_tool_requests(entry: dict[str, Any]) -> list[str]:
@@ -246,6 +246,10 @@ class SmartDecisionMakerBlock(Block):
         test_credentials=llm.TEST_CREDENTIALS,
     )

+    @staticmethod
+    def cleanup(s: str):
+        return re.sub(r"[^a-zA-Z0-9_-]", "_", s).lower()
+
     @staticmethod
     def _create_block_function_signature(
         sink_node: "Node", links: list["Link"]
@@ -266,7 +270,7 @@ class SmartDecisionMakerBlock(Block):
         block = sink_node.block

         tool_function: dict[str, Any] = {
-            "name": re.sub(r"[^a-zA-Z0-9_-]", "_", block.name).lower(),
+            "name": SmartDecisionMakerBlock.cleanup(block.name),
             "description": block.description,
         }

@@ -281,7 +285,7 @@ class SmartDecisionMakerBlock(Block):
                 and sink_block_input_schema.model_fields[link.sink_name].description
                 else f"The {link.sink_name} of the tool"
             )
-            properties[link.sink_name.lower()] = {
+            properties[SmartDecisionMakerBlock.cleanup(link.sink_name)] = {
                 "type": "string",
                 "description": description,
             }
@@ -326,7 +330,7 @@ class SmartDecisionMakerBlock(Block):
         )

         tool_function: dict[str, Any] = {
-            "name": re.sub(r"[^a-zA-Z0-9_-]", "_", sink_graph_meta.name).lower(),
+            "name": SmartDecisionMakerBlock.cleanup(sink_graph_meta.name),
             "description": sink_graph_meta.description,
         }

@@ -341,7 +345,7 @@ class SmartDecisionMakerBlock(Block):
                 in sink_block_input_schema["properties"][link.sink_name]
                 else f"The {link.sink_name} of the tool"
             )
-            properties[link.sink_name.lower()] = {
+            properties[SmartDecisionMakerBlock.cleanup(link.sink_name)] = {
                 "type": "string",
                 "description": description,
             }
@@ -413,7 +417,7 @@ class SmartDecisionMakerBlock(Block):

         return return_tool_functions

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
@@ -483,7 +487,7 @@ class SmartDecisionMakerBlock(Block):
         ):
             prompt.append({"role": "user", "content": prefix + input_data.prompt})

-        response = llm.llm_call(
+        response = await llm.llm_call(
             credentials=credentials,
             llm_model=input_data.model,
             prompt=prompt,
@@ -491,6 +495,7 @@ class SmartDecisionMakerBlock(Block):
             max_tokens=input_data.max_tokens,
             tools=tool_functions,
             ollama_host=input_data.ollama_host,
+            parallel_tool_calls=False,
         )

         if not response.tool_calls:
@@ -502,7 +507,7 @@ class SmartDecisionMakerBlock(Block):
             tool_args = json.loads(tool_call.function.arguments)

             for arg_name, arg_value in tool_args.items():
-                yield f"tools_^_{tool_name}_{arg_name}".lower(), arg_value
+                yield f"tools_^_{tool_name}_~_{arg_name}", arg_value

         response.prompt.append(response.raw_response)
         yield "conversations", response.prompt
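The new `cleanup()` helper centralizes the tool-name sanitization that was previously inlined as a regex in several places. The same function, with a couple of worked examples:

    import re


    def cleanup(s: str) -> str:
        # Any character outside [a-zA-Z0-9_-] becomes "_", then lowercase,
        # which yields names safe for LLM function-calling schemas.
        return re.sub(r"[^a-zA-Z0-9_-]", "_", s).lower()


    assert cleanup("Get Weather (v2)") == "get_weather__v2_"
    assert cleanup("AIListGeneratorBlock") == "ailistgeneratorblock"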
@@ -27,9 +27,11 @@ class SmartLeadClient:
     def _handle_error(self, e: Exception) -> str:
         return e.__str__().replace(self.api_key, "API KEY")

-    def create_campaign(self, request: CreateCampaignRequest) -> CreateCampaignResponse:
+    async def create_campaign(
+        self, request: CreateCampaignRequest
+    ) -> CreateCampaignResponse:
         try:
-            response = self.requests.post(
+            response = await self.requests.post(
                 self._add_auth_to_url(f"{self.API_URL}/campaigns/create"),
                 json=request.model_dump(),
             )
@@ -40,11 +42,11 @@ class SmartLeadClient:
         except Exception as e:
             raise ValueError(f"Failed to create campaign: {self._handle_error(e)}")

-    def add_leads_to_campaign(
+    async def add_leads_to_campaign(
         self, request: AddLeadsRequest
     ) -> AddLeadsToCampaignResponse:
         try:
-            response = self.requests.post(
+            response = await self.requests.post(
                 self._add_auth_to_url(
                     f"{self.API_URL}/campaigns/{request.campaign_id}/leads"
                 ),
@@ -64,7 +66,7 @@ class SmartLeadClient:
                     f"Failed to add leads to campaign: {self._handle_error(e)}"
                 )

-    def save_campaign_sequences(
+    async def save_campaign_sequences(
         self, campaign_id: int, request: SaveSequencesRequest
     ) -> SaveSequencesResponse:
         """
@@ -84,13 +86,13 @@ class SmartLeadClient:
         - MANUAL_PERCENTAGE: Requires variant_distribution_percentage in seq_variants
         """
         try:
-            response = self.requests.post(
+            response = await self.requests.post(
                 self._add_auth_to_url(
                     f"{self.API_URL}/campaigns/{campaign_id}/sequences"
                 ),
                 json=request.model_dump(exclude_none=True),
             )
-            return SaveSequencesResponse(**response.json())
+            return SaveSequencesResponse(**(response.json()))
         except Exception as e:
             raise ValueError(
                 f"Failed to save campaign sequences: {e.__str__().replace(self.api_key, 'API KEY')}"
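SmartLeadClient appends the API key to URLs via `_add_auth_to_url`, so `_handle_error` scrubs the key out of exception text before it is re-raised. A standalone sketch of that redaction idiom:

    def redact(error: Exception, api_key: str) -> str:
        # The raw exception text may embed the query-string API key;
        # replace it before the message reaches logs or block output.
        return str(error).replace(api_key, "API KEY")


    assert redact(ValueError("401 at /campaigns?api_key=sk-123"), "sk-123") == (
        "401 at /campaigns?api_key=API KEY"
    )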
@@ -80,20 +80,20 @@ class CreateCampaignBlock(Block):
     )

     @staticmethod
-    def create_campaign(
+    async def create_campaign(
         name: str, credentials: SmartLeadCredentials
     ) -> CreateCampaignResponse:
         client = SmartLeadClient(credentials.api_key.get_secret_value())
-        return client.create_campaign(CreateCampaignRequest(name=name))
+        return await client.create_campaign(CreateCampaignRequest(name=name))

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: SmartLeadCredentials,
         **kwargs,
     ) -> BlockOutput:
-        response = self.create_campaign(input_data.name, credentials)
+        response = await self.create_campaign(input_data.name, credentials)

         yield "id", response.id
         yield "name", response.name
@@ -193,11 +193,11 @@ class AddLeadToCampaignBlock(Block):
     )

     @staticmethod
-    def add_leads_to_campaign(
+    async def add_leads_to_campaign(
         campaign_id: int, lead_list: list[LeadInput], credentials: SmartLeadCredentials
     ) -> AddLeadsToCampaignResponse:
         client = SmartLeadClient(credentials.api_key.get_secret_value())
-        return client.add_leads_to_campaign(
+        return await client.add_leads_to_campaign(
             AddLeadsRequest(
                 campaign_id=campaign_id,
                 lead_list=lead_list,
@@ -210,14 +210,14 @@ class AddLeadToCampaignBlock(Block):
             ),
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: SmartLeadCredentials,
         **kwargs,
     ) -> BlockOutput:
-        response = self.add_leads_to_campaign(
+        response = await self.add_leads_to_campaign(
             input_data.campaign_id, input_data.lead_list, credentials
         )

@@ -297,22 +297,22 @@ class SaveCampaignSequencesBlock(Block):
     )

     @staticmethod
-    def save_campaign_sequences(
+    async def save_campaign_sequences(
         campaign_id: int, sequences: list[Sequence], credentials: SmartLeadCredentials
     ) -> SaveSequencesResponse:
         client = SmartLeadClient(credentials.api_key.get_secret_value())
-        return client.save_campaign_sequences(
+        return await client.save_campaign_sequences(
             campaign_id=campaign_id, request=SaveSequencesRequest(sequences=sequences)
         )

-    def run(
+    async def run(
         self,
         input_data: Input,
         *,
         credentials: SmartLeadCredentials,
         **kwargs,
     ) -> BlockOutput:
-        response = self.save_campaign_sequences(
+        response = await self.save_campaign_sequences(
             input_data.campaign_id, input_data.sequences, credentials
         )
@@ -1,4 +1,4 @@
-import time
+import asyncio
 from typing import Literal

 from pydantic import SecretStr
@@ -11,7 +11,7 @@ from backend.data.model import (
     SchemaField,
 )
 from backend.integrations.providers import ProviderName
-from backend.util.request import requests
+from backend.util.request import Requests

 TEST_CREDENTIALS = APIKeyCredentials(
     id="01234567-89ab-cdef-0123-456789abcdef",
@@ -113,26 +113,26 @@ class CreateTalkingAvatarVideoBlock(Block):
         test_credentials=TEST_CREDENTIALS,
     )

-    def create_clip(self, api_key: SecretStr, payload: dict) -> dict:
+    async def create_clip(self, api_key: SecretStr, payload: dict) -> dict:
         url = "https://api.d-id.com/clips"
         headers = {
             "accept": "application/json",
             "content-type": "application/json",
             "authorization": f"Basic {api_key.get_secret_value()}",
         }
-        response = requests.post(url, json=payload, headers=headers)
+        response = await Requests().post(url, json=payload, headers=headers)
         return response.json()

-    def get_clip_status(self, api_key: SecretStr, clip_id: str) -> dict:
+    async def get_clip_status(self, api_key: SecretStr, clip_id: str) -> dict:
         url = f"https://api.d-id.com/clips/{clip_id}"
         headers = {
             "accept": "application/json",
             "authorization": f"Basic {api_key.get_secret_value()}",
         }
-        response = requests.get(url, headers=headers)
+        response = await Requests().get(url, headers=headers)
         return response.json()

-    def run(
+    async def run(
         self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
     ) -> BlockOutput:
         # Create the clip
@@ -153,12 +153,12 @@ class CreateTalkingAvatarVideoBlock(Block):
             "driver_id": input_data.driver_id,
         }

-        response = self.create_clip(credentials.api_key, payload)
+        response = await self.create_clip(credentials.api_key, payload)
         clip_id = response["id"]

         # Poll for clip status
         for _ in range(input_data.max_polling_attempts):
-            status_response = self.get_clip_status(credentials.api_key, clip_id)
+            status_response = await self.get_clip_status(credentials.api_key, clip_id)
             if status_response["status"] == "done":
                 yield "video_url", status_response["result_url"]
                 return
@@ -167,6 +167,6 @@ class CreateTalkingAvatarVideoBlock(Block):
                     f"Clip creation failed: {status_response.get('error', 'Unknown error')}"
                 )

-            time.sleep(input_data.polling_interval)
+            await asyncio.sleep(input_data.polling_interval)

         raise TimeoutError("Clip creation timed out")
@@ -43,7 +43,7 @@ class MatchTextPatternBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         output = input_data.data or input_data.text
         flags = 0
         if not input_data.case_sensitive:
@@ -133,7 +133,7 @@ class ExtractTextInformationBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         flags = 0
         if not input_data.case_sensitive:
             flags = flags | re.IGNORECASE
@@ -201,7 +201,7 @@ class FillTextTemplateBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "output", formatter.format_string(input_data.format, input_data.values)


@@ -232,7 +232,7 @@ class CombineTextsBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         combined_text = input_data.delimiter.join(input_data.input)
         yield "output", combined_text

@@ -267,7 +267,7 @@ class TextSplitBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         if len(input_data.text) == 0:
             yield "texts", []
         else:
@@ -301,5 +301,5 @@ class TextReplaceBlock(Block):
         ],
     )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "output", input_data.text.replace(input_data.old, input_data.new)
Some files were not shown because too many files have changed in this diff.