Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-04-08 03:00:28 -04:00)

Compare commits: 333 commits (swiftyos/s...detached)
.github/PULL_REQUEST_TEMPLATE.md (vendored): 43 lines changed

@@ -1,23 +1,38 @@
 ### Background

 <!-- Clearly explain the need for these changes: -->

 ### Changes 🏗️

 <!-- Concisely describe all of the changes made in this pull request: -->

-### Testing 🔍
-> [!NOTE]
-> Only for the new autogpt platform, currently in autogpt_platform/
-
-<!--
-Please make sure your changes have been tested and are in good working condition.
-Here is a list of our critical paths, if you need some inspiration on what and how to test:
--->
-
-- Create from scratch and execute an agent with at least 3 blocks
-- Import an agent from file upload, and confirm it executes correctly
-- Upload agent to marketplace
-- Import an agent from marketplace and confirm it executes correctly
-- Edit an agent from monitor, and confirm it executes correctly
+### Checklist 📋
+
+#### For code changes:
+- [ ] I have clearly listed my changes in the PR description
+- [ ] I have made a test plan
+- [ ] I have tested my changes according to the test plan:
+  <!-- Put your test plan here: -->
+  - [ ] ...
+
+<details>
+  <summary>Example test plan</summary>
+
+  - [ ] Create from scratch and execute an agent with at least 3 blocks
+  - [ ] Import an agent from file upload, and confirm it executes correctly
+  - [ ] Upload agent to marketplace
+  - [ ] Import an agent from marketplace and confirm it executes correctly
+  - [ ] Edit an agent from monitor, and confirm it executes correctly
+</details>
+
+#### For configuration changes:
+- [ ] `.env.example` is updated or already compatible with my changes
+- [ ] `docker-compose.yml` is updated or already compatible with my changes
+- [ ] I have included a list of my configuration changes in the PR description (under **Changes**)
+
+<details>
+  <summary>Examples of configuration changes</summary>
+
+  - Changing ports
+  - Adding new services that need to communicate with each other
+  - Secrets or environment variable changes
+  - New or infrastructure changes such as databases
+</details>
.github/dependabot.yml (vendored, new file): 179 lines

@@ -0,0 +1,179 @@
version: 2
updates:
  # autogpt_libs (Poetry project)
  - package-ecosystem: "pip"
    directory: "autogpt_platform/autogpt_libs"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # backend (Poetry project)
  - package-ecosystem: "pip"
    directory: "autogpt_platform/backend"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # frontend (Next.js project)
  - package-ecosystem: "npm"
    directory: "autogpt_platform/frontend"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # infra (Terraform)
  - package-ecosystem: "terraform"
    directory: "autogpt_platform/infra"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # market (Poetry project)
  - package-ecosystem: "pip"
    directory: "autogpt_platform/market"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 10
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # Docker
  - package-ecosystem: "docker"
    directory: "autogpt_platform/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # Submodules
  - package-ecosystem: "gitsubmodule"
    directory: "autogpt_platform/supabase"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 1
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"

  # Docs
  - package-ecosystem: 'pip'
    directory: "docs/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 1
    target-branch: "dev"
    groups:
      production-dependencies:
        dependency-type: "production"
        update-types:
          - "minor"
          - "patch"
      development-dependencies:
        dependency-type: "development"
        update-types:
          - "minor"
          - "patch"
.github/workflows/classic-autogpt-ci.yml (vendored): 4 lines changed

@@ -2,12 +2,12 @@ name: Classic - AutoGPT CI
 on:
   push:
-    branches: [ master, development, ci-test* ]
+    branches: [ master, dev, ci-test* ]
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'
   pull_request:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-autogpt-ci.yml'
       - 'classic/original_autogpt/**'

.github/workflows/classic-autogpt-docker-cache-clean.yml (vendored)

@@ -5,7 +5,7 @@ on:
     - cron: 20 4 * * 1,4

 env:
-  BASE_BRANCH: development
+  BASE_BRANCH: dev
   IMAGE_NAME: auto-gpt

 jobs:
@@ -15,46 +15,46 @@ jobs:
       matrix:
         build-type: [release, dev]
     steps:
       - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - id: build
        name: Build image
-       uses: docker/build-push-action@v5
+       uses: docker/build-push-action@v6
        with:
          context: classic/
          file: classic/Dockerfile.autogpt
          build-args: BUILD_TYPE=${{ matrix.build-type }}
          load: true # save to docker images
          # use GHA cache as read-only
          cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

      - name: Generate build report
        env:
          event_name: ${{ github.event_name }}
          event_ref: ${{ github.event.schedule }}

          build_type: ${{ matrix.build-type }}

          prod_branch: master
-         dev_branch: development
+         dev_branch: dev
          repository: ${{ github.repository }}
-         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+         base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

          current_ref: ${{ github.ref_name }}
          commit_hash: ${{ github.sha }}
          source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
          push_forced_label:

          new_commits_json: ${{ null }}
          compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

          github_context_json: ${{ toJSON(github) }}
          job_env_json: ${{ toJSON(env) }}
          vars_json: ${{ toJSON(vars) }}

        run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
        continue-on-error: true
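
Note: the report steps in these workflows compute base_branch with the ${{ cond && a || b }} pattern. GitHub Actions expressions have no ternary operator, so chained && / || stands in for one (it picks the wrong branch only if the "true" value is itself falsy, which the string literals here never are). A rough shell equivalent of the dev-branch selection, assuming the usual GITHUB_REF_NAME variable:

    # any ref that is not master or dev gets compared against dev
    ref="$GITHUB_REF_NAME"
    if [[ "$ref" != "master" && "$ref" != "dev" ]]; then
      base_branch="dev"
    else
      base_branch="master"
    fi
    echo "base_branch=$base_branch"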
.github/workflows/classic-autogpt-docker-ci.yml (vendored): 94 lines changed

@@ -2,13 +2,13 @@ name: Classic - AutoGPT Docker CI
 on:
   push:
-    branches: [ master, development ]
+    branches: [master, dev]
     paths:
       - '.github/workflows/classic-autogpt-docker-ci.yml'
       - 'classic/original_autogpt/**'
       - 'classic/forge/**'
   pull_request:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-autogpt-docker-ci.yml'
       - 'classic/original_autogpt/**'
@@ -34,58 +34,58 @@ jobs:
       matrix:
         build-type: [release, dev]
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

       - if: runner.debug
         run: |
           ls -al
           du -hs *

       - id: build
         name: Build image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: classic/
           file: classic/Dockerfile.autogpt
           build-args: BUILD_TYPE=${{ matrix.build-type }}
           tags: ${{ env.IMAGE_NAME }}
           labels: GIT_REVISION=${{ github.sha }}
           load: true # save to docker images
           # cache layers in GitHub Actions cache to speed up builds
           cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
           cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max

       - name: Generate build report
         env:
           event_name: ${{ github.event_name }}
           event_ref: ${{ github.event.ref }}
           event_ref_type: ${{ github.event.ref}}

           build_type: ${{ matrix.build-type }}

           prod_branch: master
-          dev_branch: development
+          dev_branch: dev
           repository: ${{ github.repository }}
-          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

           current_ref: ${{ github.ref_name }}
           commit_hash: ${{ github.event.after }}
           source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
           push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}

           new_commits_json: ${{ toJSON(github.event.commits) }}
           compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}

           github_context_json: ${{ toJSON(github) }}
           job_env_json: ${{ toJSON(env) }}
           vars_json: ${{ toJSON(vars) }}

         run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
         continue-on-error: true

   test:
     runs-on: ubuntu-latest
@@ -117,16 +117,16 @@ jobs:
       - id: build
         name: Build image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: classic/
           file: classic/Dockerfile.autogpt
           build-args: BUILD_TYPE=dev # include pytest
           tags: >
             ${{ env.IMAGE_NAME }},
             ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
           labels: GIT_REVISION=${{ github.sha }}
           load: true # save to docker images
           # cache layers in GitHub Actions cache to speed up builds
           cache-from: type=gha,scope=autogpt-docker-dev
           cache-to: type=gha,scope=autogpt-docker-dev,mode=max
.github/workflows/classic-autogpt-docker-release.yml (vendored): 112 lines changed

@@ -2,7 +2,7 @@ name: Classic - AutoGPT Docker Release
 on:
   release:
-    types: [ published, edited ]
+    types: [published, edited]

   workflow_dispatch:
     inputs:
@@ -19,69 +19,69 @@ jobs:
     if: startsWith(github.ref, 'refs/tags/autogpt-')
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4

       - name: Log in to Docker hub
         uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKER_USER }}
           password: ${{ secrets.DOCKER_PASSWORD }}

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

       # slashes are not allowed in image tags, but can appear in git branch or tag names
       - id: sanitize_tag
         name: Sanitize image tag
         run: |
           tag=${raw_tag//\//-}
           echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
         env:
           raw_tag: ${{ github.ref_name }}

       - id: build
         name: Build image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: classic/
           file: Dockerfile.autogpt
           build-args: BUILD_TYPE=release
           load: true # save to docker images
           # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
           tags: >
             ${{ env.IMAGE_NAME }},
             ${{ env.DEPLOY_IMAGE_NAME }}:latest,
             ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
           labels: GIT_REVISION=${{ github.sha }}

           # cache layers in GitHub Actions cache to speed up builds
           cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
           cache-to: type=gha,scope=autogpt-docker-release,mode=max

       - name: Push image to Docker Hub
         run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}

       - name: Generate build report
         env:
           event_name: ${{ github.event_name }}
           event_ref: ${{ github.event.ref }}
           event_ref_type: ${{ github.event.ref}}
           inputs_no_cache: ${{ inputs.no_cache }}

           prod_branch: master
-          dev_branch: development
+          dev_branch: dev
           repository: ${{ github.repository }}
-          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
+          base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'dev' && 'dev' || 'master' }}

           ref_type: ${{ github.ref_type }}
           current_ref: ${{ github.ref_name }}
           commit_hash: ${{ github.sha }}
           source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}

           github_context_json: ${{ toJSON(github) }}
           job_env_json: ${{ toJSON(env) }}
           vars_json: ${{ toJSON(vars) }}

         run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
         continue-on-error: true
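
Note: the Sanitize image tag step works through two bash parameter expansions: ${raw_tag//\//-} replaces every "/" with "-" (slashes are invalid in Docker image tags), and ${tag#autogpt-} strips the leading "autogpt-". A quick sketch with a hypothetical tag name:

    raw_tag='autogpt-v0.5.0/rc1'        # hypothetical git tag
    tag=${raw_tag//\//-}                # -> autogpt-v0.5.0-rc1
    echo "tag=${tag#autogpt-}"          # -> tag=v0.5.0-rc1, the line written to $GITHUB_OUTPUT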
.github/workflows/classic-autogpts-ci.yml (vendored): 4 lines changed

@@ -5,7 +5,7 @@ on:
   schedule:
     - cron: '0 8 * * *'
   push:
-    branches: [ master, development, ci-test* ]
+    branches: [ master, dev, ci-test* ]
     paths:
       - '.github/workflows/classic-autogpts-ci.yml'
       - 'classic/original_autogpt/**'
@@ -16,7 +16,7 @@ on:
       - 'classic/setup.py'
       - '!**/*.md'
   pull_request:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-autogpts-ci.yml'
       - 'classic/original_autogpt/**'
.github/workflows/classic-benchmark-ci.yml (vendored): 28 lines changed

@@ -2,13 +2,13 @@ name: Classic - AGBenchmark CI
 on:
   push:
-    branches: [ master, development, ci-test* ]
+    branches: [ master, dev, ci-test* ]
     paths:
       - 'classic/benchmark/**'
       - '!classic/benchmark/reports/**'
       - .github/workflows/classic-benchmark-ci.yml
   pull_request:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths:
       - 'classic/benchmark/**'
       - '!classic/benchmark/reports/**'
@@ -102,7 +102,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        agent-name: [ forge ]
+        agent-name: [forge]
       fail-fast: false
     timeout-minutes: 20
     steps:
@@ -146,23 +146,23 @@ jobs:
           echo "Running the following command: poetry run agbenchmark --mock --category=coding"
           poetry run agbenchmark --mock --category=coding

-          echo "Running the following command: poetry run agbenchmark --test=WriteFile"
-          poetry run agbenchmark --test=WriteFile
+          # echo "Running the following command: poetry run agbenchmark --test=WriteFile"
+          # poetry run agbenchmark --test=WriteFile
           cd ../benchmark
           poetry install
           echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
           export BUILD_SKILL_TREE=true

-          poetry run agbenchmark --mock
+          # poetry run agbenchmark --mock

-          CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
-          if [ ! -z "$CHANGED" ]; then
-            echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
-            echo "$CHANGED"
-            exit 1
-          else
-            echo "No unstaged changes."
-          fi
+          # CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../classic/frontend/assets)') || echo "No diffs"
+          # if [ ! -z "$CHANGED" ]; then
+          #   echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
+          #   echo "$CHANGED"
+          #   exit 1
+          # else
+          #   echo "No unstaged changes."
+          # fi
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
           TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
.github/workflows/classic-forge-ci.yml (vendored): 4 lines changed

@@ -2,13 +2,13 @@ name: Classic - Forge CI
 on:
   push:
-    branches: [ master, development, ci-test* ]
+    branches: [ master, dev, ci-test* ]
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
       - '!classic/forge/tests/vcr_cassettes'
   pull_request:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-forge-ci.yml'
       - 'classic/forge/**'
.github/workflows/classic-frontend-ci.yml (vendored): 62 lines changed

@@ -4,7 +4,7 @@ on:
   push:
     branches:
       - master
-      - development
+      - dev
       - 'ci-test*' # This will match any branch that starts with "ci-test"
     paths:
       - 'classic/frontend/**'
@@ -24,37 +24,37 @@ jobs:
     BUILD_BRANCH: ${{ format('classic-frontend-build/{0}', github.ref_name) }}

     steps:
       - name: Checkout Repo
         uses: actions/checkout@v4

       - name: Setup Flutter
         uses: subosito/flutter-action@v2
         with:
           flutter-version: '3.13.2'

       - name: Build Flutter to Web
         run: |
           cd classic/frontend
           flutter build web --base-href /app/

       # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
       #   if: github.event_name == 'push'
       #   run: |
       #     git config --local user.email "action@github.com"
       #     git config --local user.name "GitHub Action"
       #     git add classic/frontend/build/web
       #     git checkout -B ${{ env.BUILD_BRANCH }}
       #     git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
       #     git push -f origin ${{ env.BUILD_BRANCH }}

       - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
         if: github.event_name == 'push'
-        uses: peter-evans/create-pull-request@v6
+        uses: peter-evans/create-pull-request@v7
         with:
           add-paths: classic/frontend/build/web
           base: ${{ github.ref_name }}
           branch: ${{ env.BUILD_BRANCH }}
           delete-branch: true
           title: "Update frontend build in `${{ github.ref_name }}`"
           body: "This PR updates the frontend build based on commit ${{ github.sha }}."
           commit-message: "Update frontend build based on commit ${{ github.sha }}"
.github/workflows/classic-python-checks.yml (vendored): 4 lines changed

@@ -2,7 +2,7 @@ name: Classic - Python checks
 on:
   push:
-    branches: [ master, development, ci-test* ]
+    branches: [ master, dev, ci-test* ]
     paths:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
@@ -11,7 +11,7 @@ on:
       - '**.py'
       - '!classic/forge/tests/vcr_cassettes'
   pull_request:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths:
       - '.github/workflows/classic-python-checks-ci.yml'
       - 'classic/original_autogpt/**'
.github/workflows/codeql.yml (vendored): 5 lines changed

@@ -13,9 +13,10 @@ name: "CodeQL"
 on:
   push:
-    branches: [ "master", "release-*" ]
+    branches: [ "master", "release-*", "dev" ]
   pull_request:
-    branches: [ "master", "release-*" ]
+    branches: [ "master", "release-*", "dev" ]
+  merge_group:
   schedule:
     - cron: '15 4 * * 0'
.github/workflows/platform-autgpt-deploy-prod.yml (vendored, new file): 55 lines

@@ -0,0 +1,55 @@
name: AutoGPT Platform - Deploy Prod Environment

on:
  release:
    types: [published]

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: production
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

      - name: Run Market Migrations
        working-directory: ./autogpt_platform/market
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_prod
          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
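
Note: the trigger job sends a repository_dispatch event to the infrastructure repo; peter-evans/repository-dispatch is a thin wrapper around the GitHub API. For reference, a roughly equivalent manual trigger with the GitHub CLI (a sketch, assuming a token with access to the target repository; the payload values are placeholders):

    gh api repos/Significant-Gravitas/AutoGPT_cloud_infrastructure/dispatches \
      -f event_type=build_deploy_prod \
      -f 'client_payload[ref]=refs/heads/master' \
      -f 'client_payload[sha]=0123abcd' \
      -f 'client_payload[repository]=Significant-Gravitas/AutoGPT'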
.github/workflows/platform-autogpt-deploy-dev.yaml (vendored, new file): 57 lines

@@ -0,0 +1,57 @@
name: AutoGPT Platform - Deploy Dev Environment

on:
  push:
    branches: [ dev ]
    paths:
      - 'autogpt_platform/**'

permissions:
  contents: 'read'
  id-token: 'write'

jobs:
  migrate:
    environment: develop
    name: Run migrations for AutoGPT Platform
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install prisma

      - name: Run Backend Migrations
        working-directory: ./autogpt_platform/backend
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.BACKEND_DATABASE_URL }}

      - name: Run Market Migrations
        working-directory: ./autogpt_platform/market
        run: |
          python -m prisma migrate deploy
        env:
          DATABASE_URL: ${{ secrets.MARKET_DATABASE_URL }}

  trigger:
    needs: migrate
    runs-on: ubuntu-latest
    steps:
      - name: Trigger deploy workflow
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.DEPLOY_TOKEN }}
          repository: Significant-Gravitas/AutoGPT_cloud_infrastructure
          event-type: build_deploy_dev
          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "repository": "${{ github.repository }}"}'
.github/workflows/platform-autogpt-infra-ci.yml (vendored, deleted file): 56 lines

@@ -1,56 +0,0 @@
name: AutoGPT Platform - Infra

on:
  push:
    branches: [ master ]
    paths:
      - '.github/workflows/platform-autogpt-infra-ci.yml'
      - 'autogpt_platform/infra/**'
  pull_request:
    paths:
      - '.github/workflows/platform-autogpt-infra-ci.yml'
      - 'autogpt_platform/infra/**'

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/infra

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: TFLint
        uses: pauloconnor/tflint-action@v0.0.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tflint_path: terraform/
          tflint_recurse: true
          tflint_changed_only: false

      - name: Set up Helm
        uses: azure/setup-helm@v4.2.0
        with:
          version: v3.14.4

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Run chart-testing (lint)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct lint --target-branch ${{ github.event.repository.default_branch }}
.github/workflows/platform-backend-ci.yml (vendored): 21 lines changed

@@ -2,15 +2,16 @@ name: AutoGPT Platform - Backend CI
 on:
   push:
-    branches: [master, development, ci-test*]
+    branches: [master, dev, ci-test*]
     paths:
       - ".github/workflows/platform-backend-ci.yml"
       - "autogpt_platform/backend/**"
   pull_request:
-    branches: [master, development, release-*]
+    branches: [master, dev, release-*]
     paths:
       - ".github/workflows/platform-backend-ci.yml"
       - "autogpt_platform/backend/**"
+  merge_group:

 concurrency:
   group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -32,6 +33,14 @@ jobs:
         python-version: ["3.10"]
     runs-on: ubuntu-latest

+    services:
+      redis:
+        image: bitnami/redis:6.2
+        env:
+          REDIS_PASSWORD: testpassword
+        ports:
+          - 6379:6379
+
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
@@ -96,9 +105,9 @@ jobs:
       - name: Run pytest with coverage
         run: |
           if [[ "${{ runner.debug }}" == "1" ]]; then
-            poetry run pytest -vv -o log_cli=true -o log_cli_level=DEBUG test
+            poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
           else
-            poetry run pytest -vv test
+            poetry run pytest -s -vv test
           fi
         if: success() || (failure() && steps.lint.outcome == 'failure')
         env:
@@ -107,6 +116,10 @@ jobs:
           SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
           SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
           SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
+          REDIS_HOST: 'localhost'
+          REDIS_PORT: '6379'
+          REDIS_PASSWORD: 'testpassword'

     env:
       CI: true
       PLAIN_OUTPUT: True
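
Note: the new service container publishes Redis on the runner's localhost:6379, authenticated with the REDIS_PASSWORD value, and the REDIS_* variables added to the pytest step point the backend at it. A quick connectivity check, as one might run in a debug step:

    # expects PONG if the service container is up
    redis-cli -h localhost -p 6379 -a testpassword ping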
.github/workflows/platform-frontend-ci.yml (vendored): 38 lines changed

@@ -2,7 +2,7 @@ name: AutoGPT Platform - Frontend CI
 on:
   push:
-    branches: [master]
+    branches: [master, dev]
     paths:
       - ".github/workflows/platform-frontend-ci.yml"
       - "autogpt_platform/frontend/**"
@@ -10,6 +10,7 @@ on:
     paths:
       - ".github/workflows/platform-frontend-ci.yml"
       - "autogpt_platform/frontend/**"
+  merge_group:

 defaults:
   run:
@@ -29,24 +30,37 @@ jobs:

       - name: Install dependencies
         run: |
-          npm install
-
-      - name: Check formatting with Prettier
-        run: |
-          npx prettier --check .
+          yarn install --frozen-lockfile

       - name: Run lint
         run: |
-          npm run lint
+          yarn lint

   test:
     runs-on: ubuntu-latest

     steps:
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # this might remove tools that are actually needed,
+          # if set to "true" but frees about 6 GB
+          tool-cache: false
+
+          # all of these default to true, but feel free to set to
+          # "false" if necessary for your workflow
+          android: false
+          dotnet: false
+          haskell: false
+          large-packages: true
+          docker-images: true
+          swap-storage: true
+
       - name: Checkout repository
         uses: actions/checkout@v4
         with:
           submodules: recursive

       - name: Set up Node.js
         uses: actions/setup-node@v4
         with:
@@ -56,24 +70,28 @@ jobs:
         run: |
           cp ../supabase/docker/.env.example ../.env

       - name: Copy backend .env
         run: |
           cp ../backend/.env.example ../backend/.env

       - name: Run docker compose
         run: |
           docker compose -f ../docker-compose.yml up -d

       - name: Install dependencies
         run: |
-          npm install
+          yarn install --frozen-lockfile

+      - name: Setup Builder .env
+        run: |
+          cp .env.example .env
+
       - name: Install Playwright Browsers
-        run: npx playwright install --with-deps
+        run: yarn playwright install --with-deps

       - name: Run tests
         run: |
-          npm run test
+          yarn test

       - uses: actions/upload-artifact@v4
         if: ${{ !cancelled() }}
.github/workflows/platform-market-ci.yml (vendored, new file): 126 lines

@@ -0,0 +1,126 @@
name: AutoGPT Platform - Backend CI

on:
  push:
    branches: [master, dev, ci-test*]
    paths:
      - ".github/workflows/platform-market-ci.yml"
      - "autogpt_platform/market/**"
  pull_request:
    branches: [master, dev, release-*]
    paths:
      - ".github/workflows/platform-market-ci.yml"
      - "autogpt_platform/market/**"
  merge_group:

concurrency:
  group: ${{ format('backend-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
  cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}

defaults:
  run:
    shell: bash
    working-directory: autogpt_platform/market

jobs:
  test:
    permissions:
      contents: read
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Setup Supabase
        uses: supabase/setup-cli@v1
        with:
          version: latest

      - id: get_date
        name: Get date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Python dependency cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pypoetry
          key: poetry-${{ runner.os }}-${{ hashFiles('autogpt_platform/market/poetry.lock') }}

      - name: Install Poetry (Unix)
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

          if [ "${{ runner.os }}" = "macOS" ]; then
            PATH="$HOME/.local/bin:$PATH"
            echo "$HOME/.local/bin" >> $GITHUB_PATH
          fi

      - name: Install Python dependencies
        run: poetry install

      - name: Generate Prisma Client
        run: poetry run prisma generate

      - id: supabase
        name: Start Supabase
        working-directory: .
        run: |
          supabase init
          supabase start --exclude postgres-meta,realtime,storage-api,imgproxy,inbucket,studio,edge-runtime,logflare,vector,supavisor
          supabase status -o env | sed 's/="/=/; s/"$//' >> $GITHUB_OUTPUT
        # outputs:
        # DB_URL, API_URL, GRAPHQL_URL, ANON_KEY, SERVICE_ROLE_KEY, JWT_SECRET

      - name: Run Database Migrations
        run: poetry run prisma migrate dev --name updates
        env:
          DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}

      - id: lint
        name: Run Linter
        run: poetry run lint

      # Tests comment out because they do not work with prisma mock, nor have they been updated since they were created
      # - name: Run pytest with coverage
      #   run: |
      #     if [[ "${{ runner.debug }}" == "1" ]]; then
      #       poetry run pytest -s -vv -o log_cli=true -o log_cli_level=DEBUG test
      #     else
      #       poetry run pytest -s -vv test
      #     fi
      #   if: success() || (failure() && steps.lint.outcome == 'failure')
      #   env:
      #     LOG_LEVEL: ${{ runner.debug && 'DEBUG' || 'INFO' }}
      #     DATABASE_URL: ${{ steps.supabase.outputs.DB_URL }}
      #     SUPABASE_URL: ${{ steps.supabase.outputs.API_URL }}
      #     SUPABASE_SERVICE_ROLE_KEY: ${{ steps.supabase.outputs.SERVICE_ROLE_KEY }}
      #     SUPABASE_JWT_SECRET: ${{ steps.supabase.outputs.JWT_SECRET }}
      #     REDIS_HOST: 'localhost'
      #     REDIS_PORT: '6379'
      #     REDIS_PASSWORD: 'testpassword'

    env:
      CI: true
      PLAIN_OUTPUT: True
      RUN_ENV: local
      PORT: 8080

      # - name: Upload coverage reports to Codecov
      #   uses: codecov/codecov-action@v4
      #   with:
      #     token: ${{ secrets.CODECOV_TOKEN }}
      #     flags: backend,${{ runner.os }}
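
Note: the Start Supabase step converts `supabase status -o env` output (KEY="value" lines) into step outputs by stripping the quotes before appending to $GITHUB_OUTPUT. The transformation in isolation, with a hypothetical value:

    echo 'DB_URL="postgresql://postgres:postgres@127.0.0.1:54322/postgres"' \
      | sed 's/="/=/; s/"$//'
    # -> DB_URL=postgresql://postgres:postgres@127.0.0.1:54322/postgres
    # once in $GITHUB_OUTPUT, this is readable as steps.supabase.outputs.DB_URL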
.github/workflows/repo-pr-enforce-base-branch.yml (vendored, new file): 21 lines

@@ -0,0 +1,21 @@
name: Repo - Enforce dev as base branch
on:
  pull_request_target:
    branches: [ master ]
    types: [ opened ]

jobs:
  check_pr_target:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - name: Check if PR is from dev or hotfix
        if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }}
        run: |
          gh pr comment ${{ github.event.number }} --repo "$REPO" \
            --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.'
          gh pr edit ${{ github.event.number }} --base dev --repo "$REPO"
        env:
          GITHUB_TOKEN: ${{ github.token }}
          REPO: ${{ github.repository }}
.github/workflows/repo-pr-label.yml (vendored): 2 lines changed

@@ -3,7 +3,7 @@ name: Repo - Pull Request auto-label
 on:
   # So that PRs touching the same files as the push are updated
   push:
-    branches: [ master, development, release-* ]
+    branches: [ master, dev, release-* ]
     paths-ignore:
       - 'classic/forge/tests/vcr_cassettes'
       - 'classic/benchmark/reports/**'
.github/workflows/repo-workflow-checker.yml (vendored): 1 line changed

@@ -2,6 +2,7 @@ name: Repo - PR Status Checker
 on:
   pull_request:
     types: [opened, synchronize, reopened]
+  merge_group:

 jobs:
   status-check:
@@ -7,13 +7,18 @@ from typing import Dict, List, Tuple

 CHECK_INTERVAL = 30


 def get_environment_variables() -> Tuple[str, str, str, str, str]:
     """Retrieve and return necessary environment variables."""
     try:
         with open(os.environ["GITHUB_EVENT_PATH"]) as f:
             event = json.load(f)

-        sha = event["pull_request"]["head"]["sha"]
+        # Handle both PR and merge group events
+        if "pull_request" in event:
+            sha = event["pull_request"]["head"]["sha"]
+        else:
+            sha = os.environ["GITHUB_SHA"]

         return (
             os.environ["GITHUB_API_URL"],
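
Note: this change lets the status checker handle merge_group runs, whose event payload has no pull_request object; there the commit SHA comes from the GITHUB_SHA environment variable instead. The same fallback, sketched in shell with jq (using the GITHUB_EVENT_PATH and GITHUB_SHA variables GitHub provides):

    # .pull_request.head.sha yields nothing when the event has no pull_request
    sha=$(jq -r '.pull_request.head.sha // empty' "$GITHUB_EVENT_PATH")
    sha=${sha:-$GITHUB_SHA}
    echo "checking commit $sha"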
.gitignore (vendored): 2 lines changed

@@ -171,3 +171,5 @@ ig*
 .github_access_token
 LICENSE.rtf
 autogpt_platform/backend/settings.py
+/.auth
+/autogpt_platform/frontend/.auth
@@ -10,27 +10,126 @@ repos:
      - id: check-symlinks
      - id: debug-statements

  - repo: https://github.com/Yelp/detect-secrets
    rev: v1.5.0
    hooks:
      - id: detect-secrets
        name: Detect secrets
        description: Detects high entropy strings that are likely to be passwords.
        files: ^autogpt_platform/
        stages: [push]

  - repo: local
    # For proper type checking, all dependencies need to be up-to-date.
    # It's also a good idea to check that poetry.lock is consistent with pyproject.toml.
    hooks:
      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Backend
        alias: poetry-install-platform-backend
        entry: poetry -C autogpt_platform/backend install
        # include autogpt_libs source (since it's a path dependency)
        files: ^autogpt_platform/(backend|autogpt_libs)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - AutoGPT Platform - Libs
        alias: poetry-install-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs install
        files: ^autogpt_platform/autogpt_libs/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - AutoGPT
        alias: poetry-install-classic-autogpt
        entry: poetry -C classic/original_autogpt install
        # include forge source (since it's a path dependency)
        files: ^classic/(original_autogpt|forge)/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Forge
        alias: poetry-install-classic-forge
        entry: poetry -C classic/forge install
        files: ^classic/forge/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

      - id: poetry-install
        name: Check & Install dependencies - Classic - Benchmark
        alias: poetry-install-classic-benchmark
        entry: poetry -C classic/benchmark install
        files: ^classic/benchmark/poetry\.lock$
        types: [file]
        language: system
        pass_filenames: false

  - repo: local
    # For proper type checking, Prisma client must be up-to-date.
    hooks:
      - id: prisma-generate
        name: Prisma Generate - AutoGPT Platform - Backend
        alias: prisma-generate-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run prisma generate'
        # include everything that triggers poetry install + the prisma schema
        files: ^autogpt_platform/((backend|autogpt_libs)/poetry\.lock|backend/schema.prisma)$
        types: [file]
        language: system
        pass_filenames: false

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Backend
        alias: ruff-lint-platform-backend
        files: ^autogpt_platform/backend/
        args: [--fix]

      - id: ruff
        name: Lint (Ruff) - AutoGPT Platform - Libs
        alias: ruff-lint-platform-libs
        files: ^autogpt_platform/autogpt_libs/
        args: [--fix]

  - repo: local
    # isort needs the context of which packages are installed to function, so we
    # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
    hooks:
      - id: isort-autogpt
        name: Lint (isort) - AutoGPT
        entry: poetry -C classic/original_autogpt run isort
      - id: isort
        name: Lint (isort) - AutoGPT Platform - Backend
        alias: isort-platform-backend
        entry: poetry -C autogpt_platform/backend run isort -p backend
        files: ^autogpt_platform/backend/
        types: [file, python]
        language: system

      - id: isort
        name: Lint (isort) - Classic - AutoGPT
        alias: isort-classic-autogpt
        entry: poetry -C classic/original_autogpt run isort -p autogpt
        files: ^classic/original_autogpt/
        types: [file, python]
        language: system

      - id: isort-forge
        name: Lint (isort) - Forge
        entry: poetry -C classic/forge run isort
      - id: isort
        name: Lint (isort) - Classic - Forge
        alias: isort-classic-forge
        entry: poetry -C classic/forge run isort -p forge
        files: ^classic/forge/
        types: [file, python]
        language: system

      - id: isort-benchmark
        name: Lint (isort) - Benchmark
        entry: poetry -C classic/benchmark run isort
      - id: isort
        name: Lint (isort) - Classic - Benchmark
        alias: isort-classic-benchmark
        entry: poetry -C classic/benchmark run isort -p agbenchmark
        files: ^classic/benchmark/
        types: [file, python]
        language: system
@@ -42,7 +141,6 @@ repos:
    hooks:
      - id: black
        name: Lint (Black)
        language_version: python3.10

  - repo: https://github.com/PyCQA/flake8
    rev: 7.0.0
@@ -50,20 +148,20 @@ repos:
    # them separately.
    hooks:
      - id: flake8
        name: Lint (Flake8) - AutoGPT
        alias: flake8-autogpt
        name: Lint (Flake8) - Classic - AutoGPT
        alias: flake8-classic-autogpt
        files: ^classic/original_autogpt/(autogpt|scripts|tests)/
        args: [--config=classic/original_autogpt/.flake8]

      - id: flake8
        name: Lint (Flake8) - Forge
        alias: flake8-forge
        name: Lint (Flake8) - Classic - Forge
        alias: flake8-classic-forge
        files: ^classic/forge/(forge|tests)/
        args: [--config=classic/forge/.flake8]

      - id: flake8
        name: Lint (Flake8) - Benchmark
        alias: flake8-benchmark
        name: Lint (Flake8) - Classic - Benchmark
        alias: flake8-classic-benchmark
        files: ^classic/benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
        args: [--config=classic/benchmark/.flake8]
@@ -72,31 +170,52 @@ repos:
    # project. To trigger on poetry.lock we also reset the file `types` filter.
    hooks:
      - id: pyright
        name: Typecheck - AutoGPT
        alias: pyright-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        args: [-p, autogpt, autogpt]
        name: Typecheck - AutoGPT Platform - Backend
        alias: pyright-platform-backend
        entry: poetry -C autogpt_platform/backend run pyright
        args: [-p, autogpt_platform/backend, autogpt_platform/backend]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
        files: ^autogpt_platform/(backend/((backend|test)/|(\w+\.py|poetry\.lock)$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Forge
        alias: pyright-forge
        name: Typecheck - AutoGPT Platform - Libs
        alias: pyright-platform-libs
        entry: poetry -C autogpt_platform/autogpt_libs run pyright
        args: [-p, autogpt_platform/autogpt_libs, autogpt_platform/autogpt_libs]
        files: ^autogpt_platform/autogpt_libs/(autogpt_libs/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - AutoGPT
        alias: pyright-classic-autogpt
        entry: poetry -C classic/original_autogpt run pyright
        args: [-p, classic/original_autogpt, classic/original_autogpt]
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Classic - Forge
        alias: pyright-classic-forge
        entry: poetry -C classic/forge run pyright
        args: [-p, forge, forge]
        files: ^classic/forge/(classic/forge/|poetry\.lock$)
        args: [-p, classic/forge, classic/forge]
        files: ^classic/forge/(forge/|poetry\.lock$)
        types: [file]
        language: system
        pass_filenames: false

      - id: pyright
        name: Typecheck - Benchmark
        alias: pyright-benchmark
        name: Typecheck - Classic - Benchmark
        alias: pyright-classic-benchmark
        entry: poetry -C classic/benchmark run pyright
        args: [-p, benchmark, benchmark]
        args: [-p, classic/benchmark, classic/benchmark]
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        types: [file]
        language: system
@@ -104,23 +223,35 @@ repos:

  - repo: local
    hooks:
      - id: pytest-autogpt
        name: Run tests - AutoGPT (excl. slow tests)
      - id: pytest
        name: Run tests - AutoGPT Platform - Backend
        alias: pytest-platform-backend
        entry: bash -c 'cd autogpt_platform/backend && poetry run pytest'
        # include autogpt_libs source (since it's a path dependency) but exclude *_test.py files:
        files: ^autogpt_platform/(backend/((backend|test)/|poetry\.lock$)|autogpt_libs/(autogpt_libs/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest
        name: Run tests - Classic - AutoGPT (excl. slow tests)
        alias: pytest-classic-autogpt
        entry: bash -c 'cd classic/original_autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
        # include forge source (since it's a path dependency) but exclude *_test.py files:
        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(classic/forge/.*(?<!_test)\.py|poetry\.lock)$)
        files: ^(classic/original_autogpt/((autogpt|tests)/|poetry\.lock$)|classic/forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
        language: system
        pass_filenames: false

      - id: pytest-forge
        name: Run tests - Forge (excl. slow tests)
      - id: pytest
        name: Run tests - Classic - Forge (excl. slow tests)
        alias: pytest-classic-forge
        entry: bash -c 'cd classic/forge && poetry run pytest --cov=forge -m "not slow"'
        files: ^classic/forge/(classic/forge/|tests/|poetry\.lock$)
        files: ^classic/forge/(forge/|tests/|poetry\.lock$)
        language: system
        pass_filenames: false

      - id: pytest-benchmark
        name: Run tests - Benchmark
      - id: pytest
        name: Run tests - Classic - Benchmark
        alias: pytest-classic-benchmark
        entry: bash -c 'cd classic/benchmark && poetry run pytest --cov=benchmark'
        files: ^classic/benchmark/(agbenchmark/|tests/|poetry\.lock$)
        language: system
67 .vscode/launch.json vendored Normal file
@@ -0,0 +1,67 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Frontend: Server Side",
      "type": "node-terminal",
      "request": "launch",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "command": "yarn dev"
    },
    {
      "name": "Frontend: Client Side",
      "type": "msedge",
      "request": "launch",
      "url": "http://localhost:3000"
    },
    {
      "name": "Frontend: Full Stack",
      "type": "node-terminal",
      "request": "launch",
      "command": "yarn dev",
      "cwd": "${workspaceFolder}/autogpt_platform/frontend",
      "serverReadyAction": {
        "pattern": "- Local:.+(https?://.+)",
        "uriFormat": "%s",
        "action": "debugWithEdge"
      }
    },
    {
      "name": "Backend",
      "type": "debugpy",
      "request": "launch",
      "module": "backend.app",
      // "env": {
      //   "ENV": "dev"
      // },
      "envFile": "${workspaceFolder}/backend/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/autogpt_platform/backend"
    },
    {
      "name": "Marketplace",
      "type": "debugpy",
      "request": "launch",
      "module": "autogpt_platform.market.main",
      "env": {
        "ENV": "dev"
      },
      "envFile": "${workspaceFolder}/market/.env",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/market"
    }
  ],
  "compounds": [
    {
      "name": "Everything",
      "configurations": ["Backend", "Frontend: Full Stack"],
      // "preLaunchTask": "${defaultBuildTask}",
      "stopAll": true,
      "presentation": {
        "hidden": false,
        "order": 0
      }
    }
  ]
}
@@ -11,7 +11,7 @@ Also check out our [🚀 Roadmap][roadmap] for information about our priorities
[kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1

## Contributing to the AutoGPT Platform Folder
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution.
All contributions to [the autogpt_platform folder](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform) will be under our [Contribution License Agreement](https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpt_platform/Contributor%20License%20Agreement%20(CLA).md). By making a pull request contributing to this folder, you agree to the terms of our CLA for your contribution. All contributions to other folders will be under the MIT license.

## In short
1. Avoid duplicate work, issues, PRs etc.
8 LICENSE
@@ -1,7 +1,13 @@
All portions of this repository are under one of two licenses. The majority of the AutoGPT repository is under the MIT License below. The autogpt_platform folder is under the
Polyform Shield License.

MIT License

Copyright (c) 2023 Toran Bruce Richards

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
@@ -9,9 +15,11 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 README.md
@@ -35,7 +35,7 @@ The AutoGPT frontend is where users interact with our powerful AI automation pla

**Monitoring and Analytics:** Keep track of your agents' performance and gain insights to continually improve your automation processes.

[Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
[Read this guide](https://docs.agpt.co/platform/new_blocks/) to learn how to build your own custom blocks.

### 💽 AutoGPT Server

@@ -65,6 +65,7 @@ Here are two examples of what you can do with AutoGPT:
These examples show just a glimpse of what you can achieve with AutoGPT! You can create customized workflows to build agents for any use case.

---
### Mission and Licensing
Our mission is to provide the tools, so that you can focus on what matters:

- 🏗️ **Building** - Lay the foundation for something amazing.
@@ -77,6 +78,13 @@ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI i
 | 
**🚀 [Contributing](CONTRIBUTING.md)**

**Licensing:**

MIT License: The majority of the AutoGPT repository is under the MIT License.

Polyform Shield License: This license applies to the autogpt_platform folder.

For more information, see https://agpt.co/blog/introducing-the-autogpt-platform

---
## 🤖 AutoGPT Classic
@@ -101,7 +109,7 @@ This guide will walk you through the process of creating your own agent and usin

📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
 | 
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/classic/benchmark) about the Benchmark

### 💻 UI

@@ -150,6 +158,8 @@ To maintain a uniform standard and ensure seamless compatibility with many curre

---

## Stars stats

<p align="center">
  <a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
    <picture>
@@ -159,3 +169,10 @@ To maintain a uniform standard and ensure seamless compatibility with many curre
    </picture>
  </a>
</p>


## ⚡ Contributors

<a href="https://github.com/Significant-Gravitas/AutoGPT/graphs/contributors" alt="View Contributors">
  <img src="https://contrib.rocks/image?repo=Significant-Gravitas/AutoGPT&max=1000&columns=10" alt="Contributors" />
</a>
47 SECURITY.md Normal file
@@ -0,0 +1,47 @@
# Security Policy

## Reporting Security Issues

We take the security of our project seriously. If you believe you have found a security vulnerability, please report it to us privately. **Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**

> **Important Note**: Any code within the `classic/` folder is considered legacy, unsupported, and out of scope for security reports. We will not address security vulnerabilities in this deprecated code.

Instead, please report them via:
- [GitHub Security Advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new)
- [Huntr.dev](https://huntr.com/repos/significant-gravitas/autogpt) - where you may be eligible for a bounty

### Reporting Process
1. **Submit Report**: Use one of the above channels to submit your report
2. **Response Time**: Our team will acknowledge receipt of your report within 14 business days.
3. **Collaboration**: We will collaborate with you to understand and validate the issue
4. **Resolution**: We will work on a fix and coordinate the release process

### Disclosure Policy
- Please provide detailed reports with reproducible steps
- Include the version/commit hash where you discovered the vulnerability
- Allow us a 90-day security fix window before any public disclosure
- Share any potential mitigations or workarounds if known

## Supported Versions
Only the following versions are eligible for security updates:

| Version | Supported |
|---------|-----------|
| Latest release on master branch | ✅ |
| Development commits (pre-master) | ✅ |
| Classic folder (deprecated) | ❌ |
| All other versions | ❌ |

## Security Best Practices
When using this project:
1. Always use the latest stable version
2. Review security advisories before updating
3. Follow our security documentation and guidelines
4. Keep your dependencies up to date
5. Do not use code from the `classic/` folder as it is deprecated and unsupported

## Past Security Advisories
For a list of past security advisories, please visit our [Security Advisory Page](https://github.com/Significant-Gravitas/AutoGPT/security/advisories) and [Huntr Disclosures Page](https://huntr.com/repos/significant-gravitas/autogpt).

---
Last updated: November 2024
2 autogpt_platform/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
*.ignore.*
*.ign.*
@@ -46,9 +46,9 @@ To run the AutoGPT Platform, follow these steps:

6. Run the following command:
   ```
   cp .env.example .env
   cp .env.example .env.local
   ```
   This command will copy the `.env.example` file to `.env` in the `frontend` directory. You can modify the `.env` within this folder to add your own environment variables for the frontend application.
   This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.

7. Run the following command:
   ```
@@ -149,6 +149,3 @@ To persist data for PostgreSQL and Redis, you can modify the `docker-compose.yml
3. Save the file and run `docker compose up -d` to apply the changes.

This configuration will create named volumes for PostgreSQL and Redis, ensuring that your data persists across container restarts.
@@ -0,0 +1,34 @@
import hashlib
import secrets
from typing import NamedTuple


class APIKeyContainer(NamedTuple):
    """Container for API key parts."""

    raw: str
    prefix: str
    postfix: str
    hash: str


class APIKeyManager:
    PREFIX: str = "agpt_"
    PREFIX_LENGTH: int = 8
    POSTFIX_LENGTH: int = 8

    def generate_api_key(self) -> APIKeyContainer:
        """Generate a new API key with all its parts."""
        raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
        return APIKeyContainer(
            raw=raw_key,
            prefix=raw_key[: self.PREFIX_LENGTH],
            postfix=raw_key[-self.POSTFIX_LENGTH :],
            hash=hashlib.sha256(raw_key.encode()).hexdigest(),
        )

    def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
        """Verify if a provided API key matches the stored hash."""
        if not provided_key.startswith(self.PREFIX):
            return False
        return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
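A minimal usage sketch for the module above (the storage variable is a stand-in for whatever persistence the backend uses; only the hash would be stored, while `key.raw` is shown to the user once):

```python
manager = APIKeyManager()

key = manager.generate_api_key()
print(key.prefix, "...", key.postfix)  # safe to display in a UI
stored_hash = key.hash                 # persist only the hash, never key.raw

# Later, when a request presents a key:
assert manager.verify_api_key(key.raw, stored_hash)
assert not manager.verify_api_key("agpt_wrong-key", stored_hash)
```

One design note: comparing hex digests with `==` is not constant-time; `hmac.compare_digest` would be the usual hardening, though the diff does not use it.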
@@ -1,7 +1,8 @@
import fastapi

from .config import Settings
from .middleware import auth_middleware
from .models import User
from .models import DEFAULT_USER_ID, User


def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
@@ -16,8 +17,12 @@ def requires_admin_user(

def verify_user(payload: dict | None, admin_only: bool) -> User:
    if not payload:
        if Settings.ENABLE_AUTH:
            raise fastapi.HTTPException(
                status_code=401, detail="Authorization header is missing"
            )
        # This handles the case when authentication is disabled
        payload = {"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1a", "role": "admin"}
        payload = {"sub": DEFAULT_USER_ID, "role": "admin"}

    user_id = payload.get("sub")
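For context, a hedged sketch of how `requires_user` is typically wired into a route as a FastAPI dependency. The route, the `autogpt_libs.auth` import path, and the `user_id` attribute on `User` are assumptions, not shown in the diff; with auth disabled, `verify_user` falls back to the `DEFAULT_USER_ID` admin payload above:

```python
import fastapi

from autogpt_libs.auth.depends import requires_user  # import path assumed
from autogpt_libs.auth.models import User

app = fastapi.FastAPI()


@app.get("/me")
def read_me(user: User = fastapi.Depends(requires_user)):
    # With ENABLE_AUTH off, this resolves to the default admin user.
    return {"user_id": user.user_id}  # `user_id` field is an assumption
```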
@@ -1,5 +1,8 @@
from dataclasses import dataclass

DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"


# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
@@ -0,0 +1,167 @@
import asyncio
import contextlib
import logging
from functools import wraps
from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cast

import ldclient
from fastapi import HTTPException
from ldclient import Context, LDClient
from ldclient.config import Config
from typing_extensions import ParamSpec

from .config import SETTINGS

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)

P = ParamSpec("P")
T = TypeVar("T")


def get_client() -> LDClient:
    """Get the LaunchDarkly client singleton."""
    return ldclient.get()


def initialize_launchdarkly() -> None:
    sdk_key = SETTINGS.launch_darkly_sdk_key
    logger.debug(
        f"Initializing LaunchDarkly with SDK key: {'present' if sdk_key else 'missing'}"
    )

    if not sdk_key:
        logger.warning("LaunchDarkly SDK key not configured")
        return

    config = Config(sdk_key)
    ldclient.set_config(config)

    if ldclient.get().is_initialized():
        logger.info("LaunchDarkly client initialized successfully")
    else:
        logger.error("LaunchDarkly client failed to initialize")


def shutdown_launchdarkly() -> None:
    """Shutdown the LaunchDarkly client."""
    if ldclient.get().is_initialized():
        ldclient.get().close()
        logger.info("LaunchDarkly client closed successfully")


def create_context(
    user_id: str, additional_attributes: Optional[Dict[str, Any]] = None
) -> Context:
    """Create LaunchDarkly context with optional additional attributes."""
    builder = Context.builder(str(user_id)).kind("user")
    if additional_attributes:
        for key, value in additional_attributes.items():
            builder.set(key, value)
    return builder.build()


def feature_flag(
    flag_key: str,
    default: bool = False,
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """
    Decorator for feature flag protected endpoints.
    """

    def decorator(
        func: Callable[P, Union[T, Awaitable[T]]]
    ) -> Callable[P, Union[T, Awaitable[T]]]:
        @wraps(func)
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    context = create_context(str(user_id))
                    is_enabled = get_client().variation(flag_key, context, default)

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                result = func(*args, **kwargs)
                if asyncio.iscoroutine(result):
                    return await result
                return cast(T, result)
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        @wraps(func)
        def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            try:
                user_id = kwargs.get("user_id")
                if not user_id:
                    raise ValueError("user_id is required")

                if not get_client().is_initialized():
                    logger.warning(
                        f"LaunchDarkly not initialized, using default={default}"
                    )
                    is_enabled = default
                else:
                    context = create_context(str(user_id))
                    is_enabled = get_client().variation(flag_key, context, default)

                if not is_enabled:
                    raise HTTPException(status_code=404, detail="Feature not available")

                return cast(T, func(*args, **kwargs))
            except Exception as e:
                logger.error(f"Error evaluating feature flag {flag_key}: {e}")
                raise

        return cast(
            Callable[P, Union[T, Awaitable[T]]],
            async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper,
        )

    return decorator


def percentage_rollout(
    flag_key: str,
    default: bool = False,
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """Decorator for percentage-based rollouts."""
    return feature_flag(flag_key, default)


def beta_feature(
    flag_key: Optional[str] = None,
    unauthorized_response: Any = {"message": "Not available in beta"},
) -> Callable[
    [Callable[P, Union[T, Awaitable[T]]]], Callable[P, Union[T, Awaitable[T]]]
]:
    """Decorator for beta features."""
    actual_key = f"beta-{flag_key}" if flag_key else "beta"
    return feature_flag(actual_key, False)


@contextlib.contextmanager
def mock_flag_variation(flag_key: str, return_value: Any):
    """Context manager for testing feature flags."""
    original_variation = get_client().variation
    get_client().variation = lambda key, context, default: (
        return_value if key == flag_key else original_variation(key, context, default)
    )
    try:
        yield
    finally:
        get_client().variation = original_variation
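A hedged sketch of how `feature_flag` would guard a FastAPI route. The route path and flag name are illustrative, not from the diff; the wrapper looks up `user_id` in the keyword arguments, so the endpoint must accept it as a keyword parameter (FastAPI passes path parameters that way):

```python
from fastapi import FastAPI

from autogpt_libs.feature_flag.client import feature_flag, initialize_launchdarkly

app = FastAPI()
initialize_launchdarkly()  # logs a warning and no-ops if the SDK key is unset


@app.get("/beta/agents/{user_id}")
@feature_flag("agent-activity", default=False)  # flag name is illustrative
async def agent_activity(user_id: str):
    # Only reached when the flag evaluates true for this user;
    # otherwise the decorator raises HTTPException(404).
    return {"user_id": user_id, "enabled": True}
```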
@@ -0,0 +1,44 @@
import pytest
from autogpt_libs.feature_flag.client import feature_flag, mock_flag_variation
from fastapi import HTTPException
from ldclient import LDClient


@pytest.fixture
def ld_client(mocker):
    client = mocker.Mock(spec=LDClient)
    mocker.patch("ldclient.get", return_value=client)
    client.is_initialized.return_value = True
    return client


@pytest.mark.asyncio
async def test_feature_flag_enabled(ld_client):
    ld_client.variation.return_value = True

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    # The decorated function is async, so it must be awaited.
    result = await test_function(user_id="test-user")
    assert result == "success"
    ld_client.variation.assert_called_once()


@pytest.mark.asyncio
async def test_feature_flag_unauthorized_response(ld_client):
    ld_client.variation.return_value = False

    @feature_flag("test-flag")
    async def test_function(user_id: str):
        return "success"

    # A disabled flag makes the decorator raise HTTPException(404).
    with pytest.raises(HTTPException):
        await test_function(user_id="test-user")


def test_mock_flag_variation(ld_client):
    with mock_flag_variation("test-flag", True):
        assert ld_client.variation("test-flag", None, False)

    with mock_flag_variation("test-flag", False):
        assert not ld_client.variation("test-flag", None, False)
@@ -0,0 +1,15 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    launch_darkly_sdk_key: str = Field(
        default="",
        description="The Launch Darkly SDK key",
        validation_alias="LAUNCH_DARKLY_SDK_KEY",
    )

    model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")


SETTINGS = Settings()
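A small sketch of how the `validation_alias` above lets the SDK key come in via the environment (the key value is a placeholder; the module path follows the package layout used by the test imports above):

```python
import os

os.environ["LAUNCH_DARKLY_SDK_KEY"] = "sdk-key-for-illustration"  # placeholder

from autogpt_libs.feature_flag.config import Settings

assert Settings().launch_darkly_sdk_key == "sdk-key-for-illustration"
```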
@@ -6,6 +6,7 @@ from pathlib import Path

from pydantic import Field, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

from .filters import BelowLevelFilter
from .formatters import AGPTFormatter, StructuredLoggingFormatter
@@ -1,8 +0,0 @@
from .store import SupabaseIntegrationCredentialsStore
from .types import APIKeyCredentials, OAuth2Credentials

__all__ = [
    "SupabaseIntegrationCredentialsStore",
    "APIKeyCredentials",
    "OAuth2Credentials",
]
@@ -1,145 +0,0 @@
import secrets
from datetime import datetime, timedelta, timezone
from typing import cast

from supabase import Client

from .types import (
    Credentials,
    OAuth2Credentials,
    OAuthState,
    UserMetadata,
    UserMetadataRaw,
)


class SupabaseIntegrationCredentialsStore:
    def __init__(self, supabase: Client):
        self.supabase = supabase

    def add_creds(self, user_id: str, credentials: Credentials) -> None:
        if self.get_creds_by_id(user_id, credentials.id):
            raise ValueError(
                f"Can not re-create existing credentials with ID {credentials.id} "
                f"for user with ID {user_id}"
            )
        self._set_user_integration_creds(
            user_id, [*self.get_all_creds(user_id), credentials]
        )

    def get_all_creds(self, user_id: str) -> list[Credentials]:
        user_metadata = self._get_user_metadata(user_id)
        return UserMetadata.model_validate(user_metadata).integration_credentials

    def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
        credentials = self.get_all_creds(user_id)
        return next((c for c in credentials if c.id == credentials_id), None)

    def get_creds_by_provider(self, user_id: str, provider: str) -> list[Credentials]:
        credentials = self.get_all_creds(user_id)
        return [c for c in credentials if c.provider == provider]

    def get_authorized_providers(self, user_id: str) -> list[str]:
        credentials = self.get_all_creds(user_id)
        return list(set(c.provider for c in credentials))

    def update_creds(self, user_id: str, updated: Credentials) -> None:
        current = self.get_creds_by_id(user_id, updated.id)
        if not current:
            raise ValueError(
                f"Credentials with ID {updated.id} "
                f"for user with ID {user_id} not found"
            )
        if type(current) is not type(updated):
            raise TypeError(
                f"Can not update credentials with ID {updated.id} "
                f"from type {type(current)} "
                f"to type {type(updated)}"
            )

        # Ensure no scopes are removed when updating credentials
        if (
            isinstance(updated, OAuth2Credentials)
            and isinstance(current, OAuth2Credentials)
            and not set(updated.scopes).issuperset(current.scopes)
        ):
            raise ValueError(
                f"Can not update credentials with ID {updated.id} "
                f"and scopes {current.scopes} "
                f"to more restrictive set of scopes {updated.scopes}"
            )

        # Update the credentials
        updated_credentials_list = [
            updated if c.id == updated.id else c for c in self.get_all_creds(user_id)
        ]
        self._set_user_integration_creds(user_id, updated_credentials_list)

    def delete_creds_by_id(self, user_id: str, credentials_id: str) -> None:
        filtered_credentials = [
            c for c in self.get_all_creds(user_id) if c.id != credentials_id
        ]
        self._set_user_integration_creds(user_id, filtered_credentials)

    async def store_state_token(self, user_id: str, provider: str) -> str:
        token = secrets.token_urlsafe(32)
        expires_at = datetime.now(timezone.utc) + timedelta(minutes=10)

        state = OAuthState(
            token=token, provider=provider, expires_at=int(expires_at.timestamp())
        )

        user_metadata = self._get_user_metadata(user_id)
        oauth_states = user_metadata.get("integration_oauth_states", [])
        oauth_states.append(state.model_dump())
        user_metadata["integration_oauth_states"] = oauth_states

        self.supabase.auth.admin.update_user_by_id(
            user_id, {"user_metadata": user_metadata}
        )

        return token

    async def verify_state_token(self, user_id: str, token: str, provider: str) -> bool:
        user_metadata = self._get_user_metadata(user_id)
        oauth_states = user_metadata.get("integration_oauth_states", [])

        now = datetime.now(timezone.utc)
        valid_state = next(
            (
                state
                for state in oauth_states
                if state["token"] == token
                and state["provider"] == provider
                and state["expires_at"] > now.timestamp()
            ),
            None,
        )

        if valid_state:
            # Remove the used state
            oauth_states.remove(valid_state)
            user_metadata["integration_oauth_states"] = oauth_states
            self.supabase.auth.admin.update_user_by_id(
                user_id, {"user_metadata": user_metadata}
            )
            return True

        return False

    def _set_user_integration_creds(
        self, user_id: str, credentials: list[Credentials]
    ) -> None:
        raw_metadata = self._get_user_metadata(user_id)
        raw_metadata.update(
            {"integration_credentials": [c.model_dump() for c in credentials]}
        )
        self.supabase.auth.admin.update_user_by_id(
            user_id, {"user_metadata": raw_metadata}
        )

    def _get_user_metadata(self, user_id: str) -> UserMetadataRaw:
        response = self.supabase.auth.admin.get_user_by_id(user_id)
        if not response.user:
            raise ValueError(f"User with ID {user_id} not found")
        return cast(UserMetadataRaw, response.user.user_metadata)
@@ -1,69 +0,0 @@
from typing import Annotated, Any, Literal, Optional, TypedDict
from uuid import uuid4

from pydantic import BaseModel, Field, SecretStr, field_serializer


class _BaseCredentials(BaseModel):
    id: str = Field(default_factory=lambda: str(uuid4()))
    provider: str
    title: Optional[str]

    @field_serializer("*")
    def dump_secret_strings(value: Any, _info):
        if isinstance(value, SecretStr):
            return value.get_secret_value()
        return value


class OAuth2Credentials(_BaseCredentials):
    type: Literal["oauth2"] = "oauth2"
    username: Optional[str]
    """Username of the third-party service user that these credentials belong to"""
    access_token: SecretStr
    access_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the access token expires (if at all)"""
    refresh_token: Optional[SecretStr]
    refresh_token_expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the refresh token expires (if at all)"""
    scopes: list[str]
    metadata: dict[str, Any] = Field(default_factory=dict)

    def bearer(self) -> str:
        return f"Bearer {self.access_token.get_secret_value()}"


class APIKeyCredentials(_BaseCredentials):
    type: Literal["api_key"] = "api_key"
    api_key: SecretStr
    expires_at: Optional[int]
    """Unix timestamp (seconds) indicating when the API key expires (if at all)"""

    def bearer(self) -> str:
        return f"Bearer {self.api_key.get_secret_value()}"


Credentials = Annotated[
    OAuth2Credentials | APIKeyCredentials,
    Field(discriminator="type"),
]


CredentialsType = Literal["api_key", "oauth2"]


class OAuthState(BaseModel):
    token: str
    provider: str
    expires_at: int
    """Unix timestamp (seconds) indicating when this OAuth state expires"""


class UserMetadata(BaseModel):
    integration_credentials: list[Credentials] = Field(default_factory=list)
    integration_oauth_states: list[OAuthState] = Field(default_factory=list)


class UserMetadataRaw(TypedDict, total=False):
    integration_credentials: list[dict]
    integration_oauth_states: list[dict]
20 autogpt_platform/autogpt_libs/autogpt_libs/utils/cache.py Normal file
@@ -0,0 +1,20 @@
import threading
from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")


def thread_cached(func: Callable[P, R]) -> Callable[P, R]:
    thread_local = threading.local()

    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        cache = getattr(thread_local, "cache", None)
        if cache is None:
            cache = thread_local.cache = {}
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapper
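A small usage sketch for `thread_cached` above (the expensive function and counter are made up for illustration): each thread gets its own memo table, so the wrapped call runs once per distinct argument tuple per thread.

```python
import threading

call_count = 0


@thread_cached
def expensive_lookup(name: str) -> str:  # hypothetical expensive function
    global call_count
    call_count += 1
    return name.upper()


expensive_lookup("alpha")
expensive_lookup("alpha")  # served from this thread's cache
assert call_count == 1

t = threading.Thread(target=lambda: expensive_lookup("alpha"))
t.start()
t.join()  # a new thread recomputes: the cache is per-thread
assert call_count == 2
```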
@@ -0,0 +1,56 @@
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any

from expiringdict import ExpiringDict

if TYPE_CHECKING:
    from redis import Redis
    from redis.lock import Lock as RedisLock


class RedisKeyedMutex:
    """
    This class provides a mutex that can be locked and unlocked by a specific key,
    using Redis as a distributed locking provider.
    It uses an ExpiringDict to automatically clear the mutex after a specified timeout,
    in case the key is not unlocked for a specified duration, to prevent memory leaks.
    """

    def __init__(self, redis: "Redis", timeout: int | None = 60):
        self.redis = redis
        self.timeout = timeout
        self.locks: dict[Any, "RedisLock"] = ExpiringDict(
            max_len=6000, max_age_seconds=self.timeout
        )
        self.locks_lock = Lock()

    @contextmanager
    def locked(self, key: Any):
        lock = self.acquire(key)
        try:
            yield
        finally:
            lock.release()

    def acquire(self, key: Any) -> "RedisLock":
        """Acquires and returns a lock with the given key"""
        with self.locks_lock:
            if key not in self.locks:
                self.locks[key] = self.redis.lock(
                    str(key), self.timeout, thread_local=False
                )
            lock = self.locks[key]
        lock.acquire()
        return lock

    def release(self, key: Any):
        if lock := self.locks.get(key):
            lock.release()

    def release_all_locks(self):
        """Call this on process termination to ensure all locks are released"""
        self.locks_lock.acquire(blocking=False)
        for lock in self.locks.values():
            if lock.locked() and lock.owned():
                lock.release()
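A usage sketch for `RedisKeyedMutex`, assuming a local Redis instance (host, port, and the key are illustrative). The context-manager form is the safe default, since it releases the lock even if the body raises; other processes sharing the same Redis block on the same key until release or timeout.

```python
from redis import Redis

redis = Redis(host="localhost", port=6379)  # assumed local instance
mutex = RedisKeyedMutex(redis, timeout=60)

# Exclusive section keyed by, e.g., a user ID.
with mutex.locked(("user", "example-user-id")):
    ...  # critical section

# On shutdown, make sure nothing stays locked.
mutex.release_all_locks()
```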
440 autogpt_platform/autogpt_libs/poetry.lock generated
@@ -377,6 +377,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]

[[package]]
name = "expiringdict"
version = "1.2.2"
description = "Dictionary with auto-expiring values for caching purposes"
optional = false
python-versions = "*"
files = [
    {file = "expiringdict-1.2.2-py3-none-any.whl", hash = "sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8"},
    {file = "expiringdict-1.2.2.tar.gz", hash = "sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09"},
]

[package.extras]
tests = ["coverage", "coveralls", "dill", "mock", "nose"]

[[package]]
name = "frozenlist"
version = "1.4.1"
@@ -569,13 +583,13 @@ grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]

[[package]]
name = "google-cloud-logging"
version = "3.11.2"
version = "3.11.3"
description = "Stackdriver Logging API client library"
optional = false
python-versions = ">=3.7"
files = [
    {file = "google_cloud_logging-3.11.2-py2.py3-none-any.whl", hash = "sha256:0a755f04f184fbe77ad608258dc283a032485ebb4d0e2b2501964059ee9c898f"},
    {file = "google_cloud_logging-3.11.2.tar.gz", hash = "sha256:4897441c2b74f6eda9181c23a8817223b6145943314a821d64b729d30766cb2b"},
    {file = "google_cloud_logging-3.11.3-py2.py3-none-any.whl", hash = "sha256:b8ec23f2998f76a58f8492db26a0f4151dd500425c3f08448586b85972f3c494"},
    {file = "google_cloud_logging-3.11.3.tar.gz", hash = "sha256:0a73cd94118875387d4535371d9e9426861edef8e44fba1261e86782d5b8d54f"},
]

[package.dependencies]
@@ -612,17 +626,17 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]

[[package]]
name = "gotrue"
version = "2.8.1"
version = "2.10.0"
description = "Python Client Library for Supabase Auth"
optional = false
python-versions = "<4.0,>=3.8"
python-versions = "<4.0,>=3.9"
files = [
    {file = "gotrue-2.8.1-py3-none-any.whl", hash = "sha256:97dff077d71cca629f046c35ba34fae132b69c55fe271651766ddcf6d8132468"},
    {file = "gotrue-2.8.1.tar.gz", hash = "sha256:644d0096c4c390f7e36d9cb05271a7091c01e7dc6d506eb117b8fe8fc48eb8d9"},
    {file = "gotrue-2.10.0-py3-none-any.whl", hash = "sha256:768e58207488e5184ffbdc4351b7280d913daf97962f4e9f2cca05c80004b042"},
    {file = "gotrue-2.10.0.tar.gz", hash = "sha256:4edf4c251da3535f2b044e23deba221e848ca1210c17d0c7a9b19f79a1e3f3c0"},
]

[package.dependencies]
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
pydantic = ">=1.10,<3"

[[package]]
@@ -840,6 +854,17 @@ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linke
perf = ["ipython"]
test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]

[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
    {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
    {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]

[[package]]
name = "multidict"
version = "6.1.0"
@@ -970,22 +995,37 @@ files = [
    {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]

[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
    {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]

[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]

[[package]]
name = "postgrest"
version = "0.16.11"
version = "0.18.0"
description = "PostgREST client for Python. This library provides an ORM interface to PostgREST."
optional = false
python-versions = "<4.0,>=3.8"
python-versions = "<4.0,>=3.9"
files = [
    {file = "postgrest-0.16.11-py3-none-any.whl", hash = "sha256:22fb6b817ace1f68aa648fd4ce0f56d2786c9260fa4ed2cb9046191231a682b8"},
    {file = "postgrest-0.16.11.tar.gz", hash = "sha256:10af51b4c39e288ad7df2db92d6a61fb3c4683131b40561f473e3de116e83fa5"},
    {file = "postgrest-0.18.0-py3-none-any.whl", hash = "sha256:200baad0d23fee986b3a0ffd3e07bfe0cdd40e09760f11e8e13a6c0c2376d5fa"},
    {file = "postgrest-0.18.0.tar.gz", hash = "sha256:29c1a94801a17eb9ad590189993fe5a7a6d8c1bfc11a3c9d0ce7ba146454ebb3"},
]

[package.dependencies]
deprecation = ">=2.1.0,<3.0.0"
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
pydantic = ">=1.9,<3.0"
strenum = ">=0.4.9,<0.5.0"
strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""}

[[package]]
name = "proto-plus"
@@ -1031,6 +1071,7 @@ description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs
optional = false
python-versions = ">=3.8"
files = [
    {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"},
    {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
]

@@ -1041,6 +1082,7 @@ description = "A collection of ASN.1-based protocols modules"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"},
    {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"},
]

@@ -1049,22 +1091,19 @@ pyasn1 = ">=0.4.6,<0.7.0"

[[package]]
name = "pydantic"
version = "2.9.1"
version = "2.10.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"},
    {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"},
    {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"},
    {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"},
]

[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.23.3"
typing-extensions = [
    {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
    {version = ">=4.6.1", markers = "python_version < \"3.13\""},
]
pydantic-core = "2.27.1"
typing-extensions = ">=4.12.2"

[package.extras]
email = ["email-validator (>=2.0.0)"]
@@ -1072,100 +1111,111 @@ timezone = ["tzdata"]

[[package]]
name = "pydantic-core"
version = "2.23.3"
version = "2.27.1"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
    {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"},
    {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"},
    {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"},
    {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"},
    {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"},
    {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"},
    {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"},
    {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"},
    {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"},
    {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"},
    {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"},
    {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"},
    {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"},
    {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"},
    {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"},
    {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"},
    {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"},
    {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"},
    {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"},
    {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"},
    {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"},
    {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"},
    {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"},
    {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"},
    {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"},
    {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"},
    {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"},
    {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"},
    {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"},
    {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"},
    {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"},
    {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"},
    {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"},
    {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"},
    {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"},
    {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"},
    {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"},
    {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"},
    {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"},
    {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"},
    {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"},
    {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"},
    {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"},
|
||||
{file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"},
|
||||
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"},
|
||||
{file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
|
||||
{file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
|
||||
{file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
|
||||
{file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
|
||||
{file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
|
||||
{file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
|
||||
{file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
|
||||
{file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
|
||||
{file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
|
||||
{file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
|
||||
{file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
|
||||
{file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
|
||||
{file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
|
||||
{file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
|
||||
{file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
|
||||
{file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
|
||||
{file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
|
||||
{file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
|
||||
{file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
|
||||
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
|
||||
{file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
|
||||
{file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
|
||||
{file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
|
||||
{file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
|
||||
{file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
@@ -1173,13 +1223,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

[[package]]
name = "pydantic-settings"
version = "2.5.2"
version = "2.6.1"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_settings-2.5.2-py3-none-any.whl", hash = "sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907"},
{file = "pydantic_settings-2.5.2.tar.gz", hash = "sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0"},
{file = "pydantic_settings-2.6.1-py3-none-any.whl", hash = "sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87"},
{file = "pydantic_settings-2.6.1.tar.gz", hash = "sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0"},
]

[package.dependencies]
@@ -1193,13 +1243,13 @@ yaml = ["pyyaml (>=6.0.1)"]

[[package]]
name = "pyjwt"
version = "2.9.0"
version = "2.10.0"
description = "JSON Web Token implementation in Python"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
files = [
{file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"},
{file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"},
{file = "PyJWT-2.10.0-py3-none-any.whl", hash = "sha256:543b77207db656de204372350926bed5a86201c4cbff159f623f79c7bb487a15"},
{file = "pyjwt-2.10.0.tar.gz", hash = "sha256:7628a7eb7938959ac1b26e819a1df0fd3259505627b575e4bad6d08f76db695c"},
]

[package.extras]
@@ -1208,6 +1258,63 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]

[[package]]
name = "pytest"
version = "8.3.3"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"},
{file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"},
]

[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=1.5,<2"
tomli = {version = ">=1", markers = "python_version < \"3.11\""}

[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]

[[package]]
name = "pytest-asyncio"
version = "0.24.0"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"},
{file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"},
]

[package.dependencies]
pytest = ">=8.2,<9"

[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]

[[package]]
name = "pytest-mock"
version = "3.14.0"
description = "Thin-wrapper around the mock package for easier use with pytest"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
{file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
]

[package.dependencies]
pytest = ">=6.2.5"

[package.extras]
dev = ["pre-commit", "pytest-asyncio", "tox"]

[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -1253,6 +1360,24 @@ python-dateutil = ">=2.8.1,<3.0.0"
typing-extensions = ">=4.12.2,<5.0.0"
websockets = ">=11,<13"

[[package]]
name = "redis"
version = "5.2.0"
description = "Python client for Redis database and key-value store"
optional = false
python-versions = ">=3.8"
files = [
{file = "redis-5.2.0-py3-none-any.whl", hash = "sha256:ae174f2bb3b1bf2b09d54bf3e51fbc1469cf6c10aa03e21141f51969801a7897"},
{file = "redis-5.2.0.tar.gz", hash = "sha256:0b1087665a771b1ff2e003aa5bdd354f15a70c9e25d5a7dbf9c722c16528a7b0"},
]

[package.dependencies]
async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""}

[package.extras]
hiredis = ["hiredis (>=3.0.0)"]
ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==23.2.1)", "requests (>=2.31.0)"]

[[package]]
name = "requests"
version = "2.32.3"
@@ -1288,6 +1413,33 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"

[[package]]
name = "ruff"
version = "0.8.0"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.8.0-py3-none-linux_armv6l.whl", hash = "sha256:fcb1bf2cc6706adae9d79c8d86478677e3bbd4ced796ccad106fd4776d395fea"},
{file = "ruff-0.8.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:295bb4c02d58ff2ef4378a1870c20af30723013f441c9d1637a008baaf928c8b"},
{file = "ruff-0.8.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7b1f1c76b47c18fa92ee78b60d2d20d7e866c55ee603e7d19c1e991fad933a9a"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb0d4f250a7711b67ad513fde67e8870109e5ce590a801c3722580fe98c33a99"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e55cce9aa93c5d0d4e3937e47b169035c7e91c8655b0974e61bb79cf398d49c"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f4cd64916d8e732ce6b87f3f5296a8942d285bbbc161acee7fe561134af64f9"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c5c1466be2a2ebdf7c5450dd5d980cc87c8ba6976fb82582fea18823da6fa362"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2dabfd05b96b7b8f2da00d53c514eea842bff83e41e1cceb08ae1966254a51df"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:facebdfe5a5af6b1588a1d26d170635ead6892d0e314477e80256ef4a8470cf3"},
{file = "ruff-0.8.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87a8e86bae0dbd749c815211ca11e3a7bd559b9710746c559ed63106d382bd9c"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:85e654f0ded7befe2d61eeaf3d3b1e4ef3894469cd664ffa85006c7720f1e4a2"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:83a55679c4cb449fa527b8497cadf54f076603cc36779b2170b24f704171ce70"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:812e2052121634cf13cd6fddf0c1871d0ead1aad40a1a258753c04c18bb71bbd"},
{file = "ruff-0.8.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:780d5d8523c04202184405e60c98d7595bdb498c3c6abba3b6d4cdf2ca2af426"},
{file = "ruff-0.8.0-py3-none-win32.whl", hash = "sha256:5fdb6efecc3eb60bba5819679466471fd7d13c53487df7248d6e27146e985468"},
{file = "ruff-0.8.0-py3-none-win_amd64.whl", hash = "sha256:582891c57b96228d146725975fbb942e1f30a0c4ba19722e692ca3eb25cc9b4f"},
{file = "ruff-0.8.0-py3-none-win_arm64.whl", hash = "sha256:ba93e6294e9a737cd726b74b09a6972e36bb511f9a102f1d9a7e1ce94dd206a6"},
{file = "ruff-0.8.0.tar.gz", hash = "sha256:a7ccfe6331bf8c8dad715753e157457faf7351c2b69f62f32c165c2dbcbacd44"},
]

[[package]]
name = "six"
version = "1.16.0"
@@ -1312,19 +1464,18 @@ files = [

[[package]]
name = "storage3"
version = "0.7.7"
version = "0.9.0"
description = "Supabase Storage client for Python."
optional = false
python-versions = "<4.0,>=3.8"
python-versions = "<4.0,>=3.9"
files = [
{file = "storage3-0.7.7-py3-none-any.whl", hash = "sha256:ed80a2546cd0b5c22e2c30ea71096db6c99268daf2958c603488e7d72efb8426"},
{file = "storage3-0.7.7.tar.gz", hash = "sha256:9fba680cf761d139ad764f43f0e91c245d1ce1af2cc3afe716652f835f48f83e"},
{file = "storage3-0.9.0-py3-none-any.whl", hash = "sha256:8b2fb91f0c61583a2f4eac74a8bae67e00d41ff38095c8a6cd3f2ce5e0ab76e7"},
{file = "storage3-0.9.0.tar.gz", hash = "sha256:e16697f60894c94e1d9df0d2e4af783c1b3f7dd08c9013d61978825c624188c4"},
]

[package.dependencies]
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}
python-dateutil = ">=2.8.2,<3.0.0"
typing-extensions = ">=4.2.0,<5.0.0"

[[package]]
name = "strenum"
@@ -1344,36 +1495,47 @@ test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"]

[[package]]
name = "supabase"
version = "2.7.4"
version = "2.10.0"
description = "Supabase client for Python."
optional = false
python-versions = "<4.0,>=3.9"
files = [
{file = "supabase-2.7.4-py3-none-any.whl", hash = "sha256:01815fbc30cac753933d4a44a2529fd13cb7634b56c705c65b12a02c8e75982b"},
{file = "supabase-2.7.4.tar.gz", hash = "sha256:5a979c7711b3c5ce688514fa0afc015780522569494e1a9a9d25d03b7c3d654b"},
{file = "supabase-2.10.0-py3-none-any.whl", hash = "sha256:183fb23c04528593f8f81c24ceb8178f3a56bff40fec7ed873b6c55ebc2e420a"},
{file = "supabase-2.10.0.tar.gz", hash = "sha256:9ac095f8947bf60780e67c0edcbab53e2db3f6f3f022329397b093500bf2607c"},
]

[package.dependencies]
gotrue = ">=1.3,<3.0"
httpx = ">=0.24,<0.28"
postgrest = ">=0.14,<0.17.0"
gotrue = ">=2.10.0,<3.0.0"
httpx = ">=0.26,<0.28"
postgrest = ">=0.18,<0.19"
realtime = ">=2.0.0,<3.0.0"
storage3 = ">=0.5.3,<0.8.0"
supafunc = ">=0.3.1,<0.6.0"
storage3 = ">=0.9.0,<0.10.0"
supafunc = ">=0.7.0,<0.8.0"

[[package]]
name = "supafunc"
version = "0.5.1"
version = "0.7.0"
description = "Library for Supabase Functions"
optional = false
python-versions = "<4.0,>=3.8"
python-versions = "<4.0,>=3.9"
files = [
{file = "supafunc-0.5.1-py3-none-any.whl", hash = "sha256:b05e99a2b41270211a3f90ec843c04c5f27a5618f2d2d2eb8e07f41eb962a910"},
{file = "supafunc-0.5.1.tar.gz", hash = "sha256:1ae9dce6bd935939c561650e86abb676af9665ecf5d4ffc1c7ec3c4932c84334"},
{file = "supafunc-0.7.0-py3-none-any.whl", hash = "sha256:4160260dc02bdd906be1e2ffd7cb3ae8b74ae437c892bb475352b6a99d9ff8eb"},
{file = "supafunc-0.7.0.tar.gz", hash = "sha256:5b1c415fba1395740b2b4eedd1d786384bd58b98f6333a11ba7889820a48b6a7"},
]

[package.dependencies]
httpx = {version = ">=0.24,<0.28", extras = ["http2"]}
httpx = {version = ">=0.26,<0.28", extras = ["http2"]}

[[package]]
name = "tomli"
version = "2.1.0"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
{file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
]

[[package]]
name = "typing-extensions"
@@ -1690,4 +1852,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
content-hash = "e9b6e5d877eeb9c9f1ebc69dead1985d749facc160afbe61f3bf37e9a6e35aa5"
content-hash = "54bf6e076ec4d09be2307f07240018459dd6594efdc55a2dc2dc1d673184587e"

@@ -8,14 +8,27 @@ packages = [{ include = "autogpt_libs" }]

[tool.poetry.dependencies]
colorama = "^0.4.6"
google-cloud-logging = "^3.8.0"
pydantic = "^2.8.2"
pydantic-settings = "^2.5.2"
pyjwt = "^2.8.0"
expiringdict = "^1.2.2"
google-cloud-logging = "^3.11.3"
pydantic = "^2.10.2"
pydantic-settings = "^2.6.1"
pyjwt = "^2.10.0"
pytest-asyncio = "^0.24.0"
pytest-mock = "^3.14.0"
python = ">=3.10,<4.0"
python-dotenv = "^1.0.1"
supabase = "^2.7.2"
supabase = "^2.10.0"

[tool.poetry.group.dev.dependencies]
redis = "^5.2.0"
ruff = "^0.8.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.ruff]
line-length = 88

[tool.ruff.lint]
extend-select = ["I"] # sort dependencies

@@ -7,24 +7,37 @@ PRISMA_SCHEMA="postgres/schema.prisma"

BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

# generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='

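# Example: the same one-liner, runnable from a shell (assumes Python and the
# `cryptography` package are installed):
#   python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
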
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=password

ENABLE_CREDIT=false
APP_ENV="local"
# What environment things should be logged under: local dev or prod
APP_ENV=local
# What environment to behave as: "local" or "cloud"
BEHAVE_AS=local
PYRO_HOST=localhost
SENTRY_DSN=

## User auth with Supabase is required for any of the 3rd party integrations with auth to work.
ENABLE_AUTH=false
ENABLE_AUTH=true
SUPABASE_URL=http://localhost:8000
SUPABASE_SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long

# For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow for integrations to work.
## For local development, you may need to set FRONTEND_BASE_URL for the OAuth flow
## for integrations to work. Defaults to the value of PLATFORM_BASE_URL if not set.
# FRONTEND_BASE_URL=http://localhost:3000

## PLATFORM_BASE_URL must be set to a *publicly accessible* URL pointing to your backend
## to use the platform's webhook-related functionality.
## If you are developing locally, you can use something like ngrok to get a public URL
## and tunnel it to your locally running backend.
PLATFORM_BASE_URL=https://your-public-url-here

## == INTEGRATION CREDENTIALS == ##
# Each set of server side credentials is required for the corresponding 3rd party
# integration to work.
@@ -36,12 +49,22 @@ SUPABASE_JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=

# Google OAuth App server credentials - https://console.cloud.google.com/apis/credentials, and enable gmail api and set scopes
# https://console.cloud.google.com/apis/credentials/consent ?project=<your_project_id>

# You'll need to add/enable the following scopes (minimum):
# https://console.developers.google.com/apis/api/gmail.googleapis.com/overview ?project=<your_project_id>
# https://console.cloud.google.com/apis/library/sheets.googleapis.com/ ?project=<your_project_id>
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=

## ===== OPTIONAL API KEYS ===== ##

# LLM
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
GROQ_API_KEY=
OPEN_ROUTER_API_KEY=

# Reddit
REDDIT_CLIENT_ID=
@@ -80,6 +103,9 @@ GOOGLE_MAPS_API_KEY=
# Replicate
REPLICATE_API_KEY=

# Ideogram
IDEOGRAM_API_KEY=

# Logging Configuration
LOG_LEVEL=INFO
ENABLE_CLOUD_LOGGING=false

autogpt_platform/backend/.gitignore (vendored, 5 changes)
@@ -5,4 +5,7 @@ dev.db-journal
build/
config.json
secrets/*
!secrets/.gitkeep
!secrets/.gitkeep

*.ignore.*
*.ign.*

@@ -1,4 +1,4 @@
FROM python:3.11-slim-buster AS builder
FROM python:3.11.10-slim-bookworm AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
@@ -8,7 +8,7 @@ WORKDIR /app

# Install build dependencies
RUN apt-get update \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev gettext libz-dev libssl-dev postgresql-client git \
    && apt-get install -y build-essential curl ffmpeg wget libcurl4-gnutls-dev libexpat1-dev libpq5 gettext libz-dev libssl-dev postgresql-client git \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

@@ -35,7 +35,7 @@ COPY autogpt_platform/backend/schema.prisma ./
RUN poetry config virtualenvs.create false \
    && poetry run prisma generate

FROM python:3.11-slim-buster AS server_dependencies
FROM python:3.11.10-slim-bookworm AS server_dependencies

WORKDIR /app


@@ -37,7 +37,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
5. Generate the Prisma client

   ```sh
   poetry run prisma generate --schema postgres/schema.prisma
   poetry run prisma generate
   ```


@@ -61,7 +61,7 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes

   ```sh
   cd ../backend
   prisma migrate dev --schema postgres/schema.prisma
   prisma migrate deploy
   ```

## Running The Server

@@ -58,17 +58,18 @@ We use the Poetry to manage the dependencies. To set up the project, follow thes
6. Migrate the database. Be careful because this deletes current data in the database.

   ```sh
   docker compose up db redis -d
   poetry run prisma migrate dev
   docker compose up db -d
   poetry run prisma migrate deploy
   ```

## Running The Server

### Starting the server without Docker

Run the following command to build the dockerfiles:
Run the following command to run the database in Docker but the application locally:

```sh
docker compose --profile local up deps --build --detach
poetry run app
```


@@ -24,10 +24,12 @@ def main(**kwargs):
    Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
    """

    from backend.executor import ExecutionManager, ExecutionScheduler
    from backend.server import AgentServer, WebsocketServer
    from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
    from backend.server.rest_api import AgentServer
    from backend.server.ws_api import WebsocketServer

    run_processes(
        DatabaseManager(),
        ExecutionManager(),
        ExecutionScheduler(),
        WebsocketServer(),

@@ -2,6 +2,7 @@ import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar

from backend.data.block import Block

@@ -24,28 +25,31 @@ for module in modules:
    AVAILABLE_MODULES.append(module)

# Load all Block instances from the available modules
AVAILABLE_BLOCKS = {}
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}


def all_subclasses(clz):
    subclasses = clz.__subclasses__()
T = TypeVar("T")


def all_subclasses(cls: Type[T]) -> list[Type[T]]:
    subclasses = cls.__subclasses__()
    for subclass in subclasses:
        subclasses += all_subclasses(subclass)
    return subclasses


for cls in all_subclasses(Block):
    name = cls.__name__
for block_cls in all_subclasses(Block):
    name = block_cls.__name__

    if cls.__name__.endswith("Base"):
    if block_cls.__name__.endswith("Base"):
        continue

    if not cls.__name__.endswith("Block"):
    if not block_cls.__name__.endswith("Block"):
        raise ValueError(
            f"Block class {cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
            f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
        )

    block = cls()
    block = block_cls.create()

    if not isinstance(block.id, str) or len(block.id) != 36:
        raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
@@ -53,15 +57,26 @@ for cls in all_subclasses(Block):
    if block.id in AVAILABLE_BLOCKS:
        raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")

    # Prevent duplicate field name in input_schema and output_schema
    duplicate_field_names = set(block.input_schema.model_fields.keys()) & set(
        block.output_schema.model_fields.keys()
    )
    if duplicate_field_names:
    input_schema = block.input_schema.model_fields
    output_schema = block.output_schema.model_fields

    # Make sure `error` field is a string in the output schema
    if "error" in output_schema and output_schema["error"].annotation is not str:
        raise ValueError(
            f"{block.name} has duplicate field names in input_schema and output_schema: {duplicate_field_names}"
            f"{block.name} `error` field in output_schema must be a string"
        )

    # Make sure all fields in input_schema and output_schema are annotated and has a value
    for field_name, field in [*input_schema.items(), *output_schema.items()]:
        if field.annotation is None:
            raise ValueError(
                f"{block.name} has a field {field_name} that is not annotated"
            )
        if field.json_schema_extra is None:
            raise ValueError(
                f"{block.name} has a field {field_name} not defined as SchemaField"
            )

    for field in block.input_schema.model_fields.values():
        if field.annotation is bool and field.default not in (True, False):
            raise ValueError(f"{block.name} has a boolean field with no default value")
@@ -69,6 +84,6 @@ for cls in all_subclasses(Block):
    if block.disabled:
        continue

    AVAILABLE_BLOCKS[block.id] = block
    AVAILABLE_BLOCKS[block.id] = block_cls

__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]

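The hunk above switches the registry from storing instances (`block = cls()`) to storing classes (`block_cls.create()` for validation, `block_cls` in the map). A minimal standalone sketch of this discovery-and-validation pattern; the class names and id here are illustrative stand-ins, not the real backend types:

```python
from typing import Type, TypeVar

T = TypeVar("T")


def all_subclasses(cls: Type[T]) -> list[Type[T]]:
    # Same recursive walk as in the diff: direct and indirect subclasses.
    subclasses = cls.__subclasses__()
    for subclass in subclasses:
        subclasses += all_subclasses(subclass)
    return subclasses


class BlockBase:  # stand-in for backend.data.block.Block
    id = ""


class GreeterBlock(BlockBase):
    id = "11111111-1111-1111-1111-111111111111"


registry: dict[str, Type[BlockBase]] = {}
for block_cls in all_subclasses(BlockBase):
    if block_cls.__name__.endswith("Base"):
        continue  # abstract helpers are skipped, as in the check above
    if not block_cls.__name__.endswith("Block"):
        raise ValueError(f"{block_cls.__name__} must end with 'Block'")
    if block_cls.id in registry:
        raise ValueError(f"duplicate block id {block_cls.id}")
    registry[block_cls.id] = block_cls  # register the class, not an instance

print(registry)  # {'11111111-1111-1111-1111-111111111111': <class 'GreeterBlock'>}
```
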
autogpt_platform/backend/backend/blocks/agent.py (new file, 100 lines)
@@ -0,0 +1,100 @@
import logging

from autogpt_libs.utils.cache import thread_cached

from backend.data.block import (
    Block,
    BlockCategory,
    BlockInput,
    BlockOutput,
    BlockSchema,
    BlockType,
    get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField

logger = logging.getLogger(__name__)


@thread_cached
def get_executor_manager_client():
    from backend.executor import ExecutionManager
    from backend.util.service import get_service_client

    return get_service_client(ExecutionManager)


@thread_cached
def get_event_bus():
    from backend.data.execution import RedisExecutionEventBus

    return RedisExecutionEventBus()


class AgentExecutorBlock(Block):
    class Input(BlockSchema):
        user_id: str = SchemaField(description="User ID")
        graph_id: str = SchemaField(description="Graph ID")
        graph_version: int = SchemaField(description="Graph Version")

        data: BlockInput = SchemaField(description="Input data for the graph")
        input_schema: dict = SchemaField(description="Input schema for the graph")
        output_schema: dict = SchemaField(description="Output schema for the graph")

    class Output(BlockSchema):
        pass

    def __init__(self):
        super().__init__(
            id="e189baac-8c20-45a1-94a7-55177ea42565",
            description="Executes an existing agent inside your agent",
            input_schema=AgentExecutorBlock.Input,
            output_schema=AgentExecutorBlock.Output,
            block_type=BlockType.AGENT,
            categories={BlockCategory.AGENT},
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        executor_manager = get_executor_manager_client()
        event_bus = get_event_bus()

        graph_exec = executor_manager.add_execution(
            graph_id=input_data.graph_id,
            graph_version=input_data.graph_version,
            user_id=input_data.user_id,
            data=input_data.data,
        )
        log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
        logger.info(f"Starting execution of {log_id}")

        for event in event_bus.listen(
            graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
        ):
            logger.info(
                f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
            )

            if not event.node_id:
                if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
                    logger.info(f"Execution {log_id} ended with status {event.status}")
                    break
                else:
                    continue

            if not event.block_id:
                logger.warning(f"{log_id} received event without block_id {event}")
                continue

            block = get_block(event.block_id)
            if not block or block.block_type != BlockType.OUTPUT:
                continue

            output_name = event.input_data.get("name")
            if not output_name:
                logger.warning(f"{log_id} produced an output with no name {event}")
                continue

            for output_data in event.output_data.get("output", []):
                logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
                yield output_name, output_data
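
Note that `run` above is a generator: every `Output`-block event of the child graph is re-emitted as an `(output_name, output_data)` pair. A hypothetical caller (the executor wiring around it is assumed, not part of this diff) would drain it like any iterator:

```python
# Hypothetical usage sketch; the ids and payload shapes are made-up examples.
block = AgentExecutorBlock()
input_data = AgentExecutorBlock.Input(
    user_id="user-123",
    graph_id="graph-abc",
    graph_version=1,
    data={},  # BlockInput payload forwarded to the child graph
    input_schema={},
    output_schema={},
)
for output_name, output_data in block.run(input_data):
    print(f"{output_name}: {output_data}")
```
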
@@ -0,0 +1,326 @@
from enum import Enum
from typing import Literal

import replicate
from pydantic import SecretStr
from replicate.helpers import FileOutput

from backend.data.block import Block, BlockCategory, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)


class ImageSize(str, Enum):
    """
    Semantic sizes that map reliably across all models
    """

    SQUARE = "square"  # For profile pictures, icons, etc.
    LANDSCAPE = "landscape"  # For traditional photos, scenes
    PORTRAIT = "portrait"  # For vertical photos, portraits
    WIDE = "wide"  # For cinematic, desktop wallpapers
    TALL = "tall"  # For mobile wallpapers, stories


# Mapping semantic sizes to model-specific formats
SIZE_TO_SD_RATIO = {
    ImageSize.SQUARE: "1:1",
    ImageSize.LANDSCAPE: "4:3",
    ImageSize.PORTRAIT: "3:4",
    ImageSize.WIDE: "16:9",
    ImageSize.TALL: "9:16",
}

SIZE_TO_FLUX_RATIO = {
    ImageSize.SQUARE: "1:1",
    ImageSize.LANDSCAPE: "4:3",
    ImageSize.PORTRAIT: "3:4",
    ImageSize.WIDE: "16:9",
    ImageSize.TALL: "9:16",
}

SIZE_TO_FLUX_DIMENSIONS = {
    ImageSize.SQUARE: (1024, 1024),
    ImageSize.LANDSCAPE: (1365, 1024),
    ImageSize.PORTRAIT: (1024, 1365),
    ImageSize.WIDE: (1440, 810),  # Adjusted to maintain 16:9 within 1440 limit
    ImageSize.TALL: (810, 1440),  # Adjusted to maintain 9:16 within 1440 limit
}

SIZE_TO_RECRAFT_DIMENSIONS = {
    ImageSize.SQUARE: "1024x1024",
    ImageSize.LANDSCAPE: "1365x1024",
    ImageSize.PORTRAIT: "1024x1365",
    ImageSize.WIDE: "1536x1024",
    ImageSize.TALL: "1024x1536",
}

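The four tables above normalize one semantic size across providers; for example, `ImageSize.WIDE` resolves per model as follows (values taken directly from the mappings above):

```python
size = ImageSize.WIDE
SIZE_TO_SD_RATIO[size]            # "16:9"        (Stable Diffusion aspect ratio)
SIZE_TO_FLUX_DIMENSIONS[size]     # (1440, 810)   (explicit Flux pixel dimensions)
SIZE_TO_RECRAFT_DIMENSIONS[size]  # "1536x1024"   (Recraft size string)
```
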
class ImageStyle(str, Enum):
    """
    Complete set of supported styles
    """

    ANY = "any"
    # Realistic image styles
    REALISTIC = "realistic_image"
    REALISTIC_BW = "realistic_image/b_and_w"
    REALISTIC_HDR = "realistic_image/hdr"
    REALISTIC_NATURAL = "realistic_image/natural_light"
    REALISTIC_STUDIO = "realistic_image/studio_portrait"
    REALISTIC_ENTERPRISE = "realistic_image/enterprise"
    REALISTIC_HARD_FLASH = "realistic_image/hard_flash"
    REALISTIC_MOTION_BLUR = "realistic_image/motion_blur"
    # Digital illustration styles
    DIGITAL_ART = "digital_illustration"
    PIXEL_ART = "digital_illustration/pixel_art"
    HAND_DRAWN = "digital_illustration/hand_drawn"
    GRAIN = "digital_illustration/grain"
    SKETCH = "digital_illustration/infantile_sketch"
    POSTER = "digital_illustration/2d_art_poster"
    POSTER_2 = "digital_illustration/2d_art_poster_2"
    HANDMADE_3D = "digital_illustration/handmade_3d"
    HAND_DRAWN_OUTLINE = "digital_illustration/hand_drawn_outline"
    ENGRAVING_COLOR = "digital_illustration/engraving_color"


class ImageGenModel(str, Enum):
    """
    Available model providers
    """

    FLUX = "Flux 1.1 Pro"
    FLUX_ULTRA = "Flux 1.1 Pro Ultra"
    RECRAFT = "Recraft v3"
    SD3_5 = "Stable Diffusion 3.5 Medium"


class AIImageGeneratorBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[Literal["replicate"], Literal["api_key"]] = (
            CredentialsField(
                provider="replicate",
                supported_credential_types={"api_key"},
                description="Enter your Replicate API key to access the image generation API. You can obtain an API key from https://replicate.com/account/api-tokens.",
            )
        )
        prompt: str = SchemaField(
            description="Text prompt for image generation",
            placeholder="e.g., 'A red panda using a laptop in a snowy forest'",
            title="Prompt",
        )
        model: ImageGenModel = SchemaField(
            description="The AI model to use for image generation",
            default=ImageGenModel.SD3_5,
            title="Model",
        )
        size: ImageSize = SchemaField(
            description=(
                "Format of the generated image:\n"
                "- Square: Perfect for profile pictures, icons\n"
                "- Landscape: Traditional photo format\n"
                "- Portrait: Vertical photos, portraits\n"
                "- Wide: Cinematic format, desktop wallpapers\n"
                "- Tall: Mobile wallpapers, social media stories"
            ),
            default=ImageSize.SQUARE,
            title="Image Format",
        )
        style: ImageStyle = SchemaField(
            description="Visual style for the generated image",
            default=ImageStyle.ANY,
            title="Image Style",
        )

    class Output(BlockSchema):
        image_url: str = SchemaField(description="URL of the generated image")
        error: str = SchemaField(description="Error message if generation failed")

    def __init__(self):
        super().__init__(
            id="ed1ae7a0-b770-4089-b520-1f0005fad19a",
            description="Generate images using various AI models through a unified interface",
            categories={BlockCategory.AI},
            input_schema=AIImageGeneratorBlock.Input,
            output_schema=AIImageGeneratorBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "prompt": "An octopus using a laptop in a snowy forest with 'AutoGPT' clearly visible on the screen",
                "model": ImageGenModel.RECRAFT,
                "size": ImageSize.SQUARE,
                "style": ImageStyle.REALISTIC,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "image_url",
                    "https://replicate.delivery/generated-image.webp",
                ),
            ],
            test_mock={
                "_run_client": lambda *args, **kwargs: "https://replicate.delivery/generated-image.webp"
            },
        )

    def _run_client(
        self, credentials: APIKeyCredentials, model_name: str, input_params: dict
    ):
        try:
            # Initialize Replicate client
            client = replicate.Client(api_token=credentials.api_key.get_secret_value())

            # Run the model with input parameters
            output = client.run(model_name, input=input_params, wait=False)

            # Process output
            if isinstance(output, list) and len(output) > 0:
                if isinstance(output[0], FileOutput):
                    result_url = output[0].url
                else:
                    result_url = output[0]
            elif isinstance(output, FileOutput):
                result_url = output.url
            elif isinstance(output, str):
                result_url = output
            else:
                result_url = None

            return result_url

        except TypeError as e:
            raise TypeError(f"Error during model execution: {e}")
        except Exception as e:
            raise RuntimeError(f"Unexpected error during model execution: {e}")

    def generate_image(self, input_data: Input, credentials: APIKeyCredentials):
        try:
            # Handle style-based prompt modification for models without native style support
            modified_prompt = input_data.prompt
            if input_data.model not in [ImageGenModel.RECRAFT]:
                style_prefix = self._style_to_prompt_prefix(input_data.style)
                modified_prompt = f"{style_prefix} {modified_prompt}".strip()

            if input_data.model == ImageGenModel.SD3_5:
                # Use Stable Diffusion 3.5 with aspect ratio
                input_params = {
                    "prompt": modified_prompt,
                    "aspect_ratio": SIZE_TO_SD_RATIO[input_data.size],
                    "output_format": "webp",
                    "output_quality": 90,
                    "steps": 40,
                    "cfg_scale": 7.0,
                }
                output = self._run_client(
                    credentials,
                    "stability-ai/stable-diffusion-3.5-medium",
                    input_params,
                )
                return output

            elif input_data.model == ImageGenModel.FLUX:
                # Use Flux-specific dimensions with 'jpg' format to avoid ReplicateError
                width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size]
                input_params = {
                    "prompt": modified_prompt,
                    "width": width,
                    "height": height,
                    "aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size],
                    "output_format": "jpg",  # Set to jpg for Flux models
                    "output_quality": 90,
                }
                output = self._run_client(
                    credentials, "black-forest-labs/flux-1.1-pro", input_params
                )
                return output

            elif input_data.model == ImageGenModel.FLUX_ULTRA:
                width, height = SIZE_TO_FLUX_DIMENSIONS[input_data.size]
                input_params = {
                    "prompt": modified_prompt,
                    "width": width,
                    "height": height,
                    "aspect_ratio": SIZE_TO_FLUX_RATIO[input_data.size],
                    "output_format": "jpg",
                    "output_quality": 90,
                }
                output = self._run_client(
                    credentials, "black-forest-labs/flux-1.1-pro-ultra", input_params
                )
                return output

            elif input_data.model == ImageGenModel.RECRAFT:
                input_params = {
                    "prompt": input_data.prompt,
                    "size": SIZE_TO_RECRAFT_DIMENSIONS[input_data.size],
                    "style": input_data.style.value,
                }
                output = self._run_client(
                    credentials, "recraft-ai/recraft-v3", input_params
                )
                return output

        except Exception as e:
            raise RuntimeError(f"Failed to generate image: {str(e)}")

    def _style_to_prompt_prefix(self, style: ImageStyle) -> str:
        """
        Convert a style enum to a prompt prefix for models without native style support.
        """
        if style == ImageStyle.ANY:
            return ""

        style_map = {
            ImageStyle.REALISTIC: "photorealistic",
            ImageStyle.REALISTIC_BW: "black and white photograph",
            ImageStyle.REALISTIC_HDR: "HDR photograph",
            ImageStyle.REALISTIC_NATURAL: "natural light photograph",
            ImageStyle.REALISTIC_STUDIO: "studio portrait photograph",
            ImageStyle.REALISTIC_ENTERPRISE: "enterprise photograph",
            ImageStyle.REALISTIC_HARD_FLASH: "hard flash photograph",
            ImageStyle.REALISTIC_MOTION_BLUR: "motion blur photograph",
            ImageStyle.DIGITAL_ART: "digital art",
            ImageStyle.PIXEL_ART: "pixel art",
            ImageStyle.HAND_DRAWN: "hand drawn illustration",
            ImageStyle.GRAIN: "grainy digital illustration",
            ImageStyle.SKETCH: "sketchy illustration",
            ImageStyle.POSTER: "2D art poster",
            ImageStyle.POSTER_2: "alternate 2D art poster",
            ImageStyle.HANDMADE_3D: "handmade 3D illustration",
            ImageStyle.HAND_DRAWN_OUTLINE: "hand drawn outline illustration",
            ImageStyle.ENGRAVING_COLOR: "color engraving illustration",
        }

        style_text = style_map.get(style, "")
        return f"{style_text} of" if style_text else ""

    def run(self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs):
|
||||
try:
|
||||
url = self.generate_image(input_data, credentials)
|
||||
if url:
|
||||
yield "image_url", url
|
||||
else:
|
||||
yield "error", "Image generation returned an empty result."
|
||||
except Exception as e:
|
||||
# Capture and return only the message of the exception, avoiding serialization of non-serializable objects
|
||||
yield "error", str(e)
|
||||
|
||||
|
||||
# Test credentials stay the same
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="01234567-89ab-cdef-0123-456789abcdef",
|
||||
provider="replicate",
|
||||
api_key=SecretStr("mock-replicate-api-key"),
|
||||
title="Mock Replicate API key",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
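
Editor's note: for orientation, a minimal sketch of exercising this block the way its test harness does, using the mock credentials above. The direct construct-and-run pattern and the prompt value are illustrative assumptions, not part of the diff.

# Hypothetical usage sketch -- assumes the block class and mock credentials above are importable.
block = AIImageGeneratorBlock()
outputs = dict(
    block.run(
        AIImageGeneratorBlock.Input(
            credentials=TEST_CREDENTIALS_INPUT,
            prompt="A red panda using a laptop in a snowy forest",
            model=ImageGenModel.SD3_5,
            size=ImageSize.SQUARE,
            style=ImageStyle.REALISTIC,
        ),
        credentials=TEST_CREDENTIALS,
    )
)
# On success: {"image_url": "<replicate delivery URL>"}; on failure: {"error": "<message>"}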
228
autogpt_platform/backend/backend/blocks/ai_music_generator.py
Normal file
@@ -0,0 +1,228 @@
import logging
import time
from enum import Enum
from typing import Literal

import replicate
from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)

logger = logging.getLogger(__name__)

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="replicate",
    api_key=SecretStr("mock-replicate-api-key"),
    title="Mock Replicate API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}

# Model version enum
class MusicGenModelVersion(str, Enum):
    STEREO_LARGE = "stereo-large"
    MELODY_LARGE = "melody-large"
    LARGE = "large"


# Audio format enum
class AudioFormat(str, Enum):
    WAV = "wav"
    MP3 = "mp3"


# Normalization strategy enum
class NormalizationStrategy(str, Enum):
    LOUDNESS = "loudness"
    CLIP = "clip"
    PEAK = "peak"
    RMS = "rms"


class AIMusicGeneratorBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[Literal["replicate"], Literal["api_key"]] = (
            CredentialsField(
                provider="replicate",
                supported_credential_types={"api_key"},
                description="The Replicate integration can be used with "
                "any API key with sufficient permissions for the blocks it is used on.",
            )
        )
        prompt: str = SchemaField(
            description="A description of the music you want to generate",
            placeholder="e.g., 'An upbeat electronic dance track with heavy bass'",
            title="Prompt",
        )
        music_gen_model_version: MusicGenModelVersion = SchemaField(
            description="Model to use for generation",
            default=MusicGenModelVersion.STEREO_LARGE,
            title="Model Version",
        )
        duration: int = SchemaField(
            description="Duration of the generated audio in seconds",
            default=8,
            title="Duration",
        )
        temperature: float = SchemaField(
            description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity",
            default=1.0,
            title="Temperature",
        )
        top_k: int = SchemaField(
            description="Reduces sampling to the k most likely tokens",
            default=250,
            title="Top K",
        )
        top_p: float = SchemaField(
            description="Reduces sampling to tokens with cumulative probability of p. When set to 0 (default), top_k sampling is used",
            default=0.0,
            title="Top P",
        )
        classifier_free_guidance: int = SchemaField(
            description="Increases the influence of inputs on the output. Higher values produce lower-variance outputs that adhere more closely to inputs",
            default=3,
            title="Classifier Free Guidance",
        )
        output_format: AudioFormat = SchemaField(
            description="Output format for generated audio",
            default=AudioFormat.WAV,
            title="Output Format",
        )
        normalization_strategy: NormalizationStrategy = SchemaField(
            description="Strategy for normalizing audio",
            default=NormalizationStrategy.LOUDNESS,
            title="Normalization Strategy",
        )

    class Output(BlockSchema):
        result: str = SchemaField(description="URL of the generated audio file")
        error: str = SchemaField(description="Error message if the model run failed")

    def __init__(self):
        super().__init__(
            id="44f6c8ad-d75c-4ae1-8209-aad1c0326928",
            description="This block generates music using Meta's MusicGen model on Replicate.",
            categories={BlockCategory.AI},
            input_schema=AIMusicGeneratorBlock.Input,
            output_schema=AIMusicGeneratorBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "prompt": "An upbeat electronic dance track with heavy bass",
                "music_gen_model_version": MusicGenModelVersion.STEREO_LARGE,
                "duration": 8,
                "temperature": 1.0,
                "top_k": 250,
                "top_p": 0.0,
                "classifier_free_guidance": 3,
                "output_format": AudioFormat.WAV,
                "normalization_strategy": NormalizationStrategy.LOUDNESS,
            },
            test_output=[
                (
                    "result",
                    "https://replicate.com/output/generated-audio-url.wav",
                ),
            ],
            test_mock={
                "run_model": lambda api_key, music_gen_model_version, prompt, duration, temperature, top_k, top_p, classifier_free_guidance, output_format, normalization_strategy: "https://replicate.com/output/generated-audio-url.wav",
            },
            test_credentials=TEST_CREDENTIALS,
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        max_retries = 3
        retry_delay = 5  # seconds
        last_error = None

        for attempt in range(max_retries):
            try:
                logger.debug(
                    f"[AIMusicGeneratorBlock] - Running model (attempt {attempt + 1})"
                )
                result = self.run_model(
                    api_key=credentials.api_key,
                    music_gen_model_version=input_data.music_gen_model_version,
                    prompt=input_data.prompt,
                    duration=input_data.duration,
                    temperature=input_data.temperature,
                    top_k=input_data.top_k,
                    top_p=input_data.top_p,
                    classifier_free_guidance=input_data.classifier_free_guidance,
                    output_format=input_data.output_format,
                    normalization_strategy=input_data.normalization_strategy,
                )
                if result and result != "No output received":
                    yield "result", result
                    return
                else:
                    last_error = "Model returned empty or invalid response"
                    raise ValueError(last_error)
            except Exception as e:
                last_error = f"Unexpected error: {str(e)}"
                logger.error(f"[AIMusicGeneratorBlock] - Error: {last_error}")
                if attempt < max_retries - 1:
                    time.sleep(retry_delay)
                    continue

        # If we've exhausted all retries, yield the error
        yield "error", f"Failed after {max_retries} attempts. Last error: {last_error}"

    def run_model(
        self,
        api_key: SecretStr,
        music_gen_model_version: MusicGenModelVersion,
        prompt: str,
        duration: int,
        temperature: float,
        top_k: int,
        top_p: float,
        classifier_free_guidance: int,
        output_format: AudioFormat,
        normalization_strategy: NormalizationStrategy,
    ):
        # Initialize Replicate client with the API key
        client = replicate.Client(api_token=api_key.get_secret_value())

        # Run the model with parameters
        output = client.run(
            "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
            input={
                "prompt": prompt,
                "music_gen_model_version": music_gen_model_version,
                "duration": duration,
                "temperature": temperature,
                "top_k": top_k,
                "top_p": top_p,
                "classifier_free_guidance": classifier_free_guidance,
                "output_format": output_format,
                "normalization_strategy": normalization_strategy,
            },
        )

        # Handle the output
        if isinstance(output, list) and len(output) > 0:
            result_url = output[0]  # If output is a list, get the first element
        elif isinstance(output, str):
            result_url = output  # If output is a string, use it directly
        else:
            result_url = (
                "No output received"  # Fallback message if output is not as expected
            )

        return result_url
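
Editor's note: for reference, a minimal sketch of calling the same pinned MusicGen version directly with the Replicate client, outside the block wrapper. The API token and prompt values are placeholders.

import replicate

client = replicate.Client(api_token="r8_...")  # placeholder token
output = client.run(
    "meta/musicgen:671ac645ce5e552cc63a54a2bbff63fcf798043055d2dac5fc9e36a837eedcfb",
    input={"prompt": "An upbeat electronic dance track with heavy bass", "duration": 8},
)
print(output)  # typically a URL (or list of URLs) pointing at the generated audio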
@@ -1,13 +1,32 @@
 import logging
 import time
 from enum import Enum
-from typing import Optional
+from typing import Literal

-import requests
-from pydantic import Field
+from pydantic import SecretStr

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)
+from backend.util.request import requests
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="revid",
+    api_key=SecretStr("mock-revid-api-key"),
+    title="Mock Revid API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}


 class AudioTrack(str, Enum):
@@ -121,18 +140,25 @@ logger = logging.getLogger(__name__)


 class AIShortformVideoCreatorBlock(Block):
     class Input(BlockSchema):
-        api_key: BlockSecret = SecretField(
-            key="revid_api_key",
-            description="Your revid.ai API key",
-            placeholder="Enter your revid.ai API key",
+        credentials: CredentialsMetaInput[Literal["revid"], Literal["api_key"]] = (
+            CredentialsField(
+                provider="revid",
+                supported_credential_types={"api_key"},
+                description="The revid.ai integration can be used with "
+                "any API key with sufficient permissions for the blocks it is used on.",
+            )
         )
         script: str = SchemaField(
             description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. For example, [close-up of a cat] will show a close-up of a cat.""",
             placeholder="[close-up of a cat] Meow!",
         )
-        ratio: str = Field(description="Aspect ratio of the video", default="9 / 16")
-        resolution: str = Field(description="Resolution of the video", default="720p")
-        frame_rate: int = Field(description="Frame rate of the video", default=60)
+        ratio: str = SchemaField(
+            description="Aspect ratio of the video", default="9 / 16"
+        )
+        resolution: str = SchemaField(
+            description="Resolution of the video", default="720p"
+        )
+        frame_rate: int = SchemaField(description="Frame rate of the video", default=60)
         generation_preset: GenerationPreset = SchemaField(
             description="Generation preset for visual style - only effects AI generated visuals",
             default=GenerationPreset.LEONARDO,
@@ -155,8 +181,8 @@ class AIShortformVideoCreatorBlock(Block):
         )

     class Output(BlockSchema):
-        video_url: str = Field(description="The URL of the created video")
-        error: Optional[str] = Field(description="Error message if the request failed")
+        video_url: str = SchemaField(description="The URL of the created video")
+        error: str = SchemaField(description="Error message if the request failed")

     def __init__(self):
         super().__init__(
@@ -166,7 +192,7 @@ class AIShortformVideoCreatorBlock(Block):
             input_schema=AIShortformVideoCreatorBlock.Input,
             output_schema=AIShortformVideoCreatorBlock.Output,
             test_input={
-                "api_key": "test_api_key",
+                "credentials": TEST_CREDENTIALS_INPUT,
                 "script": "[close-up of a cat] Meow!",
                 "ratio": "9 / 16",
                 "resolution": "720p",
@@ -188,35 +214,37 @@ class AIShortformVideoCreatorBlock(Block):
                 "create_video": lambda api_key, payload: {"pid": "test_pid"},
                 "wait_for_video": lambda api_key, pid, webhook_token, max_wait_time=1000: "https://example.com/video.mp4",
             },
+            test_credentials=TEST_CREDENTIALS,
         )

     def create_webhook(self):
         url = "https://webhook.site/token"
         headers = {"Accept": "application/json", "Content-Type": "application/json"}
         response = requests.post(url, headers=headers)
         response.raise_for_status()
         webhook_data = response.json()
         return webhook_data["uuid"], f"https://webhook.site/{webhook_data['uuid']}"

-    def create_video(self, api_key: str, payload: dict) -> dict:
+    def create_video(self, api_key: SecretStr, payload: dict) -> dict:
         url = "https://www.revid.ai/api/public/v2/render"
-        headers = {"key": api_key}
+        headers = {"key": api_key.get_secret_value()}
         response = requests.post(url, json=payload, headers=headers)
         logger.debug(
             f"API Response Status Code: {response.status_code}, Content: {response.text}"
         )
         response.raise_for_status()
         return response.json()

-    def check_video_status(self, api_key: str, pid: str) -> dict:
+    def check_video_status(self, api_key: SecretStr, pid: str) -> dict:
         url = f"https://www.revid.ai/api/public/v2/status?pid={pid}"
-        headers = {"key": api_key}
+        headers = {"key": api_key.get_secret_value()}
         response = requests.get(url, headers=headers)
         response.raise_for_status()
         return response.json()

     def wait_for_video(
-        self, api_key: str, pid: str, webhook_token: str, max_wait_time: int = 1000
+        self,
+        api_key: SecretStr,
+        pid: str,
+        webhook_token: str,
+        max_wait_time: int = 1000,
     ) -> str:
         start_time = time.time()
         while time.time() - start_time < max_wait_time:
@@ -238,70 +266,59 @@ class AIShortformVideoCreatorBlock(Block):
         logger.error("Video creation timed out")
         raise TimeoutError("Video creation timed out")

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            # Create a new Webhook.site URL
-            webhook_token, webhook_url = self.create_webhook()
-            logger.debug(f"Webhook URL: {webhook_url}")
-
-            audio_url = input_data.background_music.audio_url
-
-            payload = {
-                "frameRate": input_data.frame_rate,
-                "resolution": input_data.resolution,
-                "frameDurationMultiplier": 18,
-                "webhook": webhook_url,
-                "creationParams": {
-                    "mediaType": input_data.video_style,
-                    "captionPresetName": "Wrap 1",
-                    "selectedVoice": input_data.voice.voice_id,
-                    "hasEnhancedGeneration": True,
-                    "generationPreset": input_data.generation_preset.name,
-                    "selectedAudio": input_data.background_music,
-                    "origin": "/create",
-                    "inputText": input_data.script,
-                    "flowType": "text-to-video",
-                    "slug": "create-tiktok-video",
-                    "hasToGenerateVoice": True,
-                    "hasToTranscript": False,
-                    "hasToSearchMedia": True,
-                    "hasAvatar": False,
-                    "hasWebsiteRecorder": False,
-                    "hasTextSmallAtBottom": False,
-                    "ratio": input_data.ratio,
-                    "sourceType": "contentScraping",
-                    "selectedStoryStyle": {"value": "custom", "label": "Custom"},
-                    "hasToGenerateVideos": input_data.video_style
-                    != VisualMediaType.STOCK_VIDEOS,
-                    "audioUrl": audio_url,
-                },
-            }
-
-            logger.debug("Creating video...")
-            response = self.create_video(input_data.api_key.get_secret_value(), payload)
-            pid = response.get("pid")
-
-            if not pid:
-                logger.error(
-                    f"Failed to create video: No project ID returned. API Response: {response}"
-                )
-                yield "error", "Failed to create video: No project ID returned"
-            else:
-                logger.debug(
-                    f"Video created with project ID: {pid}. Waiting for completion..."
-                )
-                video_url = self.wait_for_video(
-                    input_data.api_key.get_secret_value(), pid, webhook_token
-                )
-                logger.debug(f"Video ready: {video_url}")
-                yield "video_url", video_url
-
-        except requests.RequestException as e:
-            logger.exception("Error creating video")
-            yield "error", f"Error creating video: {str(e)}"
-        except ValueError as e:
-            logger.exception("Error in video creation process")
-            yield "error", str(e)
-        except TimeoutError as e:
-            logger.exception("Video creation timed out")
-            yield "error", str(e)
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
+        # Create a new Webhook.site URL
+        webhook_token, webhook_url = self.create_webhook()
+        logger.debug(f"Webhook URL: {webhook_url}")
+
+        audio_url = input_data.background_music.audio_url
+
+        payload = {
+            "frameRate": input_data.frame_rate,
+            "resolution": input_data.resolution,
+            "frameDurationMultiplier": 18,
+            "webhook": webhook_url,
+            "creationParams": {
+                "mediaType": input_data.video_style,
+                "captionPresetName": "Wrap 1",
+                "selectedVoice": input_data.voice.voice_id,
+                "hasEnhancedGeneration": True,
+                "generationPreset": input_data.generation_preset.name,
+                "selectedAudio": input_data.background_music,
+                "origin": "/create",
+                "inputText": input_data.script,
+                "flowType": "text-to-video",
+                "slug": "create-tiktok-video",
+                "hasToGenerateVoice": True,
+                "hasToTranscript": False,
+                "hasToSearchMedia": True,
+                "hasAvatar": False,
+                "hasWebsiteRecorder": False,
+                "hasTextSmallAtBottom": False,
+                "ratio": input_data.ratio,
+                "sourceType": "contentScraping",
+                "selectedStoryStyle": {"value": "custom", "label": "Custom"},
+                "hasToGenerateVideos": input_data.video_style
+                != VisualMediaType.STOCK_VIDEOS,
+                "audioUrl": audio_url,
+            },
+        }
+
+        logger.debug("Creating video...")
+        response = self.create_video(credentials.api_key, payload)
+        pid = response.get("pid")
+
+        if not pid:
+            logger.error(
+                f"Failed to create video: No project ID returned. API Response: {response}"
+            )
+            raise RuntimeError("Failed to create video: No project ID returned")
+        else:
+            logger.debug(
+                f"Video created with project ID: {pid}. Waiting for completion..."
+            )
+            video_url = self.wait_for_video(credentials.api_key, pid, webhook_token)
+            logger.debug(f"Video ready: {video_url}")
+            yield "video_url", video_url

@@ -2,7 +2,6 @@ import re
 from typing import Any, List

 from jinja2 import BaseLoader, Environment
-from pydantic import Field

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
 from backend.data.model import SchemaField
@@ -19,18 +18,18 @@ class StoreValueBlock(Block):
     """

     class Input(BlockSchema):
-        input: Any = Field(
+        input: Any = SchemaField(
             description="Trigger the block to produce the output. "
             "The value is only used when `data` is None."
         )
-        data: Any = Field(
+        data: Any = SchemaField(
             description="The constant data to be retained in the block. "
             "This value is passed as `output`.",
             default=None,
         )

     class Output(BlockSchema):
-        output: Any
+        output: Any = SchemaField(description="The stored data retained in the block.")

     def __init__(self):
         super().__init__(
@@ -56,10 +55,10 @@ class StoreValueBlock(Block):

 class PrintToConsoleBlock(Block):
     class Input(BlockSchema):
-        text: str
+        text: str = SchemaField(description="The text to print to the console.")

     class Output(BlockSchema):
-        status: str
+        status: str = SchemaField(description="The status of the print operation.")

     def __init__(self):
         super().__init__(
@@ -79,12 +78,14 @@ class PrintToConsoleBlock(Block):

 class FindInDictionaryBlock(Block):
     class Input(BlockSchema):
-        input: Any = Field(description="Dictionary to lookup from")
-        key: str | int = Field(description="Key to lookup in the dictionary")
+        input: Any = SchemaField(description="Dictionary to lookup from")
+        key: str | int = SchemaField(description="Key to lookup in the dictionary")

     class Output(BlockSchema):
-        output: Any = Field(description="Value found for the given key")
-        missing: Any = Field(description="Value of the input that missing the key")
+        output: Any = SchemaField(description="Value found for the given key")
+        missing: Any = SchemaField(
+            description="Value of the input that missing the key"
+        )

     def __init__(self):
         super().__init__(
@@ -142,11 +143,17 @@ class AgentInputBlock(Block):
     """

     class Input(BlockSchema):
-        value: Any = SchemaField(description="The value to be passed as input.")
+        value: Any = SchemaField(
+            description="The value to be passed as input.",
+            default=None,
+        )
         name: str = SchemaField(description="The name of the input.")
-        description: str = SchemaField(
+        title: str | None = SchemaField(
+            description="The title of the input.", default=None, advanced=True
+        )
+        description: str | None = SchemaField(
             description="The description of the input.",
-            default="",
+            default=None,
             advanced=True,
         )
         placeholder_values: List[Any] = SchemaField(
@@ -159,6 +166,16 @@ class AgentInputBlock(Block):
             default=False,
             advanced=True,
         )
+        advanced: bool = SchemaField(
+            description="Whether to show the input in the advanced section, if the field is not required.",
+            default=False,
+            advanced=True,
+        )
+        secret: bool = SchemaField(
+            description="Whether the input should be treated as a secret.",
+            default=False,
+            advanced=True,
+        )

     class Output(BlockSchema):
         result: Any = SchemaField(description="The value passed as input.")
@@ -191,6 +208,7 @@ class AgentInputBlock(Block):
             ],
             categories={BlockCategory.INPUT, BlockCategory.BASIC},
             block_type=BlockType.INPUT,
+            static_output=True,
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
@@ -201,28 +219,27 @@ class AgentOutputBlock(Block):
     """
     Records the output of the graph for users to see.

-    Attributes:
-        recorded_value: The value to be recorded as output.
-        name: The name of the output.
-        description: The description of the output.
-        fmt_string: The format string to be used to format the recorded_value.
-
     Outputs:
         output: The formatted recorded_value if fmt_string is provided and the recorded_value
                 can be formatted, otherwise the raw recorded_value.

     Behavior:
-        If fmt_string is provided and the recorded_value is of a type that can be formatted,
-        the block attempts to format the recorded_value using the fmt_string.
-        If formatting fails or no fmt_string is provided, the raw recorded_value is output.
+        If `format` is provided and the `value` is of a type that can be formatted,
+        the block attempts to format the recorded_value using the `format`.
+        If formatting fails or no `format` is provided, the raw `value` is output.
     """

     class Input(BlockSchema):
-        value: Any = SchemaField(description="The value to be recorded as output.")
+        value: Any = SchemaField(
+            description="The value to be recorded as output.",
+            default=None,
+            advanced=False,
+        )
         name: str = SchemaField(description="The name of the output.")
-        description: str = SchemaField(
+        title: str | None = SchemaField(
+            description="The title of the output.",
+            default=None,
+            advanced=True,
+        )
+        description: str | None = SchemaField(
             description="The description of the output.",
-            default="",
+            default=None,
             advanced=True,
         )
         format: str = SchemaField(
@@ -230,6 +247,16 @@ class AgentOutputBlock(Block):
             default="",
             advanced=True,
         )
+        advanced: bool = SchemaField(
+            description="Whether to treat the output as advanced.",
+            default=False,
+            advanced=True,
+        )
+        secret: bool = SchemaField(
+            description="Whether the output should be treated as a secret.",
+            default=False,
+            advanced=True,
+        )

     class Output(BlockSchema):
         output: Any = SchemaField(description="The value recorded as output.")
@@ -237,7 +264,7 @@ class AgentOutputBlock(Block):
     def __init__(self):
         super().__init__(
             id="363ae599-353e-4804-937e-b2ee3cef3da4",
-            description=("Stores the output of the graph for users to see."),
+            description="Stores the output of the graph for users to see.",
             input_schema=AgentOutputBlock.Input,
             output_schema=AgentOutputBlock.Output,
             test_input=[
@@ -267,6 +294,7 @@ class AgentOutputBlock(Block):
             ],
             categories={BlockCategory.OUTPUT, BlockCategory.BASIC},
             block_type=BlockType.OUTPUT,
+            static_output=True,
        )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
@@ -287,16 +315,26 @@ class AgentOutputBlock(Block):

 class AddToDictionaryBlock(Block):
     class Input(BlockSchema):
-        dictionary: dict | None = SchemaField(
-            default=None,
+        dictionary: dict[Any, Any] = SchemaField(
+            default={},
             description="The dictionary to add the entry to. If not provided, a new dictionary will be created.",
             placeholder='{"key1": "value1", "key2": "value2"}',
         )
         key: str = SchemaField(
-            description="The key for the new entry.", placeholder="new_key"
+            default="",
+            description="The key for the new entry.",
+            placeholder="new_key",
+            advanced=False,
         )
         value: Any = SchemaField(
-            description="The value for the new entry.", placeholder="new_value"
+            default=None,
+            description="The value for the new entry.",
+            placeholder="new_value",
+            advanced=False,
         )
+        entries: dict[Any, Any] = SchemaField(
+            default={},
+            description="The entries to add to the dictionary. This is the batch version of the `key` and `value` fields.",
+            advanced=True,
+        )

     class Output(BlockSchema):
@@ -319,6 +357,10 @@ class AddToDictionaryBlock(Block):
                     "value": "new_value",
                 },
                 {"key": "first_key", "value": "first_value"},
+                {
+                    "dictionary": {"existing_key": "existing_value"},
+                    "entries": {"new_key": "new_value", "first_key": "first_value"},
+                },
             ],
             test_output=[
                 (
@@ -326,41 +368,49 @@ class AddToDictionaryBlock(Block):
                     {"existing_key": "existing_value", "new_key": "new_value"},
                 ),
                 ("updated_dictionary", {"first_key": "first_value"}),
+                (
+                    "updated_dictionary",
+                    {
+                        "existing_key": "existing_value",
+                        "new_key": "new_value",
+                        "first_key": "first_value",
+                    },
+                ),
             ],
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            # If no dictionary is provided, create a new one
-            if input_data.dictionary is None:
-                updated_dict = {}
-            else:
-                # Create a copy of the input dictionary to avoid modifying the original
-                updated_dict = input_data.dictionary.copy()
+        updated_dict = input_data.dictionary.copy()

-            # Add the new key-value pair
-            updated_dict[input_data.key] = input_data.value
+        if input_data.value is not None and input_data.key:
+            updated_dict[input_data.key] = input_data.value

-            yield "updated_dictionary", updated_dict
-        except Exception as e:
-            yield "error", f"Failed to add entry to dictionary: {str(e)}"
+        for key, value in input_data.entries.items():
+            updated_dict[key] = value
+
+        yield "updated_dictionary", updated_dict


 class AddToListBlock(Block):
     class Input(BlockSchema):
-        list: List[Any] | None = SchemaField(
-            default=None,
+        list: List[Any] = SchemaField(
+            default=[],
+            advanced=False,
             description="The list to add the entry to. If not provided, a new list will be created.",
             placeholder='[1, "string", {"key": "value"}]',
         )
         entry: Any = SchemaField(
             description="The entry to add to the list. Can be of any type (string, int, dict, etc.).",
             placeholder='{"new_key": "new_value"}',
             advanced=False,
+            default=None,
         )
+        entries: List[Any] = SchemaField(
+            default=[],
+            description="The entries to add to the list. This is the batch version of the `entry` field.",
+            advanced=True,
+        )
         position: int | None = SchemaField(
             default=None,
             description="The position to insert the new entry. If not provided, the entry will be appended to the end of the list.",
             placeholder="0",
         )

     class Output(BlockSchema):
@@ -384,6 +434,12 @@ class AddToListBlock(Block):
                 },
                 {"entry": "first_entry"},
                 {"list": ["a", "b", "c"], "entry": "d"},
+                {
+                    "entry": "e",
+                    "entries": ["f", "g"],
+                    "list": ["a", "b"],
+                    "position": 1,
+                },
             ],
             test_output=[
                 (
@@ -397,27 +453,22 @@ class AddToListBlock(Block):
                 ),
                 ("updated_list", ["first_entry"]),
                 ("updated_list", ["a", "b", "c", "d"]),
+                ("updated_list", ["a", "f", "g", "e", "b"]),
             ],
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            # If no list is provided, create a new one
-            if input_data.list is None:
-                updated_list = []
-            else:
-                # Create a copy of the input list to avoid modifying the original
-                updated_list = input_data.list.copy()
+        entries_added = input_data.entries.copy()
+        if input_data.entry:
+            entries_added.append(input_data.entry)

-            # Add the new entry
-            if input_data.position is None:
-                updated_list.append(input_data.entry)
-            else:
-                updated_list.insert(input_data.position, input_data.entry)
+        updated_list = input_data.list.copy()
+        if (pos := input_data.position) is not None:
+            updated_list = updated_list[:pos] + entries_added + updated_list[pos:]
+        else:
+            updated_list += entries_added

-            yield "updated_list", updated_list
-        except Exception as e:
-            yield "error", f"Failed to add entry to list: {str(e)}"
+        yield "updated_list", updated_list


 class NoteBlock(Block):

@@ -3,6 +3,7 @@ import re
 from typing import Type

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class BlockInstallationBlock(Block):
@@ -15,11 +16,17 @@ class BlockInstallationBlock(Block):
     """

     class Input(BlockSchema):
-        code: str
+        code: str = SchemaField(
+            description="Python code of the block to be installed",
+        )

     class Output(BlockSchema):
-        success: str
-        error: str
+        success: str = SchemaField(
+            description="Success message if the block is installed successfully",
+        )
+        error: str = SchemaField(
+            description="Error message if the block installation fails",
+        )

     def __init__(self):
         super().__init__(
@@ -37,14 +44,12 @@ class BlockInstallationBlock(Block):
         if search := re.search(r"class (\w+)\(Block\):", code):
             class_name = search.group(1)
         else:
-            yield "error", "No class found in the code."
-            return
+            raise RuntimeError("No class found in the code.")

         if search := re.search(r"id=\"(\w+-\w+-\w+-\w+-\w+)\"", code):
             file_name = search.group(1)
         else:
-            yield "error", "No UUID found in the code."
-            return
+            raise RuntimeError("No UUID found in the code.")

         block_dir = os.path.dirname(__file__)
         file_path = f"{block_dir}/{file_name}.py"
@@ -63,4 +68,4 @@ class BlockInstallationBlock(Block):
             yield "success", "Block installed successfully."
         except Exception as e:
             os.remove(file_path)
-            yield "error", f"[Code]\n{code}\n\n[Error]\n{str(e)}"
+            raise RuntimeError(f"[Code]\n{code}\n\n[Error]\n{str(e)}")

@@ -71,11 +71,24 @@ class ConditionBlock(Block):
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        value1 = input_data.value1
         operator = input_data.operator

+        value1 = input_data.value1
+        if isinstance(value1, str):
+            try:
+                value1 = float(value1.strip())
+            except ValueError:
+                value1 = value1.strip()
+
         value2 = input_data.value2
+        if isinstance(value2, str):
+            try:
+                value2 = float(value2.strip())
+            except ValueError:
+                value2 = value2.strip()

         yes_value = input_data.yes_value if input_data.yes_value is not None else value1
-        no_value = input_data.no_value if input_data.no_value is not None else value1
+        no_value = input_data.no_value if input_data.no_value is not None else value2

         comparison_funcs = {
             ComparisonOperator.EQUAL: lambda a, b: a == b,
@@ -86,17 +99,11 @@ class ConditionBlock(Block):
             ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
         }

-        try:
-            result = comparison_funcs[operator](value1, value2)
+        result = comparison_funcs[operator](value1, value2)

-            yield "result", result
+        yield "result", result

-            if result:
-                yield "yes_output", yes_value
-            else:
-                yield "no_output", no_value
-
-        except Exception:
-            yield "result", None
-            yield "yes_output", None
-            yield "no_output", None
+        if result:
+            yield "yes_output", yes_value
+        else:
+            yield "no_output", no_value

@@ -0,0 +1,43 @@
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class WordCharacterCountBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(
            description="Input text to count words and characters",
            placeholder="Enter your text here",
            advanced=False,
        )

    class Output(BlockSchema):
        word_count: int = SchemaField(description="Number of words in the input text")
        character_count: int = SchemaField(
            description="Number of characters in the input text"
        )
        error: str = SchemaField(
            description="Error message if the counting operation failed"
        )

    def __init__(self):
        super().__init__(
            id="ab2a782d-22cf-4587-8a70-55b59b3f9f90",
            description="Counts the number of words and characters in a given text.",
            categories={BlockCategory.TEXT},
            input_schema=WordCharacterCountBlock.Input,
            output_schema=WordCharacterCountBlock.Output,
            test_input={"text": "Hello, how are you?"},
            test_output=[("word_count", 4), ("character_count", 19)],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        try:
            text = input_data.text
            word_count = len(text.split())
            character_count = len(text)

            yield "word_count", word_count
            yield "character_count", character_count

        except Exception as e:
            yield "error", str(e)
@@ -1,21 +1,49 @@
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import ContributorDetails
+from backend.data.model import ContributorDetails, SchemaField


 class ReadCsvBlock(Block):
     class Input(BlockSchema):
-        contents: str
-        delimiter: str = ","
-        quotechar: str = '"'
-        escapechar: str = "\\"
-        has_header: bool = True
-        skip_rows: int = 0
-        strip: bool = True
-        skip_columns: list[str] = []
+        contents: str = SchemaField(
+            description="The contents of the CSV file to read",
+            placeholder="a, b, c\n1,2,3\n4,5,6",
+        )
+        delimiter: str = SchemaField(
+            description="The delimiter used in the CSV file",
+            default=",",
+        )
+        quotechar: str = SchemaField(
+            description="The character used to quote fields",
+            default='"',
+        )
+        escapechar: str = SchemaField(
+            description="The character used to escape the delimiter",
+            default="\\",
+        )
+        has_header: bool = SchemaField(
+            description="Whether the CSV file has a header row",
+            default=True,
+        )
+        skip_rows: int = SchemaField(
+            description="The number of rows to skip from the start of the file",
+            default=0,
+        )
+        strip: bool = SchemaField(
+            description="Whether to strip whitespace from the values",
+            default=True,
+        )
+        skip_columns: list[str] = SchemaField(
+            description="The columns to skip from the start of the row",
+            default=[],
+        )

     class Output(BlockSchema):
-        row: dict[str, str]
-        all_data: list[dict[str, str]]
+        row: dict[str, str] = SchemaField(
+            description="The data produced from each row in the CSV file"
+        )
+        all_data: list[dict[str, str]] = SchemaField(
+            description="All the data in the CSV file as a list of rows"
+        )

     def __init__(self):
         super().__init__(
@@ -24,7 +52,7 @@ class ReadCsvBlock(Block):
             output_schema=ReadCsvBlock.Output,
             description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.",
             contributors=[ContributorDetails(name="Nicholas Tindle")],
-            categories={BlockCategory.TEXT},
+            categories={BlockCategory.TEXT, BlockCategory.DATA},
             test_input={
                 "contents": "a, b, c\n1,2,3\n4,5,6",
             },
@@ -35,8 +35,5 @@ This is a "quoted" string.""",
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            decoded_text = codecs.decode(input_data.text, "unicode_escape")
-            yield "decoded_text", decoded_text
-        except Exception as e:
-            yield "error", f"Error decoding text: {str(e)}"
+        decoded_text = codecs.decode(input_data.text, "unicode_escape")
+        yield "decoded_text", decoded_text

@@ -1,28 +1,56 @@
 import asyncio
+from typing import Literal

 import aiohttp
 import discord
 from pydantic import Field
+from pydantic import SecretStr

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SecretField
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)
+
+DiscordCredentials = CredentialsMetaInput[Literal["discord"], Literal["api_key"]]
+
+
+def DiscordCredentialsField() -> DiscordCredentials:
+    return CredentialsField(
+        description="Discord bot token",
+        provider="discord",
+        supported_credential_types={"api_key"},
+    )
+
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="discord",
+    api_key=SecretStr("test_api_key"),
+    title="Mock Discord API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}


 class ReadDiscordMessagesBlock(Block):
     class Input(BlockSchema):
-        discord_bot_token: BlockSecret = SecretField(
-            key="discord_bot_token", description="Discord bot token"
-        )
         continuous_read: bool = Field(
             description="Whether to continuously read messages", default=True
         )
+        credentials: DiscordCredentials = DiscordCredentialsField()

     class Output(BlockSchema):
-        message_content: str = Field(description="The content of the message received")
-        channel_name: str = Field(
+        message_content: str = SchemaField(
+            description="The content of the message received"
+        )
+        channel_name: str = SchemaField(
             description="The name of the channel the message was received from"
         )
-        username: str = Field(
+        username: str = SchemaField(
             description="The username of the user who sent the message"
         )
@@ -33,7 +61,11 @@ class ReadDiscordMessagesBlock(Block):
             output_schema=ReadDiscordMessagesBlock.Output,  # Assign output schema
             description="Reads messages from a Discord channel using a bot token.",
             categories={BlockCategory.SOCIAL},
-            test_input={"discord_bot_token": "test_token", "continuous_read": False},
+            test_input={
+                "continuous_read": False,
+                "credentials": TEST_CREDENTIALS_INPUT,
+            },
+            test_credentials=TEST_CREDENTIALS,
             test_output=[
                 (
                     "message_content",
@@ -47,7 +79,7 @@ class ReadDiscordMessagesBlock(Block):
             },
         )

-    async def run_bot(self, token: str):
+    async def run_bot(self, token: SecretStr):
         intents = discord.Intents.default()
         intents.message_content = True
@@ -80,19 +112,20 @@ class ReadDiscordMessagesBlock(Block):

             await client.close()

-        await client.start(token)
+        await client.start(token.get_secret_value())

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
         while True:
-            for output_name, output_value in self.__run(input_data):
+            for output_name, output_value in self.__run(input_data, credentials):
                 yield output_name, output_value
-            if not input_data.continuous_read:
-                break
+            break

-    def __run(self, input_data: Input) -> BlockOutput:
+    def __run(self, input_data: Input, credentials: APIKeyCredentials) -> BlockOutput:
         try:
             loop = asyncio.get_event_loop()
-            future = self.run_bot(input_data.discord_bot_token.get_secret_value())
+            future = self.run_bot(credentials.api_key)

             # If it's a Future (mock), set the result
             if isinstance(future, asyncio.Future):
@@ -131,16 +164,16 @@ class ReadDiscordMessagesBlock(Block):

 class SendDiscordMessageBlock(Block):
     class Input(BlockSchema):
-        discord_bot_token: BlockSecret = SecretField(
-            key="discord_bot_token", description="Discord bot token"
-        )
-        message_content: str = Field(description="The content of the message received")
-        channel_name: str = Field(
+        credentials: DiscordCredentials = DiscordCredentialsField()
+        message_content: str = SchemaField(
+            description="The content of the message received"
+        )
+        channel_name: str = SchemaField(
             description="The name of the channel the message was received from"
         )

     class Output(BlockSchema):
-        status: str = Field(
+        status: str = SchemaField(
             description="The status of the operation (e.g., 'Message sent', 'Error')"
         )
@@ -152,14 +185,15 @@ class SendDiscordMessageBlock(Block):
             description="Sends a message to a Discord channel using a bot token.",
             categories={BlockCategory.SOCIAL},
             test_input={
-                "discord_bot_token": "YOUR_DISCORD_BOT_TOKEN",
                 "channel_name": "general",
                 "message_content": "Hello, Discord!",
+                "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_output=[("status", "Message sent")],
             test_mock={
                 "send_message": lambda token, channel_name, message_content: asyncio.Future()
             },
+            test_credentials=TEST_CREDENTIALS,
         )

     async def send_message(self, token: str, channel_name: str, message_content: str):
@@ -189,11 +223,13 @@ class SendDiscordMessageBlock(Block):
         """Splits a message into chunks not exceeding the Discord limit."""
         return [message[i : i + limit] for i in range(0, len(message), limit)]

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
         try:
             loop = asyncio.get_event_loop()
             future = self.send_message(
-                input_data.discord_bot_token.get_secret_value(),
+                credentials.api_key.get_secret_value(),
                 input_data.channel_name,
                 input_data.message_content,
             )

@@ -2,17 +2,17 @@ import smtplib
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText

-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import BlockSecret, SchemaField, SecretField


 class EmailCredentials(BaseModel):
-    smtp_server: str = Field(
+    smtp_server: str = SchemaField(
         default="smtp.gmail.com", description="SMTP server address"
     )
-    smtp_port: int = Field(default=25, description="SMTP port number")
+    smtp_port: int = SchemaField(default=25, description="SMTP port number")
     smtp_username: BlockSecret = SecretField(key="smtp_username")
     smtp_password: BlockSecret = SecretField(key="smtp_password")
@@ -30,7 +30,7 @@ class SendEmailBlock(Block):
         body: str = SchemaField(
             description="Body of the email", placeholder="Enter the email body"
         )
-        creds: EmailCredentials = Field(
+        creds: EmailCredentials = SchemaField(
             description="SMTP credentials",
             default=EmailCredentials(),
         )
@@ -43,6 +43,7 @@ class SendEmailBlock(Block):

     def __init__(self):
         super().__init__(
+            disabled=True,
             id="4335878a-394e-4e67-adf2-919877ff49ae",
             description="This block sends an email using the provided SMTP credentials.",
             categories={BlockCategory.OUTPUT},
@@ -67,35 +68,28 @@ class SendEmailBlock(Block):
     def send_email(
         creds: EmailCredentials, to_email: str, subject: str, body: str
     ) -> str:
-        try:
-            smtp_server = creds.smtp_server
-            smtp_port = creds.smtp_port
-            smtp_username = creds.smtp_username.get_secret_value()
-            smtp_password = creds.smtp_password.get_secret_value()
+        smtp_server = creds.smtp_server
+        smtp_port = creds.smtp_port
+        smtp_username = creds.smtp_username.get_secret_value()
+        smtp_password = creds.smtp_password.get_secret_value()

-            msg = MIMEMultipart()
-            msg["From"] = smtp_username
-            msg["To"] = to_email
-            msg["Subject"] = subject
-            msg.attach(MIMEText(body, "plain"))
+        msg = MIMEMultipart()
+        msg["From"] = smtp_username
+        msg["To"] = to_email
+        msg["Subject"] = subject
+        msg.attach(MIMEText(body, "plain"))

-            with smtplib.SMTP(smtp_server, smtp_port) as server:
-                server.starttls()
-                server.login(smtp_username, smtp_password)
-                server.sendmail(smtp_username, to_email, msg.as_string())
+        with smtplib.SMTP(smtp_server, smtp_port) as server:
+            server.starttls()
+            server.login(smtp_username, smtp_password)
+            server.sendmail(smtp_username, to_email, msg.as_string())

-            return "Email sent successfully"
-        except Exception as e:
-            return f"Failed to send email: {str(e)}"
+        return "Email sent successfully"

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        status = self.send_email(
+        yield "status", self.send_email(
             input_data.creds,
             input_data.to_email,
             input_data.subject,
             input_data.body,
         )
-        if "successfully" in status:
-            yield "status", status
-        else:
-            yield "error", status

36
autogpt_platform/backend/backend/blocks/fal/_auth.py
Normal file
@@ -0,0 +1,36 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput

FalCredentials = APIKeyCredentials
FalCredentialsInput = CredentialsMetaInput[
    Literal["fal"],
    Literal["api_key"],
]

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="fal",
    api_key=SecretStr("mock-fal-api-key"),
    title="Mock FAL API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


def FalCredentialsField() -> FalCredentialsInput:
    """
    Creates a FAL credentials input on a block.
    """
    return CredentialsField(
        provider="fal",
        supported_credential_types={"api_key"},
        description="The FAL integration can be used with an API Key.",
    )
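
Editor's note: a minimal sketch, assumed and mirroring the credentials pattern used by the other blocks in this diff, of wiring these helpers into a block's input schema. ExampleInput is a hypothetical name for illustration.

from backend.blocks.fal._auth import FalCredentialsField, FalCredentialsInput
from backend.data.block import BlockSchema


class ExampleInput(BlockSchema):  # hypothetical input schema, for illustration only
    credentials: FalCredentialsInput = FalCredentialsField()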
|
||||
@@ -0,0 +1,199 @@
```python
import logging
import time
from enum import Enum
from typing import Any, Dict

import httpx

from backend.blocks.fal._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    FalCredentials,
    FalCredentialsField,
    FalCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

logger = logging.getLogger(__name__)


class FalModel(str, Enum):
    MOCHI = "fal-ai/mochi-v1"
    LUMA = "fal-ai/luma-dream-machine"


class AIVideoGeneratorBlock(Block):
    class Input(BlockSchema):
        prompt: str = SchemaField(
            description="Description of the video to generate.",
            placeholder="A dog running in a field.",
        )
        model: FalModel = SchemaField(
            title="FAL Model",
            default=FalModel.MOCHI,
            description="The FAL model to use for video generation.",
        )
        credentials: FalCredentialsInput = FalCredentialsField()

    class Output(BlockSchema):
        video_url: str = SchemaField(description="The URL of the generated video.")
        error: str = SchemaField(
            description="Error message if video generation failed."
        )
        logs: list[str] = SchemaField(
            description="Generation progress logs.", optional=True
        )

    def __init__(self):
        super().__init__(
            id="530cf046-2ce0-4854-ae2c-659db17c7a46",
            description="Generate videos using FAL AI models.",
            categories={BlockCategory.AI},
            input_schema=self.Input,
            output_schema=self.Output,
            test_input={
                "prompt": "A dog running in a field.",
                "model": FalModel.MOCHI,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[("video_url", "https://fal.media/files/example/video.mp4")],
            test_mock={
                "generate_video": lambda *args, **kwargs: "https://fal.media/files/example/video.mp4"
            },
        )

    def _get_headers(self, api_key: str) -> Dict[str, str]:
        """Get headers for FAL API requests."""
        return {
            "Authorization": f"Key {api_key}",
            "Content-Type": "application/json",
        }

    def _submit_request(
        self, url: str, headers: Dict[str, str], data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Submit a request to the FAL API."""
        try:
            response = httpx.post(url, headers=headers, json=data)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPError as e:
            logger.error(f"FAL API request failed: {str(e)}")
            raise RuntimeError(f"Failed to submit request: {str(e)}")

    def _poll_status(self, status_url: str, headers: Dict[str, str]) -> Dict[str, Any]:
        """Poll the status endpoint until completion or failure."""
        try:
            response = httpx.get(status_url, headers=headers)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPError as e:
            logger.error(f"Failed to get status: {str(e)}")
            raise RuntimeError(f"Failed to get status: {str(e)}")

    def generate_video(self, input_data: Input, credentials: FalCredentials) -> str:
        """Generate video using the specified FAL model."""
        base_url = "https://queue.fal.run"
        api_key = credentials.api_key.get_secret_value()
        headers = self._get_headers(api_key)

        # Submit generation request
        submit_url = f"{base_url}/{input_data.model.value}"
        submit_data = {"prompt": input_data.prompt}

        seen_logs = set()

        try:
            # Submit request to queue
            submit_response = httpx.post(submit_url, headers=headers, json=submit_data)
            submit_response.raise_for_status()
            request_data = submit_response.json()

            # Get request_id and urls from initial response
            request_id = request_data.get("request_id")
            status_url = request_data.get("status_url")
            result_url = request_data.get("response_url")

            if not all([request_id, status_url, result_url]):
                raise ValueError("Missing required data in submission response")

            # Poll for status with exponential backoff
            max_attempts = 30
            attempt = 0
            base_wait_time = 5

            while attempt < max_attempts:
                status_response = httpx.get(f"{status_url}?logs=1", headers=headers)
                status_response.raise_for_status()
                status_data = status_response.json()

                # Process new logs only
                logs = status_data.get("logs", [])
                if logs and isinstance(logs, list):
                    for log in logs:
                        if isinstance(log, dict):
                            # Create a unique key for this log entry
                            log_key = (
                                f"{log.get('timestamp', '')}-{log.get('message', '')}"
                            )
                            if log_key not in seen_logs:
                                seen_logs.add(log_key)
                                message = log.get("message", "")
                                if message:
                                    logger.debug(
                                        f"[FAL Generation] [{log.get('level', 'INFO')}] [{log.get('source', '')}] [{log.get('timestamp', '')}] {message}"
                                    )

                status = status_data.get("status")
                if status == "COMPLETED":
                    # Get the final result
                    result_response = httpx.get(result_url, headers=headers)
                    result_response.raise_for_status()
                    result_data = result_response.json()

                    if "video" not in result_data or not isinstance(
                        result_data["video"], dict
                    ):
                        raise ValueError("Invalid response format - missing video data")

                    video_url = result_data["video"].get("url")
                    if not video_url:
                        raise ValueError("No video URL in response")

                    return video_url

                elif status == "FAILED":
                    error_msg = status_data.get("error", "No error details provided")
                    raise RuntimeError(f"Video generation failed: {error_msg}")
                elif status == "IN_QUEUE":
                    position = status_data.get("queue_position", "unknown")
                    logger.debug(
                        f"[FAL Generation] Status: In queue, position: {position}"
                    )
                elif status == "IN_PROGRESS":
                    logger.debug(
                        "[FAL Generation] Status: Request is being processed..."
                    )
                else:
                    logger.info(f"[FAL Generation] Status: Unknown status: {status}")

                wait_time = min(base_wait_time * (2**attempt), 60)  # Cap at 60 seconds
                time.sleep(wait_time)
                attempt += 1

            raise RuntimeError("Maximum polling attempts reached")

        except httpx.HTTPError as e:
            raise RuntimeError(f"API request failed: {str(e)}")

    def run(
        self, input_data: Input, *, credentials: FalCredentials, **kwargs
    ) -> BlockOutput:
        try:
            video_url = self.generate_video(input_data, credentials)
            yield "video_url", video_url
        except Exception as e:
            error_message = str(e)
            yield "error", error_message
```
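For reference, the submit-and-poll protocol that generate_video implements against fal.run's queue boils down to a few lines. This is a sketch derived only from the block code above (endpoint paths, response fields, and backoff constants are taken from it, not from FAL's documentation):

```python
import time

import httpx

FAL_QUEUE = "https://queue.fal.run"


def fal_generate(api_key: str, model: str, prompt: str, max_attempts: int = 30) -> str:
    """Submit a prompt to the FAL queue and poll until a video URL is returned.

    Minimal sketch under the same assumptions as AIVideoGeneratorBlock: the queue
    returns status_url/response_url on submit, and the result payload contains
    {"video": {"url": ...}} once the status is COMPLETED.
    """
    headers = {"Authorization": f"Key {api_key}", "Content-Type": "application/json"}
    submitted = httpx.post(f"{FAL_QUEUE}/{model}", headers=headers, json={"prompt": prompt})
    submitted.raise_for_status()
    status_url = submitted.json()["status_url"]
    result_url = submitted.json()["response_url"]

    for attempt in range(max_attempts):
        status = httpx.get(status_url, headers=headers).json().get("status")
        if status == "COMPLETED":
            return httpx.get(result_url, headers=headers).json()["video"]["url"]
        if status == "FAILED":
            raise RuntimeError("generation failed")
        # Exponential backoff capped at 60 seconds, mirroring the block above.
        time.sleep(min(5 * 2**attempt, 60))
    raise RuntimeError("maximum polling attempts reached")
```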
autogpt_platform/backend/backend/blocks/github/_api.py (Normal file, 43 additions)
@@ -0,0 +1,43 @@
```python
from urllib.parse import urlparse

from backend.blocks.github._auth import GithubCredentials
from backend.util.request import Requests


def _convert_to_api_url(url: str) -> str:
    """
    Converts a standard GitHub URL to the corresponding GitHub API URL.
    Handles repository URLs, issue URLs, pull request URLs, and more.
    """
    parsed_url = urlparse(url)
    path_parts = parsed_url.path.strip("/").split("/")

    if len(path_parts) >= 2:
        owner, repo = path_parts[0], path_parts[1]
        api_base = f"https://api.github.com/repos/{owner}/{repo}"

        if len(path_parts) > 2:
            additional_path = "/".join(path_parts[2:])
            api_url = f"{api_base}/{additional_path}"
        else:
            # Repository base URL
            api_url = api_base
    else:
        raise ValueError("Invalid GitHub URL format.")

    return api_url


def _get_headers(credentials: GithubCredentials) -> dict[str, str]:
    return {
        "Authorization": credentials.bearer(),
        "Accept": "application/vnd.github.v3+json",
    }


def get_api(credentials: GithubCredentials, convert_urls: bool = True) -> Requests:
    return Requests(
        trusted_origins=["https://api.github.com", "https://github.com"],
        extra_url_validator=_convert_to_api_url if convert_urls else None,
        extra_headers=_get_headers(credentials),
    )
```
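Since _convert_to_api_url is a pure function, its mapping is easy to pin down. A small usage sketch, assuming the backend package from this diff is importable:

```python
# Assumes the platform backend from this diff is on the import path.
from backend.blocks.github._api import _convert_to_api_url

assert (
    _convert_to_api_url("https://github.com/owner/repo")
    == "https://api.github.com/repos/owner/repo"
)
# Extra path segments are appended verbatim, so pull-request URLs keep their
# human-facing "/pull/" segment; blocks that need the issues API rewrite
# "pull" to "issues" themselves (see post_comment further down).
assert (
    _convert_to_api_url("https://github.com/owner/repo/issues/1")
    == "https://api.github.com/repos/owner/repo/issues/1"
)
```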
```diff
@@ -1,12 +1,13 @@
 from typing import Literal

-from autogpt_libs.supabase_integration_credentials_store.types import (
-    APIKeyCredentials,
-    OAuth2Credentials,
-)
 from pydantic import SecretStr

-from backend.data.model import CredentialsField, CredentialsMetaInput
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    OAuth2Credentials,
+)
 from backend.util.settings import Secrets

 secrets = Secrets()
```
@@ -0,0 +1,700 @@
```json
{
  "action": "synchronize",
  "number": 8358,
  "pull_request": {
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358",
    "id": 2128918491,
    "node_id": "PR_kwDOJKSTjM5-5Lfb",
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358",
    "diff_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358.diff",
    "patch_url": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358.patch",
    "issue_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358",
    "number": 8358,
    "state": "open",
    "locked": false,
    "title": "feat(platform, blocks): Webhook-triggered blocks",
    "user": {"login": "Pwuts", "id": 12185583, "node_id": "MDQ6VXNlcjEyMTg1NTgz", "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Pwuts", "html_url": "https://github.com/Pwuts", "followers_url": "https://api.github.com/users/Pwuts/followers", "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", "organizations_url": "https://api.github.com/users/Pwuts/orgs", "repos_url": "https://api.github.com/users/Pwuts/repos", "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", "received_events_url": "https://api.github.com/users/Pwuts/received_events", "type": "User", "user_view_type": "public", "site_admin": false},
    "body": "- Resolves #8352\r\n\r\n## Changes 🏗️\r\n\r\n- feat(blocks): Add GitHub Pull Request Trigger block\r\n\r\n### feat(platform): Add support for Webhook-triggered blocks\r\n- ⚠️ Add `PLATFORM_BASE_URL` setting\r\n\r\n- Add webhook config option and `BlockType.WEBHOOK` to `Block`\r\n  - Add check to `Block.__init__` to enforce type and shape of webhook event filter\r\n  - Add check to `Block.__init__` to enforce `payload` input on webhook blocks\r\n\r\n- Add `Webhook` model + CRUD functions in `backend.data.integrations` to represent webhooks created by our system\r\n  - Add `IntegrationWebhook` to DB schema + reference `AgentGraphNode.webhook_id`\r\n  - Add `set_node_webhook(..)` in `backend.data.graph`\r\n\r\n- Add webhook-related endpoints:\r\n  - `POST /integrations/{provider}/webhooks/{webhook_id}/ingress` endpoint, to receive webhook payloads, and for all associated nodes create graph executions\r\n    - Add `Node.is_triggered_by_event_type(..)` helper method\r\n  - `POST /integrations/{provider}/webhooks/{webhook_id}/ping` endpoint, to allow testing a webhook\r\n    - Add `WebhookEvent` + pub/sub functions in `backend.data.integrations`\r\n\r\n- Add `backend.integrations.webhooks` module, including:\r\n  - `graph_lifecycle_hooks`, e.g. `on_graph_activate(..)`, to handle corresponding webhook creation etc.\r\n    - Add calls to these hooks in the graph create/update endpoints\r\n  - `BaseWebhooksManager` + `GithubWebhooksManager` to handle creating + registering, removing + deregistering, and retrieving existing webhooks, and validating incoming payloads\r\n\r\n### Other improvements\r\n- fix(blocks): Allow having an input and output pin with the same name\r\n- feat(blocks): Allow hiding inputs (e.g. `payload`) with `SchemaField(hidden=True)`\r\n- feat(backend/data): Add `graph_id`, `graph_version` to `Node`; `user_id` to `GraphMeta`\r\n  - Add `Creatable` versions of `Node`, `GraphMeta` and `Graph` without these properties\r\n  - Add `graph_from_creatable(..)` helper function in `backend.data.graph`\r\n- refactor(backend/data): Make `RedisEventQueue` generic\r\n- refactor(frontend): Deduplicate & clean up code for different block types in `generateInputHandles(..)` in `CustomNode`\r\n- refactor(backend): Remove unused subgraph functionality\r\n\r\n## How it works\r\n- When a graph is created, the `on_graph_activate` and `on_node_activate` hooks are called on the graph and its nodes\r\n- If a webhook-triggered node has presets for all the relevant inputs, `on_node_activate` will get/create a suitable webhook and link it by setting `AgentGraphNode.webhook_id`\r\n  - `on_node_activate` uses `webhook_manager.get_suitable_webhook(..)`, which tries to find a suitable webhook (with matching requirements) or creates it if none exists yet\r\n- When a graph is deactivated (in favor of a newer/other version) or deleted, `on_graph_deactivate` and `on_node_deactivate` are called on the graph and its nodes to clean up webhooks that are no longer in use\r\n- When a valid webhook payload is received, two things happen:\r\n  1. It is broadcast on the Redis channel `webhooks/{webhook_id}/{event_type}`\r\n  2. Graph executions are initiated for all nodes triggered by this webhook\r\n\r\n## TODO\r\n- [ ] #8537\r\n- [x] #8538\r\n- [ ] #8357\r\n- [ ] ~~#8554~~ can be done in a follow-up PR\r\n- [ ] Test test test!\r\n- [ ] Add note on `repo` input of webhook blocks that the credentials used must have the right permissions for the given organization/repo\r\n- [x] Implement proper detection and graceful handling of webhook creation failing due to insufficient permissions. This should give a clear message to the user to e.g. \"give the app access to this organization in your settings\".\r\n- [ ] Nice-to-have: make a button on webhook blocks to trigger a ping and check its result. The API endpoints for this is already implemented.",
    "created_at": "2024-10-16T22:13:47Z",
    "updated_at": "2024-11-11T18:34:54Z",
    "closed_at": null,
    "merged_at": null,
    "merge_commit_sha": "cbfd0cdd8db52cdd5a3b7ce088fc0ab4617a652e",
    "assignee": {"login": "Pwuts", "id": 12185583, "node_id": "MDQ6VXNlcjEyMTg1NTgz", "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Pwuts", "html_url": "https://github.com/Pwuts", "followers_url": "https://api.github.com/users/Pwuts/followers", "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", "organizations_url": "https://api.github.com/users/Pwuts/orgs", "repos_url": "https://api.github.com/users/Pwuts/repos", "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", "received_events_url": "https://api.github.com/users/Pwuts/received_events", "type": "User", "user_view_type": "public", "site_admin": false},
    "assignees": [
      {"login": "Pwuts", "id": 12185583, "node_id": "MDQ6VXNlcjEyMTg1NTgz", "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Pwuts", "html_url": "https://github.com/Pwuts", "followers_url": "https://api.github.com/users/Pwuts/followers", "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", "organizations_url": "https://api.github.com/users/Pwuts/orgs", "repos_url": "https://api.github.com/users/Pwuts/repos", "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", "received_events_url": "https://api.github.com/users/Pwuts/received_events", "type": "User", "user_view_type": "public", "site_admin": false}
    ],
    "requested_reviewers": [
      {"login": "kcze", "id": 34861343, "node_id": "MDQ6VXNlcjM0ODYxMzQz", "avatar_url": "https://avatars.githubusercontent.com/u/34861343?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kcze", "html_url": "https://github.com/kcze", "followers_url": "https://api.github.com/users/kcze/followers", "following_url": "https://api.github.com/users/kcze/following{/other_user}", "gists_url": "https://api.github.com/users/kcze/gists{/gist_id}", "starred_url": "https://api.github.com/users/kcze/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kcze/subscriptions", "organizations_url": "https://api.github.com/users/kcze/orgs", "repos_url": "https://api.github.com/users/kcze/repos", "events_url": "https://api.github.com/users/kcze/events{/privacy}", "received_events_url": "https://api.github.com/users/kcze/received_events", "type": "User", "user_view_type": "public", "site_admin": false}
    ],
    "requested_teams": [
      {"name": "DevOps", "id": 9547361, "node_id": "T_kwDOB8roIc4Aka5h", "slug": "devops", "description": "", "privacy": "closed", "notification_setting": "notifications_enabled", "url": "https://api.github.com/organizations/130738209/team/9547361", "html_url": "https://github.com/orgs/Significant-Gravitas/teams/devops", "members_url": "https://api.github.com/organizations/130738209/team/9547361/members{/member}", "repositories_url": "https://api.github.com/organizations/130738209/team/9547361/repos", "permission": "pull", "parent": null}
    ],
    "labels": [
      {"id": 5272676214, "node_id": "LA_kwDOJKSTjM8AAAABOkandg", "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation"},
      {"id": 5410633769, "node_id": "LA_kwDOJKSTjM8AAAABQn-4KQ", "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/size/xl", "name": "size/xl", "color": "E751DD", "default": false, "description": ""},
      {"id": 6892322271, "node_id": "LA_kwDOJKSTjM8AAAABmtB93w", "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/Review%20effort%20[1-5]:%204", "name": "Review effort [1-5]: 4", "color": "d1bcf9", "default": false, "description": null},
      {"id": 7218433025, "node_id": "LA_kwDOJKSTjM8AAAABrkCMAQ", "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/frontend", "name": "platform/frontend", "color": "033C07", "default": false, "description": "AutoGPT Platform - Front end"},
      {"id": 7219356193, "node_id": "LA_kwDOJKSTjM8AAAABrk6iIQ", "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/backend", "name": "platform/backend", "color": "ededed", "default": false, "description": "AutoGPT Platform - Back end"},
      {"id": 7515330106, "node_id": "LA_kwDOJKSTjM8AAAABv_LWOg", "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels/platform/blocks", "name": "platform/blocks", "color": "eb5757", "default": false, "description": null}
    ],
    "milestone": null,
    "draft": false,
    "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/commits",
    "review_comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/comments",
    "review_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/comments{/number}",
    "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358/comments",
    "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/8f708a2b60463eec10747d8f45dead35b5a45bd0",
    "head": {
      "label": "Significant-Gravitas:reinier/open-1961-implement-github-on-pull-request-block",
      "ref": "reinier/open-1961-implement-github-on-pull-request-block",
      "sha": "8f708a2b60463eec10747d8f45dead35b5a45bd0",
      "user": {"login": "Significant-Gravitas", "id": 130738209, "node_id": "O_kgDOB8roIQ", "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Significant-Gravitas", "html_url": "https://github.com/Significant-Gravitas", "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", "type": "Organization", "user_view_type": "public", "site_admin": false},
      "repo": {
        "id": 614765452,
        "node_id": "R_kgDOJKSTjA",
        "name": "AutoGPT",
        "full_name": "Significant-Gravitas/AutoGPT",
        "private": false,
        "owner": {"login": "Significant-Gravitas", "id": 130738209, "node_id": "O_kgDOB8roIQ", "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Significant-Gravitas", "html_url": "https://github.com/Significant-Gravitas", "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", "type": "Organization", "user_view_type": "public", "site_admin": false},
        "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
        "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.",
        "fork": false,
        "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
        "forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks",
        "keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}",
        "collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}",
        "teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams",
        "hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks",
        "issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}",
        "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events",
        "assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}",
        "branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}",
        "tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags",
        "blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}",
        "git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}",
        "git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}",
        "trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}",
        "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}",
        "languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages",
        "stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers",
        "contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors",
        "subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers",
        "subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription",
        "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}",
        "git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}",
        "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}",
        "issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}",
        "contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}",
        "compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}",
        "merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges",
        "archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}",
        "downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads",
        "issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}",
        "pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}",
        "milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}",
        "notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}",
        "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}",
        "releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}",
        "deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments",
        "created_at": "2023-03-16T09:21:07Z",
        "updated_at": "2024-11-11T18:16:29Z",
        "pushed_at": "2024-11-11T18:34:52Z",
        "git_url": "git://github.com/Significant-Gravitas/AutoGPT.git",
        "ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git",
        "clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
        "svn_url": "https://github.com/Significant-Gravitas/AutoGPT",
        "homepage": "https://agpt.co",
        "size": 181894,
        "stargazers_count": 168203,
        "watchers_count": 168203,
        "language": "Python",
        "has_issues": true,
        "has_projects": true,
        "has_downloads": true,
        "has_wiki": true,
        "has_pages": false,
        "has_discussions": true,
        "forks_count": 44376,
        "mirror_url": null,
        "archived": false,
        "disabled": false,
        "open_issues_count": 189,
        "license": {"key": "other", "name": "Other", "spdx_id": "NOASSERTION", "url": null, "node_id": "MDc6TGljZW5zZTA="},
        "allow_forking": true,
        "is_template": false,
        "web_commit_signoff_required": false,
        "topics": ["ai", "artificial-intelligence", "autonomous-agents", "gpt-4", "openai", "python"],
        "visibility": "public",
        "forks": 44376,
        "open_issues": 189,
        "watchers": 168203,
        "default_branch": "master",
        "allow_squash_merge": true,
        "allow_merge_commit": false,
        "allow_rebase_merge": false,
        "allow_auto_merge": true,
        "delete_branch_on_merge": true,
        "allow_update_branch": true,
        "use_squash_pr_title_as_default": true,
        "squash_merge_commit_message": "COMMIT_MESSAGES",
        "squash_merge_commit_title": "PR_TITLE",
        "merge_commit_message": "BLANK",
        "merge_commit_title": "PR_TITLE"
      }
    },
    "base": {
      "label": "Significant-Gravitas:dev",
      "ref": "dev",
      "sha": "0b5b95eff5e18c1e162d2b30b66a7be2bed1cbc2",
      "user": {"login": "Significant-Gravitas", "id": 130738209, "node_id": "O_kgDOB8roIQ", "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Significant-Gravitas", "html_url": "https://github.com/Significant-Gravitas", "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", "type": "Organization", "user_view_type": "public", "site_admin": false},
      "repo": {
        "id": 614765452,
        "node_id": "R_kgDOJKSTjA",
        "name": "AutoGPT",
        "full_name": "Significant-Gravitas/AutoGPT",
        "private": false,
        "owner": {"login": "Significant-Gravitas", "id": 130738209, "node_id": "O_kgDOB8roIQ", "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Significant-Gravitas", "html_url": "https://github.com/Significant-Gravitas", "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", "type": "Organization", "user_view_type": "public", "site_admin": false},
        "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
        "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.",
        "fork": false,
        "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
        "forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks",
        "keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}",
        "collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}",
        "teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams",
        "hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks",
        "issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}",
        "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events",
        "assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}",
        "branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}",
        "tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags",
        "blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}",
        "git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}",
        "git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}",
        "trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}",
        "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}",
        "languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages",
        "stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers",
        "contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors",
        "subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers",
        "subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription",
        "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}",
        "git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}",
        "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}",
        "issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}",
        "contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}",
        "compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}",
        "merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges",
        "archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}",
        "downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads",
        "issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}",
        "pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}",
        "milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}",
        "notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}",
        "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}",
        "releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}",
        "deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments",
        "created_at": "2023-03-16T09:21:07Z",
        "updated_at": "2024-11-11T18:16:29Z",
        "pushed_at": "2024-11-11T18:34:52Z",
        "git_url": "git://github.com/Significant-Gravitas/AutoGPT.git",
        "ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git",
        "clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
        "svn_url": "https://github.com/Significant-Gravitas/AutoGPT",
        "homepage": "https://agpt.co",
        "size": 181894,
        "stargazers_count": 168203,
        "watchers_count": 168203,
        "language": "Python",
        "has_issues": true,
        "has_projects": true,
        "has_downloads": true,
        "has_wiki": true,
        "has_pages": false,
        "has_discussions": true,
        "forks_count": 44376,
        "mirror_url": null,
        "archived": false,
        "disabled": false,
        "open_issues_count": 189,
        "license": {"key": "other", "name": "Other", "spdx_id": "NOASSERTION", "url": null, "node_id": "MDc6TGljZW5zZTA="},
        "allow_forking": true,
        "is_template": false,
        "web_commit_signoff_required": false,
        "topics": ["ai", "artificial-intelligence", "autonomous-agents", "gpt-4", "openai", "python"],
        "visibility": "public",
        "forks": 44376,
        "open_issues": 189,
        "watchers": 168203,
        "default_branch": "master",
        "allow_squash_merge": true,
        "allow_merge_commit": false,
        "allow_rebase_merge": false,
        "allow_auto_merge": true,
        "delete_branch_on_merge": true,
        "allow_update_branch": true,
        "use_squash_pr_title_as_default": true,
        "squash_merge_commit_message": "COMMIT_MESSAGES",
        "squash_merge_commit_title": "PR_TITLE",
        "merge_commit_message": "BLANK",
        "merge_commit_title": "PR_TITLE"
      }
    },
    "_links": {
      "self": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358"},
      "html": {"href": "https://github.com/Significant-Gravitas/AutoGPT/pull/8358"},
      "issue": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358"},
      "comments": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/8358/comments"},
      "review_comments": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/comments"},
      "review_comment": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/comments{/number}"},
      "commits": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls/8358/commits"},
      "statuses": {"href": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/8f708a2b60463eec10747d8f45dead35b5a45bd0"}
    },
    "author_association": "MEMBER",
    "auto_merge": null,
    "active_lock_reason": null,
    "merged": false,
    "mergeable": null,
    "rebaseable": null,
    "mergeable_state": "unknown",
    "merged_by": null,
    "comments": 12,
    "review_comments": 29,
    "maintainer_can_modify": false,
    "commits": 62,
    "additions": 1674,
    "deletions": 331,
    "changed_files": 36
  },
  "before": "f40aef87672203f47bbbd53f83fae0964c5624da",
  "after": "8f708a2b60463eec10747d8f45dead35b5a45bd0",
  "repository": {
    "id": 614765452,
    "node_id": "R_kgDOJKSTjA",
    "name": "AutoGPT",
    "full_name": "Significant-Gravitas/AutoGPT",
    "private": false,
    "owner": {"login": "Significant-Gravitas", "id": 130738209, "node_id": "O_kgDOB8roIQ", "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Significant-Gravitas", "html_url": "https://github.com/Significant-Gravitas", "followers_url": "https://api.github.com/users/Significant-Gravitas/followers", "following_url": "https://api.github.com/users/Significant-Gravitas/following{/other_user}", "gists_url": "https://api.github.com/users/Significant-Gravitas/gists{/gist_id}", "starred_url": "https://api.github.com/users/Significant-Gravitas/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Significant-Gravitas/subscriptions", "organizations_url": "https://api.github.com/users/Significant-Gravitas/orgs", "repos_url": "https://api.github.com/users/Significant-Gravitas/repos", "events_url": "https://api.github.com/users/Significant-Gravitas/events{/privacy}", "received_events_url": "https://api.github.com/users/Significant-Gravitas/received_events", "type": "Organization", "user_view_type": "public", "site_admin": false},
    "html_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "description": "AutoGPT is the vision of accessible AI for everyone, to use and to build on. Our mission is to provide the tools, so that you can focus on what matters.",
    "fork": false,
    "url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT",
    "forks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/forks",
    "keys_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/teams",
    "hooks_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/hooks",
    "issue_events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/events{/number}",
    "events_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/events",
    "assignees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/assignees{/user}",
    "branches_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/branches{/branch}",
    "tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/tags",
    "blobs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/languages",
    "stargazers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/stargazers",
    "contributors_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contributors",
    "subscribers_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscribers",
    "subscription_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/subscription",
    "commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/contents/{+path}",
    "compare_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/merges",
    "archive_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/downloads",
    "issues_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/issues{/number}",
    "pulls_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/labels{/name}",
    "releases_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/releases{/id}",
    "deployments_url": "https://api.github.com/repos/Significant-Gravitas/AutoGPT/deployments",
    "created_at": "2023-03-16T09:21:07Z",
    "updated_at": "2024-11-11T18:16:29Z",
    "pushed_at": "2024-11-11T18:34:52Z",
    "git_url": "git://github.com/Significant-Gravitas/AutoGPT.git",
    "ssh_url": "git@github.com:Significant-Gravitas/AutoGPT.git",
    "clone_url": "https://github.com/Significant-Gravitas/AutoGPT.git",
    "svn_url": "https://github.com/Significant-Gravitas/AutoGPT",
    "homepage": "https://agpt.co",
    "size": 181894,
    "stargazers_count": 168203,
    "watchers_count": 168203,
    "language": "Python",
    "has_issues": true,
    "has_projects": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "has_discussions": true,
    "forks_count": 44376,
    "mirror_url": null,
    "archived": false,
    "disabled": false,
    "open_issues_count": 189,
    "license": {"key": "other", "name": "Other", "spdx_id": "NOASSERTION", "url": null, "node_id": "MDc6TGljZW5zZTA="},
    "allow_forking": true,
    "is_template": false,
    "web_commit_signoff_required": false,
    "topics": ["ai", "artificial-intelligence", "autonomous-agents", "gpt-4", "openai", "python"],
    "visibility": "public",
    "forks": 44376,
    "open_issues": 189,
    "watchers": 168203,
    "default_branch": "master",
    "custom_properties": {}
  },
  "organization": {
    "login": "Significant-Gravitas",
    "id": 130738209,
    "node_id": "O_kgDOB8roIQ",
    "url": "https://api.github.com/orgs/Significant-Gravitas",
    "repos_url": "https://api.github.com/orgs/Significant-Gravitas/repos",
    "events_url": "https://api.github.com/orgs/Significant-Gravitas/events",
    "hooks_url": "https://api.github.com/orgs/Significant-Gravitas/hooks",
    "issues_url": "https://api.github.com/orgs/Significant-Gravitas/issues",
    "members_url": "https://api.github.com/orgs/Significant-Gravitas/members{/member}",
    "public_members_url": "https://api.github.com/orgs/Significant-Gravitas/public_members{/member}",
    "avatar_url": "https://avatars.githubusercontent.com/u/130738209?v=4",
    "description": ""
  },
  "enterprise": {
    "id": 149607,
    "slug": "significant-gravitas",
    "name": "Significant Gravitas",
    "node_id": "E_kgDOAAJIZw",
    "avatar_url": "https://avatars.githubusercontent.com/b/149607?v=4",
    "description": "The creators of AutoGPT",
    "website_url": "discord.gg/autogpt",
    "html_url": "https://github.com/enterprises/significant-gravitas",
    "created_at": "2024-04-18T17:43:53Z",
    "updated_at": "2024-10-23T16:59:55Z"
  },
  "sender": {"login": "Pwuts", "id": 12185583, "node_id": "MDQ6VXNlcjEyMTg1NTgz", "avatar_url": "https://avatars.githubusercontent.com/u/12185583?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Pwuts", "html_url": "https://github.com/Pwuts", "followers_url": "https://api.github.com/users/Pwuts/followers", "following_url": "https://api.github.com/users/Pwuts/following{/other_user}", "gists_url": "https://api.github.com/users/Pwuts/gists{/gist_id}", "starred_url": "https://api.github.com/users/Pwuts/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Pwuts/subscriptions", "organizations_url": "https://api.github.com/users/Pwuts/orgs", "repos_url": "https://api.github.com/users/Pwuts/repos", "events_url": "https://api.github.com/users/Pwuts/events{/privacy}", "received_events_url": "https://api.github.com/users/Pwuts/received_events", "type": "User", "user_view_type": "public", "site_admin": false}
}
```
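This fixture is the kind of pull_request event the PR's new ingress endpoint receives. Purely as an illustration of the event-filter idea described in the PR body (the real logic lives in Node.is_triggered_by_event_type(..) and backend.integrations.webhooks, neither of which is shown in this section), a hypothetical matcher might look like:

```python
# Hypothetical sketch; the function name and filter shape are illustrative,
# not the PR's actual implementation.
def matches_event_filter(event_type: str, event_filter: dict[str, bool]) -> bool:
    """True if a node's webhook event filter subscribes to this event type."""
    return event_filter.get(event_type, False)


# From the example payload above: "action": "synchronize" on a pull_request event.
event_type = "pull_request.synchronize"

# A node listening for PR pushes but not closes:
node_filter = {"pull_request.synchronize": True, "pull_request.closed": False}
assert matches_event_filter(event_type, node_filter)
```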
```diff
@@ -1,9 +1,11 @@
-import requests
+from urllib.parse import urlparse

 from typing_extensions import TypedDict

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField

+from ._api import get_api
 from ._auth import (
     TEST_CREDENTIALS,
     TEST_CREDENTIALS_INPUT,
@@ -13,6 +15,11 @@ from ._auth import (
 )


+def is_github_url(url: str) -> bool:
+    return urlparse(url).netloc == "github.com"
+
+
 # --8<-- [start:GithubCommentBlockExample]
 class GithubCommentBlock(Block):
     class Input(BlockSchema):
         credentials: GithubCredentialsInput = GithubCredentialsField("repo")
@@ -39,15 +46,27 @@ class GithubCommentBlock(Block):
             categories={BlockCategory.DEVELOPER_TOOLS},
             input_schema=GithubCommentBlock.Input,
             output_schema=GithubCommentBlock.Output,
-            test_input={
-                "issue_url": "https://github.com/owner/repo/issues/1",
-                "comment": "This is a test comment.",
-                "credentials": TEST_CREDENTIALS_INPUT,
-            },
+            test_input=[
+                {
+                    "issue_url": "https://github.com/owner/repo/issues/1",
+                    "comment": "This is a test comment.",
+                    "credentials": TEST_CREDENTIALS_INPUT,
+                },
+                {
+                    "issue_url": "https://github.com/owner/repo/pull/1",
+                    "comment": "This is a test comment.",
+                    "credentials": TEST_CREDENTIALS_INPUT,
+                },
+            ],
             test_credentials=TEST_CREDENTIALS,
             test_output=[
                 ("id", 1337),
                 ("url", "https://github.com/owner/repo/issues/1#issuecomment-1337"),
+                ("id", 1337),
+                (
+                    "url",
+                    "https://github.com/owner/repo/issues/1#issuecomment-1337",
+                ),
             ],
             test_mock={
                 "post_comment": lambda *args, **kwargs: (
@@ -61,27 +80,12 @@ class GithubCommentBlock(Block):
     def post_comment(
         credentials: GithubCredentials, issue_url: str, body_text: str
     ) -> tuple[int, str]:
-        if "/pull/" in issue_url:
-            api_url = (
-                issue_url.replace("github.com", "api.github.com/repos").replace(
-                    "/pull/", "/issues/"
-                )
-                + "/comments"
-            )
-        else:
-            api_url = (
-                issue_url.replace("github.com", "api.github.com/repos") + "/comments"
-            )
-
-        headers = {
-            "Authorization": credentials.bearer(),
-            "Accept": "application/vnd.github.v3+json",
-        }
+        api = get_api(credentials)
         data = {"body": body_text}
-
-        response = requests.post(api_url, headers=headers, json=data)
-        response.raise_for_status()
-
+        if "pull" in issue_url:
+            issue_url = issue_url.replace("pull", "issues")
+        comments_url = issue_url + "/comments"
+        response = api.post(comments_url, json=data)
         comment = response.json()
         return comment["id"], comment["html_url"]
```
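The rewrite above works because get_api (from _api.py earlier in this diff) installs _convert_to_api_url as an extra URL validator, so post_comment can target the human-facing URL and have it mapped to the REST API in flight. A runnable trace of the URL handling, under that assumption:

```python
# Assumption: Requests applies extra_url_validator to the URL before dispatch,
# per get_api in _api.py above.
issue_url = "https://github.com/owner/repo/pull/1"

# post_comment first rewrites pull URLs to their issues equivalent:
if "pull" in issue_url:
    issue_url = issue_url.replace("pull", "issues")
assert issue_url + "/comments" == "https://github.com/owner/repo/issues/1/comments"

# _convert_to_api_url would then map this onto:
#   https://api.github.com/repos/owner/repo/issues/1/comments
```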
|
||||
@@ -92,16 +96,16 @@ class GithubCommentBlock(Block):
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
id, url = self.post_comment(
|
||||
credentials,
|
||||
input_data.issue_url,
|
||||
input_data.comment,
|
||||
)
|
||||
yield "id", id
|
||||
yield "url", url
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to post comment: {str(e)}"
|
||||
id, url = self.post_comment(
|
||||
credentials,
|
||||
input_data.issue_url,
|
||||
input_data.comment,
|
||||
)
|
||||
yield "id", id
|
||||
yield "url", url
|
||||
|
||||
|
||||
# --8<-- [end:GithubCommentBlockExample]
|
||||
|
||||
|
||||
class GithubMakeIssueBlock(Block):
|
||||
@@ -155,16 +159,10 @@ class GithubMakeIssueBlock(Block):
|
||||
def create_issue(
|
||||
credentials: GithubCredentials, repo_url: str, title: str, body: str
|
||||
) -> tuple[int, str]:
|
||||
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/issues"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
api = get_api(credentials)
|
||||
data = {"title": title, "body": body}
|
||||
|
||||
response = requests.post(api_url, headers=headers, json=data)
|
||||
response.raise_for_status()
|
||||
|
||||
issues_url = repo_url + "/issues"
|
||||
response = api.post(issues_url, json=data)
|
||||
issue = response.json()
|
||||
return issue["number"], issue["html_url"]
|
||||
|
||||
@@ -175,17 +173,14 @@ class GithubMakeIssueBlock(Block):
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
number, url = self.create_issue(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
input_data.title,
|
||||
input_data.body,
|
||||
)
|
||||
yield "number", number
|
||||
yield "url", url
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to create issue: {str(e)}"
|
||||
number, url = self.create_issue(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
input_data.title,
|
||||
input_data.body,
|
||||
)
|
||||
yield "number", number
|
||||
yield "url", url
|
||||
|
||||
|
||||
class GithubReadIssueBlock(Block):
|
||||
@@ -234,21 +229,12 @@ class GithubReadIssueBlock(Block):
|
||||
def read_issue(
|
||||
credentials: GithubCredentials, issue_url: str
|
||||
) -> tuple[str, str, str]:
|
||||
api_url = issue_url.replace("github.com", "api.github.com/repos")
|
||||
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
api = get_api(credentials)
|
||||
response = api.get(issue_url)
|
||||
data = response.json()
|
||||
title = data.get("title", "No title found")
|
||||
body = data.get("body", "No body content found")
|
||||
user = data.get("user", {}).get("login", "No user found")
|
||||
|
||||
return title, body, user
|
||||
|
||||
def run(
|
||||
@@ -258,16 +244,16 @@ class GithubReadIssueBlock(Block):
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
title, body, user = self.read_issue(
|
||||
credentials,
|
||||
input_data.issue_url,
|
||||
)
|
||||
title, body, user = self.read_issue(
|
||||
credentials,
|
||||
input_data.issue_url,
|
||||
)
|
||||
if title:
|
||||
yield "title", title
|
||||
if body:
|
||||
yield "body", body
|
||||
if user:
|
||||
yield "user", user
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to read issue: {str(e)}"
|
||||
|
||||
|
||||
class GithubListIssuesBlock(Block):
|
||||
@@ -323,20 +309,13 @@ class GithubListIssuesBlock(Block):
|
||||
def list_issues(
|
||||
credentials: GithubCredentials, repo_url: str
|
||||
) -> list[Output.IssueItem]:
|
||||
api_url = repo_url.replace("github.com", "api.github.com/repos") + "/issues"
|
||||
headers = {
|
||||
"Authorization": credentials.bearer(),
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
|
||||
response = requests.get(api_url, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
api = get_api(credentials)
|
||||
issues_url = repo_url + "/issues"
|
||||
response = api.get(issues_url)
|
||||
data = response.json()
|
||||
issues: list[GithubListIssuesBlock.Output.IssueItem] = [
|
||||
{"title": issue["title"], "url": issue["html_url"]} for issue in data
|
||||
]
|
||||
|
||||
return issues
|
||||
|
||||
def run(
|
||||
@@ -346,14 +325,11 @@ class GithubListIssuesBlock(Block):
|
||||
credentials: GithubCredentials,
|
||||
**kwargs,
|
||||
) -> BlockOutput:
|
||||
try:
|
||||
issues = self.list_issues(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
)
|
||||
yield from (("issue", issue) for issue in issues)
|
||||
except Exception as e:
|
||||
yield "error", f"Failed to list issues: {str(e)}"
|
||||
issues = self.list_issues(
|
||||
credentials,
|
||||
input_data.repo_url,
|
||||
)
|
||||
yield from (("issue", issue) for issue in issues)
|
||||
|
||||
|
||||
class GithubAddLabelBlock(Block):
@@ -393,28 +369,10 @@ class GithubAddLabelBlock(Block):

    @staticmethod
    def add_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
        # Convert the provided GitHub URL to the API URL
        if "/pull/" in issue_url:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos").replace(
                    "/pull/", "/issues/"
                )
                + "/labels"
            )
        else:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos") + "/labels"
            )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        api = get_api(credentials)
        data = {"labels": [label]}

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        labels_url = issue_url + "/labels"
        api.post(labels_url, json=data)
        return "Label added successfully"

    def run(
@@ -424,15 +382,12 @@ class GithubAddLabelBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            status = self.add_label(
                credentials,
                input_data.issue_url,
                input_data.label,
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to add label: {str(e)}"
        status = self.add_label(
            credentials,
            input_data.issue_url,
            input_data.label,
        )
        yield "status", status

class GithubRemoveLabelBlock(Block):
@@ -474,31 +429,9 @@ class GithubRemoveLabelBlock(Block):

    @staticmethod
    def remove_label(credentials: GithubCredentials, issue_url: str, label: str) -> str:
        # Convert the provided GitHub URL to the API URL
        if "/pull/" in issue_url:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos").replace(
                    "/pull/", "/issues/"
                )
                + f"/labels/{label}"
            )
        else:
            api_url = (
                issue_url.replace("github.com", "api.github.com/repos")
                + f"/labels/{label}"
            )

        # Log the constructed API URL for debugging
        print(f"Constructed API URL: {api_url}")

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.delete(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        label_url = issue_url + f"/labels/{label}"
        api.delete(label_url)
        return "Label removed successfully"

    def run(
@@ -508,15 +441,12 @@ class GithubRemoveLabelBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            status = self.remove_label(
                credentials,
                input_data.issue_url,
                input_data.label,
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to remove label: {str(e)}"
        status = self.remove_label(
            credentials,
            input_data.issue_url,
            input_data.label,
        )
        yield "status", status

class GithubAssignIssueBlock(Block):
@@ -564,23 +494,10 @@ class GithubAssignIssueBlock(Block):
        issue_url: str,
        assignee: str,
    ) -> str:
        # Extracting repo path and issue number from the issue URL
        repo_path, issue_number = issue_url.replace("https://github.com/", "").split(
            "/issues/"
        )
        api_url = (
            f"https://api.github.com/repos/{repo_path}/issues/{issue_number}/assignees"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        api = get_api(credentials)
        assignees_url = issue_url + "/assignees"
        data = {"assignees": [assignee]}

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        api.post(assignees_url, json=data)
        return "Issue assigned successfully"

    def run(
@@ -590,15 +507,12 @@ class GithubAssignIssueBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            status = self.assign_issue(
                credentials,
                input_data.issue_url,
                input_data.assignee,
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to assign issue: {str(e)}"
        status = self.assign_issue(
            credentials,
            input_data.issue_url,
            input_data.assignee,
        )
        yield "status", status

class GithubUnassignIssueBlock(Block):
@@ -646,23 +560,10 @@ class GithubUnassignIssueBlock(Block):
        issue_url: str,
        assignee: str,
    ) -> str:
        # Extracting repo path and issue number from the issue URL
        repo_path, issue_number = issue_url.replace("https://github.com/", "").split(
            "/issues/"
        )
        api_url = (
            f"https://api.github.com/repos/{repo_path}/issues/{issue_number}/assignees"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        api = get_api(credentials)
        assignees_url = issue_url + "/assignees"
        data = {"assignees": [assignee]}

        response = requests.delete(api_url, headers=headers, json=data)
        response.raise_for_status()

        api.delete(assignees_url, json=data)
        return "Issue unassigned successfully"

    def run(
@@ -672,12 +573,9 @@ class GithubUnassignIssueBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            status = self.unassign_issue(
                credentials,
                input_data.issue_url,
                input_data.assignee,
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to unassign issue: {str(e)}"
        status = self.unassign_issue(
            credentials,
            input_data.issue_url,
            input_data.assignee,
        )
        yield "status", status

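Every hunk in this file follows the same refactor: hand-rolled `requests` calls, header dicts, and URL rewriting are replaced by a shared client obtained from `._api.get_api`, and the per-block try/except wrappers in run() are dropped. The diff does not include `_api.py` itself, so the following is only a minimal sketch of what such a wrapper plausibly looks like, inferred from the call sites above; the class name `_ApiClient` and the exact URL-rewriting rule are assumptions, not the repository's actual implementation.

import requests


class _ApiClient:
    # Hypothetical sketch of the client returned by get_api(); the real one
    # lives in backend/blocks/github/_api.py and is not shown in this diff.
    def __init__(self, credentials):
        self._headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

    def _to_api_url(self, url: str) -> str:
        # e.g. "https://github.com/owner/repo/issues/1"
        #   -> "https://api.github.com/repos/owner/repo/issues/1"
        return url.replace("https://github.com/", "https://api.github.com/repos/")

    def _request(self, method: str, url: str, **kwargs) -> requests.Response:
        response = requests.request(
            method, self._to_api_url(url), headers=self._headers, **kwargs
        )
        response.raise_for_status()  # surface HTTP errors to the caller
        return response

    def get(self, url: str, **kwargs) -> requests.Response:
        return self._request("GET", url, **kwargs)

    def post(self, url: str, **kwargs) -> requests.Response:
        return self._request("POST", url, **kwargs)

    def delete(self, url: str, **kwargs) -> requests.Response:
        return self._request("DELETE", url, **kwargs)


def get_api(credentials) -> _ApiClient:
    return _ApiClient(credentials)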
@@ -1,9 +1,9 @@
import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -64,20 +64,13 @@ class GithubListPullRequestsBlock(Block):

    @staticmethod
    def list_prs(credentials: GithubCredentials, repo_url: str) -> list[Output.PRItem]:
        api_url = repo_url.replace("github.com", "api.github.com/repos") + "/pulls"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        pulls_url = repo_url + "/pulls"
        response = api.get(pulls_url)
        data = response.json()
        pull_requests: list[GithubListPullRequestsBlock.Output.PRItem] = [
            {"title": pr["title"], "url": pr["html_url"]} for pr in data
        ]

        return pull_requests

    def run(
@@ -87,14 +80,11 @@ class GithubListPullRequestsBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            pull_requests = self.list_prs(
                credentials,
                input_data.repo_url,
            )
            yield from (("pull_request", pr) for pr in pull_requests)
        except Exception as e:
            yield "error", f"Failed to list pull requests: {str(e)}"
        pull_requests = self.list_prs(
            credentials,
            input_data.repo_url,
        )
        yield from (("pull_request", pr) for pr in pull_requests)

class GithubMakePullRequestBlock(Block):
@@ -113,7 +103,11 @@ class GithubMakePullRequestBlock(Block):
            placeholder="Enter the pull request body",
        )
        head: str = SchemaField(
            description="The name of the branch where your changes are implemented. For cross-repository pull requests in the same network, namespace head with a user like this: username:branch.",
            description=(
                "The name of the branch where your changes are implemented. "
                "For cross-repository pull requests in the same network, "
                "namespace head with a user like this: username:branch."
            ),
            placeholder="Enter the head branch",
        )
        base: str = SchemaField(
@@ -165,17 +159,10 @@ class GithubMakePullRequestBlock(Block):
        head: str,
        base: str,
    ) -> tuple[int, str]:
        repo_path = repo_url.replace("https://github.com/", "")
        api_url = f"https://api.github.com/repos/{repo_path}/pulls"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        api = get_api(credentials)
        pulls_url = repo_url + "/pulls"
        data = {"title": title, "body": body, "head": head, "base": base}

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        response = api.post(pulls_url, json=data)
        pr_data = response.json()
        return pr_data["number"], pr_data["html_url"]

@@ -197,15 +184,8 @@ class GithubMakePullRequestBlock(Block):
            )
            yield "number", number
            yield "url", url
        except requests.exceptions.HTTPError as http_err:
            if http_err.response.status_code == 422:
                error_details = http_err.response.json()
                error_message = error_details.get("message", "Unknown error")
            else:
                error_message = str(http_err)
            yield "error", f"Failed to create pull request: {error_message}"
        except Exception as e:
            yield "error", f"Failed to create pull request: {str(e)}"
            yield "error", str(e)

class GithubReadPullRequestBlock(Block):
@@ -260,42 +240,21 @@ class GithubReadPullRequestBlock(Block):

    @staticmethod
    def read_pr(credentials: GithubCredentials, pr_url: str) -> tuple[str, str, str]:
        api_url = pr_url.replace("github.com", "api.github.com/repos").replace(
            "/pull/", "/issues/"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        # Adjust the URL to access the issue endpoint for PR metadata
        issue_url = pr_url.replace("/pull/", "/issues/")
        response = api.get(issue_url)
        data = response.json()
        title = data.get("title", "No title found")
        body = data.get("body", "No body content found")
        author = data.get("user", {}).get("login", "No user found")

        return title, body, author

    @staticmethod
    def read_pr_changes(credentials: GithubCredentials, pr_url: str) -> str:
        api_url = (
            pr_url.replace("github.com", "api.github.com/repos").replace(
                "/pull/", "/pulls/"
            )
            + "/files"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        files_url = pr_url + "/files"
        response = api.get(files_url)
        files = response.json()
        changes = []
        for file in files:
@@ -303,7 +262,6 @@ class GithubReadPullRequestBlock(Block):
            patch = file.get("patch")
            if filename and patch:
                changes.append(f"File: {filename}\n{patch}")

        return "\n\n".join(changes)

    def run(
@@ -313,23 +271,20 @@ class GithubReadPullRequestBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            title, body, author = self.read_pr(
        title, body, author = self.read_pr(
            credentials,
            input_data.pr_url,
        )
        yield "title", title
        yield "body", body
        yield "author", author

        if input_data.include_pr_changes:
            changes = self.read_pr_changes(
                credentials,
                input_data.pr_url,
            )
        yield "title", title
        yield "body", body
        yield "author", author

        if input_data.include_pr_changes:
            changes = self.read_pr_changes(
                credentials,
                input_data.pr_url,
            )
            yield "changes", changes
        except Exception as e:
            yield "error", f"Failed to read pull request: {str(e)}"
            yield "changes", changes

class GithubAssignPRReviewerBlock(Block):
@@ -375,23 +330,10 @@ class GithubAssignPRReviewerBlock(Block):
    def assign_reviewer(
        credentials: GithubCredentials, pr_url: str, reviewer: str
    ) -> str:
        # Convert the PR URL to the appropriate API endpoint
        api_url = (
            pr_url.replace("github.com", "api.github.com/repos").replace(
                "/pull/", "/pulls/"
            )
            + "/requested_reviewers"
        )

        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        api = get_api(credentials)
        reviewers_url = pr_url + "/requested_reviewers"
        data = {"reviewers": [reviewer]}

        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        api.post(reviewers_url, json=data)
        return "Reviewer assigned successfully"

    def run(
@@ -408,19 +350,8 @@ class GithubAssignPRReviewerBlock(Block):
                input_data.reviewer,
            )
            yield "status", status
        except requests.exceptions.HTTPError as http_err:
            if http_err.response.status_code == 422:
                error_msg = (
                    "Failed to assign reviewer: "
                    f"The reviewer '{input_data.reviewer}' may not have permission "
                    "or the pull request is not in a valid state. "
                    f"Detailed error: {http_err.response.text}"
                )
            else:
                error_msg = f"HTTP error: {http_err} - {http_err.response.text}"
            yield "error", error_msg
        except Exception as e:
            yield "error", f"Failed to assign reviewer: {str(e)}"
            yield "error", str(e)

class GithubUnassignPRReviewerBlock(Block):
@@ -466,21 +397,10 @@ class GithubUnassignPRReviewerBlock(Block):
    def unassign_reviewer(
        credentials: GithubCredentials, pr_url: str, reviewer: str
    ) -> str:
        api_url = (
            pr_url.replace("github.com", "api.github.com/repos").replace(
                "/pull/", "/pulls/"
            )
            + "/requested_reviewers"
        )
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }
        api = get_api(credentials)
        reviewers_url = pr_url + "/requested_reviewers"
        data = {"reviewers": [reviewer]}

        response = requests.delete(api_url, headers=headers, json=data)
        response.raise_for_status()

        api.delete(reviewers_url, json=data)
        return "Reviewer unassigned successfully"

    def run(
@@ -498,7 +418,7 @@ class GithubUnassignPRReviewerBlock(Block):
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to unassign reviewer: {str(e)}"
            yield "error", str(e)

class GithubListPRReviewersBlock(Block):
@@ -557,26 +477,14 @@ class GithubListPRReviewersBlock(Block):
    def list_reviewers(
        credentials: GithubCredentials, pr_url: str
    ) -> list[Output.ReviewerItem]:
        api_url = (
            pr_url.replace("github.com", "api.github.com/repos").replace(
                "/pull/", "/pulls/"
            )
            + "/requested_reviewers"
        )
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        reviewers_url = pr_url + "/requested_reviewers"
        response = api.get(reviewers_url)
        data = response.json()
        reviewers: list[GithubListPRReviewersBlock.Output.ReviewerItem] = [
            {"username": reviewer["login"], "url": reviewer["html_url"]}
            for reviewer in data.get("users", [])
        ]

        return reviewers

    def run(
@@ -586,11 +494,8 @@ class GithubListPRReviewersBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            reviewers = self.list_reviewers(
                credentials,
                input_data.pr_url,
            )
            yield from (("reviewer", reviewer) for reviewer in reviewers)
        except Exception as e:
            yield "error", f"Failed to list reviewers: {str(e)}"
        reviewers = self.list_reviewers(
            credentials,
            input_data.pr_url,
        )
        yield from (("reviewer", reviewer) for reviewer in reviewers)

@@ -1,11 +1,11 @@
import base64

import requests
from typing_extensions import TypedDict

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._api import get_api
from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
@@ -68,17 +68,11 @@ class GithubListTagsBlock(Block):
    def list_tags(
        credentials: GithubCredentials, repo_url: str
    ) -> list[Output.TagItem]:
        repo_path = repo_url.replace("https://github.com/", "")
        api_url = f"https://api.github.com/repos/{repo_path}/tags"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        tags_url = repo_url + "/tags"
        response = api.get(tags_url)
        data = response.json()
        repo_path = repo_url.replace("https://github.com/", "")
        tags: list[GithubListTagsBlock.Output.TagItem] = [
            {
                "name": tag["name"],
@@ -86,7 +80,6 @@ class GithubListTagsBlock(Block):
            }
            for tag in data
        ]

        return tags

    def run(
@@ -96,14 +89,11 @@ class GithubListTagsBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            tags = self.list_tags(
                credentials,
                input_data.repo_url,
            )
            yield from (("tag", tag) for tag in tags)
        except Exception as e:
            yield "error", f"Failed to list tags: {str(e)}"
        tags = self.list_tags(
            credentials,
            input_data.repo_url,
        )
        yield from (("tag", tag) for tag in tags)

class GithubListBranchesBlock(Block):
@@ -160,20 +150,18 @@ class GithubListBranchesBlock(Block):
    def list_branches(
        credentials: GithubCredentials, repo_url: str
    ) -> list[Output.BranchItem]:
        api_url = repo_url.replace("github.com", "api.github.com/repos") + "/branches"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        branches_url = repo_url + "/branches"
        response = api.get(branches_url)
        data = response.json()
        repo_path = repo_url.replace("https://github.com/", "")
        branches: list[GithubListBranchesBlock.Output.BranchItem] = [
            {"name": branch["name"], "url": branch["commit"]["url"]} for branch in data
            {
                "name": branch["name"],
                "url": f"https://github.com/{repo_path}/tree/{branch['name']}",
            }
            for branch in data
        ]

        return branches

    def run(
@@ -183,14 +171,11 @@ class GithubListBranchesBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            branches = self.list_branches(
                credentials,
                input_data.repo_url,
            )
            yield from (("branch", branch) for branch in branches)
        except Exception as e:
            yield "error", f"Failed to list branches: {str(e)}"
        branches = self.list_branches(
            credentials,
            input_data.repo_url,
        )
        yield from (("branch", branch) for branch in branches)

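One behavioral change worth noting in the GithubListBranchesBlock hunk above: the yielded URL switches from the branch's commit API URL to a human-readable tree URL. Illustrative values only (owner/repo/sha are made up):

# For repo_url = "https://github.com/owner/repo" and a branch named "main":
#   old: branch["commit"]["url"]
#        -> "https://api.github.com/repos/owner/repo/commits/<sha>"
#   new: f"https://github.com/{repo_path}/tree/{branch['name']}"
#        -> "https://github.com/owner/repo/tree/main"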
class GithubListDiscussionsBlock(Block):
@@ -252,6 +237,8 @@ class GithubListDiscussionsBlock(Block):
    def list_discussions(
        credentials: GithubCredentials, repo_url: str, num_discussions: int
    ) -> list[Output.DiscussionItem]:
        api = get_api(credentials)
        # GitHub GraphQL API endpoint is different; we'll use api.post with custom URL
        repo_path = repo_url.replace("https://github.com/", "")
        owner, repo = repo_path.split("/")
        query = """
@@ -267,24 +254,15 @@ class GithubListDiscussionsBlock(Block):
        }
        """
        variables = {"owner": owner, "repo": repo, "num": num_discussions}
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.post(
        response = api.post(
            "https://api.github.com/graphql",
            json={"query": query, "variables": variables},
            headers=headers,
        )
        response.raise_for_status()

        data = response.json()
        discussions: list[GithubListDiscussionsBlock.Output.DiscussionItem] = [
            {"title": discussion["title"], "url": discussion["url"]}
            for discussion in data["data"]["repository"]["discussions"]["nodes"]
        ]

        return discussions

    def run(
@@ -294,13 +272,10 @@ class GithubListDiscussionsBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            discussions = self.list_discussions(
                credentials, input_data.repo_url, input_data.num_discussions
            )
            yield from (("discussion", discussion) for discussion in discussions)
        except Exception as e:
            yield "error", f"Failed to list discussions: {str(e)}"
        discussions = self.list_discussions(
            credentials, input_data.repo_url, input_data.num_discussions
        )
        yield from (("discussion", discussion) for discussion in discussions)

class GithubListReleasesBlock(Block):
@@ -357,21 +332,13 @@ class GithubListReleasesBlock(Block):
    def list_releases(
        credentials: GithubCredentials, repo_url: str
    ) -> list[Output.ReleaseItem]:
        repo_path = repo_url.replace("https://github.com/", "")
        api_url = f"https://api.github.com/repos/{repo_path}/releases"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        releases_url = repo_url + "/releases"
        response = api.get(releases_url)
        data = response.json()
        releases: list[GithubListReleasesBlock.Output.ReleaseItem] = [
            {"name": release["name"], "url": release["html_url"]} for release in data
        ]

        return releases

    def run(
@@ -381,14 +348,11 @@ class GithubListReleasesBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            releases = self.list_releases(
                credentials,
                input_data.repo_url,
            )
            yield from (("release", release) for release in releases)
        except Exception as e:
            yield "error", f"Failed to list releases: {str(e)}"
        releases = self.list_releases(
            credentials,
            input_data.repo_url,
        )
        yield from (("release", release) for release in releases)

class GithubReadFileBlock(Block):
@@ -444,16 +408,9 @@ class GithubReadFileBlock(Block):
    def read_file(
        credentials: GithubCredentials, repo_url: str, file_path: str, branch: str
    ) -> tuple[str, int]:
        repo_path = repo_url.replace("https://github.com/", "")
        api_url = f"https://api.github.com/repos/{repo_path}/contents/{file_path}?ref={branch}"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        content_url = repo_url + f"/contents/{file_path}?ref={branch}"
        response = api.get(content_url)
        content = response.json()

        if isinstance(content, list):
@@ -474,18 +431,15 @@ class GithubReadFileBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            raw_content, size = self.read_file(
                credentials,
                input_data.repo_url,
                input_data.file_path.lstrip("/"),
                input_data.branch,
            )
            yield "raw_content", raw_content
            yield "text_content", base64.b64decode(raw_content).decode("utf-8")
            yield "size", size
        except Exception as e:
            yield "error", f"Failed to read file: {str(e)}"
        raw_content, size = self.read_file(
            credentials,
            input_data.repo_url,
            input_data.file_path.lstrip("/"),
            input_data.branch,
        )
        yield "raw_content", raw_content
        yield "text_content", base64.b64decode(raw_content).decode("utf-8")
        yield "size", size

class GithubReadFolderBlock(Block):
@@ -564,46 +518,33 @@ class GithubReadFolderBlock(Block):
    def read_folder(
        credentials: GithubCredentials, repo_url: str, folder_path: str, branch: str
    ) -> tuple[list[Output.FileEntry], list[Output.DirEntry]]:
        repo_path = repo_url.replace("https://github.com/", "")
        api_url = f"https://api.github.com/repos/{repo_path}/contents/{folder_path}?ref={branch}"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        contents_url = repo_url + f"/contents/{folder_path}?ref={branch}"
        response = api.get(contents_url)
        content = response.json()

        if isinstance(content, list):
            # Multiple entries of different types exist at this path
            if not (dir := next((d for d in content if d["type"] == "dir"), None)):
                raise TypeError("Not a folder")
            content = dir

        if content["type"] != "dir":
        if not isinstance(content, list):
            raise TypeError("Not a folder")

        return (
            [
                GithubReadFolderBlock.Output.FileEntry(
                    name=entry["name"],
                    path=entry["path"],
                    size=entry["size"],
                )
                for entry in content["entries"]
                if entry["type"] == "file"
            ],
            [
                GithubReadFolderBlock.Output.DirEntry(
                    name=entry["name"],
                    path=entry["path"],
                )
                for entry in content["entries"]
                if entry["type"] == "dir"
            ],
        )
        files = [
            GithubReadFolderBlock.Output.FileEntry(
                name=entry["name"],
                path=entry["path"],
                size=entry["size"],
            )
            for entry in content
            if entry["type"] == "file"
        ]
        dirs = [
            GithubReadFolderBlock.Output.DirEntry(
                name=entry["name"],
                path=entry["path"],
            )
            for entry in content
            if entry["type"] == "dir"
        ]

        return files, dirs

    def run(
        self,
@@ -612,17 +553,14 @@ class GithubReadFolderBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            files, dirs = self.read_folder(
                credentials,
                input_data.repo_url,
                input_data.folder_path.lstrip("/"),
                input_data.branch,
            )
            yield from (("file", file) for file in files)
            yield from (("dir", dir) for dir in dirs)
        except Exception as e:
            yield "error", f"Failed to read folder: {str(e)}"
        files, dirs = self.read_folder(
            credentials,
            input_data.repo_url,
            input_data.folder_path.lstrip("/"),
            input_data.branch,
        )
        yield from (("file", file) for file in files)
        yield from (("dir", dir) for dir in dirs)

class GithubMakeBranchBlock(Block):
@@ -674,26 +612,16 @@ class GithubMakeBranchBlock(Block):
        new_branch: str,
        source_branch: str,
    ) -> str:
        repo_path = repo_url.replace("https://github.com/", "")
        ref_api_url = (
            f"https://api.github.com/repos/{repo_path}/git/refs/heads/{source_branch}"
        )
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.get(ref_api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        # Get the SHA of the source branch
        ref_url = repo_url + f"/git/refs/heads/{source_branch}"
        response = api.get(ref_url)
        sha = response.json()["object"]["sha"]

        create_branch_api_url = f"https://api.github.com/repos/{repo_path}/git/refs"
        # Create the new branch
        create_ref_url = repo_url + "/git/refs"
        data = {"ref": f"refs/heads/{new_branch}", "sha": sha}

        response = requests.post(create_branch_api_url, headers=headers, json=data)
        response.raise_for_status()

        response = api.post(create_ref_url, json=data)
        return "Branch created successfully"

    def run(
@@ -703,16 +631,13 @@ class GithubMakeBranchBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            status = self.create_branch(
                credentials,
                input_data.repo_url,
                input_data.new_branch,
                input_data.source_branch,
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to create branch: {str(e)}"
        status = self.create_branch(
            credentials,
            input_data.repo_url,
            input_data.new_branch,
            input_data.source_branch,
        )
        yield "status", status

class GithubDeleteBranchBlock(Block):
@@ -756,16 +681,9 @@ class GithubDeleteBranchBlock(Block):
    def delete_branch(
        credentials: GithubCredentials, repo_url: str, branch: str
    ) -> str:
        repo_path = repo_url.replace("https://github.com/", "")
        api_url = f"https://api.github.com/repos/{repo_path}/git/refs/heads/{branch}"
        headers = {
            "Authorization": credentials.bearer(),
            "Accept": "application/vnd.github.v3+json",
        }

        response = requests.delete(api_url, headers=headers)
        response.raise_for_status()

        api = get_api(credentials)
        ref_url = repo_url + f"/git/refs/heads/{branch}"
        api.delete(ref_url)
        return "Branch deleted successfully"

    def run(
@@ -775,12 +693,9 @@ class GithubDeleteBranchBlock(Block):
        credentials: GithubCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            status = self.delete_branch(
                credentials,
                input_data.repo_url,
                input_data.branch,
            )
            yield "status", status
        except Exception as e:
            yield "error", f"Failed to delete branch: {str(e)}"
        status = self.delete_branch(
            credentials,
            input_data.repo_url,
            input_data.branch,
        )
        yield "status", status

autogpt_platform/backend/backend/blocks/github/triggers.py (new file, +156)
@@ -0,0 +1,156 @@
import json
import logging
from pathlib import Path

from pydantic import BaseModel

from backend.data.block import (
    Block,
    BlockCategory,
    BlockOutput,
    BlockSchema,
    BlockWebhookConfig,
)
from backend.data.model import SchemaField

from ._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GithubCredentialsField,
    GithubCredentialsInput,
)

logger = logging.getLogger(__name__)


# --8<-- [start:GithubTriggerExample]
class GitHubTriggerBase:
    class Input(BlockSchema):
        credentials: GithubCredentialsInput = GithubCredentialsField("repo")
        repo: str = SchemaField(
            description=(
                "Repository to subscribe to.\n\n"
                "**Note:** Make sure your GitHub credentials have permissions "
                "to create webhooks on this repo."
            ),
            placeholder="{owner}/{repo}",
        )
        # --8<-- [start:example-payload-field]
        payload: dict = SchemaField(hidden=True, default={})
        # --8<-- [end:example-payload-field]

    class Output(BlockSchema):
        payload: dict = SchemaField(
            description="The complete webhook payload that was received from GitHub. "
            "Includes information about the affected resource (e.g. pull request), "
            "the event, and the user who triggered the event."
        )
        triggered_by_user: dict = SchemaField(
            description="Object representing the GitHub user who triggered the event"
        )
        error: str = SchemaField(
            description="Error message if the payload could not be processed"
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        yield "payload", input_data.payload
        yield "triggered_by_user", input_data.payload["sender"]


class GithubPullRequestTriggerBlock(GitHubTriggerBase, Block):
    EXAMPLE_PAYLOAD_FILE = (
        Path(__file__).parent / "example_payloads" / "pull_request.synchronize.json"
    )

    # --8<-- [start:example-event-filter]
    class Input(GitHubTriggerBase.Input):
        class EventsFilter(BaseModel):
            """
            https://docs.github.com/en/webhooks/webhook-events-and-payloads#pull_request
            """

            opened: bool = False
            edited: bool = False
            closed: bool = False
            reopened: bool = False
            synchronize: bool = False
            assigned: bool = False
            unassigned: bool = False
            labeled: bool = False
            unlabeled: bool = False
            converted_to_draft: bool = False
            locked: bool = False
            unlocked: bool = False
            enqueued: bool = False
            dequeued: bool = False
            milestoned: bool = False
            demilestoned: bool = False
            ready_for_review: bool = False
            review_requested: bool = False
            review_request_removed: bool = False
            auto_merge_enabled: bool = False
            auto_merge_disabled: bool = False

        events: EventsFilter = SchemaField(
            title="Events", description="The events to subscribe to"
        )
    # --8<-- [end:example-event-filter]

    class Output(GitHubTriggerBase.Output):
        event: str = SchemaField(
            description="The PR event that triggered the webhook (e.g. 'opened')"
        )
        number: int = SchemaField(description="The number of the affected pull request")
        pull_request: dict = SchemaField(
            description="Object representing the affected pull request"
        )
        pull_request_url: str = SchemaField(
            description="The URL of the affected pull request"
        )

    def __init__(self):
        from backend.integrations.webhooks.github import GithubWebhookType

        example_payload = json.loads(self.EXAMPLE_PAYLOAD_FILE.read_text())

        super().__init__(
            id="6c60ec01-8128-419e-988f-96a063ee2fea",
            description="This block triggers on pull request events and outputs the event type and payload.",
            categories={BlockCategory.DEVELOPER_TOOLS, BlockCategory.INPUT},
            input_schema=GithubPullRequestTriggerBlock.Input,
            output_schema=GithubPullRequestTriggerBlock.Output,
            # --8<-- [start:example-webhook_config]
            webhook_config=BlockWebhookConfig(
                provider="github",
                webhook_type=GithubWebhookType.REPO,
                resource_format="{repo}",
                event_filter_input="events",
                event_format="pull_request.{event}",
            ),
            # --8<-- [end:example-webhook_config]
            test_input={
                "repo": "Significant-Gravitas/AutoGPT",
                "events": {"opened": True, "synchronize": True},
                "credentials": TEST_CREDENTIALS_INPUT,
                "payload": example_payload,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("payload", example_payload),
                ("triggered_by_user", example_payload["sender"]),
                ("event", example_payload["action"]),
                ("number", example_payload["number"]),
                ("pull_request", example_payload["pull_request"]),
                ("pull_request_url", example_payload["pull_request"]["html_url"]),
            ],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:  # type: ignore
        yield from super().run(input_data, **kwargs)
        yield "event", input_data.payload["action"]
        yield "number", input_data.payload["number"]
        yield "pull_request", input_data.payload["pull_request"]
        yield "pull_request_url", input_data.payload["pull_request"]["html_url"]


# --8<-- [end:GithubTriggerExample]
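A note on how the webhook_config above presumably ties together: event_filter_input="events" points at the Input.events field, and each EventsFilter flag that is set gets formatted through event_format into a GitHub webhook event name. The helper below is a hypothetical sketch of that mapping (the real logic lives in backend/integrations/webhooks and is not part of this diff):

def resolve_webhook_events(events_filter, event_format: str) -> list[str]:
    # events_filter is a pydantic model such as EventsFilter above;
    # model_dump() yields e.g. {"opened": True, "synchronize": True, ...}
    return [
        event_format.format(event=name)
        for name, enabled in events_filter.model_dump().items()
        if enabled
    ]

With the test_input above ({"opened": True, "synchronize": True}), this would produce ["pull_request.opened", "pull_request.synchronize"].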
autogpt_platform/backend/backend/blocks/google/_auth.py (new file, +53)
@@ -0,0 +1,53 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials
from backend.util.settings import Secrets

# --8<-- [start:GoogleOAuthIsConfigured]
secrets = Secrets()
GOOGLE_OAUTH_IS_CONFIGURED = bool(
    secrets.google_client_id and secrets.google_client_secret
)
# --8<-- [end:GoogleOAuthIsConfigured]
GoogleCredentials = OAuth2Credentials
GoogleCredentialsInput = CredentialsMetaInput[Literal["google"], Literal["oauth2"]]


def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput:
    """
    Creates a Google credentials input on a block.

    Params:
        scopes: The authorization scopes needed for the block to work.
    """
    return CredentialsField(
        provider="google",
        supported_credential_types={"oauth2"},
        required_scopes=set(scopes),
        description="The Google integration requires OAuth2 authentication.",
    )


TEST_CREDENTIALS = OAuth2Credentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="google",
    access_token=SecretStr("mock-google-access-token"),
    refresh_token=SecretStr("mock-google-refresh-token"),
    access_token_expires_at=1234567890,
    scopes=[
        "https://www.googleapis.com/auth/gmail.readonly",
        "https://www.googleapis.com/auth/gmail.send",
    ],
    title="Mock Google OAuth2 Credentials",
    username="mock-google-username",
    refresh_token_expires_at=1234567890,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
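Usage-wise, a block opts into Google OAuth by declaring the scopes it needs on its input schema; the matching OAuth2Credentials object is then injected into run() as the credentials keyword argument (see the Gmail blocks below). A minimal hypothetical input schema, using only names defined in this file:

from backend.data.block import BlockSchema


class ExampleGoogleInput(BlockSchema):
    # ExampleGoogleInput is illustrative only; the scope list is whatever
    # the block actually needs.
    credentials: GoogleCredentialsInput = GoogleCredentialsField(
        ["https://www.googleapis.com/auth/gmail.readonly"]
    )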
autogpt_platform/backend/backend/blocks/google/gmail.py (new file, +529)
@@ -0,0 +1,529 @@
import base64
from email.utils import parseaddr
from typing import List

from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from pydantic import BaseModel

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._auth import (
    GOOGLE_OAUTH_IS_CONFIGURED,
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GoogleCredentials,
    GoogleCredentialsField,
    GoogleCredentialsInput,
)


class Attachment(BaseModel):
    filename: str
    content_type: str
    size: int
    attachment_id: str


class Email(BaseModel):
    id: str
    subject: str
    snippet: str
    from_: str
    to: str
    date: str
    body: str = ""  # Default to an empty string
    sizeEstimate: int
    attachments: List[Attachment]


class GmailReadBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/gmail.readonly"]
        )
        query: str = SchemaField(
            description="Search query for reading emails",
            default="is:unread",
        )
        max_results: int = SchemaField(
            description="Maximum number of emails to retrieve",
            default=10,
        )

    class Output(BlockSchema):
        email: Email = SchemaField(
            description="Email data",
        )
        emails: list[Email] = SchemaField(
            description="List of email data",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="25310c70-b89b-43ba-b25c-4dfa7e2a481c",
            description="This block reads emails from Gmail.",
            categories={BlockCategory.COMMUNICATION},
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            input_schema=GmailReadBlock.Input,
            output_schema=GmailReadBlock.Output,
            test_input={
                "query": "is:unread",
                "max_results": 5,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "email",
                    {
                        "id": "1",
                        "subject": "Test Email",
                        "snippet": "This is a test email",
                        "from_": "test@example.com",
                        "to": "recipient@example.com",
                        "date": "2024-01-01",
                        "body": "This is a test email",
                        "sizeEstimate": 100,
                        "attachments": [],
                    },
                ),
                (
                    "emails",
                    [
                        {
                            "id": "1",
                            "subject": "Test Email",
                            "snippet": "This is a test email",
                            "from_": "test@example.com",
                            "to": "recipient@example.com",
                            "date": "2024-01-01",
                            "body": "This is a test email",
                            "sizeEstimate": 100,
                            "attachments": [],
                        }
                    ],
                ),
            ],
            test_mock={
                "_read_emails": lambda *args, **kwargs: [
                    {
                        "id": "1",
                        "subject": "Test Email",
                        "snippet": "This is a test email",
                        "from_": "test@example.com",
                        "to": "recipient@example.com",
                        "date": "2024-01-01",
                        "body": "This is a test email",
                        "sizeEstimate": 100,
                        "attachments": [],
                    }
                ],
                "_send_email": lambda *args, **kwargs: {"id": "1", "status": "sent"},
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = self._build_service(credentials, **kwargs)
        messages = self._read_emails(service, input_data.query, input_data.max_results)
        for email in messages:
            yield "email", email
        yield "emails", messages

    @staticmethod
    def _build_service(credentials: GoogleCredentials, **kwargs):
        creds = Credentials(
            token=(
                credentials.access_token.get_secret_value()
                if credentials.access_token
                else None
            ),
            refresh_token=(
                credentials.refresh_token.get_secret_value()
                if credentials.refresh_token
                else None
            ),
            token_uri="https://oauth2.googleapis.com/token",
            client_id=kwargs.get("client_id"),
            client_secret=kwargs.get("client_secret"),
            scopes=credentials.scopes,
        )
        return build("gmail", "v1", credentials=creds)

    def _read_emails(
        self, service, query: str | None, max_results: int | None
    ) -> list[Email]:
        results = (
            service.users()
            .messages()
            .list(userId="me", q=query or "", maxResults=max_results or 10)
            .execute()
        )
        messages = results.get("messages", [])

        email_data = []
        for message in messages:
            msg = (
                service.users()
                .messages()
                .get(userId="me", id=message["id"], format="full")
                .execute()
            )

            headers = {
                header["name"].lower(): header["value"]
                for header in msg["payload"]["headers"]
            }

            attachments = self._get_attachments(service, msg)

            email = Email(
                id=msg["id"],
                subject=headers.get("subject", "No Subject"),
                snippet=msg["snippet"],
                from_=parseaddr(headers.get("from", ""))[1],
                to=parseaddr(headers.get("to", ""))[1],
                date=headers.get("date", ""),
                body=self._get_email_body(msg),
                sizeEstimate=msg["sizeEstimate"],
                attachments=attachments,
            )
            email_data.append(email)

        return email_data

    def _get_email_body(self, msg):
        if "parts" in msg["payload"]:
            for part in msg["payload"]["parts"]:
                if part["mimeType"] == "text/plain":
                    return base64.urlsafe_b64decode(part["body"]["data"]).decode(
                        "utf-8"
                    )
        elif msg["payload"]["mimeType"] == "text/plain":
            return base64.urlsafe_b64decode(msg["payload"]["body"]["data"]).decode(
                "utf-8"
            )

        return "This email does not contain a text body."

    def _get_attachments(self, service, message):
        attachments = []
        if "parts" in message["payload"]:
            for part in message["payload"]["parts"]:
                if part["filename"]:
                    attachment = Attachment(
                        filename=part["filename"],
                        content_type=part["mimeType"],
                        size=int(part["body"].get("size", 0)),
                        attachment_id=part["body"]["attachmentId"],
                    )
                    attachments.append(attachment)
        return attachments

    # Add a new method to download attachment content
    def download_attachment(self, service, message_id: str, attachment_id: str):
        attachment = (
            service.users()
            .messages()
            .attachments()
            .get(userId="me", messageId=message_id, id=attachment_id)
            .execute()
        )
        file_data = base64.urlsafe_b64decode(attachment["data"].encode("UTF-8"))
        return file_data


class GmailSendBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/gmail.send"]
        )
        to: str = SchemaField(
            description="Recipient email address",
        )
        subject: str = SchemaField(
            description="Email subject",
        )
        body: str = SchemaField(
            description="Email body",
        )

    class Output(BlockSchema):
        result: dict = SchemaField(
            description="Send confirmation",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="6c27abc2-e51d-499e-a85f-5a0041ba94f0",
            description="This block sends an email using Gmail.",
            categories={BlockCategory.COMMUNICATION},
            input_schema=GmailSendBlock.Input,
            output_schema=GmailSendBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            test_input={
                "to": "recipient@example.com",
                "subject": "Test Email",
                "body": "This is a test email sent from GmailSendBlock.",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                ("result", {"id": "1", "status": "sent"}),
            ],
            test_mock={
                "_send_email": lambda *args, **kwargs: {"id": "1", "status": "sent"},
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        send_result = self._send_email(
            service, input_data.to, input_data.subject, input_data.body
        )
        yield "result", send_result

    def _send_email(self, service, to: str, subject: str, body: str) -> dict:
        if not to or not subject or not body:
            raise ValueError("To, subject, and body are required for sending an email")
        message = self._create_message(to, subject, body)
        sent_message = (
            service.users().messages().send(userId="me", body=message).execute()
        )
        return {"id": sent_message["id"], "status": "sent"}

    def _create_message(self, to: str, subject: str, body: str) -> dict:
        import base64
        from email.mime.text import MIMEText

        message = MIMEText(body)
        message["to"] = to
        message["subject"] = subject
        raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")
        return {"raw": raw_message}


class GmailListLabelsBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/gmail.labels"]
        )

    class Output(BlockSchema):
        result: list[dict] = SchemaField(
            description="List of labels",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="3e1c2c1c-c689-4520-b956-1f3bf4e02bb7",
            description="This block lists all labels in Gmail.",
            categories={BlockCategory.COMMUNICATION},
            input_schema=GmailListLabelsBlock.Input,
            output_schema=GmailListLabelsBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "result",
                    [
                        {"id": "Label_1", "name": "Important"},
                        {"id": "Label_2", "name": "Work"},
                    ],
                ),
            ],
            test_mock={
                "_list_labels": lambda *args, **kwargs: [
                    {"id": "Label_1", "name": "Important"},
                    {"id": "Label_2", "name": "Work"},
                ],
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        labels = self._list_labels(service)
        yield "result", labels

    def _list_labels(self, service) -> list[dict]:
        results = service.users().labels().list(userId="me").execute()
        labels = results.get("labels", [])
        return [{"id": label["id"], "name": label["name"]} for label in labels]


class GmailAddLabelBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/gmail.modify"]
        )
        message_id: str = SchemaField(
            description="Message ID to add label to",
        )
        label_name: str = SchemaField(
            description="Label name to add",
        )

    class Output(BlockSchema):
        result: dict = SchemaField(
            description="Label addition result",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="f884b2fb-04f4-4265-9658-14f433926ac9",
            description="This block adds a label to a Gmail message.",
            categories={BlockCategory.COMMUNICATION},
            input_schema=GmailAddLabelBlock.Input,
            output_schema=GmailAddLabelBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            test_input={
                "message_id": "12345",
                "label_name": "Important",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "result",
                    {"status": "Label added successfully", "label_id": "Label_1"},
                ),
            ],
            test_mock={
                "_add_label": lambda *args, **kwargs: {
                    "status": "Label added successfully",
                    "label_id": "Label_1",
                },
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        result = self._add_label(service, input_data.message_id, input_data.label_name)
        yield "result", result

    def _add_label(self, service, message_id: str, label_name: str) -> dict:
        label_id = self._get_or_create_label(service, label_name)
        service.users().messages().modify(
            userId="me", id=message_id, body={"addLabelIds": [label_id]}
        ).execute()
        return {"status": "Label added successfully", "label_id": label_id}

    def _get_or_create_label(self, service, label_name: str) -> str:
        label_id = self._get_label_id(service, label_name)
        if not label_id:
            label = (
                service.users()
                .labels()
                .create(userId="me", body={"name": label_name})
                .execute()
            )
            label_id = label["id"]
        return label_id

    def _get_label_id(self, service, label_name: str) -> str | None:
        results = service.users().labels().list(userId="me").execute()
        labels = results.get("labels", [])
        for label in labels:
            if label["name"] == label_name:
                return label["id"]
        return None


class GmailRemoveLabelBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/gmail.modify"]
        )
        message_id: str = SchemaField(
            description="Message ID to remove label from",
        )
        label_name: str = SchemaField(
            description="Label name to remove",
        )

    class Output(BlockSchema):
        result: dict = SchemaField(
            description="Label removal result",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="0afc0526-aba1-4b2b-888e-a22b7c3f359d",
            description="This block removes a label from a Gmail message.",
            categories={BlockCategory.COMMUNICATION},
            input_schema=GmailRemoveLabelBlock.Input,
            output_schema=GmailRemoveLabelBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            test_input={
                "message_id": "12345",
                "label_name": "Important",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "result",
                    {"status": "Label removed successfully", "label_id": "Label_1"},
                ),
            ],
            test_mock={
                "_remove_label": lambda *args, **kwargs: {
                    "status": "Label removed successfully",
                    "label_id": "Label_1",
                },
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GmailReadBlock._build_service(credentials, **kwargs)
        result = self._remove_label(
            service, input_data.message_id, input_data.label_name
        )
        yield "result", result

    def _remove_label(self, service, message_id: str, label_name: str) -> dict:
        label_id = self._get_label_id(service, label_name)
        if label_id:
            service.users().messages().modify(
                userId="me", id=message_id, body={"removeLabelIds": [label_id]}
            ).execute()
            return {"status": "Label removed successfully", "label_id": label_id}
        else:
            return {"status": "Label not found", "label_name": label_name}

    def _get_label_id(self, service, label_name: str) -> str | None:
        results = service.users().labels().list(userId="me").execute()
        labels = results.get("labels", [])
        for label in labels:
            if label["name"] == label_name:
                return label["id"]
        return None
184  autogpt_platform/backend/backend/blocks/google/sheets.py  Normal file
@@ -0,0 +1,184 @@
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField

from ._auth import (
    GOOGLE_OAUTH_IS_CONFIGURED,
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    GoogleCredentials,
    GoogleCredentialsField,
    GoogleCredentialsInput,
)


class GoogleSheetsReadBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/spreadsheets.readonly"]
        )
        spreadsheet_id: str = SchemaField(
            description="The ID of the spreadsheet to read from",
        )
        range: str = SchemaField(
            description="The A1 notation of the range to read",
        )

    class Output(BlockSchema):
        result: list[list[str]] = SchemaField(
            description="The data read from the spreadsheet",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="5724e902-3635-47e9-a108-aaa0263a4988",
            description="This block reads data from a Google Sheets spreadsheet.",
            categories={BlockCategory.DATA},
            input_schema=GoogleSheetsReadBlock.Input,
            output_schema=GoogleSheetsReadBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            test_input={
                "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
                "range": "Sheet1!A1:B2",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "result",
                    [
                        ["Name", "Score"],
                        ["Alice", "85"],
                    ],
                ),
            ],
            test_mock={
                "_read_sheet": lambda *args, **kwargs: [
                    ["Name", "Score"],
                    ["Alice", "85"],
                ],
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = self._build_service(credentials, **kwargs)
        data = self._read_sheet(service, input_data.spreadsheet_id, input_data.range)
        yield "result", data

    @staticmethod
    def _build_service(credentials: GoogleCredentials, **kwargs):
        creds = Credentials(
            token=(
                credentials.access_token.get_secret_value()
                if credentials.access_token
                else None
            ),
            refresh_token=(
                credentials.refresh_token.get_secret_value()
                if credentials.refresh_token
                else None
            ),
            token_uri="https://oauth2.googleapis.com/token",
            client_id=kwargs.get("client_id"),
            client_secret=kwargs.get("client_secret"),
            scopes=credentials.scopes,
        )
        return build("sheets", "v4", credentials=creds)

    def _read_sheet(self, service, spreadsheet_id: str, range: str) -> list[list[str]]:
        sheet = service.spreadsheets()
        result = sheet.values().get(spreadsheetId=spreadsheet_id, range=range).execute()
        return result.get("values", [])


class GoogleSheetsWriteBlock(Block):
    class Input(BlockSchema):
        credentials: GoogleCredentialsInput = GoogleCredentialsField(
            ["https://www.googleapis.com/auth/spreadsheets"]
        )
        spreadsheet_id: str = SchemaField(
            description="The ID of the spreadsheet to write to",
        )
        range: str = SchemaField(
            description="The A1 notation of the range to write",
        )
        values: list[list[str]] = SchemaField(
            description="The data to write to the spreadsheet",
        )

    class Output(BlockSchema):
        result: dict = SchemaField(
            description="The result of the write operation",
        )
        error: str = SchemaField(
            description="Error message if any",
        )

    def __init__(self):
        super().__init__(
            id="d9291e87-301d-47a8-91fe-907fb55460e5",
            description="This block writes data to a Google Sheets spreadsheet.",
            categories={BlockCategory.DATA},
            input_schema=GoogleSheetsWriteBlock.Input,
            output_schema=GoogleSheetsWriteBlock.Output,
            disabled=not GOOGLE_OAUTH_IS_CONFIGURED,
            test_input={
                "spreadsheet_id": "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms",
                "range": "Sheet1!A1:B2",
                "values": [
                    ["Name", "Score"],
                    ["Bob", "90"],
                ],
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=[
                (
                    "result",
                    {"updatedCells": 4, "updatedColumns": 2, "updatedRows": 2},
                ),
            ],
            test_mock={
                "_write_sheet": lambda *args, **kwargs: {
                    "updatedCells": 4,
                    "updatedColumns": 2,
                    "updatedRows": 2,
                },
            },
        )

    def run(
        self, input_data: Input, *, credentials: GoogleCredentials, **kwargs
    ) -> BlockOutput:
        service = GoogleSheetsReadBlock._build_service(credentials, **kwargs)
        result = self._write_sheet(
            service,
            input_data.spreadsheet_id,
            input_data.range,
            input_data.values,
        )
        yield "result", result

    def _write_sheet(
        self, service, spreadsheet_id: str, range: str, values: list[list[str]]
    ) -> dict:
        body = {"values": values}
        result = (
            service.spreadsheets()
            .values()
            .update(
                spreadsheetId=spreadsheet_id,
                range=range,
                valueInputOption="USER_ENTERED",
                body=body,
            )
            .execute()
        )
        return result
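A note on the write path: valueInputOption="USER_ENTERED" asks the Sheets API to parse each cell as if a user typed it into the UI, so "90" is stored as a number and "=SUM(B2:B3)" becomes a live formula; "RAW" would store both as literal strings. A minimal sketch of the contrast, assuming a service built via GoogleSheetsReadBlock._build_service and an existing spreadsheet_id:

body = {"values": [["=SUM(B2:B3)", "90"]]}

# USER_ENTERED: the formula evaluates and "90" becomes numeric.
service.spreadsheets().values().update(
    spreadsheetId=spreadsheet_id,
    range="Sheet1!A1:B1",
    valueInputOption="USER_ENTERED",
    body=body,
).execute()

# RAW: both cells are stored verbatim as strings, formula inert.
service.spreadsheets().values().update(
    spreadsheetId=spreadsheet_id,
    range="Sheet1!A2:B2",
    valueInputOption="RAW",
    body=body,
).execute()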
@@ -1,8 +1,29 @@
 from typing import Literal
 
 import googlemaps
-from pydantic import BaseModel
+from pydantic import BaseModel, SecretStr
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="google_maps",
+    api_key=SecretStr("mock-google-maps-api-key"),
+    title="Mock Google Maps API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}
 
 
 class Place(BaseModel):
@@ -16,8 +37,11 @@ class Place(BaseModel):
 
 
 class GoogleMapsSearchBlock(Block):
     class Input(BlockSchema):
-        api_key: BlockSecret = SecretField(
-            key="google_maps_api_key",
+        credentials: CredentialsMetaInput[
+            Literal["google_maps"], Literal["api_key"]
+        ] = CredentialsField(
+            provider="google_maps",
+            supported_credential_types={"api_key"},
             description="Google Maps API Key",
         )
         query: str = SchemaField(
@@ -49,7 +73,7 @@ class GoogleMapsSearchBlock(Block):
             input_schema=GoogleMapsSearchBlock.Input,
             output_schema=GoogleMapsSearchBlock.Output,
             test_input={
-                "api_key": "your_test_api_key",
+                "credentials": TEST_CREDENTIALS_INPUT,
                 "query": "restaurants in new york",
                 "radius": 5000,
                 "max_results": 5,
@@ -79,23 +103,23 @@ class GoogleMapsSearchBlock(Block):
                     }
                 ]
             },
+            test_credentials=TEST_CREDENTIALS,
         )
 
-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            places = self.search_places(
-                input_data.api_key.get_secret_value(),
-                input_data.query,
-                input_data.radius,
-                input_data.max_results,
-            )
-            for place in places:
-                yield "place", place
-        except Exception as e:
-            yield "error", str(e)
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
+        places = self.search_places(
+            credentials.api_key,
+            input_data.query,
+            input_data.radius,
+            input_data.max_results,
+        )
+        for place in places:
+            yield "place", place
 
-    def search_places(self, api_key, query, radius, max_results):
-        client = googlemaps.Client(key=api_key)
+    def search_places(self, api_key: SecretStr, query, radius, max_results):
+        client = googlemaps.Client(key=api_key.get_secret_value())
         return self._search_places(client, query, radius, max_results)
 
     def _search_places(self, client, query, radius, max_results):
14  autogpt_platform/backend/backend/blocks/helpers/http.py  Normal file
@@ -0,0 +1,14 @@
from typing import Any, Optional

from backend.util.request import requests


class GetRequest:
    @classmethod
    def get_request(
        cls, url: str, headers: Optional[dict] = None, json: bool = False
    ) -> Any:
        if headers is None:
            headers = {}
        response = requests.get(url, headers=headers)
        return response.json() if json else response.text
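This mixin is consumed by subclassing it alongside Block (SearchTheWebBlock and ExtractWebsiteContentBlock further down do exactly this) and calling get_request with json toggling between raw text and a parsed JSON payload. A minimal sketch outside the block machinery; the URLs are placeholders:

class ExampleFetcher(GetRequest):
    def fetch(self) -> None:
        # json=False returns response.text (HTML, markdown, plain text).
        page = self.get_request("https://example.com/page", json=False)
        # json=True returns response.json() as a parsed dict or list.
        data = self.get_request(
            "https://example.com/api",
            headers={"Accept": "application/json"},
            json=True,
        )
        print(len(page), data)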
@@ -1,9 +1,10 @@
 import json
 from enum import Enum
 
-import requests
+from typing import Any
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
+from backend.util.request import requests
 
 
 class HttpMethod(Enum):
@@ -18,15 +19,32 @@ class HttpMethod(Enum):
 
 class SendWebRequestBlock(Block):
     class Input(BlockSchema):
-        url: str
-        method: HttpMethod = HttpMethod.POST
-        headers: dict[str, str] = {}
-        body: object = {}
+        url: str = SchemaField(
+            description="The URL to send the request to",
+            placeholder="https://api.example.com",
+        )
+        method: HttpMethod = SchemaField(
+            description="The HTTP method to use for the request",
+            default=HttpMethod.POST,
+        )
+        headers: dict[str, str] = SchemaField(
+            description="The headers to include in the request",
+            default={},
+        )
+        json_format: bool = SchemaField(
+            title="JSON format",
+            description="Whether to send and receive body as JSON",
+            default=True,
+        )
+        body: Any = SchemaField(
+            description="The body of the request",
+            default=None,
+        )
 
     class Output(BlockSchema):
-        response: object
-        client_error: object
-        server_error: object
+        response: object = SchemaField(description="The response from the server")
+        client_error: object = SchemaField(description="The error on 4xx status codes")
+        server_error: object = SchemaField(description="The error on 5xx status codes")
 
     def __init__(self):
         super().__init__(
@@ -45,13 +63,16 @@ class SendWebRequestBlock(Block):
             input_data.method.value,
             input_data.url,
             headers=input_data.headers,
-            json=input_data.body,
+            json=input_data.body if input_data.json_format else None,
+            data=input_data.body if not input_data.json_format else None,
         )
+        result = response.json() if input_data.json_format else response.text
 
         if response.status_code // 100 == 2:
-            yield "response", response.json()
+            yield "response", result
         elif response.status_code // 100 == 4:
-            yield "client_error", response.json()
+            yield "client_error", result
         elif response.status_code // 100 == 5:
-            yield "server_error", response.json()
+            yield "server_error", result
         else:
             raise ValueError(f"Unexpected status code: {response.status_code}")
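The json_format flag decides which of the two request body parameters carries the payload: json= serializes the object to a JSON body and sets the Content-Type header to application/json, while data= sends it form-encoded (or as raw bytes for a string). A minimal sketch of the difference; plain requests stands in for the backend.util.request wrapper, and httpbin.org is used purely as an illustrative echo server:

import requests

payload = {"name": "Alice", "score": 90}

# json=: requests serializes the dict to a JSON body and sets
# Content-Type: application/json automatically.
r1 = requests.post("https://httpbin.org/post", json=payload)
print(r1.json()["json"])  # {'name': 'Alice', 'score': 90}

# data=: the same dict is form-encoded, which is what
# json_format=False selects in the block above.
r2 = requests.post("https://httpbin.org/post", data=payload)
print(r2.json()["form"])  # {'name': 'Alice', 'score': '90'}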
36  autogpt_platform/backend/backend/blocks/hubspot/_auth.py  Normal file
@@ -0,0 +1,36 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput

HubSpotCredentials = APIKeyCredentials
HubSpotCredentialsInput = CredentialsMetaInput[
    Literal["hubspot"],
    Literal["api_key"],
]


def HubSpotCredentialsField() -> HubSpotCredentialsInput:
    """Creates a HubSpot credentials input on a block."""
    return CredentialsField(
        provider="hubspot",
        supported_credential_types={"api_key"},
        description="The HubSpot integration requires an API Key.",
    )


TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="hubspot",
    api_key=SecretStr("mock-hubspot-api-key"),
    title="Mock HubSpot API key",
    expires_at=None,
)

TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
106  autogpt_platform/backend/backend/blocks/hubspot/company.py  Normal file
@@ -0,0 +1,106 @@
from backend.blocks.hubspot._auth import (
    HubSpotCredentials,
    HubSpotCredentialsField,
    HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HubSpotCompanyBlock(Block):
    class Input(BlockSchema):
        credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
        operation: str = SchemaField(
            description="Operation to perform (create, update, get)", default="get"
        )
        company_data: dict = SchemaField(
            description="Company data for create/update operations", default={}
        )
        domain: str = SchemaField(
            description="Company domain for get/update operations", default=""
        )

    class Output(BlockSchema):
        company: dict = SchemaField(description="Company information")
        status: str = SchemaField(description="Operation status")

    def __init__(self):
        super().__init__(
            id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
            description="Manages HubSpot companies - create, update, and retrieve company information",
            categories={BlockCategory.CRM},
            input_schema=HubSpotCompanyBlock.Input,
            output_schema=HubSpotCompanyBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com/crm/v3/objects/companies"
        headers = {
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }

        if input_data.operation == "create":
            response = requests.post(
                base_url, headers=headers, json={"properties": input_data.company_data}
            )
            result = response.json()
            yield "company", result
            yield "status", "created"

        elif input_data.operation == "get":
            search_url = f"{base_url}/search"
            search_data = {
                "filterGroups": [
                    {
                        "filters": [
                            {
                                "propertyName": "domain",
                                "operator": "EQ",
                                "value": input_data.domain,
                            }
                        ]
                    }
                ]
            }
            response = requests.post(search_url, headers=headers, json=search_data)
            result = response.json()
            yield "company", result.get("results", [{}])[0]
            yield "status", "retrieved"

        elif input_data.operation == "update":
            # First get company ID by domain
            search_response = requests.post(
                f"{base_url}/search",
                headers=headers,
                json={
                    "filterGroups": [
                        {
                            "filters": [
                                {
                                    "propertyName": "domain",
                                    "operator": "EQ",
                                    "value": input_data.domain,
                                }
                            ]
                        }
                    ]
                },
            )
            company_id = search_response.json().get("results", [{}])[0].get("id")

            if company_id:
                response = requests.patch(
                    f"{base_url}/{company_id}",
                    headers=headers,
                    json={"properties": input_data.company_data},
                )
                result = response.json()
                yield "company", result
                yield "status", "updated"
            else:
                yield "company", {}
                yield "status", "company_not_found"
106  autogpt_platform/backend/backend/blocks/hubspot/contact.py  Normal file
@@ -0,0 +1,106 @@
from backend.blocks.hubspot._auth import (
    HubSpotCredentials,
    HubSpotCredentialsField,
    HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HubSpotContactBlock(Block):
    class Input(BlockSchema):
        credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
        operation: str = SchemaField(
            description="Operation to perform (create, update, get)", default="get"
        )
        contact_data: dict = SchemaField(
            description="Contact data for create/update operations", default={}
        )
        email: str = SchemaField(
            description="Email address for get/update operations", default=""
        )

    class Output(BlockSchema):
        contact: dict = SchemaField(description="Contact information")
        status: str = SchemaField(description="Operation status")

    def __init__(self):
        super().__init__(
            id="5267326e-c4c1-4016-9f54-4e72ad02f813",
            description="Manages HubSpot contacts - create, update, and retrieve contact information",
            categories={BlockCategory.CRM},
            input_schema=HubSpotContactBlock.Input,
            output_schema=HubSpotContactBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com/crm/v3/objects/contacts"
        headers = {
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }

        if input_data.operation == "create":
            response = requests.post(
                base_url, headers=headers, json={"properties": input_data.contact_data}
            )
            result = response.json()
            yield "contact", result
            yield "status", "created"

        elif input_data.operation == "get":
            # Search for contact by email
            search_url = f"{base_url}/search"
            search_data = {
                "filterGroups": [
                    {
                        "filters": [
                            {
                                "propertyName": "email",
                                "operator": "EQ",
                                "value": input_data.email,
                            }
                        ]
                    }
                ]
            }
            response = requests.post(search_url, headers=headers, json=search_data)
            result = response.json()
            yield "contact", result.get("results", [{}])[0]
            yield "status", "retrieved"

        elif input_data.operation == "update":
            search_response = requests.post(
                f"{base_url}/search",
                headers=headers,
                json={
                    "filterGroups": [
                        {
                            "filters": [
                                {
                                    "propertyName": "email",
                                    "operator": "EQ",
                                    "value": input_data.email,
                                }
                            ]
                        }
                    ]
                },
            )
            contact_id = search_response.json().get("results", [{}])[0].get("id")

            if contact_id:
                response = requests.patch(
                    f"{base_url}/{contact_id}",
                    headers=headers,
                    json={"properties": input_data.contact_data},
                )
                result = response.json()
                yield "contact", result
                yield "status", "updated"
            else:
                yield "contact", {}
                yield "status", "contact_not_found"
121  autogpt_platform/backend/backend/blocks/hubspot/engagement.py  Normal file
@@ -0,0 +1,121 @@
from datetime import datetime, timedelta

from backend.blocks.hubspot._auth import (
    HubSpotCredentials,
    HubSpotCredentialsField,
    HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class HubSpotEngagementBlock(Block):
    class Input(BlockSchema):
        credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
        operation: str = SchemaField(
            description="Operation to perform (send_email, track_engagement)",
            default="send_email",
        )
        email_data: dict = SchemaField(
            description="Email data including recipient, subject, content",
            default={},
        )
        contact_id: str = SchemaField(
            description="Contact ID for engagement tracking", default=""
        )
        timeframe_days: int = SchemaField(
            description="Number of days to look back for engagement",
            default=30,
            optional=True,
        )

    class Output(BlockSchema):
        result: dict = SchemaField(description="Operation result")
        status: str = SchemaField(description="Operation status")

    def __init__(self):
        super().__init__(
            id="c6524385-7d87-49d6-a470-248bd29ca765",
            description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
            categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
            input_schema=HubSpotEngagementBlock.Input,
            output_schema=HubSpotEngagementBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
    ) -> BlockOutput:
        base_url = "https://api.hubapi.com"
        headers = {
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }

        if input_data.operation == "send_email":
            # Using the email send API
            email_url = f"{base_url}/crm/v3/objects/emails"
            email_data = {
                "properties": {
                    "hs_timestamp": datetime.now().isoformat(),
                    "hubspot_owner_id": "1",  # This should be configurable
                    "hs_email_direction": "OUTBOUND",
                    "hs_email_status": "SEND",
                    "hs_email_subject": input_data.email_data.get("subject"),
                    "hs_email_text": input_data.email_data.get("content"),
                    "hs_email_to_email": input_data.email_data.get("recipient"),
                }
            }

            response = requests.post(email_url, headers=headers, json=email_data)
            result = response.json()
            yield "result", result
            yield "status", "email_sent"

        elif input_data.operation == "track_engagement":
            # Get engagement events for the contact
            from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
            engagement_url = (
                f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
            )

            params = {"limit": 100, "after": from_date.isoformat()}

            response = requests.get(engagement_url, headers=headers, params=params)
            engagements = response.json()

            # Process engagement metrics
            metrics = {
                "email_opens": 0,
                "email_clicks": 0,
                "email_replies": 0,
                "last_engagement": None,
                "engagement_score": 0,
            }

            for engagement in engagements.get("results", []):
                eng_type = engagement.get("properties", {}).get("hs_engagement_type")
                if eng_type == "EMAIL":
                    metrics["email_opens"] += 1
                elif eng_type == "EMAIL_CLICK":
                    metrics["email_clicks"] += 1
                elif eng_type == "EMAIL_REPLY":
                    metrics["email_replies"] += 1

                # Update last engagement time
                eng_time = engagement.get("properties", {}).get("hs_timestamp")
                if eng_time and (
                    not metrics["last_engagement"]
                    or eng_time > metrics["last_engagement"]
                ):
                    metrics["last_engagement"] = eng_time

            # Calculate simple engagement score
            metrics["engagement_score"] = (
                metrics["email_opens"]
                + metrics["email_clicks"] * 2
                + metrics["email_replies"] * 3
            )

            yield "result", metrics
            yield "status", "engagement_tracked"
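The engagement score at the end of track_engagement is a plain weighted sum: opens count once, clicks twice, replies three times, so 5 opens, 2 clicks and 1 reply give 5 + 4 + 3 = 12. The same arithmetic factored into a standalone helper, for illustration only:

def engagement_score(opens: int, clicks: int, replies: int) -> int:
    # Mirrors HubSpotEngagementBlock: passive opens weigh least,
    # replies weigh most.
    return opens + clicks * 2 + replies * 3


assert engagement_score(5, 2, 1) == 12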
281  autogpt_platform/backend/backend/blocks/ideogram.py  Normal file
@@ -0,0 +1,281 @@
from enum import Enum
from typing import Any, Dict, Literal, Optional

from pydantic import SecretStr
from requests.exceptions import RequestException

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="ideogram",
    api_key=SecretStr("mock-ideogram-api-key"),
    title="Mock Ideogram API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


class IdeogramModelName(str, Enum):
    V2 = "V_2"
    V1 = "V_1"
    V1_TURBO = "V_1_TURBO"
    V2_TURBO = "V_2_TURBO"


class MagicPromptOption(str, Enum):
    AUTO = "AUTO"
    ON = "ON"
    OFF = "OFF"


class StyleType(str, Enum):
    AUTO = "AUTO"
    GENERAL = "GENERAL"
    REALISTIC = "REALISTIC"
    DESIGN = "DESIGN"
    RENDER_3D = "RENDER_3D"
    ANIME = "ANIME"


class ColorPalettePreset(str, Enum):
    NONE = "NONE"
    EMBER = "EMBER"
    FRESH = "FRESH"
    JUNGLE = "JUNGLE"
    MAGIC = "MAGIC"
    MELON = "MELON"
    MOSAIC = "MOSAIC"
    PASTEL = "PASTEL"
    ULTRAMARINE = "ULTRAMARINE"


class AspectRatio(str, Enum):
    ASPECT_10_16 = "ASPECT_10_16"
    ASPECT_16_10 = "ASPECT_16_10"
    ASPECT_9_16 = "ASPECT_9_16"
    ASPECT_16_9 = "ASPECT_16_9"
    ASPECT_3_2 = "ASPECT_3_2"
    ASPECT_2_3 = "ASPECT_2_3"
    ASPECT_4_3 = "ASPECT_4_3"
    ASPECT_3_4 = "ASPECT_3_4"
    ASPECT_1_1 = "ASPECT_1_1"
    ASPECT_1_3 = "ASPECT_1_3"
    ASPECT_3_1 = "ASPECT_3_1"


class UpscaleOption(str, Enum):
    AI_UPSCALE = "AI Upscale"
    NO_UPSCALE = "No Upscale"


class IdeogramModelBlock(Block):
    class Input(BlockSchema):
        credentials: CredentialsMetaInput[Literal["ideogram"], Literal["api_key"]] = (
            CredentialsField(
                provider="ideogram",
                supported_credential_types={"api_key"},
                description="The Ideogram integration can be used with any API key with sufficient permissions for the blocks it is used on.",
            )
        )
        prompt: str = SchemaField(
            description="Text prompt for image generation",
            placeholder="e.g., 'A futuristic cityscape at sunset'",
            title="Prompt",
        )
        ideogram_model_name: IdeogramModelName = SchemaField(
            description="The name of the Image Generation Model, e.g., V_2",
            default=IdeogramModelName.V2,
            title="Image Generation Model",
            advanced=False,
        )
        aspect_ratio: AspectRatio = SchemaField(
            description="Aspect ratio for the generated image",
            default=AspectRatio.ASPECT_1_1,
            title="Aspect Ratio",
            advanced=False,
        )
        upscale: UpscaleOption = SchemaField(
            description="Upscale the generated image",
            default=UpscaleOption.NO_UPSCALE,
            title="Upscale Image",
            advanced=False,
        )
        magic_prompt_option: MagicPromptOption = SchemaField(
            description="Whether to use MagicPrompt for enhancing the request",
            default=MagicPromptOption.AUTO,
            title="Magic Prompt Option",
            advanced=True,
        )
        seed: Optional[int] = SchemaField(
            description="Random seed. Set for reproducible generation",
            default=None,
            title="Seed",
            advanced=True,
        )
        style_type: StyleType = SchemaField(
            description="Style type to apply, applicable for V_2 and above",
            default=StyleType.AUTO,
            title="Style Type",
            advanced=True,
        )
        negative_prompt: Optional[str] = SchemaField(
            description="Description of what to exclude from the image",
            default=None,
            title="Negative Prompt",
            advanced=True,
        )
        color_palette_name: ColorPalettePreset = SchemaField(
            description="Color palette preset name, choose 'None' to skip",
            default=ColorPalettePreset.NONE,
            title="Color Palette Preset",
            advanced=True,
        )

    class Output(BlockSchema):
        result: str = SchemaField(description="Generated image URL")
        error: str = SchemaField(description="Error message if the model run failed")

    def __init__(self):
        super().__init__(
            id="6ab085e2-20b3-4055-bc3e-08036e01eca6",
            description="This block runs Ideogram models with both simple and advanced settings.",
            categories={BlockCategory.AI},
            input_schema=IdeogramModelBlock.Input,
            output_schema=IdeogramModelBlock.Output,
            test_input={
                "ideogram_model_name": IdeogramModelName.V2,
                "prompt": "A futuristic cityscape at sunset",
                "aspect_ratio": AspectRatio.ASPECT_1_1,
                "upscale": UpscaleOption.NO_UPSCALE,
                "magic_prompt_option": MagicPromptOption.AUTO,
                "seed": None,
                "style_type": StyleType.AUTO,
                "negative_prompt": None,
                "color_palette_name": ColorPalettePreset.NONE,
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[
                (
                    "result",
                    "https://ideogram.ai/api/images/test-generated-image-url.png",
                ),
            ],
            test_mock={
                "run_model": lambda api_key, model_name, prompt, seed, aspect_ratio, magic_prompt_option, style_type, negative_prompt, color_palette_name: "https://ideogram.ai/api/images/test-generated-image-url.png",
                "upscale_image": lambda api_key, image_url: "https://ideogram.ai/api/images/test-upscaled-image-url.png",
            },
            test_credentials=TEST_CREDENTIALS,
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        seed = input_data.seed

        # Step 1: Generate the image
        result = self.run_model(
            api_key=credentials.api_key,
            model_name=input_data.ideogram_model_name.value,
            prompt=input_data.prompt,
            seed=seed,
            aspect_ratio=input_data.aspect_ratio.value,
            magic_prompt_option=input_data.magic_prompt_option.value,
            style_type=input_data.style_type.value,
            negative_prompt=input_data.negative_prompt,
            color_palette_name=input_data.color_palette_name.value,
        )

        # Step 2: Upscale the image if requested
        if input_data.upscale == UpscaleOption.AI_UPSCALE:
            result = self.upscale_image(
                api_key=credentials.api_key,
                image_url=result,
            )

        yield "result", result

    def run_model(
        self,
        api_key: SecretStr,
        model_name: str,
        prompt: str,
        seed: Optional[int],
        aspect_ratio: str,
        magic_prompt_option: str,
        style_type: str,
        negative_prompt: Optional[str],
        color_palette_name: str,
    ):
        url = "https://api.ideogram.ai/generate"
        headers = {
            "Api-Key": api_key.get_secret_value(),
            "Content-Type": "application/json",
        }

        data: Dict[str, Any] = {
            "image_request": {
                "prompt": prompt,
                "model": model_name,
                "aspect_ratio": aspect_ratio,
                "magic_prompt_option": magic_prompt_option,
                "style_type": style_type,
            }
        }

        if seed is not None:
            data["image_request"]["seed"] = seed

        if negative_prompt:
            data["image_request"]["negative_prompt"] = negative_prompt

        if color_palette_name != "NONE":
            data["image_request"]["color_palette"] = {"name": color_palette_name}

        try:
            response = requests.post(url, json=data, headers=headers)
            return response.json()["data"][0]["url"]
        except RequestException as e:
            raise Exception(f"Failed to fetch image: {str(e)}")

    def upscale_image(self, api_key: SecretStr, image_url: str):
        url = "https://api.ideogram.ai/upscale"
        headers = {
            "Api-Key": api_key.get_secret_value(),
        }

        try:
            # Step 1: Download the image from the provided URL
            image_response = requests.get(image_url)

            # Step 2: Send the downloaded image to the upscale API
            files = {
                "image_file": ("image.png", image_response.content, "image/png"),
            }

            response = requests.post(
                url,
                headers=headers,
                data={
                    "image_request": "{}",  # Empty JSON object
                },
                files=files,
            )

            return response.json()["data"][0]["url"]

        except RequestException as e:
            raise Exception(f"Failed to upscale image: {str(e)}")
@@ -2,13 +2,28 @@ from typing import Any
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
 from backend.data.model import SchemaField
+from backend.util.json import json
 
 
 class StepThroughItemsBlock(Block):
     class Input(BlockSchema):
-        items: list | dict = SchemaField(
+        items: list = SchemaField(
             advanced=False,
             description="The list or dictionary of items to iterate over",
             placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
+            default=[],
+        )
+        items_object: dict = SchemaField(
+            advanced=False,
+            description="The list or dictionary of items to iterate over",
+            placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
+            default={},
+        )
+        items_str: str = SchemaField(
+            advanced=False,
+            description="The list or dictionary of items to iterate over",
+            placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
+            default="",
         )
 
     class Output(BlockSchema):
@@ -39,14 +54,20 @@ class StepThroughItemsBlock(Block):
         )
 
     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        items = input_data.items
-        if isinstance(items, dict):
-            # If items is a dictionary, iterate over its values
-            for item in items.values():
-                yield "item", item
-                yield "key", item
-        else:
-            # If items is a list, iterate over the list
-            for index, item in enumerate(items):
-                yield "item", item
-                yield "key", index
+        for data in [input_data.items, input_data.items_object, input_data.items_str]:
+            if not data:
+                continue
+            if isinstance(data, str):
+                items = json.loads(data)
+            else:
+                items = data
+            if isinstance(items, dict):
+                # If items is a dictionary, iterate over its keys and values
+                for key, item in items.items():
+                    yield "item", item
+                    yield "key", key
+            else:
+                # If items is a list, iterate over the list
+                for index, item in enumerate(items):
+                    yield "item", item
+                    yield "key", index
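The yielded pairs are easiest to see with a concrete run. A minimal driver sketch (not the platform's executor; stdlib json stands in for backend.util.json) over the same iteration logic:

import json

def step_through(data):
    # Mirrors StepThroughItemsBlock.run for a single input value.
    items = json.loads(data) if isinstance(data, str) else data
    if isinstance(items, dict):
        for key, item in items.items():
            yield "item", item
            yield "key", key
    else:
        for index, item in enumerate(items):
            yield "item", item
            yield "key", index

print(list(step_through([10, 20])))
# [('item', 10), ('key', 0), ('item', 20), ('key', 1)]
print(list(step_through('{"a": 1}')))
# [('item', 1), ('key', 'a')]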
52  autogpt_platform/backend/backend/blocks/jina/_auth.py  Normal file
@@ -0,0 +1,52 @@
from typing import Literal

from pydantic import SecretStr

from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput

JinaCredentials = APIKeyCredentials
JinaCredentialsInput = CredentialsMetaInput[
    Literal["jina"],
    Literal["api_key"],
]


def JinaCredentialsField() -> JinaCredentialsInput:
    """Creates a Jina credentials input on a block."""
    return CredentialsField(
        provider="jina",
        supported_credential_types={"api_key"},
        description="The Jina integration can be used with an API Key.",
    )


TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="jina",
    api_key=SecretStr("mock-jina-api-key"),
    title="Mock Jina API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}
67  autogpt_platform/backend/backend/blocks/jina/chunking.py  Normal file
@@ -0,0 +1,67 @@
from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
    JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class JinaChunkingBlock(Block):
    class Input(BlockSchema):
        texts: list = SchemaField(description="List of texts to chunk")
        credentials: JinaCredentialsInput = JinaCredentialsField()
        max_chunk_length: int = SchemaField(
            description="Maximum length of each chunk", default=1000
        )
        return_tokens: bool = SchemaField(
            description="Whether to return token information", default=False
        )

    class Output(BlockSchema):
        chunks: list = SchemaField(description="List of chunked texts")
        tokens: list = SchemaField(
            description="List of token information for each chunk", optional=True
        )

    def __init__(self):
        super().__init__(
            id="806fb15e-830f-4796-8692-557d300ff43c",
            description="Chunks texts using Jina AI's segmentation service",
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=JinaChunkingBlock.Input,
            output_schema=JinaChunkingBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        url = "https://segment.jina.ai/"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }

        all_chunks = []
        all_tokens = []

        for text in input_data.texts:
            data = {
                "content": text,
                "return_tokens": str(input_data.return_tokens).lower(),
                "return_chunks": "true",
                "max_chunk_length": str(input_data.max_chunk_length),
            }

            response = requests.post(url, headers=headers, json=data)
            result = response.json()

            all_chunks.extend(result.get("chunks", []))
            if input_data.return_tokens:
                all_tokens.extend(result.get("tokens", []))

        yield "chunks", all_chunks
        if input_data.return_tokens:
            yield "tokens", all_tokens
43  autogpt_platform/backend/backend/blocks/jina/embeddings.py  Normal file
@@ -0,0 +1,43 @@
from backend.blocks.jina._auth import (
    JinaCredentials,
    JinaCredentialsField,
    JinaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests


class JinaEmbeddingBlock(Block):
    class Input(BlockSchema):
        texts: list = SchemaField(description="List of texts to embed")
        credentials: JinaCredentialsInput = JinaCredentialsField()
        model: str = SchemaField(
            description="Jina embedding model to use",
            default="jina-embeddings-v2-base-en",
        )

    class Output(BlockSchema):
        embeddings: list = SchemaField(description="List of embeddings")

    def __init__(self):
        super().__init__(
            id="7c56b3ab-62e7-43a2-a2dc-4ec4245660b6",
            description="Generates embeddings using Jina AI",
            categories={BlockCategory.AI},
            input_schema=JinaEmbeddingBlock.Input,
            output_schema=JinaEmbeddingBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        url = "https://api.jina.ai/v1/embeddings"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }
        data = {"input": input_data.texts, "model": input_data.model}
        response = requests.post(url, headers=headers, json=data)
        embeddings = [e["embedding"] for e in response.json()["data"]]
        yield "embeddings", embeddings
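Downstream, these embedding vectors are typically compared by cosine similarity, which is also the default metric of the Pinecone blocks later in this diff. A dependency-free sketch of the comparison, assuming two vectors taken from the embeddings output:

import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    # dot(a, b) / (|a| * |b|); 1.0 means identical direction.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)

print(cosine_similarity([1.0, 0.0], [0.707, 0.707]))  # ~0.707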
107  autogpt_platform/backend/backend/blocks/jina/search.py  Normal file
@@ -0,0 +1,107 @@
from urllib.parse import quote

from backend.blocks.jina._auth import (
    TEST_CREDENTIALS,
    TEST_CREDENTIALS_INPUT,
    JinaCredentials,
    JinaCredentialsField,
    JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class SearchTheWebBlock(Block, GetRequest):
    class Input(BlockSchema):
        credentials: JinaCredentialsInput = JinaCredentialsField()
        query: str = SchemaField(description="The search query to search the web for")

    class Output(BlockSchema):
        results: str = SchemaField(
            description="The search results including content from top 5 URLs"
        )
        error: str = SchemaField(description="Error message if the search fails")

    def __init__(self):
        super().__init__(
            id="87840993-2053-44b7-8da4-187ad4ee518c",
            description="This block searches the internet for the given search query.",
            categories={BlockCategory.SEARCH},
            input_schema=SearchTheWebBlock.Input,
            output_schema=SearchTheWebBlock.Output,
            test_input={
                "credentials": TEST_CREDENTIALS_INPUT,
                "query": "Artificial Intelligence",
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=("results", "search content"),
            test_mock={"get_request": lambda *args, **kwargs: "search content"},
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        # Encode the search query
        encoded_query = quote(input_data.query)
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
        }

        # Prepend the Jina Search URL to the encoded query
        jina_search_url = f"https://s.jina.ai/{encoded_query}"
        results = self.get_request(jina_search_url, headers=headers, json=False)

        # Output the search results
        yield "results", results


class ExtractWebsiteContentBlock(Block, GetRequest):
    class Input(BlockSchema):
        credentials: JinaCredentialsInput = JinaCredentialsField()
        url: str = SchemaField(description="The URL to scrape the content from")
        raw_content: bool = SchemaField(
            default=False,
            title="Raw Content",
            description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
            advanced=True,
        )

    class Output(BlockSchema):
        content: str = SchemaField(description="The scraped content from the given URL")
        error: str = SchemaField(
            description="Error message if the content cannot be retrieved"
        )

    def __init__(self):
        super().__init__(
            id="436c3984-57fd-4b85-8e9a-459b356883bd",
            description="This block scrapes the content from the given web URL.",
            categories={BlockCategory.SEARCH},
            input_schema=ExtractWebsiteContentBlock.Input,
            output_schema=ExtractWebsiteContentBlock.Output,
            test_input={
                "url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_credentials=TEST_CREDENTIALS,
            test_output=("content", "scraped content"),
            test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
        )

    def run(
        self, input_data: Input, *, credentials: JinaCredentials, **kwargs
    ) -> BlockOutput:
        if input_data.raw_content:
            url = input_data.url
            headers = {}
        else:
            url = f"https://r.jina.ai/{input_data.url}"
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
            }

        content = self.get_request(url, json=False, headers=headers)
        yield "content", content
File diff suppressed because it is too large
@@ -1,9 +1,38 @@
-from typing import List
+from enum import Enum
+from typing import List, Literal
 
-import requests
+from pydantic import SecretStr
 
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.model import (
+    APIKeyCredentials,
+    BlockSecret,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+    SecretField,
+)
+from backend.util.request import requests
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="medium",
+    api_key=SecretStr("mock-medium-api-key"),
+    title="Mock Medium API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}
+
+
+class PublishToMediumStatus(str, Enum):
+    PUBLIC = "public"
+    DRAFT = "draft"
+    UNLISTED = "unlisted"
 
 
 class PublishToMediumBlock(Block):
@@ -34,9 +63,9 @@ class PublishToMediumBlock(Block):
             description="The original home of this content, if it was originally published elsewhere",
             placeholder="https://yourblog.com/original-post",
         )
-        publish_status: str = SchemaField(
-            description="The publish status: 'public', 'draft', or 'unlisted'",
-            placeholder="public",
+        publish_status: PublishToMediumStatus = SchemaField(
+            description="The publish status",
+            placeholder=PublishToMediumStatus.DRAFT,
         )
         license: str = SchemaField(
             default="all-rights-reserved",
@@ -48,10 +77,12 @@ class PublishToMediumBlock(Block):
             description="Whether to notify followers that the user has published",
             placeholder="False",
         )
-        api_key: BlockSecret = SecretField(
-            key="medium_api_key",
-            description="""The API key for the Medium integration. You can get this from https://medium.com/me/settings/security and scrolling down to "integration Tokens".""",
-            placeholder="Enter your Medium API key",
+        credentials: CredentialsMetaInput[Literal["medium"], Literal["api_key"]] = (
+            CredentialsField(
+                provider="medium",
+                supported_credential_types={"api_key"},
+                description="The Medium integration can be used with any API key with sufficient permissions for the blocks it is used on.",
+            )
         )
 
     class Output(BlockSchema):
@@ -79,8 +110,8 @@ class PublishToMediumBlock(Block):
                 "tags": ["test", "automation"],
                 "license": "all-rights-reserved",
                 "notify_followers": False,
-                "publish_status": "draft",
-                "api_key": "your_test_api_key",
+                "publish_status": PublishToMediumStatus.DRAFT.value,
+                "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_output=[
                 ("post_id", "e6f36a"),
@@ -97,11 +128,12 @@ class PublishToMediumBlock(Block):
                     }
                 }
             },
+            test_credentials=TEST_CREDENTIALS,
         )
 
     def create_post(
         self,
-        api_key,
+        api_key: SecretStr,
         author_id,
         title,
         content,
@@ -113,7 +145,7 @@ class PublishToMediumBlock(Block):
         notify_followers,
     ):
         headers = {
-            "Authorization": f"Bearer {api_key}",
+            "Authorization": f"Bearer {api_key.get_secret_value()}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
@@ -137,32 +169,28 @@ class PublishToMediumBlock(Block):
 
         return response.json()
 
-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            response = self.create_post(
-                input_data.api_key.get_secret_value(),
-                input_data.author_id.get_secret_value(),
-                input_data.title,
-                input_data.content,
-                input_data.content_format,
-                input_data.tags,
-                input_data.canonical_url,
-                input_data.publish_status,
-                input_data.license,
-                input_data.notify_followers,
-            )
-
-            if "data" in response:
-                yield "post_id", response["data"]["id"]
-                yield "post_url", response["data"]["url"]
-                yield "published_at", response["data"]["publishedAt"]
-            else:
-                error_message = response.get("errors", [{}])[0].get(
-                    "message", "Unknown error occurred"
-                )
-                yield "error", f"Failed to create Medium post: {error_message}"
-
-        except requests.RequestException as e:
-            yield "error", f"Network error occurred while creating Medium post: {str(e)}"
-        except Exception as e:
-            yield "error", f"Error occurred while creating Medium post: {str(e)}"
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
+        response = self.create_post(
+            credentials.api_key,
+            input_data.author_id.get_secret_value(),
+            input_data.title,
+            input_data.content,
+            input_data.content_format,
+            input_data.tags,
+            input_data.canonical_url,
+            input_data.publish_status,
+            input_data.license,
+            input_data.notify_followers,
+        )
+
+        if "data" in response:
+            yield "post_id", response["data"]["id"]
+            yield "post_url", response["data"]["url"]
+            yield "published_at", response["data"]["publishedAt"]
+        else:
+            error_message = response.get("errors", [{}])[0].get(
+                "message", "Unknown error occurred"
+            )
+            raise RuntimeError(f"Failed to create Medium post: {error_message}")
232  autogpt_platform/backend/backend/blocks/pinecone.py  Normal file
@@ -0,0 +1,232 @@
|
||||
import uuid
|
||||
from typing import Any, Literal
|
||||
|
||||
from pinecone import Pinecone, ServerlessSpec
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import (
|
||||
APIKeyCredentials,
|
||||
CredentialsField,
|
||||
CredentialsMetaInput,
|
||||
SchemaField,
|
||||
)
|
||||
|
||||
PineconeCredentials = APIKeyCredentials
|
||||
PineconeCredentialsInput = CredentialsMetaInput[
|
||||
Literal["pinecone"],
|
||||
Literal["api_key"],
|
||||
]
|
||||
|
||||
|
||||
def PineconeCredentialsField() -> PineconeCredentialsInput:
|
||||
"""
|
||||
Creates a Pinecone credentials input on a block.
|
||||
|
||||
"""
|
||||
return CredentialsField(
|
||||
provider="pinecone",
|
||||
supported_credential_types={"api_key"},
|
||||
description="The Pinecone integration can be used with an API Key.",
|
||||
)
|
||||
|
||||
|
||||
class PineconeInitBlock(Block):
|
||||
class Input(BlockSchema):
|
||||
credentials: PineconeCredentialsInput = PineconeCredentialsField()
|
||||
index_name: str = SchemaField(description="Name of the Pinecone index")
|
||||
dimension: int = SchemaField(
|
||||
description="Dimension of the vectors", default=768
|
||||
)
|
||||
metric: str = SchemaField(
|
||||
description="Distance metric for the index", default="cosine"
|
||||
)
|
||||
cloud: str = SchemaField(
|
||||
description="Cloud provider for serverless", default="aws"
|
||||
)
|
||||
region: str = SchemaField(
|
||||
description="Region for serverless", default="us-east-1"
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
index: str = SchemaField(description="Name of the initialized Pinecone index")
|
||||
message: str = SchemaField(description="Status message")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="48d8fdab-8f03-41f3-8407-8107ba11ec9b",
|
||||
description="Initializes a Pinecone index",
|
||||
categories={BlockCategory.LOGIC},
|
||||
input_schema=PineconeInitBlock.Input,
|
||||
output_schema=PineconeInitBlock.Output,
|
||||
)
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
pc = Pinecone(api_key=credentials.api_key.get_secret_value())
|
||||
|
||||
try:
|
||||
existing_indexes = pc.list_indexes()
|
||||
if input_data.index_name not in [index.name for index in existing_indexes]:
|
||||
pc.create_index(
|
||||
name=input_data.index_name,
|
||||
dimension=input_data.dimension,
|
||||
metric=input_data.metric,
|
||||
spec=ServerlessSpec(
|
||||
cloud=input_data.cloud, region=input_data.region
|
||||
),
|
||||
)
|
||||
message = f"Created new index: {input_data.index_name}"
|
||||
else:
|
||||
message = f"Using existing index: {input_data.index_name}"
|
||||
|
||||
yield "index", input_data.index_name
|
||||
yield "message", message
|
||||
except Exception as e:
|
||||
yield "message", f"Error initializing Pinecone index: {str(e)}"
|
||||
|
||||
|
||||
class PineconeQueryBlock(Block):
    class Input(BlockSchema):
        credentials: PineconeCredentialsInput = PineconeCredentialsField()
        query_vector: list = SchemaField(description="Query vector")
        namespace: str = SchemaField(
            description="Namespace to query in Pinecone", default=""
        )
        top_k: int = SchemaField(
            description="Number of top results to return", default=3
        )
        include_values: bool = SchemaField(
            description="Whether to include vector values in the response",
            default=False,
        )
        include_metadata: bool = SchemaField(
            description="Whether to include metadata in the response", default=True
        )
        host: str = SchemaField(description="Host for Pinecone", default="")
        idx_name: str = SchemaField(description="Index name for Pinecone")

    class Output(BlockSchema):
        results: Any = SchemaField(description="Query results from Pinecone")
        combined_results: Any = SchemaField(
            description="Combined results from Pinecone"
        )

    def __init__(self):
        super().__init__(
            id="9ad93d0f-91b4-4c9c-8eb1-82e26b4a01c5",
            description="Queries a Pinecone index",
            categories={BlockCategory.LOGIC},
            input_schema=PineconeQueryBlock.Input,
            output_schema=PineconeQueryBlock.Output,
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            # Create a new client instance
            pc = Pinecone(api_key=credentials.api_key.get_secret_value())

            # Get the index
            idx = pc.Index(input_data.idx_name)

            # Ensure query_vector is in the correct format:
            # unwrap a nested [[...]] vector into a flat list
            query_vector = input_data.query_vector
            if isinstance(query_vector, list) and len(query_vector) > 0:
                if isinstance(query_vector[0], list):
                    query_vector = query_vector[0]

            results = idx.query(
                namespace=input_data.namespace,
                vector=query_vector,
                top_k=input_data.top_k,
                include_values=input_data.include_values,
                include_metadata=input_data.include_metadata,
            ).to_dict()
            combined_text = ""
            if results["matches"]:
                texts = [
                    match["metadata"]["text"]
                    for match in results["matches"]
                    if match.get("metadata", {}).get("text")
                ]
                combined_text = "\n\n".join(texts)

            # Return both the raw matches and the combined text
            yield "results", {
                "matches": results["matches"],
                "combined_text": combined_text,
            }
            yield "combined_results", combined_text

        except Exception as e:
            error_msg = f"Error querying Pinecone: {str(e)}"
            raise RuntimeError(error_msg) from e

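The query path above reduces to a short helper. A sketch under the same assumptions (v3 client; vectors were stored with their source text under metadata["text"], as PineconeInsertBlock below does); the function name is ours:

def combined_query_text(pc, index_name: str, vector: list[float], top_k: int = 3) -> str:
    idx = pc.Index(index_name)
    results = idx.query(vector=vector, top_k=top_k, include_metadata=True).to_dict()
    # Keep only matches that actually carry text metadata, then join them
    texts = [
        m["metadata"]["text"]
        for m in results["matches"]
        if m.get("metadata", {}).get("text")
    ]
    return "\n\n".join(texts)
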
class PineconeInsertBlock(Block):
    class Input(BlockSchema):
        credentials: PineconeCredentialsInput = PineconeCredentialsField()
        index: str = SchemaField(description="Initialized Pinecone index")
        chunks: list = SchemaField(description="List of text chunks to ingest")
        embeddings: list = SchemaField(
            description="List of embeddings corresponding to the chunks"
        )
        namespace: str = SchemaField(
            description="Namespace to use in Pinecone", default=""
        )
        metadata: dict = SchemaField(
            description="Additional metadata to store with each vector", default={}
        )

    class Output(BlockSchema):
        upsert_response: str = SchemaField(
            description="Response from Pinecone upsert operation"
        )

    def __init__(self):
        super().__init__(
            id="477f2168-cd91-475a-8146-9499a5982434",
            description="Upload data to a Pinecone index",
            categories={BlockCategory.LOGIC},
            input_schema=PineconeInsertBlock.Input,
            output_schema=PineconeInsertBlock.Output,
        )

    def run(
        self,
        input_data: Input,
        *,
        credentials: APIKeyCredentials,
        **kwargs,
    ) -> BlockOutput:
        try:
            # Create a new client instance
            pc = Pinecone(api_key=credentials.api_key.get_secret_value())

            # Get the index
            idx = pc.Index(input_data.index)

            vectors = []
            for chunk, embedding in zip(input_data.chunks, input_data.embeddings):
                vector_metadata = input_data.metadata.copy()
                vector_metadata["text"] = chunk
                vectors.append(
                    {
                        "id": str(uuid.uuid4()),
                        "values": embedding,
                        "metadata": vector_metadata,
                    }
                )
            idx.upsert(vectors=vectors, namespace=input_data.namespace)

            yield "upsert_response", "successfully upserted"

        except Exception as e:
            error_msg = f"Error uploading to Pinecone: {str(e)}"
            raise RuntimeError(error_msg) from e

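The chunk-to-vector mapping in PineconeInsertBlock.run is worth isolating, since it is what establishes the metadata["text"] convention the query block relies on. A sketch (the helper name is ours):

import uuid

def to_vectors(chunks: list[str], embeddings: list[list[float]], base_meta: dict) -> list[dict]:
    vectors = []
    for chunk, embedding in zip(chunks, embeddings):
        meta = {**base_meta, "text": chunk}  # copy, so the shared dict is not mutated
        vectors.append({"id": str(uuid.uuid4()), "values": embedding, "metadata": meta})
    return vectors

Note that zip() silently drops trailing items if chunks and embeddings differ in length; the block inherits that behavior.
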
@@ -2,10 +2,10 @@ from datetime import datetime, timezone
 from typing import Iterator

 import praw
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SecretField
+from backend.data.model import BlockSecret, SchemaField, SecretField
 from backend.util.mock import MockObject

@@ -48,28 +48,29 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit:

 class GetRedditPostsBlock(Block):
     class Input(BlockSchema):
-        subreddit: str = Field(description="Subreddit name")
-        creds: RedditCredentials = Field(
+        subreddit: str = SchemaField(description="Subreddit name")
+        creds: RedditCredentials = SchemaField(
             description="Reddit credentials",
             default=RedditCredentials(),
         )
-        last_minutes: int | None = Field(
+        last_minutes: int | None = SchemaField(
             description="Post time to stop minutes ago while fetching posts",
             default=None,
         )
-        last_post: str | None = Field(
+        last_post: str | None = SchemaField(
             description="Post ID to stop when reached while fetching posts",
             default=None,
         )
-        post_limit: int | None = Field(
+        post_limit: int | None = SchemaField(
             description="Number of posts to fetch", default=10
         )

     class Output(BlockSchema):
-        post: RedditPost = Field(description="Reddit post")
+        post: RedditPost = SchemaField(description="Reddit post")

     def __init__(self):
         super().__init__(
+            disabled=True,
             id="c6731acb-4285-4ee1-bc9b-03d0766c370f",
             description="This block fetches Reddit posts from a defined subreddit name.",
             categories={BlockCategory.SOCIAL},
@@ -114,7 +115,7 @@ class GetRedditPostsBlock(Block):
     def get_posts(input_data: Input) -> Iterator[praw.reddit.Submission]:
         client = get_praw(input_data.creds)
         subreddit = client.subreddit(input_data.subreddit)
-        return subreddit.new(limit=input_data.post_limit)
+        return subreddit.new(limit=input_data.post_limit or 10)

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
         current_time = datetime.now(tz=timezone.utc)
@@ -140,13 +141,13 @@ class GetRedditPostsBlock(Block):

 class PostRedditCommentBlock(Block):
     class Input(BlockSchema):
-        creds: RedditCredentials = Field(
+        creds: RedditCredentials = SchemaField(
             description="Reddit credentials", default=RedditCredentials()
         )
-        data: RedditComment = Field(description="Reddit comment")
+        data: RedditComment = SchemaField(description="Reddit comment")

     class Output(BlockSchema):
-        comment_id: str
+        comment_id: str = SchemaField(description="Posted comment ID")

     def __init__(self):
         super().__init__(
@@ -164,8 +165,10 @@ class PostRedditCommentBlock(Block):
     def reply_post(creds: RedditCredentials, comment: RedditComment) -> str:
         client = get_praw(creds)
         submission = client.submission(id=comment.post_id)
-        comment = submission.reply(comment.comment)
-        return comment.id  # type: ignore
+        new_comment = submission.reply(comment.comment)
+        if not new_comment:
+            raise ValueError("Failed to post comment.")
+        return new_comment.id

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
         yield "comment_id", self.reply_post(input_data.creds, input_data.data)

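The point of the Field-to-SchemaField swap throughout this file is that SchemaField is the platform's own field wrapper, which (judging from its uses elsewhere in this diff: placeholder, advanced, title) carries UI metadata that a plain pydantic Field does not. An illustrative input schema using only arguments seen in the diff:

from backend.data.block import BlockSchema
from backend.data.model import SchemaField

class ExampleInput(BlockSchema):
    subreddit: str = SchemaField(description="Subreddit name")
    post_limit: int | None = SchemaField(
        description="Number of posts to fetch", default=10
    )
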
@@ -1,22 +1,46 @@
 import os
 from enum import Enum
+from typing import Literal

 import replicate
+from pydantic import SecretStr
+from replicate.helpers import FileOutput

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)

+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="replicate",
+    api_key=SecretStr("mock-replicate-api-key"),
+    title="Mock Replicate API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}

 # Model name enum
 class ReplicateFluxModelName(str, Enum):
     FLUX_SCHNELL = ("Flux Schnell",)
     FLUX_PRO = ("Flux Pro",)
     FLUX_PRO1_1 = ("Flux Pro 1.1",)

     @property
     def api_name(self):
         api_names = {
             ReplicateFluxModelName.FLUX_SCHNELL: "black-forest-labs/flux-schnell",
             ReplicateFluxModelName.FLUX_PRO: "black-forest-labs/flux-pro",
             ReplicateFluxModelName.FLUX_PRO1_1: "black-forest-labs/flux-1.1-pro",
         }
         return api_names[self]

@@ -30,9 +54,13 @@ class ImageType(str, Enum):

 class ReplicateFluxAdvancedModelBlock(Block):
     class Input(BlockSchema):
-        api_key: BlockSecret = SecretField(
-            key="replicate_api_key",
-            description="Replicate API Key",
+        credentials: CredentialsMetaInput[Literal["replicate"], Literal["api_key"]] = (
+            CredentialsField(
+                provider="replicate",
+                supported_credential_types={"api_key"},
+                description="The Replicate integration can be used with "
+                "any API key with sufficient permissions for the blocks it is used on.",
+            )
         )
         prompt: str = SchemaField(
             description="Text prompt for image generation",
@@ -108,7 +136,7 @@ class ReplicateFluxAdvancedModelBlock(Block):
             input_schema=ReplicateFluxAdvancedModelBlock.Input,
             output_schema=ReplicateFluxAdvancedModelBlock.Output,
             test_input={
-                "api_key": "test_api_key",
+                "credentials": TEST_CREDENTIALS_INPUT,
                 "replicate_model_name": ReplicateFluxModelName.FLUX_SCHNELL,
                 "prompt": "A beautiful landscape painting of a serene lake at sunrise",
                 "seed": None,
@@ -129,36 +157,36 @@ class ReplicateFluxAdvancedModelBlock(Block):
             test_mock={
                 "run_model": lambda api_key, model_name, prompt, seed, steps, guidance, interval, aspect_ratio, output_format, output_quality, safety_tolerance: "https://replicate.com/output/generated-image-url.jpg",
             },
+            test_credentials=TEST_CREDENTIALS,
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
         # If the seed is not provided, generate a random seed
         seed = input_data.seed
         if seed is None:
             seed = int.from_bytes(os.urandom(4), "big")

-        try:
-            # Run the model using the provided inputs
-            result = self.run_model(
-                api_key=input_data.api_key.get_secret_value(),
-                model_name=input_data.replicate_model_name.api_name,
-                prompt=input_data.prompt,
-                seed=seed,
-                steps=input_data.steps,
-                guidance=input_data.guidance,
-                interval=input_data.interval,
-                aspect_ratio=input_data.aspect_ratio,
-                output_format=input_data.output_format,
-                output_quality=input_data.output_quality,
-                safety_tolerance=input_data.safety_tolerance,
-            )
-            yield "result", result
-        except Exception as e:
-            yield "error", str(e)
+        # Run the model using the provided inputs
+        result = self.run_model(
+            api_key=credentials.api_key,
+            model_name=input_data.replicate_model_name.api_name,
+            prompt=input_data.prompt,
+            seed=seed,
+            steps=input_data.steps,
+            guidance=input_data.guidance,
+            interval=input_data.interval,
+            aspect_ratio=input_data.aspect_ratio,
+            output_format=input_data.output_format,
+            output_quality=input_data.output_quality,
+            safety_tolerance=input_data.safety_tolerance,
+        )
+        yield "result", result

     def run_model(
         self,
-        api_key,
+        api_key: SecretStr,
         model_name,
         prompt,
         seed,
@@ -171,10 +199,10 @@ class ReplicateFluxAdvancedModelBlock(Block):
         safety_tolerance,
     ):
         # Initialize Replicate client with the API key
-        client = replicate.Client(api_token=api_key)
+        client = replicate.Client(api_token=api_key.get_secret_value())

         # Run the model with additional parameters
-        output = client.run(
+        # type: ignore below because the library changed client.run()'s return type
+        # without updating the hint: it should be overloaded on `use_file_output`
+        # to FileOutput | list[FileOutput], but is annotated Any | Iterator[Any].
+        output: FileOutput | list[FileOutput] = client.run(  # type: ignore
             f"{model_name}",
             input={
                 "prompt": prompt,
@@ -187,13 +215,21 @@ class ReplicateFluxAdvancedModelBlock(Block):
                 "output_quality": output_quality,
                 "safety_tolerance": safety_tolerance,
             },
+            wait=False,  # otherwise the API returns either a data: stream or a URL, depending on the model
         )

         # Check if output is a list or a string and extract accordingly; otherwise, assign a default message
         if isinstance(output, list) and len(output) > 0:
-            result_url = output[0]  # If output is a list, get the first element
+            if isinstance(output[0], FileOutput):
+                result_url = output[0].url  # If output is a list, get the first element's URL
+            else:
+                # Not a FileOutput; should never happen, but just in case.
+                result_url = output[0]
+        elif isinstance(output, FileOutput):
+            result_url = output.url  # If output is a FileOutput, use its URL
         elif isinstance(output, str):
-            result_url = output  # If output is a string, use it directly
+            # A bare string is possible given the library's loose type hints.
+            result_url = output
         else:
             result_url = (
                 "No output received"  # Fallback message if output is not as expected
             )

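The branching above collapses cleanly into a helper. A sketch mirroring the same logic, grounded in the replicate.helpers.FileOutput API the diff itself imports (the function name is ours):

from replicate.helpers import FileOutput

def output_to_url(output) -> str:
    # client.run() may hand back a FileOutput, a list of them, or a bare
    # string, per the loose upstream type hints noted above.
    if isinstance(output, list) and output:
        first = output[0]
        return first.url if isinstance(first, FileOutput) else str(first)
    if isinstance(output, FileOutput):
        return output.url
    if isinstance(output, str):
        return output
    return "No output received"
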
@@ -1,27 +1,27 @@
-from typing import Any
+from typing import Literal
 from urllib.parse import quote

 import requests
+from pydantic import SecretStr

+from backend.blocks.helpers.http import GetRequest
 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SchemaField, SecretField
-
-
-class GetRequest:
-    @classmethod
-    def get_request(cls, url: str, json=False) -> Any:
-        response = requests.get(url)
-        response.raise_for_status()
-        return response.json() if json else response.text
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)

 class GetWikipediaSummaryBlock(Block, GetRequest):
     class Input(BlockSchema):
-        topic: str
+        topic: str = SchemaField(description="The topic to fetch the summary for")

     class Output(BlockSchema):
-        summary: str
-        error: str
+        summary: str = SchemaField(description="The summary of the given topic")
+        error: str = SchemaField(
+            description="Error message if the summary cannot be retrieved"
+        )

     def __init__(self):
         super().__init__(
@@ -36,115 +36,58 @@ class GetWikipediaSummaryBlock(Block, GetRequest):
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            topic = input_data.topic
-            url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
-            response = self.get_request(url, json=True)
-            yield "summary", response["extract"]
-
-        except requests.exceptions.HTTPError as http_err:
-            yield "error", f"HTTP error occurred: {http_err}"
-
-        except requests.RequestException as e:
-            yield "error", f"Request to Wikipedia failed: {e}"
-
-        except KeyError as e:
-            yield "error", f"Error parsing Wikipedia response: {e}"
+        topic = input_data.topic
+        url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
+        response = self.get_request(url, json=True)
+        if "extract" not in response:
+            raise RuntimeError(f"Unable to parse Wikipedia response: {response}")
+        yield "summary", response["extract"]

-class SearchTheWebBlock(Block, GetRequest):
-    class Input(BlockSchema):
-        query: str  # The search query
-
-    class Output(BlockSchema):
-        results: str  # The search results including content from top 5 URLs
-        error: str  # Error message if the search fails
-
-    def __init__(self):
-        super().__init__(
-            id="87840993-2053-44b7-8da4-187ad4ee518c",
-            description="This block searches the internet for the given search query.",
-            categories={BlockCategory.SEARCH},
-            input_schema=SearchTheWebBlock.Input,
-            output_schema=SearchTheWebBlock.Output,
-            test_input={"query": "Artificial Intelligence"},
-            test_output=("results", "search content"),
-            test_mock={"get_request": lambda url, json: "search content"},
-        )
-
-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            # Encode the search query
-            encoded_query = quote(input_data.query)
-
-            # Prepend the Jina Search URL to the encoded query
-            jina_search_url = f"https://s.jina.ai/{encoded_query}"
-
-            # Make the request to Jina Search
-            response = self.get_request(jina_search_url, json=False)
-
-            # Output the search results
-            yield "results", response
-
-        except requests.exceptions.HTTPError as http_err:
-            yield "error", f"HTTP error occurred: {http_err}"
-
-        except requests.RequestException as e:
-            yield "error", f"Request to Jina Search failed: {e}"
-
-
-class ExtractWebsiteContentBlock(Block, GetRequest):
-    class Input(BlockSchema):
-        url: str  # The URL to scrape
-        raw_content: bool = SchemaField(
-            default=False,
-            title="Raw Content",
-            description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
-            advanced=True,
-        )
-
-    class Output(BlockSchema):
-        content: str  # The scraped content from the URL
-        error: str
-
-    def __init__(self):
-        super().__init__(
-            id="436c3984-57fd-4b85-8e9a-459b356883bd",
-            description="This block scrapes the content from the given web URL.",
-            categories={BlockCategory.SEARCH},
-            input_schema=ExtractWebsiteContentBlock.Input,
-            output_schema=ExtractWebsiteContentBlock.Output,
-            test_input={"url": "https://en.wikipedia.org/wiki/Artificial_intelligence"},
-            test_output=("content", "scraped content"),
-            test_mock={"get_request": lambda url, json: "scraped content"},
-        )
-
-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        if input_data.raw_content:
-            url = input_data.url
-        else:
-            url = f"https://r.jina.ai/{input_data.url}"
-
-        try:
-            content = self.get_request(url, json=False)
-            yield "content", content
-        except requests.exceptions.HTTPError as http_err:
-            yield "error", f"HTTP error occurred: {http_err}"
-        except requests.RequestException as e:
-            yield "error", f"Request to URL failed: {e}"
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="openweathermap",
+    api_key=SecretStr("mock-openweathermap-api-key"),
+    title="Mock OpenWeatherMap API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}

 class GetWeatherInformationBlock(Block, GetRequest):
     class Input(BlockSchema):
-        location: str
-        api_key: BlockSecret = SecretField(key="openweathermap_api_key")
-        use_celsius: bool = True
+        location: str = SchemaField(
+            description="Location to get weather information for"
+        )
+        credentials: CredentialsMetaInput[
+            Literal["openweathermap"], Literal["api_key"]
+        ] = CredentialsField(
+            provider="openweathermap",
+            supported_credential_types={"api_key"},
+            description="The OpenWeatherMap integration can be used with "
+            "any API key with sufficient permissions for the blocks it is used on.",
+        )
+        use_celsius: bool = SchemaField(
+            default=True,
+            description="Whether to use Celsius or Fahrenheit for temperature",
+        )

     class Output(BlockSchema):
-        temperature: str
-        humidity: str
-        condition: str
-        error: str
+        temperature: str = SchemaField(
+            description="Temperature in the specified location"
+        )
+        humidity: str = SchemaField(description="Humidity in the specified location")
+        condition: str = SchemaField(
+            description="Weather condition in the specified location"
+        )
+        error: str = SchemaField(
+            description="Error message if the weather information cannot be retrieved"
+        )

     def __init__(self):
         super().__init__(
@@ -154,8 +97,8 @@ class GetWeatherInformationBlock(Block, GetRequest):
             description="Retrieves weather information for a specified location using OpenWeatherMap API.",
             test_input={
                 "location": "New York",
-                "api_key": "YOUR_API_KEY",
                 "use_celsius": True,
+                "credentials": TEST_CREDENTIALS_INPUT,
             },
             test_output=[
                 ("temperature", "21.66"),
@@ -168,29 +111,21 @@ class GetWeatherInformationBlock(Block, GetRequest):
                     "weather": [{"description": "overcast clouds"}],
                 }
             },
+            test_credentials=TEST_CREDENTIALS,
         )

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            units = "metric" if input_data.use_celsius else "imperial"
-            api_key = input_data.api_key.get_secret_value()
-            location = input_data.location
-            url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
-            weather_data = self.get_request(url, json=True)
-
-            if "main" in weather_data and "weather" in weather_data:
-                yield "temperature", str(weather_data["main"]["temp"])
-                yield "humidity", str(weather_data["main"]["humidity"])
-                yield "condition", weather_data["weather"][0]["description"]
-            else:
-                yield "error", f"Expected keys not found in response: {weather_data}"
-
-        except requests.exceptions.HTTPError as http_err:
-            if http_err.response.status_code == 403:
-                yield "error", "Request to weather API failed: 403 Forbidden. Check your API key and permissions."
-            else:
-                yield "error", f"HTTP error occurred: {http_err}"
-        except requests.RequestException as e:
-            yield "error", f"Request to weather API failed: {e}"
-        except KeyError as e:
-            yield "error", f"Error processing weather data: {e}"
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
+        units = "metric" if input_data.use_celsius else "imperial"
+        api_key = credentials.api_key
+        location = input_data.location
+        url = f"http://api.openweathermap.org/data/2.5/weather?q={quote(location)}&appid={api_key}&units={units}"
+        weather_data = self.get_request(url, json=True)
+
+        if "main" in weather_data and "weather" in weather_data:
+            yield "temperature", str(weather_data["main"]["temp"])
+            yield "humidity", str(weather_data["main"]["humidity"])
+            yield "condition", weather_data["weather"][0]["description"]
+        else:
+            raise RuntimeError(f"Expected keys not found in response: {weather_data}")

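The recurring signature change in this diff — run(self, input_data, *, credentials: APIKeyCredentials, **kwargs) — means the executor, not the block, resolves the stored credential and injects it keyword-only. A hedged sketch of how the fixtures above would exercise the weather block (the invocation convention is inferred from the test_input/test_credentials pairing, not shown in the diff):

block = GetWeatherInformationBlock()
outputs = dict(
    block.run(
        GetWeatherInformationBlock.Input(
            location="New York",
            credentials=TEST_CREDENTIALS_INPUT,  # metadata selecting which credential to use
        ),
        credentials=TEST_CREDENTIALS,  # the resolved APIKeyCredentials object
    )
)
print(outputs["temperature"], outputs["condition"])

Note that run() is a generator of (output_name, value) pairs, which is why dict() can collect it.
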
@@ -1,19 +1,45 @@
 import time
 from typing import Literal

-import requests
+from pydantic import SecretStr

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
-from backend.data.model import BlockSecret, SchemaField, SecretField
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)
+from backend.util.request import requests

+TEST_CREDENTIALS = APIKeyCredentials(
+    id="01234567-89ab-cdef-0123-456789abcdef",
+    provider="d_id",
+    api_key=SecretStr("mock-d-id-api-key"),
+    title="Mock D-ID API key",
+    expires_at=None,
+)
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}

 class CreateTalkingAvatarVideoBlock(Block):
     class Input(BlockSchema):
-        api_key: BlockSecret = SecretField(
-            key="did_api_key", description="D-ID API Key"
+        credentials: CredentialsMetaInput[Literal["d_id"], Literal["api_key"]] = (
+            CredentialsField(
+                provider="d_id",
+                supported_credential_types={"api_key"},
+                description="The D-ID integration can be used with "
+                "any API key with sufficient permissions for the blocks it is used on.",
+            )
         )
         script_input: str = SchemaField(
-            description="The text input for the script", default="Welcome to AutoGPT"
+            description="The text input for the script",
+            placeholder="Welcome to AutoGPT",
         )
         provider: Literal["microsoft", "elevenlabs", "amazon"] = SchemaField(
             description="The voice provider to use", default="microsoft"
@@ -57,7 +83,7 @@ class CreateTalkingAvatarVideoBlock(Block):
             input_schema=CreateTalkingAvatarVideoBlock.Input,
             output_schema=CreateTalkingAvatarVideoBlock.Output,
             test_input={
-                "api_key": "your_test_api_key",
+                "credentials": TEST_CREDENTIALS_INPUT,
                 "script_input": "Welcome to AutoGPT",
                 "voice_id": "en-US-JennyNeural",
                 "presenter_id": "amy-Aq6OmGZnMt",
@@ -85,62 +111,63 @@ class CreateTalkingAvatarVideoBlock(Block):
                 "result_url": "https://d-id.com/api/clips/abcd1234-5678-efgh-ijkl-mnopqrstuvwx/video",
             },
         },
+        test_credentials=TEST_CREDENTIALS,
     )

-    def create_clip(self, api_key: str, payload: dict) -> dict:
+    def create_clip(self, api_key: SecretStr, payload: dict) -> dict:
         url = "https://api.d-id.com/clips"
         headers = {
             "accept": "application/json",
             "content-type": "application/json",
-            "authorization": f"Basic {api_key}",
+            "authorization": f"Basic {api_key.get_secret_value()}",
         }
         response = requests.post(url, json=payload, headers=headers)
         response.raise_for_status()
         return response.json()

-    def get_clip_status(self, api_key: str, clip_id: str) -> dict:
+    def get_clip_status(self, api_key: SecretStr, clip_id: str) -> dict:
         url = f"https://api.d-id.com/clips/{clip_id}"
-        headers = {"accept": "application/json", "authorization": f"Basic {api_key}"}
+        headers = {
+            "accept": "application/json",
+            "authorization": f"Basic {api_key.get_secret_value()}",
+        }
         response = requests.get(url, headers=headers)
         response.raise_for_status()
         return response.json()

-    def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        try:
-            # Create the clip
-            payload = {
-                "script": {
-                    "type": "text",
-                    "subtitles": str(input_data.subtitles).lower(),
-                    "provider": {
-                        "type": input_data.provider,
-                        "voice_id": input_data.voice_id,
-                    },
-                    "ssml": str(input_data.ssml).lower(),
-                    "input": input_data.script_input,
-                },
-                "config": {"result_format": input_data.result_format},
-                "presenter_config": {"crop": {"type": input_data.crop_type}},
-                "presenter_id": input_data.presenter_id,
-                "driver_id": input_data.driver_id,
-            }
-
-            response = self.create_clip(input_data.api_key.get_secret_value(), payload)
-            clip_id = response["id"]
-
-            # Poll for clip status
-            for _ in range(input_data.max_polling_attempts):
-                status_response = self.get_clip_status(
-                    input_data.api_key.get_secret_value(), clip_id
-                )
-                if status_response["status"] == "done":
-                    yield "video_url", status_response["result_url"]
-                    return
-                elif status_response["status"] == "error":
-                    yield "error", f"Clip creation failed: {status_response.get('error', 'Unknown error')}"
-                    return
-                time.sleep(input_data.polling_interval)
-
-            yield "error", "Clip creation timed out"
-        except Exception as e:
-            yield "error", str(e)
+    def run(
+        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
+    ) -> BlockOutput:
+        # Create the clip
+        payload = {
+            "script": {
+                "type": "text",
+                "subtitles": str(input_data.subtitles).lower(),
+                "provider": {
+                    "type": input_data.provider,
+                    "voice_id": input_data.voice_id,
+                },
+                "ssml": str(input_data.ssml).lower(),
+                "input": input_data.script_input,
+            },
+            "config": {"result_format": input_data.result_format},
+            "presenter_config": {"crop": {"type": input_data.crop_type}},
+            "presenter_id": input_data.presenter_id,
+            "driver_id": input_data.driver_id,
+        }
+
+        response = self.create_clip(credentials.api_key, payload)
+        clip_id = response["id"]
+
+        # Poll for clip status
+        for _ in range(input_data.max_polling_attempts):
+            status_response = self.get_clip_status(credentials.api_key, clip_id)
+            if status_response["status"] == "done":
+                yield "video_url", status_response["result_url"]
+                return
+            elif status_response["status"] == "error":
+                raise RuntimeError(
+                    f"Clip creation failed: {status_response.get('error', 'Unknown error')}"
+                )
+            time.sleep(input_data.polling_interval)
+
+        raise TimeoutError("Clip creation timed out")

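The create-then-poll shape above generalizes. A compact sketch of the same loop with the block specifics stripped out (all names here are ours):

import time

def poll_until_done(fetch_status, max_attempts: int, interval: float) -> dict:
    for _ in range(max_attempts):
        status = fetch_status()
        if status["status"] == "done":
            return status
        if status["status"] == "error":
            # Mirror the block: surface the remote failure as an exception
            raise RuntimeError(status.get("error", "Unknown error"))
        time.sleep(interval)
    raise TimeoutError("Polling timed out")

Raising instead of yielding an "error" output (the old behavior) lets the platform's generic error handling deal with failures uniformly.
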
@@ -2,9 +2,9 @@ import re
 from typing import Any

 from jinja2 import BaseLoader, Environment
-from pydantic import Field

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField
 from backend.util import json

 jinja = Environment(loader=BaseLoader())
@@ -12,15 +12,17 @@ jinja = Environment(loader=BaseLoader())

 class MatchTextPatternBlock(Block):
     class Input(BlockSchema):
-        text: Any = Field(description="Text to match")
-        match: str = Field(description="Pattern (Regex) to match")
-        data: Any = Field(description="Data to be forwarded to output")
-        case_sensitive: bool = Field(description="Case sensitive match", default=True)
-        dot_all: bool = Field(description="Dot matches all", default=True)
+        text: Any = SchemaField(description="Text to match")
+        match: str = SchemaField(description="Pattern (Regex) to match")
+        data: Any = SchemaField(description="Data to be forwarded to output")
+        case_sensitive: bool = SchemaField(
+            description="Case sensitive match", default=True
+        )
+        dot_all: bool = SchemaField(description="Dot matches all", default=True)

     class Output(BlockSchema):
-        positive: Any = Field(description="Output data if match is found")
-        negative: Any = Field(description="Output data if match is not found")
+        positive: Any = SchemaField(description="Output data if match is found")
+        negative: Any = SchemaField(description="Output data if match is not found")

     def __init__(self):
         super().__init__(
@@ -64,15 +66,17 @@ class MatchTextPatternBlock(Block):

 class ExtractTextInformationBlock(Block):
     class Input(BlockSchema):
-        text: Any = Field(description="Text to parse")
-        pattern: str = Field(description="Pattern (Regex) to parse")
-        group: int = Field(description="Group number to extract", default=0)
-        case_sensitive: bool = Field(description="Case sensitive match", default=True)
-        dot_all: bool = Field(description="Dot matches all", default=True)
+        text: Any = SchemaField(description="Text to parse")
+        pattern: str = SchemaField(description="Pattern (Regex) to parse")
+        group: int = SchemaField(description="Group number to extract", default=0)
+        case_sensitive: bool = SchemaField(
+            description="Case sensitive match", default=True
+        )
+        dot_all: bool = SchemaField(description="Dot matches all", default=True)

     class Output(BlockSchema):
-        positive: str = Field(description="Extracted text")
-        negative: str = Field(description="Original text")
+        positive: str = SchemaField(description="Extracted text")
+        negative: str = SchemaField(description="Original text")

     def __init__(self):
         super().__init__(
@@ -116,11 +120,15 @@ class ExtractTextInformationBlock(Block):

 class FillTextTemplateBlock(Block):
     class Input(BlockSchema):
-        values: dict[str, Any] = Field(description="Values (dict) to be used in format")
-        format: str = Field(description="Template to format the text using `values`")
+        values: dict[str, Any] = SchemaField(
+            description="Values (dict) to be used in format"
+        )
+        format: str = SchemaField(
+            description="Template to format the text using `values`"
+        )

     class Output(BlockSchema):
-        output: str
+        output: str = SchemaField(description="Formatted text")

     def __init__(self):
         super().__init__(
@@ -155,11 +163,13 @@ class FillTextTemplateBlock(Block):

 class CombineTextsBlock(Block):
     class Input(BlockSchema):
-        input: list[str] = Field(description="Text input to combine")
-        delimiter: str = Field(description="Delimiter to combine texts", default="")
+        input: list[str] = SchemaField(description="Text input to combine")
+        delimiter: str = SchemaField(
+            description="Delimiter to combine texts", default=""
+        )

     class Output(BlockSchema):
-        output: str = Field(description="Combined text")
+        output: str = SchemaField(description="Combined text")

     def __init__(self):
         super().__init__(
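For context on FillTextTemplateBlock: the format/values pair is rendered through the module-level Jinja environment declared at the top of this file. A standalone sketch of the same mechanism:

from jinja2 import BaseLoader, Environment

jinja = Environment(loader=BaseLoader())
template = jinja.from_string("Hello {{ name }}, you have {{ count }} new posts.")
print(template.render({"name": "Ada", "count": 3}))
# -> Hello Ada, you have 3 new posts.
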
autogpt_platform/backend/backend/blocks/text_to_speech_block.py (new file, 103 lines)
@@ -0,0 +1,103 @@
from typing import Any, Literal

from pydantic import SecretStr

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import (
    APIKeyCredentials,
    CredentialsField,
    CredentialsMetaInput,
    SchemaField,
)
from backend.util.request import requests

TEST_CREDENTIALS = APIKeyCredentials(
    id="01234567-89ab-cdef-0123-456789abcdef",
    provider="unreal_speech",
    api_key=SecretStr("mock-unreal-speech-api-key"),
    title="Mock Unreal Speech API key",
    expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
    "provider": TEST_CREDENTIALS.provider,
    "id": TEST_CREDENTIALS.id,
    "type": TEST_CREDENTIALS.type,
    "title": TEST_CREDENTIALS.title,
}


class UnrealTextToSpeechBlock(Block):
    class Input(BlockSchema):
        text: str = SchemaField(
            description="The text to be converted to speech",
            placeholder="Enter the text you want to convert to speech",
        )
        voice_id: str = SchemaField(
            description="The voice ID to use for text-to-speech conversion",
            placeholder="Scarlett",
            default="Scarlett",
        )
        credentials: CredentialsMetaInput[
            Literal["unreal_speech"], Literal["api_key"]
        ] = CredentialsField(
            provider="unreal_speech",
            supported_credential_types={"api_key"},
            description="The Unreal Speech integration can be used with "
            "any API key with sufficient permissions for the blocks it is used on.",
        )

    class Output(BlockSchema):
        mp3_url: str = SchemaField(description="The URL of the generated MP3 file")
        error: str = SchemaField(description="Error message if the API call failed")

    def __init__(self):
        super().__init__(
            id="4ff1ff6d-cc40-4caa-ae69-011daa20c378",
            description="Converts text to speech using the Unreal Speech API",
            categories={BlockCategory.AI, BlockCategory.TEXT},
            input_schema=UnrealTextToSpeechBlock.Input,
            output_schema=UnrealTextToSpeechBlock.Output,
            test_input={
                "text": "This is a test of the text to speech API.",
                "voice_id": "Scarlett",
                "credentials": TEST_CREDENTIALS_INPUT,
            },
            test_output=[("mp3_url", "https://example.com/test.mp3")],
            test_mock={
                "call_unreal_speech_api": lambda *args, **kwargs: {
                    "OutputUri": "https://example.com/test.mp3"
                }
            },
            test_credentials=TEST_CREDENTIALS,
        )

    @staticmethod
    def call_unreal_speech_api(
        api_key: SecretStr, text: str, voice_id: str
    ) -> dict[str, Any]:
        url = "https://api.v7.unrealspeech.com/speech"
        headers = {
            "Authorization": f"Bearer {api_key.get_secret_value()}",
            "Content-Type": "application/json",
        }
        data = {
            "Text": text,
            "VoiceId": voice_id,
            "Bitrate": "192k",
            "Speed": "0",
            "Pitch": "1",
            "TimestampType": "sentence",
        }

        response = requests.post(url, headers=headers, json=data)
        return response.json()

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        api_response = self.call_unreal_speech_api(
            credentials.api_key,
            input_data.text,
            input_data.voice_id,
        )
        yield "mp3_url", api_response["OutputUri"]

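A quick way to smoke-test the new block is through its own fixtures. This is a hedged sketch — the exact invocation convention is inferred from the test_input/test_mock fields above — and with a real key instead of TEST_CREDENTIALS it would perform a live API call:

block = UnrealTextToSpeechBlock()
outputs = dict(
    block.run(
        UnrealTextToSpeechBlock.Input(
            text="This is a test of the text to speech API.",
            voice_id="Scarlett",
            credentials=TEST_CREDENTIALS_INPUT,
        ),
        credentials=TEST_CREDENTIALS,
    )
)
print(outputs["mp3_url"])
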
@@ -3,14 +3,22 @@ from datetime import datetime, timedelta
 from typing import Any, Union

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
+from backend.data.model import SchemaField


 class GetCurrentTimeBlock(Block):
     class Input(BlockSchema):
-        trigger: str
+        trigger: str = SchemaField(
+            description="Trigger any data to output the current time"
+        )
+        format: str = SchemaField(
+            description="Format of the time to output", default="%H:%M:%S"
+        )

     class Output(BlockSchema):
-        time: str
+        time: str = SchemaField(
+            description="Current time in the specified format (default: %H:%M:%S)"
+        )

     def __init__(self):
         super().__init__(
@@ -20,25 +28,38 @@ class GetCurrentTimeBlock(Block):
             input_schema=GetCurrentTimeBlock.Input,
             output_schema=GetCurrentTimeBlock.Output,
             test_input=[
-                {"trigger": "Hello", "format": "{time}"},
+                {"trigger": "Hello"},
+                {"trigger": "Hello", "format": "%H:%M"},
             ],
             test_output=[
                 ("time", lambda _: time.strftime("%H:%M:%S")),
+                ("time", lambda _: time.strftime("%H:%M")),
             ],
         )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        current_time = time.strftime("%H:%M:%S")
+        current_time = time.strftime(input_data.format)
         yield "time", current_time


 class GetCurrentDateBlock(Block):
     class Input(BlockSchema):
-        trigger: str
-        offset: Union[int, str]
+        trigger: str = SchemaField(
+            description="Trigger any data to output the current date"
+        )
+        offset: Union[int, str] = SchemaField(
+            title="Days Offset",
+            description="Offset in days from the current date",
+            default=0,
+        )
+        format: str = SchemaField(
+            description="Format of the date to output", default="%Y-%m-%d"
+        )

     class Output(BlockSchema):
-        date: str
+        date: str = SchemaField(
+            description="Current date in the specified format (default: YYYY-MM-DD)"
+        )

     def __init__(self):
         super().__init__(
@@ -48,7 +69,8 @@ class GetCurrentDateBlock(Block):
             input_schema=GetCurrentDateBlock.Input,
             output_schema=GetCurrentDateBlock.Output,
             test_input=[
-                {"trigger": "Hello", "format": "{date}", "offset": "7"},
+                {"trigger": "Hello", "offset": "7"},
+                {"trigger": "Hello", "offset": "7", "format": "%m/%d/%Y"},
             ],
             test_output=[
                 (
@@ -56,6 +78,12 @@ class GetCurrentDateBlock(Block):
                     lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d"))
                     < timedelta(days=8),  # 7 days difference + 1 day error margin.
                 ),
+                (
+                    "date",
+                    lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y"))
+                    < timedelta(days=8),
+                    # 7 days difference + 1 day error margin.
+                ),
             ],
         )

@@ -65,15 +93,23 @@ class GetCurrentDateBlock(Block):
         except ValueError:
             offset = 0
         current_date = datetime.now() - timedelta(days=offset)
-        yield "date", current_date.strftime("%Y-%m-%d")
+        yield "date", current_date.strftime(input_data.format)


 class GetCurrentDateAndTimeBlock(Block):
     class Input(BlockSchema):
-        trigger: str
+        trigger: str = SchemaField(
+            description="Trigger any data to output the current date and time"
+        )
+        format: str = SchemaField(
+            description="Format of the date and time to output",
+            default="%Y-%m-%d %H:%M:%S",
+        )

     class Output(BlockSchema):
-        date_time: str
+        date_time: str = SchemaField(
+            description="Current date and time in the specified format (default: YYYY-MM-DD HH:MM:SS)"
+        )

     def __init__(self):
         super().__init__(
@@ -83,7 +119,7 @@ class GetCurrentDateAndTimeBlock(Block):
             input_schema=GetCurrentDateAndTimeBlock.Input,
             output_schema=GetCurrentDateAndTimeBlock.Output,
             test_input=[
-                {"trigger": "Hello", "format": "{date_time}"},
+                {"trigger": "Hello"},
             ],
             test_output=[
                 (
@@ -97,20 +133,34 @@ class GetCurrentDateAndTimeBlock(Block):
             )

     def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        current_date_time = time.strftime("%Y-%m-%d %H:%M:%S")
+        current_date_time = time.strftime(input_data.format)
         yield "date_time", current_date_time


 class CountdownTimerBlock(Block):
     class Input(BlockSchema):
-        input_message: Any = "timer finished"
-        seconds: Union[int, str] = 0
-        minutes: Union[int, str] = 0
-        hours: Union[int, str] = 0
-        days: Union[int, str] = 0
+        input_message: Any = SchemaField(
+            advanced=False,
+            description="Message to output after the timer finishes",
+            default="timer finished",
+        )
+        seconds: Union[int, str] = SchemaField(
+            advanced=False, description="Duration in seconds", default=0
+        )
+        minutes: Union[int, str] = SchemaField(
+            advanced=False, description="Duration in minutes", default=0
+        )
+        hours: Union[int, str] = SchemaField(
+            advanced=False, description="Duration in hours", default=0
+        )
+        days: Union[int, str] = SchemaField(
+            advanced=False, description="Duration in days", default=0
+        )

     class Output(BlockSchema):
-        output_message: str
+        output_message: Any = SchemaField(
+            description="Message after the timer finishes"
+        )

     def __init__(self):
         super().__init__(
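CountdownTimerBlock's run() is not part of this hunk, but the four Union[int, str] fields imply a string-to-int normalization before sleeping. A plausible reduction, offered as a sketch only (not the block's actual code):

def total_seconds(seconds, minutes, hours, days) -> int:
    # Each field may arrive as an int or a numeric string; empty -> 0
    s, m, h, d = (int(v or 0) for v in (seconds, minutes, hours, days))
    return s + m * 60 + h * 3600 + d * 86400

assert total_seconds("30", 1, 0, 0) == 90
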
Some files were not shown because too many files have changed in this diff.