Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-04-08 03:00:28 -04:00)

Compare commits: 73 commits (zamilmajdy ... swiftyos/e)
| Author | SHA1 | Date |
|---|---|---|
| | 8cd26bba0e | |
| | 95bc7dbfca | |
| | ff6ad3270e | |
| | 9ab5dd597b | |
| | 324216df31 | |
| | 3408277c0e | |
| | 83d96eb4b5 | |
| | c26a96fc6d | |
| | 1aae4e7474 | |
| | 5981c4d70b | |
| | 8ecadca8a9 | |
| | a7d7192022 | |
| | 95ce2c825f | |
| | b9366c9b28 | |
| | 79948263c8 | |
| | 57223e6343 | |
| | 211e53bf5d | |
| | dc1d5faa5d | |
| | a187e87741 | |
| | 067983eb80 | |
| | e6de8b98f7 | |
| | 607641c574 | |
| | 18dfbf191c | |
| | 841679216c | |
| | 50eac43e1a | |
| | 91445e4760 | |
| | 6127727aeb | |
| | 1bc3041615 | |
| | 9c84dbddca | |
| | b67c2e166b | |
| | 801f3a3a24 | |
| | b9f31a9c44 | |
| | f4d4bb83b0 | |
| | 02618e1a52 | |
| | 8aba4a5d48 | |
| | 6ba9fd9cb4 | |
| | b16c2eed52 | |
| | 5af718c9f5 | |
| | a588cf1dc5 | |
| | 98a1adc397 | |
| | 19728ebc05 | |
| | 3e117aac5d | |
| | 8dabe6c70d | |
| | a04919beca | |
| | c6b22842a4 | |
| | 614f751a90 | |
| | c458bec9c7 | |
| | 040bde3f49 | |
| | 71028d57d7 | |
| | dbf014f936 | |
| | c1e329497c | |
| | c179a49218 | |
| | 942ac0bae4 | |
| | af65058bb7 | |
| | f37d4c2659 | |
| | f9d88d2981 | |
| | 3458b94fcc | |
| | 08cd935a47 | |
| | b2e94611e8 | |
| | 020bf02e48 | |
| | aa6f91039c | |
| | fa97632eae | |
| | fe927c8345 | |
| | fdd6a5d847 | |
| | f60bd07e15 | |
| | 9210d448ee | |
| | a0ecb96958 | |
| | 7eb04663ed | |
| | da2aa34e3e | |
| | bc71eac0ec | |
| | 3cf198eea1 | |
| | e5eadeace4 | |
| | 231775f4d4 | |
24  .github/dependabot.yml (vendored)

```diff
@@ -129,30 +129,6 @@ updates:
           - "minor"
           - "patch"
-
-
-  # Submodules
-  - package-ecosystem: "gitsubmodule"
-    directory: "autogpt_platform/supabase"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 1
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(platform/deps)"
-      prefix-development: "chore(platform/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"
-

   # Docs
   - package-ecosystem: 'pip'
     directory: "docs/"
```
2  .github/workflows/platform-backend-ci.yml (vendored)

```diff
@@ -66,7 +66,7 @@ jobs:
       - name: Setup Supabase
         uses: supabase/setup-cli@v1
         with:
-          version: latest
+          version: 1.178.1

       - id: get_date
         name: Get date
```
2  .github/workflows/platform-frontend-ci.yml (vendored)

```diff
@@ -82,7 +82,7 @@ jobs:

       - name: Copy default supabase .env
         run: |
-          cp ../supabase/docker/.env.example ../.env
+          cp ../.env.example ../.env

       - name: Copy backend .env
         run: |
```
3  .gitmodules (vendored)

```diff
@@ -1,6 +1,3 @@
 [submodule "classic/forge/tests/vcr_cassettes"]
 	path = classic/forge/tests/vcr_cassettes
 	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
-[submodule "autogpt_platform/supabase"]
-	path = autogpt_platform/supabase
-	url = https://github.com/supabase/supabase.git
```
```diff
@@ -20,6 +20,7 @@ Instead, please report them via:
 - Please provide detailed reports with reproducible steps
 - Include the version/commit hash where you discovered the vulnerability
 - Allow us a 90-day security fix window before any public disclosure
+- After a patch is released, allow 30 days for users to update before public disclosure (a total of 120 days max between report and public disclosure)
 - Share any potential mitigations or workarounds if known

 ## Supported Versions
```
123  autogpt_platform/.env.example (new file)

@@ -0,0 +1,123 @@
############
|
||||
# Secrets
|
||||
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
|
||||
############
|
||||
|
||||
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
|
||||
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
|
||||
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
|
||||
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
|
||||
DASHBOARD_USERNAME=supabase
|
||||
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
|
||||
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
||||
VAULT_ENC_KEY=your-encryption-key-32-chars-min
|
||||
|
||||
|
||||
############
|
||||
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
|
||||
############
|
||||
|
||||
POSTGRES_HOST=db
|
||||
POSTGRES_DB=postgres
|
||||
POSTGRES_PORT=5432
|
||||
# default user is postgres
|
||||
|
||||
|
||||
############
|
||||
# Supavisor -- Database pooler
|
||||
############
|
||||
POOLER_PROXY_PORT_TRANSACTION=6543
|
||||
POOLER_DEFAULT_POOL_SIZE=20
|
||||
POOLER_MAX_CLIENT_CONN=100
|
||||
POOLER_TENANT_ID=your-tenant-id
|
||||
|
||||
|
||||
############
|
||||
# API Proxy - Configuration for the Kong Reverse proxy.
|
||||
############
|
||||
|
||||
KONG_HTTP_PORT=8000
|
||||
KONG_HTTPS_PORT=8443
|
||||
|
||||
|
||||
############
|
||||
# API - Configuration for PostgREST.
|
||||
############
|
||||
|
||||
PGRST_DB_SCHEMAS=public,storage,graphql_public
|
||||
|
||||
|
||||
############
|
||||
# Auth - Configuration for the GoTrue authentication server.
|
||||
############
|
||||
|
||||
## General
|
||||
SITE_URL=http://localhost:3000
|
||||
ADDITIONAL_REDIRECT_URLS=
|
||||
JWT_EXPIRY=3600
|
||||
DISABLE_SIGNUP=false
|
||||
API_EXTERNAL_URL=http://localhost:8000
|
||||
|
||||
## Mailer Config
|
||||
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
|
||||
MAILER_URLPATHS_INVITE="/auth/v1/verify"
|
||||
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
|
||||
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
|
||||
|
||||
## Email auth
|
||||
ENABLE_EMAIL_SIGNUP=true
|
||||
ENABLE_EMAIL_AUTOCONFIRM=false
|
||||
SMTP_ADMIN_EMAIL=admin@example.com
|
||||
SMTP_HOST=supabase-mail
|
||||
SMTP_PORT=2500
|
||||
SMTP_USER=fake_mail_user
|
||||
SMTP_PASS=fake_mail_password
|
||||
SMTP_SENDER_NAME=fake_sender
|
||||
ENABLE_ANONYMOUS_USERS=false
|
||||
|
||||
## Phone auth
|
||||
ENABLE_PHONE_SIGNUP=true
|
||||
ENABLE_PHONE_AUTOCONFIRM=true
|
||||
|
||||
|
||||
############
|
||||
# Studio - Configuration for the Dashboard
|
||||
############
|
||||
|
||||
STUDIO_DEFAULT_ORGANIZATION=Default Organization
|
||||
STUDIO_DEFAULT_PROJECT=Default Project
|
||||
|
||||
STUDIO_PORT=3000
|
||||
# replace if you intend to use Studio outside of localhost
|
||||
SUPABASE_PUBLIC_URL=http://localhost:8000
|
||||
|
||||
# Enable webp support
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION=true
|
||||
|
||||
# Add your OpenAI API key to enable SQL Editor Assistant
|
||||
OPENAI_API_KEY=
|
||||
|
||||
|
||||
############
|
||||
# Functions - Configuration for Functions
|
||||
############
|
||||
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
|
||||
FUNCTIONS_VERIFY_JWT=false
|
||||
|
||||
|
||||
############
|
||||
# Logs - Configuration for Logflare
|
||||
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
|
||||
############
|
||||
|
||||
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Change vector.toml sinks to reflect this change
|
||||
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Docker socket location - this value will differ depending on your OS
|
||||
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
|
||||
|
||||
# Google Cloud Project details
|
||||
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
|
||||
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
|
||||
````diff
@@ -22,35 +22,29 @@ To run the AutoGPT Platform, follow these steps:

 2. Run the following command:
    ```
-   git submodule update --init --recursive --progress
+   cp .env.example .env
    ```
-   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
+   This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.

 3. Run the following command:
    ```
-   cp supabase/docker/.env.example .env
-   ```
-   This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.
-
-4. Run the following command:
-   ```
    docker compose up -d
    ```
    This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.

-5. Navigate to `frontend` within the `autogpt_platform` directory:
+4. Navigate to `frontend` within the `autogpt_platform` directory:
    ```
    cd frontend
    ```
    You will need to run your frontend application separately on your local machine.

-6. Run the following command:
+5. Run the following command:
    ```
    cp .env.example .env.local
    ```
    This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.

-7. Run the following command:
+6. Run the following command:
    ```
    npm install
    npm run dev
@@ -61,7 +55,7 @@ To run the AutoGPT Platform, follow these steps:
    yarn install && yarn dev
    ```

-8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
+7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.

 ### Docker Compose Commands
````
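Taken together, the updated quickstart in this hunk boils down to roughly the following commands, run from `autogpt_platform/` (step numbers per the new version above; this is a convenience summary, not part of the diff):

```sh
cp .env.example .env        # step 2: platform env
docker compose up -d        # step 3: start the backend services
cd frontend                 # step 4
cp .env.example .env.local  # step 5: frontend env
npm install && npm run dev  # step 6 (or: yarn install && yarn dev)
```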
40  autogpt_platform/autogpt_libs/poetry.lock (generated)

@@ -1476,30 +1476,30 @@ pyasn1 = ">=0.1.3"
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.9.6"
|
||||
version = "0.9.10"
|
||||
description = "An extremely fast Python linter and code formatter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba"},
|
||||
{file = "ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504"},
|
||||
{file = "ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5"},
|
||||
{file = "ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217"},
|
||||
{file = "ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6"},
|
||||
{file = "ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897"},
|
||||
{file = "ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08"},
|
||||
{file = "ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656"},
|
||||
{file = "ruff-0.9.6-py3-none-win32.whl", hash = "sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d"},
|
||||
{file = "ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa"},
|
||||
{file = "ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a"},
|
||||
{file = "ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9"},
|
||||
{file = "ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d"},
|
||||
{file = "ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d"},
|
||||
{file = "ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1"},
|
||||
{file = "ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c"},
|
||||
{file = "ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43"},
|
||||
{file = "ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c"},
|
||||
{file = "ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5"},
|
||||
{file = "ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8"},
|
||||
{file = "ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029"},
|
||||
{file = "ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1"},
|
||||
{file = "ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69"},
|
||||
{file = "ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1929,4 +1929,4 @@ type = ["pytest-mypy"]
|
||||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10,<4.0"
|
||||
content-hash = "f5cd0d1dafeb2b5c97d0ef27bef8a2235d4a1f54e3c60583d05ef582ac49c0e6"
|
||||
content-hash = "931772287f71c539575d601e6398423bf68e09ca87ae1a144057c7f5707cf978"
|
||||
|
||||
@@ -21,7 +21,7 @@ supabase = "^2.13.0"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
redis = "^5.2.1"
|
||||
ruff = "^0.9.6"
|
||||
ruff = "^0.9.10"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
|
||||
```diff
@@ -2,13 +2,23 @@ DB_USER=postgres
 DB_PASS=your-super-secret-and-long-postgres-password
 DB_NAME=postgres
 DB_PORT=5432
-DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@localhost:${DB_PORT}/${DB_NAME}?connect_timeout=60&schema=platform"
+DB_HOST=localhost
+DB_CONNECTION_LIMIT=12
+DB_CONNECT_TIMEOUT=60
+DB_POOL_TIMEOUT=300
+DB_SCHEMA=platform
+DATABASE_URL="postgresql://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}?schema=${DB_SCHEMA}&connect_timeout=${DB_CONNECT_TIMEOUT}"
 PRISMA_SCHEMA="postgres/schema.prisma"

 # EXECUTOR
 NUM_GRAPH_WORKERS=10
 NUM_NODE_WORKERS=3

 BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]

 # generate using `from cryptography.fernet import Fernet;Fernet.generate_key().decode()`
 ENCRYPTION_KEY='dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw='
 UNSUBSCRIBE_SECRET_KEY = 'HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio='

 REDIS_HOST=localhost
 REDIS_PORT=6379
```
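For reference, with the example values above left untouched, the substituted connection string works out to:

```
postgresql://postgres:your-super-secret-and-long-postgres-password@localhost:5432/postgres?schema=platform&connect_timeout=60
```

The comment above `ENCRYPTION_KEY` gives the generator one-liner; a slightly expanded sketch of it (assuming `UNSUBSCRIBE_SECRET_KEY` uses the same Fernet format, which its value suggests but the file does not state):

```python
# Sketch: generate fresh secrets for the .env file shown above.
from cryptography.fernet import Fernet

print(f"ENCRYPTION_KEY='{Fernet.generate_key().decode()}'")
# Assumption: UNSUBSCRIBE_SECRET_KEY is also a Fernet key.
print(f"UNSUBSCRIBE_SECRET_KEY='{Fernet.generate_key().decode()}'")
```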
```diff
@@ -163,6 +173,9 @@ EXA_API_KEY=
 # E2B
 E2B_API_KEY=

+# Example API Key
+EXAMPLE_API_KEY=
+
 # Mem0
 MEM0_API_KEY=
```
@@ -1,75 +1 @@

# AutoGPT Agent Server Advanced Setup

This guide walks you through a dockerized setup with an external DB (Postgres).

## Setup

We use Poetry to manage dependencies. To set up the project, follow these steps inside this directory:

0. Install Poetry
   ```sh
   pip install poetry
   ```

1. Configure Poetry to use .venv in your project directory
   ```sh
   poetry config virtualenvs.in-project true
   ```

2. Enter the poetry shell

   ```sh
   poetry shell
   ```

3. Install dependencies

   ```sh
   poetry install
   ```

4. Copy .env.example to .env

   ```sh
   cp .env.example .env
   ```

5. Generate the Prisma client

   ```sh
   poetry run prisma generate
   ```

   > In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
   >
   > ```sh
   > pip uninstall prisma
   > ```
   >
   > Then run the generation again. The path *should* look something like this:
   > `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`

6. Run the Postgres database from the `autogpt_platform` folder

   ```sh
   cd autogpt_platform/
   docker compose up -d
   ```

7. Run the migrations (from the backend folder)

   ```sh
   cd ../backend
   prisma migrate deploy
   ```

## Running The Server

### Starting the server directly

Run the following command:

```sh
poetry run app
```

[Advanced Setup (Dev Branch)](https://dev-docs.agpt.co/platform/advanced_setup/#autogpt_agent_server_advanced_set_up)
@@ -1,210 +1 @@

# AutoGPT Agent Server

This is an initial project for creating the next generation of agent execution, which is an AutoGPT agent server.
The agent server will enable the creation of composite multi-agent systems that utilize AutoGPT agents and other non-agent components as their primitives.

## Docs

You can access the docs for the [AutoGPT Agent Server here](https://docs.agpt.co/server/setup).

## Setup

We use Poetry to manage dependencies. To set up the project, follow these steps inside this directory:

0. Install Poetry
   ```sh
   pip install poetry
   ```

1. Configure Poetry to use .venv in your project directory
   ```sh
   poetry config virtualenvs.in-project true
   ```

2. Enter the poetry shell

   ```sh
   poetry shell
   ```

3. Install dependencies

   ```sh
   poetry install
   ```

4. Copy .env.example to .env

   ```sh
   cp .env.example .env
   ```

5. Generate the Prisma client

   ```sh
   poetry run prisma generate
   ```

   > In case Prisma generates the client for the global Python installation instead of the virtual environment, the current mitigation is to just uninstall the global Prisma package:
   >
   > ```sh
   > pip uninstall prisma
   > ```
   >
   > Then run the generation again. The path *should* look something like this:
   > `<some path>/pypoetry/virtualenvs/backend-TQIRSwR6-py3.12/bin/prisma`

6. Migrate the database. Be careful because this deletes current data in the database.

   ```sh
   docker compose up db -d
   poetry run prisma migrate deploy
   ```

## Running The Server

### Starting the server without Docker

To run the server locally, start in the autogpt_platform folder:

```sh
cd ..
```

Run the following command to run the database in Docker but the application locally:

```sh
docker compose --profile local up deps --build --detach
cd backend
poetry run app
```

### Starting the server with Docker

Run the following command to build the dockerfiles:

```sh
docker compose build
```

Run the following command to run the app:

```sh
docker compose up
```

Run the following to automatically rebuild when code changes, in another terminal:

```sh
docker compose watch
```

Run the following command to shut down:

```sh
docker compose down
```

If you run into issues with dangling orphans, try:

```sh
docker compose down --volumes --remove-orphans && docker compose up --force-recreate --renew-anon-volumes --remove-orphans
```

## Testing

To run the tests:

```sh
poetry run test
```

## Development

### Formatting & Linting
An auto-formatter and linter are set up in the project. To run them:

Install:
```sh
poetry install --with dev
```

Format the code:
```sh
poetry run format
```

Lint the code:
```sh
poetry run lint
```

## Project Outline

The current project has the following main modules:

### **blocks**

This module stores all the Agent Blocks, which are reusable components used to build a graph that represents the agent's behavior.

### **data**

This module stores the logical model that is persisted in the database.
It abstracts the database operations into functions that can be called by the service layer.
Any code that interacts with Prisma objects or the database should reside in this module.
The main models are:
* `block`: anything related to the blocks used in the graph
* `execution`: anything related to graph execution
* `graph`: anything related to the graph, its nodes, and their relations

### **execution**

This module stores the business logic of executing the graph.
It currently has the following main modules:
* `manager`: A service that consumes the graph execution queue and executes the graph. It contains both pieces of logic.
* `scheduler`: A service that triggers scheduled graph executions based on a cron expression. It pushes an execution request to the manager.

### **server**

This module stores the logic for the server API.
It contains all the logic used for the API that allows the client to create, execute, and monitor the graph and its executions.
This API service interacts with other services like those defined in `manager` and `scheduler`.

### **utils**

This module stores utility functions that are used across the project.
Currently, it has two main modules:
* `process`: A module that contains the logic to spawn a new process.
* `service`: A module that serves as a parent class for all the services in the project.

## Service Communication

Currently, there are only 3 active services:

- AgentServer (the API, defined in `server.py`)
- ExecutionManager (the executor, defined in `manager.py`)
- ExecutionScheduler (the scheduler, defined in `scheduler.py`)

The services run in independent Python processes and communicate through IPC.
A communication layer (`service.py`) decouples the communication library from the implementation.

Currently, the IPC is done using Pyro5 and abstracted in a way that allows a function decorated with `@expose` to be called from a different process.

By default, the daemons run on the following ports:

- Execution Manager Daemon: 8002
- Execution Scheduler Daemon: 8003
- Rest Server Daemon: 8004

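To make the `@expose` pattern concrete, here is a minimal sketch of a Pyro5 service being served in one process and called from another. The class, method, object name, and port are illustrative only; the real wiring lives in `service.py` and the actual service classes.

```python
# Minimal Pyro5 sketch of the IPC pattern described above (illustrative names,
# not the actual service.py implementation).
import Pyro5.api


@Pyro5.api.expose
class ExampleService:
    def add_execution(self, graph_id: str) -> str:
        # In the real services this would enqueue a graph execution.
        return f"queued {graph_id}"


def serve():
    # Each service runs its own daemon in its own process (cf. ports 8002-8004 above).
    daemon = Pyro5.api.Daemon(port=8002)
    uri = daemon.register(ExampleService, objectId="example_service")
    print("Serving at", uri)
    daemon.requestLoop()


def call_from_another_process():
    # A client in a different process calls the exposed method through a proxy.
    proxy = Pyro5.api.Proxy("PYRO:example_service@localhost:8002")
    print(proxy.add_execution("some-graph-id"))
```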
## Adding a New Agent Block

To add a new agent block, you need to create a new class that inherits from `Block` and provides the following information (a minimal sketch follows the list):
* All the block code should live in the `blocks` (`backend.blocks`) module.
* `input_schema`: the schema of the input data, represented by a Pydantic object.
* `output_schema`: the schema of the output data, represented by a Pydantic object.
* `run` method: the main logic of the block.
* `test_input` & `test_output`: the sample input and output data for the block, which will be used to auto-test the block.
* You can mock the functions declared in the block using the `test_mock` field for your unit tests.
* Once you finish creating the block, you can test it by running `poetry run pytest -s test/block/test_block.py`.

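A minimal sketch of such a block, based only on the fields listed above; the class name, field names, and placeholder ID are made up for illustration, and the full `ExampleBlock` shown further down this page is the actual reference implementation:

```python
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField


class EchoBlock(Block):
    """Illustrative block: echoes its input text back as output."""

    class Input(BlockSchema):
        text: str = SchemaField(description="Text to echo back")

    class Output(BlockSchema):
        text: str = SchemaField(description="The same text, unchanged")

    def __init__(self):
        super().__init__(
            id="00000000-0000-0000-0000-000000000000",  # placeholder; use a fresh UUID
            description="Echoes the input text",
            categories={BlockCategory.BASIC},
            input_schema=EchoBlock.Input,
            output_schema=EchoBlock.Output,
            test_input={"text": "hello"},
            test_output=[("text", "hello")],
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        # Outputs are yielded as (output_name, value) tuples.
        yield "text", input_data.text
```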
[Getting Started (Released)](https://docs.agpt.co/platform/getting-started/#autogpt_agent_server)
```diff
@@ -32,7 +32,7 @@ def main(**kwargs):
     Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
     """

-    from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
+    from backend.executor import DatabaseManager, ExecutionManager, Scheduler
     from backend.notifications import NotificationManager
     from backend.server.rest_api import AgentServer
     from backend.server.ws_api import WebsocketServer
@@ -40,7 +40,7 @@ def main(**kwargs):
     run_processes(
         DatabaseManager(),
         ExecutionManager(),
-        ExecutionScheduler(),
+        Scheduler(),
         NotificationManager(),
         WebsocketServer(),
         AgentServer(),
```
```diff
@@ -3,6 +3,7 @@ from typing import Any, List

 from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
 from backend.data.model import SchemaField
+from backend.util import json
 from backend.util.file import MediaFile, store_media_file
 from backend.util.mock import MockObject
 from backend.util.text import TextFormatter
@@ -153,6 +154,9 @@ class FindInDictionaryBlock(Block):
         obj = input_data.input
         key = input_data.key

+        if isinstance(obj, str):
+            obj = json.loads(obj)
+
         if isinstance(obj, dict) and key in obj:
             yield "output", obj[key]
         elif isinstance(obj, list) and isinstance(key, int) and 0 <= key < len(obj):

@@ -51,6 +51,7 @@ class ExaContentsBlock(Block):
             description="List of document contents",
             default=[],
         )
+        error: str = SchemaField(description="Error message if the request failed")

     def __init__(self):
         super().__init__(
```
137  autogpt_platform/backend/backend/blocks/example/_api.py (new file)

@@ -0,0 +1,137 @@
"""
|
||||
API module for Example API integration.
|
||||
|
||||
This module provides an example of how to create a client for an API.
|
||||
"""
|
||||
|
||||
# We also have a Json Wrapper library available in backend.util.json
|
||||
from json import JSONDecodeError
|
||||
from typing import Any, Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.model import APIKeyCredentials
|
||||
|
||||
# This is a wrapper around the requests library that is used to make API requests.
|
||||
from backend.util.request import Requests
|
||||
|
||||
|
||||
class ExampleAPIException(Exception):
|
||||
def __init__(self, message: str, status_code: int):
|
||||
super().__init__(message)
|
||||
self.status_code = status_code
|
||||
|
||||
|
||||
class CreateResourceResponse(BaseModel):
|
||||
message: str
|
||||
is_funny: bool
|
||||
|
||||
|
||||
class GetResourceResponse(BaseModel):
|
||||
message: str
|
||||
is_funny: bool
|
||||
|
||||
|
||||
class ExampleClient:
|
||||
"""Client for the Example API"""
|
||||
|
||||
API_BASE_URL = "https://api.example.com/v1"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
credentials: Optional[APIKeyCredentials] = None,
|
||||
custom_requests: Optional[Requests] = None,
|
||||
):
|
||||
if custom_requests:
|
||||
self._requests = custom_requests
|
||||
else:
|
||||
headers: dict[str, str] = {
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
if credentials:
|
||||
headers["Authorization"] = credentials.auth_header()
|
||||
|
||||
self._requests = Requests(
|
||||
extra_headers=headers,
|
||||
raise_for_status=False,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _handle_response(response) -> Any:
|
||||
"""
|
||||
Handles API response and checks for errors.
|
||||
|
||||
Args:
|
||||
response: The response object from the request.
|
||||
|
||||
Returns:
|
||||
The parsed JSON response data.
|
||||
|
||||
Raises:
|
||||
ExampleAPIException: If the API request fails.
|
||||
"""
|
||||
if not response.ok:
|
||||
try:
|
||||
error_data = response.json()
|
||||
error_message = error_data.get("error", {}).get("message", "")
|
||||
except JSONDecodeError:
|
||||
error_message = response.text
|
||||
|
||||
raise ExampleAPIException(
|
||||
f"Example API request failed ({response.status_code}): {error_message}",
|
||||
response.status_code,
|
||||
)
|
||||
|
||||
response_data = response.json()
|
||||
if "errors" in response_data:
|
||||
# This is an example error and needs to be
|
||||
# replaced with how the real API returns errors
|
||||
error_messages = [
|
||||
error.get("message", "") for error in response_data["errors"]
|
||||
]
|
||||
raise ExampleAPIException(
|
||||
f"Example API returned errors: {', '.join(error_messages)}",
|
||||
response.status_code,
|
||||
)
|
||||
|
||||
return response_data
|
||||
|
||||
def get_resource(self, resource_id: str) -> GetResourceResponse:
|
||||
"""
|
||||
Fetches a resource from the Example API.
|
||||
|
||||
Args:
|
||||
resource_id: The ID of the resource to fetch.
|
||||
|
||||
Returns:
|
||||
The resource data as a GetResourceResponse object.
|
||||
|
||||
Raises:
|
||||
ExampleAPIException: If the API request fails.
|
||||
"""
|
||||
try:
|
||||
response = self._requests.get(
|
||||
f"{self.API_BASE_URL}/resources/{resource_id}"
|
||||
)
|
||||
return GetResourceResponse(**self._handle_response(response))
|
||||
except Exception as e:
|
||||
raise ExampleAPIException(f"Failed to get resource: {str(e)}", 500)
|
||||
|
||||
def create_resource(self, data: dict) -> CreateResourceResponse:
|
||||
"""
|
||||
Creates a new resource via the Example API.
|
||||
|
||||
Args:
|
||||
data: The resource data to create.
|
||||
|
||||
Returns:
|
||||
The created resource data as a CreateResourceResponse object.
|
||||
|
||||
Raises:
|
||||
ExampleAPIException: If the API request fails.
|
||||
"""
|
||||
try:
|
||||
response = self._requests.post(f"{self.API_BASE_URL}/resources", json=data)
|
||||
return CreateResourceResponse(**self._handle_response(response))
|
||||
except Exception as e:
|
||||
raise ExampleAPIException(f"Failed to create resource: {str(e)}", 500)
|
||||
37  autogpt_platform/backend/backend/blocks/example/_auth.py (new file)

@@ -0,0 +1,37 @@
"""
|
||||
Authentication module for Example API integration.
|
||||
|
||||
This module provides credential types and test credentials for the Example API integration.
|
||||
It defines the structure for API key credentials used to authenticate with the Example API
|
||||
and provides mock credentials for testing purposes.
|
||||
"""
|
||||
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import SecretStr
|
||||
|
||||
from backend.data.model import APIKeyCredentials, CredentialsMetaInput
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
# Define the type of credentials input expected for Example API
|
||||
ExampleCredentialsInput = CredentialsMetaInput[
|
||||
Literal[ProviderName.EXAMPLE_PROVIDER], Literal["api_key"]
|
||||
]
|
||||
|
||||
|
||||
# Mock credentials for testing Example API integration
|
||||
TEST_CREDENTIALS = APIKeyCredentials(
|
||||
id="9191c4f0-498f-4235-a79c-59c0e37454d4",
|
||||
provider="example-provider",
|
||||
api_key=SecretStr("mock-example-api-key"),
|
||||
title="Mock Example API key",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
# Dictionary representation of test credentials for input fields
|
||||
TEST_CREDENTIALS_INPUT = {
|
||||
"provider": TEST_CREDENTIALS.provider,
|
||||
"id": TEST_CREDENTIALS.id,
|
||||
"type": TEST_CREDENTIALS.type,
|
||||
"title": TEST_CREDENTIALS.title,
|
||||
}
|
||||
154  autogpt_platform/backend/backend/blocks/example/example.py (new file)

@@ -0,0 +1,154 @@
import logging
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import APIKeyCredentials, CredentialsField, SchemaField
|
||||
|
||||
from ._api import ExampleClient
|
||||
from ._auth import TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT, ExampleCredentialsInput
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GreetingMessage(BaseModel):
|
||||
message: str
|
||||
is_funny: bool
|
||||
|
||||
|
||||
class ExampleBlock(Block):
|
||||
|
||||
class Input(BlockSchema):
|
||||
name: str = SchemaField(
|
||||
description="The name of the example block", placeholder="Enter a name"
|
||||
)
|
||||
greetings: list[str] = SchemaField(
|
||||
description="The greetings to display", default=["Hello", "Hi", "Hey"]
|
||||
)
|
||||
is_funny: bool = SchemaField(
|
||||
description="Whether the block is funny",
|
||||
placeholder="True",
|
||||
default=True,
|
||||
# Advanced fields are moved to the "Advanced" dropdown in the UI
|
||||
advanced=True,
|
||||
)
|
||||
greeting_context: str = SchemaField(
|
||||
description="The context of the greeting",
|
||||
placeholder="Enter a context",
|
||||
default="The user is looking for an inspirational greeting",
|
||||
# Hidden fields are not shown in the UI at all
|
||||
hidden=True,
|
||||
)
|
||||
# Only if the block needs credentials
|
||||
credentials: ExampleCredentialsInput = CredentialsField(
|
||||
description="The credentials for the example block"
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
response: GreetingMessage = SchemaField(
|
||||
description="The response object generated by the example block."
|
||||
)
|
||||
all_responses: list[GreetingMessage] = SchemaField(
|
||||
description="All the responses from the example block."
|
||||
)
|
||||
greeting_count: int = SchemaField(
|
||||
description="The number of greetings in the input."
|
||||
)
|
||||
error: str = SchemaField(description="The error from the example block")
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
# The unique identifier for the block, this value will be persisted in the DB.
|
||||
# It should be unique and constant across the application run.
|
||||
# Use the UUID format for the ID.
|
||||
id="380694d5-3b2e-4130-bced-b43752b70de9",
|
||||
# The description of the block, explaining what the block does.
|
||||
description="The example block",
|
||||
# The set of categories that the block belongs to.
|
||||
# Each category is an instance of BlockCategory Enum.
|
||||
categories={BlockCategory.BASIC},
|
||||
# The schema, defined as a Pydantic model, for the input data.
|
||||
input_schema=ExampleBlock.Input,
|
||||
# The schema, defined as a Pydantic model, for the output data.
|
||||
output_schema=ExampleBlock.Output,
|
||||
# The list or single sample input data for the block, for testing.
|
||||
# This is an instance of the Input schema with sample values.
|
||||
test_input={
|
||||
"name": "Craig",
|
||||
"greetings": ["Hello", "Hi", "Hey"],
|
||||
"is_funny": True,
|
||||
"credentials": TEST_CREDENTIALS_INPUT,
|
||||
},
|
||||
# The list or single expected output if the test_input is run.
|
||||
# Each output is a tuple of (output_name, output_data).
|
||||
test_output=[
|
||||
("response", GreetingMessage(message="Hello, world!", is_funny=True)),
|
||||
(
|
||||
"response",
|
||||
GreetingMessage(message="Hello, world!", is_funny=True),
|
||||
), # We mock the function
|
||||
(
|
||||
"response",
|
||||
GreetingMessage(message="Hello, world!", is_funny=True),
|
||||
), # We mock the function
|
||||
(
|
||||
"all_responses",
|
||||
[
|
||||
GreetingMessage(message="Hello, world!", is_funny=True),
|
||||
GreetingMessage(message="Hello, world!", is_funny=True),
|
||||
GreetingMessage(message="Hello, world!", is_funny=True),
|
||||
],
|
||||
),
|
||||
("greeting_count", 3),
|
||||
],
|
||||
# Function names on the block implementation to mock on test run.
|
||||
# Each mock is a dictionary with function names as keys and mock implementations as values.
|
||||
test_mock={
|
||||
"my_function_that_can_be_mocked": lambda *args, **kwargs: GreetingMessage(
|
||||
message="Hello, world!", is_funny=True
|
||||
)
|
||||
},
|
||||
# The credentials required for testing the block.
|
||||
# This is an instance of APIKeyCredentials with sample values.
|
||||
test_credentials=TEST_CREDENTIALS,
|
||||
)
|
||||
|
||||
def my_function_that_can_be_mocked(
|
||||
self, name: str, credentials: APIKeyCredentials
|
||||
) -> GreetingMessage:
|
||||
logger.info("my_function_that_can_be_mocked called with input: %s", name)
|
||||
|
||||
# Use the ExampleClient from _api.py to make an API call
|
||||
client = ExampleClient(credentials=credentials)
|
||||
|
||||
# Create a sample resource using the client
|
||||
resource_data = {"name": name, "type": "greeting"}
|
||||
# If your API response object matches the return type of the function,
|
||||
# there is no need to convert the object. In this case we have a different
|
||||
# object type for the response and the return type of the function.
|
||||
return GreetingMessage(**client.create_resource(resource_data).model_dump())
|
||||
|
||||
def run(
|
||||
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
|
||||
) -> BlockOutput:
|
||||
"""
|
||||
The run function implements the block's core logic. It processes the input_data
|
||||
and yields the block's output.
|
||||
|
||||
In addition to credentials, the following parameters can be specified:
|
||||
graph_id: The ID of the graph containing this block.
|
||||
node_id: The ID of this block's node in the graph.
|
||||
graph_exec_id: The ID of the current graph execution.
|
||||
node_exec_id: The ID of the current node execution.
|
||||
user_id: The ID of the user executing the block.
|
||||
"""
|
||||
rtn_all_responses: list[GreetingMessage] = []
|
||||
# Here we demonstrate best practice for blocks that need to yield multiple items.
|
||||
# We yield each item from the list to allow for operations on each element.
|
||||
# We also yield the complete list for situations when the full list is needed.
|
||||
for greeting in input_data.greetings:
|
||||
message = self.my_function_that_can_be_mocked(greeting, credentials)
|
||||
rtn_all_responses.append(message)
|
||||
yield "response", message
|
||||
yield "all_responses", rtn_all_responses
|
||||
yield "greeting_count", len(input_data.greetings)
|
||||
65  autogpt_platform/backend/backend/blocks/example/triggers.py (new file)

@@ -0,0 +1,65 @@
import logging
|
||||
|
||||
from backend.data.block import (
|
||||
Block,
|
||||
BlockCategory,
|
||||
BlockManualWebhookConfig,
|
||||
BlockOutput,
|
||||
BlockSchema,
|
||||
)
|
||||
from backend.data.model import SchemaField
|
||||
from backend.integrations.webhooks.example import ExampleWebhookEventType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExampleTriggerBlock(Block):
|
||||
"""
|
||||
A trigger block that is activated by an external webhook event.
|
||||
|
||||
Unlike standard blocks that are manually executed, trigger blocks are automatically
|
||||
activated when a webhook event is received from the specified provider.
|
||||
"""
|
||||
|
||||
class Input(BlockSchema):
|
||||
# The payload field is hidden because it's automatically populated by the webhook
|
||||
# system rather than being manually entered by the user
|
||||
payload: dict = SchemaField(hidden=True)
|
||||
|
||||
class Output(BlockSchema):
|
||||
event_data: dict = SchemaField(
|
||||
description="The contents of the example webhook event."
|
||||
)
|
||||
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
id="7c5933ce-d60c-42dd-9c4e-db82496474a3",
|
||||
description="This block will output the contents of an example webhook event.",
|
||||
categories={BlockCategory.BASIC},
|
||||
input_schema=ExampleTriggerBlock.Input,
|
||||
output_schema=ExampleTriggerBlock.Output,
|
||||
# The webhook_config is a key difference from standard blocks
|
||||
# It defines which external service can trigger this block and what type of events it responds to
|
||||
webhook_config=BlockManualWebhookConfig(
|
||||
provider="example_provider", # The external service that will send webhook events
|
||||
webhook_type=ExampleWebhookEventType.EXAMPLE_EVENT, # The specific event type this block responds to
|
||||
),
|
||||
# Test input for trigger blocks should mimic the payload structure that would be received from the webhook
|
||||
test_input=[
|
||||
{
|
||||
"payload": {
|
||||
"event_type": "example",
|
||||
"data": "Sample webhook data",
|
||||
}
|
||||
}
|
||||
],
|
||||
test_output=[
|
||||
("event_data", {"event_type": "example", "data": "Sample webhook data"})
|
||||
],
|
||||
)
|
||||
|
||||
def run(self, input_data: Input, **kwargs) -> BlockOutput:
|
||||
# For trigger blocks, the run method is called automatically when a webhook event is received
|
||||
# The payload from the webhook is passed in as input_data.payload
|
||||
logger.info("Example trigger block run with payload: %s", input_data.payload)
|
||||
yield "event_data", input_data.payload
|
||||
@@ -8,6 +8,7 @@ from pydantic import BaseModel
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.settings import Settings
|
||||
|
||||
from ._auth import (
|
||||
GOOGLE_OAUTH_IS_CONFIGURED,
|
||||
@@ -150,8 +151,8 @@ class GmailReadBlock(Block):
|
||||
else None
|
||||
),
|
||||
token_uri="https://oauth2.googleapis.com/token",
|
||||
client_id=kwargs.get("client_id"),
|
||||
client_secret=kwargs.get("client_secret"),
|
||||
client_id=Settings().secrets.google_client_id,
|
||||
client_secret=Settings().secrets.google_client_secret,
|
||||
scopes=credentials.scopes,
|
||||
)
|
||||
return build("gmail", "v1", credentials=creds)
|
||||
|
||||
@@ -3,6 +3,7 @@ from googleapiclient.discovery import build
|
||||
|
||||
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
|
||||
from backend.data.model import SchemaField
|
||||
from backend.util.settings import Settings
|
||||
|
||||
from ._auth import (
|
||||
GOOGLE_OAUTH_IS_CONFIGURED,
|
||||
@@ -86,8 +87,8 @@ class GoogleSheetsReadBlock(Block):
|
||||
else None
|
||||
),
|
||||
token_uri="https://oauth2.googleapis.com/token",
|
||||
client_id=kwargs.get("client_id"),
|
||||
client_secret=kwargs.get("client_secret"),
|
||||
client_id=Settings().secrets.google_client_id,
|
||||
client_secret=Settings().secrets.google_client_secret,
|
||||
scopes=credentials.scopes,
|
||||
)
|
||||
return build("sheets", "v4", credentials=creds)
|
||||
|
||||
@@ -186,7 +186,7 @@ class SmartDecisionMakerBlock(Block):
|
||||
@classmethod
|
||||
def get_missing_links(cls, data: BlockInput, links: list["Link"]) -> set[str]:
|
||||
# conversation_history & last_tool_output validation is handled differently
|
||||
return super().get_missing_links(
|
||||
missing_links = super().get_missing_links(
|
||||
data,
|
||||
[
|
||||
link
|
||||
@@ -196,6 +196,19 @@ class SmartDecisionMakerBlock(Block):
|
||||
],
|
||||
)
|
||||
|
||||
# Avoid executing the block if the last_tool_output is connected to a static
|
||||
# link, like StoreValueBlock or AgentInputBlock.
|
||||
if any(link.sink_name == "conversation_history" for link in links) and any(
|
||||
link.sink_name == "last_tool_output" and link.is_static
|
||||
for link in links
|
||||
):
|
||||
raise ValueError(
|
||||
"Last Tool Output can't be connected to a static (dashed line) "
|
||||
"link like the output of `StoreValue` or `AgentInput` block"
|
||||
)
|
||||
|
||||
return missing_links
|
||||
|
||||
@classmethod
|
||||
def get_missing_input(cls, data: BlockInput) -> set[str]:
|
||||
if missing_input := super().get_missing_input(data):
|
||||
|
||||
@@ -156,6 +156,10 @@ class CountdownTimerBlock(Block):
|
||||
days: Union[int, str] = SchemaField(
|
||||
advanced=False, description="Duration in days", default=0
|
||||
)
|
||||
repeat: int = SchemaField(
|
||||
description="Number of times to repeat the timer",
|
||||
default=1,
|
||||
)
|
||||
|
||||
class Output(BlockSchema):
|
||||
output_message: Any = SchemaField(
|
||||
@@ -187,5 +191,6 @@ class CountdownTimerBlock(Block):
|
||||
|
||||
total_seconds = seconds + minutes * 60 + hours * 3600 + days * 86400
|
||||
|
||||
time.sleep(total_seconds)
|
||||
yield "output_message", input_data.input_message
|
||||
for _ in range(input_data.repeat):
|
||||
time.sleep(total_seconds)
|
||||
yield "output_message", input_data.input_message
|
||||
|
||||
@@ -2,6 +2,7 @@ from typing import Type
|
||||
|
||||
from backend.blocks.ai_music_generator import AIMusicGeneratorBlock
|
||||
from backend.blocks.ai_shortform_video_block import AIShortformVideoCreatorBlock
|
||||
from backend.blocks.example.example import ExampleBlock
|
||||
from backend.blocks.ideogram import IdeogramModelBlock
|
||||
from backend.blocks.jina.embeddings import JinaEmbeddingBlock
|
||||
from backend.blocks.jina.search import ExtractWebsiteContentBlock, SearchTheWebBlock
|
||||
@@ -23,6 +24,7 @@ from backend.data.cost import BlockCost, BlockCostType
|
||||
from backend.integrations.credentials_store import (
|
||||
anthropic_credentials,
|
||||
did_credentials,
|
||||
example_credentials,
|
||||
groq_credentials,
|
||||
ideogram_credentials,
|
||||
jina_credentials,
|
||||
@@ -267,4 +269,16 @@ BLOCK_COSTS: dict[Type[Block], list[BlockCost]] = {
|
||||
)
|
||||
],
|
||||
SmartDecisionMakerBlock: LLM_COST,
|
||||
ExampleBlock: [
|
||||
BlockCost(
|
||||
cost_amount=1,
|
||||
cost_filter={
|
||||
"credentials": {
|
||||
"id": example_credentials.id,
|
||||
"provider": example_credentials.provider,
|
||||
"type": example_credentials.type,
|
||||
}
|
||||
},
|
||||
)
|
||||
],
|
||||
}
|
||||
|
||||
@@ -39,6 +39,7 @@ from backend.util.settings import Settings
|
||||
settings = Settings()
|
||||
stripe.api_key = settings.secrets.stripe_api_key
|
||||
logger = logging.getLogger(__name__)
|
||||
base_url = settings.config.frontend_base_url or settings.config.platform_base_url
|
||||
|
||||
|
||||
class UserCreditBase(ABC):
|
||||
@@ -185,6 +186,14 @@ class UserCreditBase(ABC):
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
async def create_billing_portal_session(user_id: str) -> str:
|
||||
session = stripe.billing_portal.Session.create(
|
||||
customer=await get_stripe_customer_id(user_id),
|
||||
return_url=base_url + "/profile/credits",
|
||||
)
|
||||
return session.url
|
||||
|
||||
@staticmethod
|
||||
def time_now() -> datetime:
|
||||
return datetime.now(timezone.utc)
|
||||
@@ -765,10 +774,8 @@ class UserCredit(UserCreditBase):
|
||||
ui_mode="hosted",
|
||||
payment_intent_data={"setup_future_usage": "off_session"},
|
||||
saved_payment_method_options={"payment_method_save": "enabled"},
|
||||
success_url=settings.config.frontend_base_url
|
||||
+ "/profile/credits?topup=success",
|
||||
cancel_url=settings.config.frontend_base_url
|
||||
+ "/profile/credits?topup=cancel",
|
||||
success_url=base_url + "/profile/credits?topup=success",
|
||||
cancel_url=base_url + "/profile/credits?topup=cancel",
|
||||
allow_promotion_codes=True,
|
||||
)
|
||||
|
||||
|
||||
```diff
@@ -2,6 +2,7 @@ import logging
 import os
 import zlib
 from contextlib import asynccontextmanager
+from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
 from uuid import uuid4

 from dotenv import load_dotenv
@@ -15,7 +16,36 @@ load_dotenv()
 PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
 os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA

-prisma = Prisma(auto_register=True)
+
+def add_param(url: str, key: str, value: str) -> str:
+    p = urlparse(url)
+    qs = dict(parse_qsl(p.query))
+    qs[key] = value
+    return urlunparse(p._replace(query=urlencode(qs)))
+
+
+DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://localhost:5432")
+
+CONN_LIMIT = os.getenv("DB_CONNECTION_LIMIT")
+if CONN_LIMIT:
+    DATABASE_URL = add_param(DATABASE_URL, "connection_limit", CONN_LIMIT)
+
+CONN_TIMEOUT = os.getenv("DB_CONNECT_TIMEOUT")
+if CONN_TIMEOUT:
+    DATABASE_URL = add_param(DATABASE_URL, "connect_timeout", CONN_TIMEOUT)
+
+POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
+if POOL_TIMEOUT:
+    DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
+
+HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
+
+prisma = Prisma(
+    auto_register=True,
+    http={"timeout": HTTP_TIMEOUT},
+    datasource={"url": DATABASE_URL},
+)
+

 logger = logging.getLogger(__name__)
```
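A quick worked example of what the new `add_param` helper produces; the URL and values here are illustrative, not the project's real settings:

```python
# Illustrative only: shows how add_param layers connection settings onto DATABASE_URL.
url = "postgresql://postgres:pass@localhost:5432/postgres?schema=platform"
url = add_param(url, "connection_limit", "12")
url = add_param(url, "connect_timeout", "60")
print(url)
# postgresql://postgres:pass@localhost:5432/postgres?schema=platform&connection_limit=12&connect_timeout=60
```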
@@ -1,4 +1,3 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import uuid
|
||||
from collections import defaultdict
|
||||
@@ -14,14 +13,14 @@ from prisma.models import (
|
||||
AgentNodeLink,
|
||||
StoreListingVersion,
|
||||
)
|
||||
from prisma.types import AgentGraphWhereInput
|
||||
from prisma.types import AgentGraphExecutionWhereInput, AgentGraphWhereInput
|
||||
from pydantic.fields import Field, computed_field
|
||||
|
||||
from backend.blocks.agent import AgentExecutorBlock
|
||||
from backend.blocks.basic import AgentInputBlock, AgentOutputBlock
|
||||
from backend.util import type
|
||||
from backend.util import type as type_utils
|
||||
|
||||
from .block import BlockInput, BlockType, get_block, get_blocks
|
||||
from .block import Block, BlockInput, BlockSchema, BlockType, get_block, get_blocks
|
||||
from .db import BaseDbModel, transaction
|
||||
from .execution import ExecutionResult, ExecutionStatus
|
||||
from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE
|
||||
@@ -71,13 +70,20 @@ class NodeModel(Node):
|
||||
|
||||
webhook: Optional[Webhook] = None
|
||||
|
||||
@property
|
||||
def block(self) -> Block[BlockSchema, BlockSchema]:
|
||||
block = get_block(self.block_id)
|
||||
if not block:
|
||||
raise ValueError(f"Block #{self.block_id} does not exist")
|
||||
return block
|
||||
|
||||
@staticmethod
|
||||
def from_db(node: AgentNode) -> "NodeModel":
|
||||
def from_db(node: AgentNode, for_export: bool = False) -> "NodeModel":
|
||||
obj = NodeModel(
|
||||
id=node.id,
|
||||
block_id=node.agentBlockId,
|
||||
input_default=type.convert(node.constantInput, dict[str, Any]),
|
||||
metadata=type.convert(node.metadata, dict[str, Any]),
|
||||
input_default=type_utils.convert(node.constantInput, dict[str, Any]),
|
||||
metadata=type_utils.convert(node.metadata, dict[str, Any]),
|
||||
graph_id=node.agentGraphId,
|
||||
graph_version=node.agentGraphVersion,
|
||||
webhook_id=node.webhookId,
|
||||
@@ -85,6 +91,8 @@ class NodeModel(Node):
|
||||
)
|
||||
obj.input_links = [Link.from_db(link) for link in node.Input or []]
|
||||
obj.output_links = [Link.from_db(link) for link in node.Output or []]
|
||||
if for_export:
|
||||
return obj.stripped_for_export()
|
||||
return obj
|
||||
|
||||
def is_triggered_by_event_type(self, event_type: str) -> bool:
|
||||
@@ -103,6 +111,51 @@ class NodeModel(Node):
|
||||
if event_filter[k] is True
|
||||
]
|
||||
|
||||
def stripped_for_export(self) -> "NodeModel":
|
||||
"""
|
||||
Returns a copy of the node model, stripped of any non-transferable properties
|
||||
"""
|
||||
stripped_node = self.model_copy(deep=True)
|
||||
# Remove credentials from node input
|
||||
if stripped_node.input_default:
|
||||
stripped_node.input_default = NodeModel._filter_secrets_from_node_input(
|
||||
stripped_node.input_default, self.block.input_schema.jsonschema()
|
||||
)
|
||||
|
||||
if (
|
||||
stripped_node.block.block_type == BlockType.INPUT
|
||||
and "value" in stripped_node.input_default
|
||||
):
|
||||
stripped_node.input_default["value"] = ""
|
||||
|
||||
# Remove webhook info
|
||||
stripped_node.webhook_id = None
|
||||
stripped_node.webhook = None
|
||||
|
||||
return stripped_node
|
||||
|
||||
@staticmethod
|
||||
def _filter_secrets_from_node_input(
|
||||
input_data: dict[str, Any], schema: dict[str, Any] | None
|
||||
) -> dict[str, Any]:
|
||||
sensitive_keys = ["credentials", "api_key", "password", "token", "secret"]
|
||||
field_schemas = schema.get("properties", {}) if schema else {}
|
||||
result = {}
|
||||
for key, value in input_data.items():
|
||||
field_schema: dict | None = field_schemas.get(key)
|
||||
if (field_schema and field_schema.get("secret", False)) or any(
|
||||
sensitive_key in key.lower() for sensitive_key in sensitive_keys
|
||||
):
|
||||
# This is a secret value -> filter this key-value pair out
|
||||
continue
|
||||
elif isinstance(value, dict):
|
||||
result[key] = NodeModel._filter_secrets_from_node_input(
|
||||
value, field_schema
|
||||
)
|
||||
else:
|
||||
result[key] = value
|
||||
return result
|
||||
|
||||
|
||||
# Fix 2-way reference Node <-> Webhook
|
||||
Webhook.model_rebuild()
|
||||
@@ -129,7 +182,7 @@ class GraphExecutionMeta(BaseDbModel):
|
||||
total_run_time = duration
|
||||
|
||||
try:
|
||||
stats = type.convert(_graph_exec.stats or {}, dict[str, Any])
|
||||
stats = type_utils.convert(_graph_exec.stats or {}, dict[str, Any])
|
||||
except ValueError:
|
||||
stats = {}
|
||||
|
||||
@@ -201,10 +254,9 @@ class GraphExecution(GraphExecutionMeta):
|
||||
)
|
||||
|
||||
|
||||
class Graph(BaseDbModel):
|
||||
class BaseGraph(BaseDbModel):
|
||||
version: int = 1
|
||||
is_active: bool = True
|
||||
is_template: bool = False
|
||||
name: str
|
||||
description: str
|
||||
nodes: list[Node] = []
|
||||
@@ -267,6 +319,10 @@ class Graph(BaseDbModel):
|
||||
}
|
||||
|
||||
|
||||
class Graph(BaseGraph):
|
||||
sub_graphs: list[BaseGraph] = [] # Flattened sub-graphs, only used in export
|
||||
|
||||
|
||||
class GraphModel(Graph):
|
||||
user_id: str
|
||||
nodes: list[NodeModel] = [] # type: ignore
|
||||
@@ -290,31 +346,55 @@ class GraphModel(Graph):
|
||||
Reassigns all IDs in the graph to new UUIDs.
|
||||
This method can be used before storing a new graph to the database.
|
||||
"""
|
||||
if reassign_graph_id:
|
||||
graph_id_map = {
|
||||
self.id: str(uuid.uuid4()),
|
||||
**{sub_graph.id: str(uuid.uuid4()) for sub_graph in self.sub_graphs},
|
||||
}
|
||||
else:
|
||||
graph_id_map = {}
|
||||
|
||||
self._reassign_ids(self, user_id, graph_id_map)
|
||||
for sub_graph in self.sub_graphs:
|
||||
self._reassign_ids(sub_graph, user_id, graph_id_map)
|
||||
|
||||
@staticmethod
|
||||
def _reassign_ids(
|
||||
graph: BaseGraph,
|
||||
user_id: str,
|
||||
graph_id_map: dict[str, str],
|
||||
):
|
||||
|
||||
# Reassign Graph ID
|
||||
id_map = {node.id: str(uuid.uuid4()) for node in self.nodes}
|
||||
if reassign_graph_id:
|
||||
self.id = str(uuid.uuid4())
|
||||
if graph.id in graph_id_map:
|
||||
graph.id = graph_id_map[graph.id]
|
||||
|
||||
# Reassign Node IDs
|
||||
for node in self.nodes:
|
||||
id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
|
||||
for node in graph.nodes:
|
||||
node.id = id_map[node.id]
|
||||
|
||||
# Reassign Link IDs
|
||||
for link in self.links:
|
||||
for link in graph.links:
|
||||
link.source_id = id_map[link.source_id]
|
||||
link.sink_id = id_map[link.sink_id]
|
||||
|
||||
# Reassign User IDs for agent blocks
|
||||
for node in self.nodes:
|
||||
for node in graph.nodes:
|
||||
if node.block_id != AgentExecutorBlock().id:
|
||||
continue
|
||||
node.input_default["user_id"] = user_id
|
||||
node.input_default.setdefault("data", {})
|
||||
|
||||
self.validate_graph()
|
||||
if (graph_id := node.input_default.get("graph_id")) in graph_id_map:
|
||||
node.input_default["graph_id"] = graph_id_map[graph_id]
|
||||
|
||||
def validate_graph(self, for_run: bool = False):
|
||||
self._validate_graph(self, for_run)
|
||||
for sub_graph in self.sub_graphs:
|
||||
self._validate_graph(sub_graph, for_run)
|
||||
|
||||
@staticmethod
|
||||
def _validate_graph(graph: BaseGraph, for_run: bool = False):
|
||||
def sanitize(name):
|
||||
sanitized_name = name.split("_#_")[0].split("_@_")[0].split("_$_")[0]
|
||||
if sanitized_name.startswith("tools_^_"):
|
||||
@@ -326,11 +406,11 @@ class GraphModel(Graph):
|
||||
agent_nodes = set()
|
||||
nodes_block = {
|
||||
node.id: block
|
||||
for node in self.nodes
|
||||
for node in graph.nodes
|
||||
if (block := get_block(node.block_id)) is not None
|
||||
}
|
||||
|
||||
for node in self.nodes:
|
||||
for node in graph.nodes:
|
||||
if (block := nodes_block.get(node.id)) is None:
|
||||
raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
|
||||
|
||||
@@ -343,11 +423,11 @@ class GraphModel(Graph):
|
||||
|
||||
input_links = defaultdict(list)
|
||||
|
||||
for link in self.links:
|
||||
for link in graph.links:
|
||||
input_links[link.sink_id].append(link)
|
||||
|
||||
# Nodes: required fields are filled or connected and dependencies are satisfied
|
||||
for node in self.nodes:
|
||||
for node in graph.nodes:
|
||||
if (block := nodes_block.get(node.id)) is None:
|
||||
raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")
|
||||
|
||||
@@ -408,7 +488,7 @@ class GraphModel(Graph):
|
||||
f"Node {block.name} #{node.id}: Field `{field_name}` requires [{', '.join(missing_deps)}] to be set"
|
||||
)
|
||||
|
||||
node_map = {v.id: v for v in self.nodes}
|
||||
node_map = {v.id: v for v in graph.nodes}
|
||||
|
||||
def is_static_output_block(nid: str) -> bool:
|
||||
bid = node_map[nid].block_id
|
||||
@@ -416,7 +496,7 @@ class GraphModel(Graph):
|
||||
return b.static_output if b else False
|
||||
|
||||
# Links: links are connected and the connected pin data types are compatible.
|
||||
for link in self.links:
|
||||
for link in graph.links:
|
||||
source = (link.source_id, link.source_name)
|
||||
sink = (link.sink_id, link.sink_name)
|
||||
prefix = f"Link {source} <-> {sink}"
|
||||
@@ -457,18 +537,20 @@ class GraphModel(Graph):
|
||||
link.is_static = True # Each value block output should be static.
|
||||
|
||||
@staticmethod
|
||||
def from_db(graph: AgentGraph, for_export: bool = False):
|
||||
def from_db(
|
||||
graph: AgentGraph,
|
||||
for_export: bool = False,
|
||||
sub_graphs: list[AgentGraph] | None = None,
|
||||
):
|
||||
return GraphModel(
|
||||
id=graph.id,
|
||||
user_id=graph.userId,
|
||||
user_id=graph.userId if not for_export else "",
|
||||
version=graph.version,
|
||||
is_active=graph.isActive,
|
||||
is_template=graph.isTemplate,
|
||||
name=graph.name or "",
|
||||
description=graph.description or "",
|
||||
nodes=[
|
||||
NodeModel.from_db(GraphModel._process_node(node, for_export))
|
||||
for node in graph.AgentNodes or []
|
||||
NodeModel.from_db(node, for_export) for node in graph.AgentNodes or []
|
||||
],
|
||||
links=list(
|
||||
{
|
||||
@@ -477,59 +559,12 @@ class GraphModel(Graph):
|
||||
for link in (node.Input or []) + (node.Output or [])
|
||||
}
|
||||
),
|
||||
sub_graphs=[
|
||||
GraphModel.from_db(sub_graph, for_export)
|
||||
for sub_graph in sub_graphs or []
|
||||
],
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _process_node(node: AgentNode, for_export: bool) -> AgentNode:
|
||||
if for_export:
|
||||
# Remove credentials from node input
|
||||
if node.constantInput:
|
||||
constant_input = type.convert(node.constantInput, dict[str, Any])
|
||||
constant_input = GraphModel._hide_node_input_credentials(constant_input)
|
||||
node.constantInput = Json(constant_input)
|
||||
|
||||
# Remove webhook info
|
||||
node.webhookId = None
|
||||
node.Webhook = None
|
||||
|
||||
return node
|
||||
|
||||
@staticmethod
|
||||
def _hide_node_input_credentials(input_data: dict[str, Any]) -> dict[str, Any]:
|
||||
sensitive_keys = ["credentials", "api_key", "password", "token", "secret"]
|
||||
result = {}
|
||||
for key, value in input_data.items():
|
||||
if isinstance(value, dict):
|
||||
result[key] = GraphModel._hide_node_input_credentials(value)
|
||||
elif isinstance(value, str) and any(
|
||||
sensitive_key in key.lower() for sensitive_key in sensitive_keys
|
||||
):
|
||||
# Skip this key-value pair in the result
|
||||
continue
|
||||
else:
|
||||
result[key] = value
|
||||
return result
|
||||
|
||||
def clean_graph(self):
|
||||
blocks = [block() for block in get_blocks().values()]
|
||||
|
||||
input_blocks = [
|
||||
node
|
||||
for node in self.nodes
|
||||
if next(
|
||||
(
|
||||
b
|
||||
for b in blocks
|
||||
if b.id == node.block_id and b.block_type == BlockType.INPUT
|
||||
),
|
||||
None,
|
||||
)
|
||||
]
|
||||
|
||||
for node in self.nodes:
|
||||
if any(input_block.id == node.id for input_block in input_blocks):
|
||||
node.input_default["value"] = ""
|
||||
|
||||
|
||||
# --------------------- CRUD functions --------------------- #
|
||||
|
||||
@@ -559,14 +594,14 @@ async def set_node_webhook(node_id: str, webhook_id: str | None) -> NodeModel:
|
||||
|
||||
async def get_graphs(
|
||||
user_id: str,
|
||||
filter_by: Literal["active", "template"] | None = "active",
|
||||
filter_by: Literal["active"] | None = "active",
|
||||
) -> list[GraphModel]:
|
||||
"""
|
||||
Retrieves graph metadata objects.
|
||||
Default behaviour is to get all currently active graphs.
|
||||
|
||||
Args:
|
||||
filter_by: An optional filter to either select templates or active graphs.
|
||||
filter_by: An optional filter to select only active graphs.
|
||||
user_id: The ID of the user that owns the graph.
|
||||
|
||||
Returns:
|
||||
@@ -576,8 +611,6 @@ async def get_graphs(
|
||||
|
||||
if filter_by == "active":
|
||||
where_clause["isActive"] = True
|
||||
elif filter_by == "template":
|
||||
where_clause["isTemplate"] = True
|
||||
|
||||
graphs = await AgentGraph.prisma().find_many(
|
||||
where=where_clause,
|
||||
@@ -597,18 +630,20 @@ async def get_graphs(
|
||||
return graph_models
|
||||
|
||||
|
||||
# TODO: move execution stuff to .execution
|
||||
async def get_graphs_executions(user_id: str) -> list[GraphExecutionMeta]:
|
||||
executions = await AgentGraphExecution.prisma().find_many(
|
||||
where={"isDeleted": False, "userId": user_id},
|
||||
order={"createdAt": "desc"},
|
||||
)
|
||||
return [GraphExecutionMeta.from_db(execution) for execution in executions]
|
||||
async def get_graph_executions(
|
||||
graph_id: Optional[str] = None,
|
||||
user_id: Optional[str] = None,
|
||||
) -> list[GraphExecutionMeta]:
|
||||
where_filter: AgentGraphExecutionWhereInput = {
|
||||
"isDeleted": False,
|
||||
}
|
||||
if user_id:
|
||||
where_filter["userId"] = user_id
|
||||
if graph_id:
|
||||
where_filter["agentGraphId"] = graph_id
|
||||
|
||||
|
||||
async def get_graph_executions(graph_id: str, user_id: str) -> list[GraphExecutionMeta]:
|
||||
executions = await AgentGraphExecution.prisma().find_many(
|
||||
where={"agentGraphId": graph_id, "isDeleted": False, "userId": user_id},
|
||||
where=where_filter,
|
||||
order={"createdAt": "desc"},
|
||||
)
|
||||
return [GraphExecutionMeta.from_db(execution) for execution in executions]
|
||||
@@ -664,21 +699,18 @@ async def get_graph_metadata(graph_id: str, version: int | None = None) -> Graph
|
||||
description=graph.description or "",
|
||||
version=graph.version,
|
||||
is_active=graph.isActive,
|
||||
is_template=graph.isTemplate,
|
||||
)
|
||||
|
||||
|
||||
async def get_graph(
|
||||
graph_id: str,
|
||||
version: int | None = None,
|
||||
template: bool = False, # note: currently not in use; TODO: remove from DB entirely
|
||||
user_id: str | None = None,
|
||||
for_export: bool = False,
|
||||
) -> GraphModel | None:
|
||||
"""
|
||||
Retrieves a graph from the DB.
|
||||
Defaults to the version with `is_active` if `version` is not passed,
|
||||
or the latest version with `is_template` if `template=True`.
|
||||
Defaults to the version with `is_active` if `version` is not passed.
|
||||
|
||||
Returns `None` if the record is not found.
|
||||
"""
|
||||
@@ -688,8 +720,6 @@ async def get_graph(
|
||||
|
||||
if version is not None:
|
||||
where_clause["version"] = version
|
||||
elif not template:
|
||||
where_clause["isActive"] = True
|
||||
|
||||
graph = await AgentGraph.prisma().find_first(
|
||||
where=where_clause,
|
||||
@@ -713,9 +743,62 @@ async def get_graph(
|
||||
):
|
||||
return None
|
||||
|
||||
if for_export:
|
||||
sub_graphs = await _get_sub_graphs(graph)
|
||||
return GraphModel.from_db(
|
||||
graph=graph,
|
||||
sub_graphs=sub_graphs,
|
||||
for_export=for_export,
|
||||
)
|
||||
|
||||
return GraphModel.from_db(graph, for_export)
|
||||
|
||||
|
||||
async def _get_sub_graphs(graph: AgentGraph) -> list[AgentGraph]:
|
||||
"""
|
||||
Iteratively fetches all sub-graphs of a given graph, and flattens them into a list.
|
||||
This call involves a DB fetch in batch, breadth-first, per-level of graph depth.
|
||||
On each DB fetch we will only fetch the sub-graphs that are not already in the list.
|
||||
"""
|
||||
sub_graphs = {graph.id: graph}
|
||||
search_graphs = [graph]
|
||||
agent_block_id = AgentExecutorBlock().id
|
||||
|
||||
while search_graphs:
|
||||
sub_graph_ids = [
|
||||
(graph_id, graph_version)
|
||||
for graph in search_graphs
|
||||
for node in graph.AgentNodes or []
|
||||
if (
|
||||
node.AgentBlock
|
||||
and node.AgentBlock.id == agent_block_id
|
||||
and (graph_id := dict(node.constantInput).get("graph_id"))
|
||||
and (graph_version := dict(node.constantInput).get("graph_version"))
|
||||
)
|
||||
]
|
||||
if not sub_graph_ids:
|
||||
break
|
||||
|
||||
graphs = await AgentGraph.prisma().find_many(
|
||||
where={
|
||||
"OR": [
|
||||
{
|
||||
"id": graph_id,
|
||||
"version": graph_version,
|
||||
"userId": graph.userId, # Ensure the sub-graph is owned by the same user
|
||||
}
|
||||
for graph_id, graph_version in sub_graph_ids
|
||||
] # type: ignore
|
||||
},
|
||||
include=AGENT_GRAPH_INCLUDE,
|
||||
)
|
||||
|
||||
search_graphs = [graph for graph in graphs if graph.id not in sub_graphs]
|
||||
sub_graphs.update({graph.id: graph for graph in search_graphs})
|
||||
|
||||
return [g for g in sub_graphs.values() if g.id != graph.id]
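# Rough illustration of the traversal above (graph names are hypothetical):
#   level 0: search_graphs = [A]     -> one query fetches its sub-graphs {B, C}
#   level 1: search_graphs = [B, C]  -> B references C again, but C is already
#                                       known, so only genuinely new graphs are
#                                       added to the next search set
#   result: [B, C]  (the root graph A is excluded from the returned list)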
|
||||
|
||||
|
||||
async def get_connected_output_nodes(node_id: str) -> list[tuple[Link, Node]]:
|
||||
links = await AgentNodeLink.prisma().find_many(
|
||||
where={"agentNodeSourceId": node_id},
|
||||
@@ -779,50 +862,56 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel:
|
||||
async with transaction() as tx:
|
||||
await __create_graph(tx, graph, user_id)
|
||||
|
||||
if created_graph := await get_graph(
|
||||
graph.id, graph.version, template=graph.is_template, user_id=user_id
|
||||
):
|
||||
if created_graph := await get_graph(graph.id, graph.version, user_id=user_id):
|
||||
return created_graph
|
||||
|
||||
raise ValueError(f"Created graph {graph.id} v{graph.version} is not in DB")
|
||||
|
||||
|
||||
async def __create_graph(tx, graph: Graph, user_id: str):
|
||||
await AgentGraph.prisma(tx).create(
|
||||
data={
|
||||
"id": graph.id,
|
||||
"version": graph.version,
|
||||
"name": graph.name,
|
||||
"description": graph.description,
|
||||
"isTemplate": graph.is_template,
|
||||
"isActive": graph.is_active,
|
||||
"userId": user_id,
|
||||
"AgentNodes": {
|
||||
"create": [
|
||||
{
|
||||
"id": node.id,
|
||||
"agentBlockId": node.block_id,
|
||||
"constantInput": Json(node.input_default),
|
||||
"metadata": Json(node.metadata),
|
||||
}
|
||||
for node in graph.nodes
|
||||
]
|
||||
},
|
||||
}
|
||||
graphs = [graph] + graph.sub_graphs
|
||||
|
||||
await AgentGraph.prisma(tx).create_many(
|
||||
data=[
|
||||
{
|
||||
"id": graph.id,
|
||||
"version": graph.version,
|
||||
"name": graph.name,
|
||||
"description": graph.description,
|
||||
"isActive": graph.is_active,
|
||||
"userId": user_id,
|
||||
}
|
||||
for graph in graphs
|
||||
]
|
||||
)
|
||||
|
||||
await asyncio.gather(
|
||||
*[
|
||||
AgentNodeLink.prisma(tx).create(
|
||||
{
|
||||
"id": str(uuid.uuid4()),
|
||||
"sourceName": link.source_name,
|
||||
"sinkName": link.sink_name,
|
||||
"agentNodeSourceId": link.source_id,
|
||||
"agentNodeSinkId": link.sink_id,
|
||||
"isStatic": link.is_static,
|
||||
}
|
||||
)
|
||||
await AgentNode.prisma(tx).create_many(
|
||||
data=[
|
||||
{
|
||||
"id": node.id,
|
||||
"agentGraphId": graph.id,
|
||||
"agentGraphVersion": graph.version,
|
||||
"agentBlockId": node.block_id,
|
||||
"constantInput": Json(node.input_default),
|
||||
"metadata": Json(node.metadata),
|
||||
"webhookId": node.webhook_id,
|
||||
}
|
||||
for graph in graphs
|
||||
for node in graph.nodes
|
||||
]
|
||||
)
|
||||
|
||||
await AgentNodeLink.prisma(tx).create_many(
|
||||
data=[
|
||||
{
|
||||
"id": str(uuid.uuid4()),
|
||||
"sourceName": link.source_name,
|
||||
"sinkName": link.sink_name,
|
||||
"agentNodeSourceId": link.source_id,
|
||||
"agentNodeSinkId": link.sink_id,
|
||||
"isStatic": link.is_static,
|
||||
}
|
||||
for graph in graphs
|
||||
for link in graph.links
|
||||
]
|
||||
)
|
||||
|
||||
@@ -32,3 +32,15 @@ GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
|
||||
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
|
||||
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
|
||||
}
|
||||
|
||||
|
||||
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
|
||||
return {
|
||||
"Agent": {
|
||||
"include": {
|
||||
**AGENT_GRAPH_INCLUDE,
|
||||
"AgentGraphExecution": {"where": {"userId": user_id}},
|
||||
}
|
||||
},
|
||||
"Creator": True,
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from enum import Enum
|
||||
from typing import Annotated, Any, Generic, Optional, TypeVar, Union
|
||||
|
||||
@@ -18,19 +18,25 @@ from .db import transaction
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
T_co = TypeVar("T_co", bound="BaseNotificationData", covariant=True)
|
||||
NotificationDataType_co = TypeVar(
|
||||
"NotificationDataType_co", bound="BaseNotificationData", covariant=True
|
||||
)
|
||||
SummaryParamsType_co = TypeVar(
|
||||
"SummaryParamsType_co", bound="BaseSummaryParams", covariant=True
|
||||
)
|
||||
|
||||
|
||||
class QueueType(Enum):
|
||||
IMMEDIATE = "immediate" # Send right away (errors, critical notifications)
|
||||
HOURLY = "hourly" # Batch for up to an hour (usage reports)
|
||||
DAILY = "daily" # Daily digest (summary notifications)
|
||||
BATCH = "batch" # Batch for up to an hour (usage reports)
|
||||
SUMMARY = "summary" # Daily digest (summary notifications)
|
||||
BACKOFF = "backoff" # Backoff strategy (exponential backoff)
|
||||
ADMIN = "admin" # Admin notifications (errors, critical notifications)
|
||||
|
||||
|
||||
class BaseNotificationData(BaseModel):
|
||||
pass
|
||||
class Config:
|
||||
extra = "allow"
|
||||
|
||||
|
||||
class AgentRunData(BaseNotificationData):
|
||||
@@ -47,6 +53,13 @@ class ZeroBalanceData(BaseNotificationData):
|
||||
last_transaction_time: datetime
|
||||
top_up_link: str
|
||||
|
||||
@field_validator("last_transaction_time")
|
||||
@classmethod
|
||||
def validate_timezone(cls, value: datetime):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
class LowBalanceData(BaseNotificationData):
|
||||
agent_name: str = Field(..., description="Name of the agent")
|
||||
@@ -75,6 +88,13 @@ class ContinuousAgentErrorData(BaseNotificationData):
|
||||
error_time: datetime
|
||||
attempts: int = Field(..., description="Number of retry attempts made")
|
||||
|
||||
@field_validator("start_time", "error_time")
|
||||
@classmethod
|
||||
def validate_timezone(cls, value: datetime):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
class BaseSummaryData(BaseNotificationData):
|
||||
total_credits_used: float
|
||||
@@ -87,18 +107,53 @@ class BaseSummaryData(BaseNotificationData):
|
||||
cost_breakdown: dict[str, float]
|
||||
|
||||
|
||||
class BaseSummaryParams(BaseModel):
|
||||
pass
|
||||
|
||||
|
||||
class DailySummaryParams(BaseSummaryParams):
|
||||
date: datetime
|
||||
|
||||
@field_validator("date")
|
||||
def validate_timezone(cls, value):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
class WeeklySummaryParams(BaseSummaryParams):
|
||||
start_date: datetime
|
||||
end_date: datetime
|
||||
|
||||
@field_validator("start_date", "end_date")
|
||||
def validate_timezone(cls, value):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
class DailySummaryData(BaseSummaryData):
|
||||
date: datetime
|
||||
|
||||
@field_validator("date")
|
||||
def validate_timezone(cls, value):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
class WeeklySummaryData(BaseSummaryData):
|
||||
start_date: datetime
|
||||
end_date: datetime
|
||||
week_number: int
|
||||
year: int
|
||||
|
||||
@field_validator("start_date", "end_date")
|
||||
def validate_timezone(cls, value):
|
||||
if value.tzinfo is None:
|
||||
raise ValueError("datetime must have timezone information")
|
||||
return value
|
||||
|
||||
|
||||
class MonthlySummaryData(BaseSummaryData):
|
||||
class MonthlySummaryData(BaseNotificationData):
|
||||
month: int
|
||||
year: int
|
||||
|
||||
@@ -125,6 +180,7 @@ NotificationData = Annotated[
|
||||
WeeklySummaryData,
|
||||
DailySummaryData,
|
||||
RefundRequestData,
|
||||
BaseSummaryData,
|
||||
],
|
||||
Field(discriminator="type"),
|
||||
]
|
||||
@@ -134,15 +190,22 @@ class NotificationEventDTO(BaseModel):
|
||||
user_id: str
|
||||
type: NotificationType
|
||||
data: dict
|
||||
created_at: datetime = Field(default_factory=datetime.now)
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc))
|
||||
retry_count: int = 0
|
||||
|
||||
|
||||
class NotificationEventModel(BaseModel, Generic[T_co]):
|
||||
class SummaryParamsEventDTO(BaseModel):
|
||||
user_id: str
|
||||
type: NotificationType
|
||||
data: T_co
|
||||
created_at: datetime = Field(default_factory=datetime.now)
|
||||
data: dict
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc))
|
||||
|
||||
|
||||
class NotificationEventModel(BaseModel, Generic[NotificationDataType_co]):
|
||||
user_id: str
|
||||
type: NotificationType
|
||||
data: NotificationDataType_co
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc))
|
||||
|
||||
@property
|
||||
def strategy(self) -> QueueType:
|
||||
@@ -159,7 +222,14 @@ class NotificationEventModel(BaseModel, Generic[T_co]):
|
||||
return NotificationTypeOverride(self.type).template
|
||||
|
||||
|
||||
def get_data_type(
|
||||
class SummaryParamsEventModel(BaseModel, Generic[SummaryParamsType_co]):
|
||||
user_id: str
|
||||
type: NotificationType
|
||||
data: SummaryParamsType_co
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc))
|
||||
|
||||
|
||||
def get_notif_data_type(
|
||||
notification_type: NotificationType,
|
||||
) -> type[BaseNotificationData]:
|
||||
return {
|
||||
@@ -176,11 +246,20 @@ def get_data_type(
|
||||
}[notification_type]
|
||||
|
||||
|
||||
def get_summary_params_type(
|
||||
notification_type: NotificationType,
|
||||
) -> type[BaseSummaryParams]:
|
||||
return {
|
||||
NotificationType.DAILY_SUMMARY: DailySummaryParams,
|
||||
NotificationType.WEEKLY_SUMMARY: WeeklySummaryParams,
|
||||
}[notification_type]
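For illustration, the generic model above can be parameterised at runtime with this mapping, mirroring how NotificationEventModel is used elsewhere in this module; the concrete values below are invented:

params_model = SummaryParamsEventModel[
    get_summary_params_type(NotificationType.WEEKLY_SUMMARY)
]
event = params_model(
    user_id="user-123",  # invented
    type=NotificationType.WEEKLY_SUMMARY,
    data=WeeklySummaryParams(
        start_date=datetime(2025, 1, 6, tzinfo=timezone.utc),
        end_date=datetime(2025, 1, 12, tzinfo=timezone.utc),
    ),
)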
|
||||
|
||||
|
||||
class NotificationBatch(BaseModel):
|
||||
user_id: str
|
||||
events: list[NotificationEvent]
|
||||
strategy: QueueType
|
||||
last_update: datetime = datetime.now()
|
||||
last_update: datetime = Field(default_factory=lambda: datetime.now(tz=timezone.utc))
|
||||
|
||||
|
||||
class NotificationResult(BaseModel):
|
||||
@@ -196,15 +275,15 @@ class NotificationTypeOverride:
|
||||
def strategy(self) -> QueueType:
|
||||
BATCHING_RULES = {
|
||||
# These are batched by the notification service
|
||||
NotificationType.AGENT_RUN: QueueType.IMMEDIATE,
|
||||
NotificationType.AGENT_RUN: QueueType.BATCH,
|
||||
# These are batched by the notification service, but with a backoff strategy
|
||||
NotificationType.ZERO_BALANCE: QueueType.BACKOFF,
|
||||
NotificationType.LOW_BALANCE: QueueType.IMMEDIATE,
|
||||
NotificationType.BLOCK_EXECUTION_FAILED: QueueType.BACKOFF,
|
||||
NotificationType.CONTINUOUS_AGENT_ERROR: QueueType.BACKOFF,
|
||||
NotificationType.DAILY_SUMMARY: QueueType.DAILY,
|
||||
NotificationType.WEEKLY_SUMMARY: QueueType.DAILY,
|
||||
NotificationType.MONTHLY_SUMMARY: QueueType.DAILY,
|
||||
NotificationType.DAILY_SUMMARY: QueueType.SUMMARY,
|
||||
NotificationType.WEEKLY_SUMMARY: QueueType.SUMMARY,
|
||||
NotificationType.MONTHLY_SUMMARY: QueueType.SUMMARY,
|
||||
NotificationType.REFUND_REQUEST: QueueType.ADMIN,
|
||||
NotificationType.REFUND_PROCESSED: QueueType.ADMIN,
|
||||
}
|
||||
@@ -258,12 +337,51 @@ class NotificationPreference(BaseModel):
|
||||
)
|
||||
daily_limit: int = 10 # Max emails per day
|
||||
emails_sent_today: int = 0
|
||||
last_reset_date: datetime = Field(default_factory=datetime.now)
|
||||
last_reset_date: datetime = Field(
|
||||
default_factory=lambda: datetime.now(timezone.utc)
|
||||
)
|
||||
|
||||
|
||||
class UserNotificationEventDTO(BaseModel):
|
||||
type: NotificationType
|
||||
data: dict
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
@staticmethod
|
||||
def from_db(model: NotificationEvent) -> "UserNotificationEventDTO":
|
||||
return UserNotificationEventDTO(
|
||||
type=model.type,
|
||||
data=dict(model.data),
|
||||
created_at=model.createdAt,
|
||||
updated_at=model.updatedAt,
|
||||
)
|
||||
|
||||
|
||||
class UserNotificationBatchDTO(BaseModel):
|
||||
user_id: str
|
||||
type: NotificationType
|
||||
notifications: list[UserNotificationEventDTO]
|
||||
created_at: datetime
|
||||
updated_at: datetime
|
||||
|
||||
@staticmethod
|
||||
def from_db(model: UserNotificationBatch) -> "UserNotificationBatchDTO":
|
||||
return UserNotificationBatchDTO(
|
||||
user_id=model.userId,
|
||||
type=model.type,
|
||||
notifications=[
|
||||
UserNotificationEventDTO.from_db(notification)
|
||||
for notification in model.notifications or []
|
||||
],
|
||||
created_at=model.createdAt,
|
||||
updated_at=model.updatedAt,
|
||||
)
|
||||
|
||||
|
||||
def get_batch_delay(notification_type: NotificationType) -> timedelta:
|
||||
return {
|
||||
NotificationType.AGENT_RUN: timedelta(seconds=1),
|
||||
NotificationType.AGENT_RUN: timedelta(minutes=60),
|
||||
NotificationType.ZERO_BALANCE: timedelta(minutes=60),
|
||||
NotificationType.LOW_BALANCE: timedelta(minutes=60),
|
||||
NotificationType.BLOCK_EXECUTION_FAILED: timedelta(minutes=60),
|
||||
@@ -274,19 +392,15 @@ def get_batch_delay(notification_type: NotificationType) -> timedelta:
|
||||
async def create_or_add_to_user_notification_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType,
|
||||
data: str, # type: 'NotificationEventModel'
|
||||
) -> dict:
|
||||
notification_data: NotificationEventModel,
|
||||
) -> UserNotificationBatchDTO:
|
||||
try:
|
||||
logger.info(
|
||||
f"Creating or adding to notification batch for {user_id} with type {notification_type} and data {data}"
|
||||
f"Creating or adding to notification batch for {user_id} with type {notification_type} and data {notification_data}"
|
||||
)
|
||||
|
||||
notification_data = NotificationEventModel[
|
||||
get_data_type(notification_type)
|
||||
].model_validate_json(data)
|
||||
|
||||
# Serialize the data
|
||||
json_data: Json = Json(notification_data.data.model_dump_json())
|
||||
json_data: Json = Json(notification_data.data.model_dump())
|
||||
|
||||
# First try to find existing batch
|
||||
existing_batch = await UserNotificationBatch.prisma().find_unique(
|
||||
@@ -317,7 +431,7 @@ async def create_or_add_to_user_notification_batch(
|
||||
},
|
||||
include={"notifications": True},
|
||||
)
|
||||
return resp.model_dump()
|
||||
return UserNotificationBatchDTO.from_db(resp)
|
||||
else:
|
||||
async with transaction() as tx:
|
||||
notification_event = await tx.notificationevent.create(
|
||||
@@ -339,27 +453,33 @@ async def create_or_add_to_user_notification_batch(
|
||||
raise DatabaseError(
|
||||
f"Failed to add notification event {notification_event.id} to existing batch {existing_batch.id}"
|
||||
)
|
||||
return resp.model_dump()
|
||||
return UserNotificationBatchDTO.from_db(resp)
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to create or add to notification batch for user {user_id} and type {notification_type}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
async def get_user_notification_last_message_in_batch(
|
||||
async def get_user_notification_oldest_message_in_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType,
|
||||
) -> NotificationEvent | None:
|
||||
) -> UserNotificationEventDTO | None:
|
||||
try:
|
||||
batch = await UserNotificationBatch.prisma().find_first(
|
||||
where={"userId": user_id, "type": notification_type},
|
||||
order={"createdAt": "desc"},
|
||||
include={"notifications": True},
|
||||
)
|
||||
if not batch:
|
||||
return None
|
||||
if not batch.notifications:
|
||||
return None
|
||||
return batch.notifications[-1]
|
||||
sorted_notifications = sorted(batch.notifications, key=lambda x: x.createdAt)
|
||||
|
||||
return (
|
||||
UserNotificationEventDTO.from_db(sorted_notifications[0])
|
||||
if sorted_notifications
|
||||
else None
|
||||
)
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to get user notification last message in batch for user {user_id} and type {notification_type}: {e}"
|
||||
@@ -394,13 +514,34 @@ async def empty_user_notification_batch(
|
||||
async def get_user_notification_batch(
|
||||
user_id: str,
|
||||
notification_type: NotificationType,
|
||||
) -> UserNotificationBatch | None:
|
||||
) -> UserNotificationBatchDTO | None:
|
||||
try:
|
||||
return await UserNotificationBatch.prisma().find_first(
|
||||
batch = await UserNotificationBatch.prisma().find_first(
|
||||
where={"userId": user_id, "type": notification_type},
|
||||
include={"notifications": True},
|
||||
)
|
||||
return UserNotificationBatchDTO.from_db(batch) if batch else None
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to get user notification batch for user {user_id} and type {notification_type}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
async def get_all_batches_by_type(
|
||||
notification_type: NotificationType,
|
||||
) -> list[UserNotificationBatchDTO]:
|
||||
try:
|
||||
batches = await UserNotificationBatch.prisma().find_many(
|
||||
where={
|
||||
"type": notification_type,
|
||||
"notifications": {
|
||||
"some": {} # Only return batches with at least one notification
|
||||
},
|
||||
},
|
||||
include={"notifications": True},
|
||||
)
|
||||
return [UserNotificationBatchDTO.from_db(batch) for batch in batches]
|
||||
except Exception as e:
|
||||
raise DatabaseError(
|
||||
f"Failed to get all batches by type {notification_type}: {e}"
|
||||
) from e
|
||||
|
||||
autogpt_platform/backend/backend/data/onboarding.py (new file)
@@ -0,0 +1,247 @@
|
||||
import re
|
||||
from typing import Any, Optional
|
||||
|
||||
import prisma
|
||||
import pydantic
|
||||
from prisma import Json
|
||||
from prisma.models import (
|
||||
AgentGraph,
|
||||
AgentGraphExecution,
|
||||
StoreListingVersion,
|
||||
UserOnboarding,
|
||||
)
|
||||
from prisma.types import UserOnboardingUpdateInput
|
||||
|
||||
from backend.server.v2.library.db import set_is_deleted_for_library_agent
|
||||
from backend.server.v2.store.db import get_store_agent_details
|
||||
from backend.server.v2.store.model import StoreAgentDetails
|
||||
|
||||
# Mapping from a user's usage-reason id to the categories searched when choosing which agents to show
|
||||
REASON_MAPPING: dict[str, list[str]] = {
|
||||
"content_marketing": ["writing", "marketing", "creative"],
|
||||
"business_workflow_automation": ["business", "productivity"],
|
||||
"data_research": ["data", "research"],
|
||||
"ai_innovation": ["development", "research"],
|
||||
"personal_productivity": ["personal", "productivity"],
|
||||
}
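# For example (illustrative): a user whose usage reason is "content_marketing"
# is matched against agents in the "writing", "marketing", or "creative"
# categories by get_recommended_agents() below.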
|
||||
|
||||
|
||||
class UserOnboardingUpdate(pydantic.BaseModel):
|
||||
step: int
|
||||
usageReason: Optional[str] = None
|
||||
integrations: list[str] = pydantic.Field(default_factory=list)
|
||||
otherIntegrations: Optional[str] = None
|
||||
selectedAgentCreator: Optional[str] = None
|
||||
selectedAgentSlug: Optional[str] = None
|
||||
agentInput: Optional[dict[str, Any]] = None
|
||||
isCompleted: bool = False
|
||||
|
||||
|
||||
async def get_user_onboarding(user_id: str):
|
||||
return await UserOnboarding.prisma().upsert(
|
||||
where={"userId": user_id},
|
||||
data={
|
||||
"create": {"userId": user_id}, # type: ignore
|
||||
"update": {},
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
async def update_user_onboarding(user_id: str, data: UserOnboardingUpdate):
|
||||
# Get the user onboarding data
|
||||
user_onboarding = await get_user_onboarding(user_id)
|
||||
update: UserOnboardingUpdateInput = {
|
||||
"step": data.step,
|
||||
"isCompleted": data.isCompleted,
|
||||
}
|
||||
if data.usageReason:
|
||||
update["usageReason"] = data.usageReason
|
||||
if data.integrations:
|
||||
update["integrations"] = data.integrations
|
||||
if data.otherIntegrations:
|
||||
update["otherIntegrations"] = data.otherIntegrations
|
||||
if data.selectedAgentSlug and data.selectedAgentCreator:
|
||||
update["selectedAgentSlug"] = data.selectedAgentSlug
|
||||
update["selectedAgentCreator"] = data.selectedAgentCreator
|
||||
# Check if slug changes
|
||||
if (
|
||||
user_onboarding.selectedAgentCreator
|
||||
and user_onboarding.selectedAgentSlug
|
||||
and user_onboarding.selectedAgentSlug != data.selectedAgentSlug
|
||||
):
|
||||
store_agent = await get_store_agent_details(
|
||||
user_onboarding.selectedAgentCreator, user_onboarding.selectedAgentSlug
|
||||
)
|
||||
store_listing = await StoreListingVersion.prisma().find_unique_or_raise(
|
||||
where={"id": store_agent.store_listing_version_id}
|
||||
)
|
||||
agent_graph = await AgentGraph.prisma().find_first(
|
||||
where={"id": store_listing.agentId, "version": store_listing.version}
|
||||
)
|
||||
execution_count = await AgentGraphExecution.prisma().count(
|
||||
where={
|
||||
"userId": user_id,
|
||||
"agentGraphId": store_listing.agentId,
|
||||
"agentGraphVersion": store_listing.version,
|
||||
}
|
||||
)
|
||||
# If there were no executions and the graph doesn't belong to the user,
# mark the agent as deleted
|
||||
if execution_count == 0 and agent_graph and agent_graph.userId != user_id:
|
||||
await set_is_deleted_for_library_agent(
|
||||
user_id, store_listing.agentId, store_listing.agentVersion, True
|
||||
)
|
||||
if data.agentInput:
|
||||
update["agentInput"] = Json(data.agentInput)
|
||||
|
||||
return await UserOnboarding.prisma().upsert(
|
||||
where={"userId": user_id},
|
||||
data={
|
||||
"create": {"userId": user_id, **update}, # type: ignore
|
||||
"update": update,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def clean_and_split(text: str) -> list[str]:
|
||||
"""
|
||||
Truncates the input to 100 characters, removes all special characters,
and splits it by whitespace and commas, lowercasing the resulting words.
|
||||
|
||||
Args:
|
||||
text (str): The input string.
|
||||
|
||||
Returns:
|
||||
list[str]: A list of cleaned words.
|
||||
"""
|
||||
# Remove all special characters (keep only alphanumeric and whitespace)
|
||||
cleaned_text = re.sub(r"[^a-zA-Z0-9\s,]", "", text.strip()[:100])
|
||||
|
||||
# Split by whitespace and commas
|
||||
words = re.split(r"[\s,]+", cleaned_text)
|
||||
|
||||
# Remove empty strings from the list
|
||||
words = [word.lower() for word in words if word]
|
||||
|
||||
return words
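# Illustrative call (input string invented):
#   clean_and_split("Automate content-marketing & SEO reports, weekly!")
#   -> ["automate", "contentmarketing", "seo", "reports", "weekly"]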
|
||||
|
||||
|
||||
def calculate_points(
|
||||
agent, categories: list[str], custom: list[str], integrations: list[str]
|
||||
) -> int:
|
||||
"""
|
||||
Calculates the total points for an agent based on the specified criteria.
|
||||
|
||||
Args:
|
||||
agent: The agent object.
|
||||
categories (list[str]): List of categories to match against the agent's categories.
custom (list[str]): Words from the user's usage reason to match in the description.
integrations (list[str]): Integration names to match in the description.
|
||||
|
||||
Returns:
|
||||
int: Total points for the agent.
|
||||
"""
|
||||
points = 0
|
||||
|
||||
# 1. Category Matches
|
||||
matched_categories = sum(
|
||||
1 for category in categories if category in agent.categories
|
||||
)
|
||||
points += matched_categories * 100
|
||||
|
||||
# 2. Description Word Matches
|
||||
description_words = agent.description.split() # Split description into words
|
||||
matched_words = sum(1 for word in custom if word in description_words)
|
||||
points += matched_words * 100
|
||||
|
||||
matched_words = sum(1 for word in integrations if word in description_words)
|
||||
points += matched_words * 50
|
||||
|
||||
# 3. Featured Bonus
|
||||
if agent.featured:
|
||||
points += 50
|
||||
|
||||
# 4. Rating Bonus
|
||||
points += agent.rating * 10
|
||||
|
||||
# 5. Runs Bonus
|
||||
runs_points = min(agent.runs / 1000 * 100, 100) # Cap at 100 points
|
||||
points += runs_points
|
||||
|
||||
return int(points)
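# Worked example with invented values: an agent with categories ["marketing",
# "writing"], description "Write marketing copy", featured=True, rating=4.5 and
# runs=500, scored with categories=["writing", "marketing", "creative"],
# custom=["marketing"], integrations=[]:
#   category matches:                    2 * 100 = 200
#   description matches (custom):        1 * 100 = 100
#   description matches (integrations):  0 *  50 =   0
#   featured bonus:                                 50
#   rating bonus:                       4.5 *  10 =  45
#   runs bonus:       min(500 / 1000 * 100, 100)  =  50
#   total: int(445) = 445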
|
||||
|
||||
|
||||
async def get_recommended_agents(user_id: str) -> list[StoreAgentDetails]:
|
||||
user_onboarding = await get_user_onboarding(user_id)
|
||||
categories = REASON_MAPPING.get(user_onboarding.usageReason or "", [])
|
||||
|
||||
where_clause: dict[str, Any] = {}
|
||||
|
||||
custom = clean_and_split((user_onboarding.usageReason or "").lower())
|
||||
|
||||
if categories:
|
||||
where_clause["OR"] = [
|
||||
{"categories": {"has": category}} for category in categories
|
||||
]
|
||||
else:
|
||||
where_clause["OR"] = [
|
||||
{"description": {"contains": word, "mode": "insensitive"}}
|
||||
for word in custom
|
||||
]
|
||||
|
||||
where_clause["OR"] += [
|
||||
{"description": {"contains": word, "mode": "insensitive"}}
|
||||
for word in user_onboarding.integrations
|
||||
]
|
||||
|
||||
agents = await prisma.models.StoreAgent.prisma().find_many(
|
||||
where=prisma.types.StoreAgentWhereInput(**where_clause),
|
||||
order=[
|
||||
{"featured": "desc"},
|
||||
{"runs": "desc"},
|
||||
{"rating": "desc"},
|
||||
],
|
||||
)
|
||||
|
||||
if len(agents) < 2:
|
||||
agents += await prisma.models.StoreAgent.prisma().find_many(
|
||||
where={
|
||||
"listing_id": {"not_in": [agent.listing_id for agent in agents]},
|
||||
},
|
||||
order=[
|
||||
{"featured": "desc"},
|
||||
{"runs": "desc"},
|
||||
{"rating": "desc"},
|
||||
],
|
||||
take=2 - len(agents),
|
||||
)
|
||||
|
||||
# Calculate points for the first 50 agents and choose the top 2
|
||||
agent_points = []
|
||||
for agent in agents[:50]:
|
||||
points = calculate_points(
|
||||
agent, categories, custom, user_onboarding.integrations
|
||||
)
|
||||
agent_points.append((agent, points))
|
||||
|
||||
agent_points.sort(key=lambda x: x[1], reverse=True)
|
||||
recommended_agents = [agent for agent, _ in agent_points[:2]]
|
||||
|
||||
return [
|
||||
StoreAgentDetails(
|
||||
store_listing_version_id=agent.storeListingVersionId,
|
||||
slug=agent.slug,
|
||||
agent_name=agent.agent_name,
|
||||
agent_video=agent.agent_video or "",
|
||||
agent_image=agent.agent_image,
|
||||
creator=agent.creator_username,
|
||||
creator_avatar=agent.creator_avatar,
|
||||
sub_heading=agent.sub_heading,
|
||||
description=agent.description,
|
||||
categories=agent.categories,
|
||||
runs=agent.runs,
|
||||
rating=agent.rating,
|
||||
versions=agent.versions,
|
||||
last_updated=agent.updated_at,
|
||||
)
|
||||
for agent in recommended_agents
|
||||
]
|
||||
@@ -35,7 +35,7 @@ class BaseRedisEventBus(Generic[M], ABC):
|
||||
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
|
||||
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
|
||||
channel_name = f"{self.event_bus_name}/{channel_key}"
|
||||
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
|
||||
logger.debug(f"[{channel_name}] Publishing an event to Redis {message}")
|
||||
return message, channel_name
|
||||
|
||||
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
|
||||
@@ -44,7 +44,7 @@ class BaseRedisEventBus(Generic[M], ABC):
|
||||
return None
|
||||
try:
|
||||
data = json.loads(msg["data"])
|
||||
logger.info(f"Consuming an event from Redis {data}")
|
||||
logger.debug(f"Consuming an event from Redis {data}")
|
||||
return self.Model(**data)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to parse event result from Redis {msg} {e}")
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional, cast
|
||||
from urllib.parse import quote_plus
|
||||
|
||||
from autogpt_libs.auth.models import DEFAULT_USER_ID
|
||||
from fastapi import HTTPException
|
||||
@@ -14,6 +18,7 @@ from backend.data.model import UserIntegrations, UserMetadata, UserMetadataRaw
|
||||
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
|
||||
from backend.server.v2.store.exceptions import DatabaseError
|
||||
from backend.util.encryption import JSONCryptor
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -334,3 +339,59 @@ async def get_user_email_verification(user_id: str) -> bool:
|
||||
raise DatabaseError(
|
||||
f"Failed to get email verification status for user {user_id}: {e}"
|
||||
) from e
|
||||
|
||||
|
||||
def generate_unsubscribe_link(user_id: str) -> str:
|
||||
"""Generate a link to unsubscribe from all notifications"""
|
||||
# Create an HMAC using a secret key
|
||||
secret_key = Settings().secrets.unsubscribe_secret_key
|
||||
signature = hmac.new(
|
||||
secret_key.encode("utf-8"), user_id.encode("utf-8"), hashlib.sha256
|
||||
).digest()
|
||||
|
||||
# Create a token that combines the user_id and signature
|
||||
token = base64.urlsafe_b64encode(
|
||||
f"{user_id}:{signature.hex()}".encode("utf-8")
|
||||
).decode("utf-8")
|
||||
logger.info(f"Generating unsubscribe link for user {user_id}")
|
||||
|
||||
base_url = Settings().config.platform_base_url
|
||||
return f"{base_url}/api/email/unsubscribe?token={quote_plus(token)}"
|
||||
|
||||
|
||||
async def unsubscribe_user_by_token(token: str) -> None:
|
||||
"""Unsubscribe a user from all notifications using the token"""
|
||||
try:
|
||||
# Decode the token
|
||||
decoded = base64.urlsafe_b64decode(token).decode("utf-8")
|
||||
user_id, received_signature_hex = decoded.split(":", 1)
|
||||
|
||||
# Verify the signature
|
||||
secret_key = Settings().secrets.unsubscribe_secret_key
|
||||
expected_signature = hmac.new(
|
||||
secret_key.encode("utf-8"), user_id.encode("utf-8"), hashlib.sha256
|
||||
).digest()
|
||||
|
||||
if not hmac.compare_digest(expected_signature.hex(), received_signature_hex):
|
||||
raise ValueError("Invalid token signature")
|
||||
|
||||
user = await get_user_by_id(user_id)
|
||||
await update_user_notification_preference(
|
||||
user.id,
|
||||
NotificationPreferenceDTO(
|
||||
email=user.email,
|
||||
daily_limit=0,
|
||||
preferences={
|
||||
NotificationType.AGENT_RUN: False,
|
||||
NotificationType.ZERO_BALANCE: False,
|
||||
NotificationType.LOW_BALANCE: False,
|
||||
NotificationType.BLOCK_EXECUTION_FAILED: False,
|
||||
NotificationType.CONTINUOUS_AGENT_ERROR: False,
|
||||
NotificationType.DAILY_SUMMARY: False,
|
||||
NotificationType.WEEKLY_SUMMARY: False,
|
||||
NotificationType.MONTHLY_SUMMARY: False,
|
||||
},
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
raise DatabaseError(f"Failed to unsubscribe user by token {token}: {e}") from e
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
from .database import DatabaseManager
|
||||
from .manager import ExecutionManager
|
||||
from .scheduler import ExecutionScheduler
|
||||
from .scheduler import Scheduler
|
||||
|
||||
__all__ = [
|
||||
"DatabaseManager",
|
||||
"ExecutionManager",
|
||||
"ExecutionScheduler",
|
||||
"Scheduler",
|
||||
]
|
||||
|
||||
@@ -20,9 +20,20 @@ from backend.data.graph import (
|
||||
get_graph_metadata,
|
||||
get_node,
|
||||
)
|
||||
from backend.data.notifications import (
|
||||
create_or_add_to_user_notification_batch,
|
||||
empty_user_notification_batch,
|
||||
get_all_batches_by_type,
|
||||
get_user_notification_batch,
|
||||
get_user_notification_oldest_message_in_batch,
|
||||
)
|
||||
from backend.data.user import (
|
||||
get_active_user_ids_in_timerange,
|
||||
get_user_email_by_id,
|
||||
get_user_email_verification,
|
||||
get_user_integrations,
|
||||
get_user_metadata,
|
||||
get_user_notification_preference,
|
||||
update_user_integrations,
|
||||
update_user_metadata,
|
||||
)
|
||||
@@ -80,3 +91,24 @@ class DatabaseManager(AppService):
|
||||
update_user_metadata = exposed_run_and_wait(update_user_metadata)
|
||||
get_user_integrations = exposed_run_and_wait(get_user_integrations)
|
||||
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
||||
|
||||
# User Comms - async
|
||||
get_active_user_ids_in_timerange = exposed_run_and_wait(
|
||||
get_active_user_ids_in_timerange
|
||||
)
|
||||
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
|
||||
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
|
||||
get_user_notification_preference = exposed_run_and_wait(
|
||||
get_user_notification_preference
|
||||
)
|
||||
|
||||
# Notifications - async
|
||||
create_or_add_to_user_notification_batch = exposed_run_and_wait(
|
||||
create_or_add_to_user_notification_batch
|
||||
)
|
||||
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
|
||||
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
|
||||
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
|
||||
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
|
||||
get_user_notification_oldest_message_in_batch
|
||||
)
|
||||
|
||||
@@ -109,7 +109,10 @@ class LogMetadata:
|
||||
logger.exception(msg, extra={"json_fields": {**self.metadata, **extra}})
|
||||
|
||||
def _wrap(self, msg: str, **extra):
|
||||
return f"{self.prefix} {msg} {extra}"
|
||||
extra_msg = str(extra or "")
|
||||
if len(extra_msg) > 1000:
|
||||
extra_msg = extra_msg[:1000] + "..."
|
||||
return f"{self.prefix} {msg} {extra_msg}"
|
||||
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
@@ -1,19 +1,23 @@
|
||||
import logging
|
||||
import os
|
||||
from enum import Enum
|
||||
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
|
||||
|
||||
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
|
||||
from apscheduler.job import Job as JobObj
|
||||
from apscheduler.jobstores.memory import MemoryJobStore
|
||||
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
|
||||
from apscheduler.schedulers.blocking import BlockingScheduler
|
||||
from apscheduler.triggers.cron import CronTrigger
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
from dotenv import load_dotenv
|
||||
from prisma.enums import NotificationType
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy import MetaData, create_engine
|
||||
|
||||
from backend.data.block import BlockInput
|
||||
from backend.executor.manager import ExecutionManager
|
||||
from backend.notifications.notifications import NotificationManager
|
||||
from backend.util.service import AppService, expose, get_service_client
|
||||
from backend.util.settings import Config
|
||||
|
||||
@@ -42,7 +46,7 @@ config = Config()
|
||||
|
||||
|
||||
def log(msg, **kwargs):
|
||||
logger.info("[ExecutionScheduler] " + msg, **kwargs)
|
||||
logger.info("[Scheduler] " + msg, **kwargs)
|
||||
|
||||
|
||||
def job_listener(event):
|
||||
@@ -58,8 +62,15 @@ def get_execution_client() -> ExecutionManager:
|
||||
return get_service_client(ExecutionManager)
|
||||
|
||||
|
||||
@thread_cached
|
||||
def get_notification_client():
|
||||
from backend.notifications import NotificationManager
|
||||
|
||||
return get_service_client(NotificationManager)
|
||||
|
||||
|
||||
def execute_graph(**kwargs):
|
||||
args = JobArgs(**kwargs)
|
||||
args = ExecutionJobArgs(**kwargs)
|
||||
try:
|
||||
log(f"Executing recurring job for graph #{args.graph_id}")
|
||||
get_execution_client().add_execution(
|
||||
@@ -72,7 +83,32 @@ def execute_graph(**kwargs):
|
||||
logger.exception(f"Error executing graph {args.graph_id}: {e}")
|
||||
|
||||
|
||||
class JobArgs(BaseModel):
|
||||
def process_existing_batches(**kwargs):
|
||||
args = NotificationJobArgs(**kwargs)
|
||||
try:
|
||||
log(
|
||||
f"Processing existing batches for notification type {args.notification_types}"
|
||||
)
|
||||
get_notification_client().process_existing_batches(args.notification_types)
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing existing batches: {e}")
|
||||
|
||||
|
||||
def process_weekly_summary(**kwargs):
|
||||
try:
|
||||
log("Processing weekly summary")
|
||||
get_notification_client().queue_weekly_summary()
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing weekly summary: {e}")
|
||||
|
||||
|
||||
class Jobstores(Enum):
|
||||
EXECUTION = "execution"
|
||||
BATCHED_NOTIFICATIONS = "batched_notifications"
|
||||
WEEKLY_NOTIFICATIONS = "weekly_notifications"
|
||||
|
||||
|
||||
class ExecutionJobArgs(BaseModel):
|
||||
graph_id: str
|
||||
input_data: BlockInput
|
||||
user_id: str
|
||||
@@ -80,14 +116,14 @@ class JobArgs(BaseModel):
|
||||
cron: str
|
||||
|
||||
|
||||
class JobInfo(JobArgs):
|
||||
class ExecutionJobInfo(ExecutionJobArgs):
|
||||
id: str
|
||||
name: str
|
||||
next_run_time: str
|
||||
|
||||
@staticmethod
|
||||
def from_db(job_args: JobArgs, job_obj: JobObj) -> "JobInfo":
|
||||
return JobInfo(
|
||||
def from_db(job_args: ExecutionJobArgs, job_obj: JobObj) -> "ExecutionJobInfo":
|
||||
return ExecutionJobInfo(
|
||||
id=job_obj.id,
|
||||
name=job_obj.name,
|
||||
next_run_time=job_obj.next_run_time.isoformat(),
|
||||
@@ -95,7 +131,29 @@ class JobInfo(JobArgs):
|
||||
)
|
||||
|
||||
|
||||
class ExecutionScheduler(AppService):
|
||||
class NotificationJobArgs(BaseModel):
|
||||
notification_types: list[NotificationType]
|
||||
cron: str
|
||||
|
||||
|
||||
class NotificationJobInfo(NotificationJobArgs):
|
||||
id: str
|
||||
name: str
|
||||
next_run_time: str
|
||||
|
||||
@staticmethod
|
||||
def from_db(
|
||||
job_args: NotificationJobArgs, job_obj: JobObj
|
||||
) -> "NotificationJobInfo":
|
||||
return NotificationJobInfo(
|
||||
id=job_obj.id,
|
||||
name=job_obj.name,
|
||||
next_run_time=job_obj.next_run_time.isoformat(),
|
||||
**job_args.model_dump(),
|
||||
)
|
||||
|
||||
|
||||
class Scheduler(AppService):
|
||||
scheduler: BlockingScheduler
|
||||
|
||||
@classmethod
|
||||
@@ -111,19 +169,38 @@ class ExecutionScheduler(AppService):
|
||||
def execution_client(self) -> ExecutionManager:
|
||||
return get_service_client(ExecutionManager)
|
||||
|
||||
@property
|
||||
@thread_cached
|
||||
def notification_client(self) -> NotificationManager:
|
||||
return get_service_client(NotificationManager)
|
||||
|
||||
def run_service(self):
|
||||
load_dotenv()
|
||||
db_schema, db_url = _extract_schema_from_url(os.getenv("DATABASE_URL"))
|
||||
self.scheduler = BlockingScheduler(
|
||||
jobstores={
|
||||
"default": SQLAlchemyJobStore(
|
||||
Jobstores.EXECUTION.value: SQLAlchemyJobStore(
|
||||
engine=create_engine(
|
||||
url=db_url,
|
||||
pool_size=self.db_pool_size(),
|
||||
max_overflow=0,
|
||||
),
|
||||
metadata=MetaData(schema=db_schema),
|
||||
)
|
||||
# this one is pre-existing so it keeps the
|
||||
# default table name.
|
||||
tablename="apscheduler_jobs",
|
||||
),
|
||||
Jobstores.BATCHED_NOTIFICATIONS.value: SQLAlchemyJobStore(
|
||||
engine=create_engine(
|
||||
url=db_url,
|
||||
pool_size=self.db_pool_size(),
|
||||
max_overflow=0,
|
||||
),
|
||||
metadata=MetaData(schema=db_schema),
|
||||
tablename="apscheduler_jobs_batched_notifications",
|
||||
),
|
||||
# These don't really need persistence
|
||||
Jobstores.WEEKLY_NOTIFICATIONS.value: MemoryJobStore(),
|
||||
}
|
||||
)
|
||||
self.scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
|
||||
@@ -137,8 +214,8 @@ class ExecutionScheduler(AppService):
|
||||
cron: str,
|
||||
input_data: BlockInput,
|
||||
user_id: str,
|
||||
) -> JobInfo:
|
||||
job_args = JobArgs(
|
||||
) -> ExecutionJobInfo:
|
||||
job_args = ExecutionJobArgs(
|
||||
graph_id=graph_id,
|
||||
input_data=input_data,
|
||||
user_id=user_id,
|
||||
@@ -150,37 +227,80 @@ class ExecutionScheduler(AppService):
|
||||
CronTrigger.from_crontab(cron),
|
||||
kwargs=job_args.model_dump(),
|
||||
replace_existing=True,
|
||||
jobstore=Jobstores.EXECUTION.value,
|
||||
)
|
||||
log(f"Added job {job.id} with cron schedule '{cron}' input data: {input_data}")
|
||||
return JobInfo.from_db(job_args, job)
|
||||
return ExecutionJobInfo.from_db(job_args, job)
|
||||
|
||||
@expose
|
||||
def delete_schedule(self, schedule_id: str, user_id: str) -> JobInfo:
|
||||
job = self.scheduler.get_job(schedule_id)
|
||||
def delete_schedule(self, schedule_id: str, user_id: str) -> ExecutionJobInfo:
|
||||
job = self.scheduler.get_job(schedule_id, jobstore=Jobstores.EXECUTION.value)
|
||||
if not job:
|
||||
log(f"Job {schedule_id} not found.")
|
||||
raise ValueError(f"Job #{schedule_id} not found.")
|
||||
|
||||
job_args = JobArgs(**job.kwargs)
|
||||
job_args = ExecutionJobArgs(**job.kwargs)
|
||||
if job_args.user_id != user_id:
|
||||
raise ValueError("User ID does not match the job's user ID.")
|
||||
|
||||
log(f"Deleting job {schedule_id}")
|
||||
job.remove()
|
||||
|
||||
return JobInfo.from_db(job_args, job)
|
||||
return ExecutionJobInfo.from_db(job_args, job)
|
||||
|
||||
@expose
|
||||
def get_execution_schedules(
|
||||
self, graph_id: str | None = None, user_id: str | None = None
|
||||
) -> list[JobInfo]:
|
||||
) -> list[ExecutionJobInfo]:
|
||||
schedules = []
|
||||
for job in self.scheduler.get_jobs():
|
||||
job_args = JobArgs(**job.kwargs)
|
||||
for job in self.scheduler.get_jobs(jobstore=Jobstores.EXECUTION.value):
|
||||
logger.info(
|
||||
f"Found job {job.id} with cron schedule {job.trigger} and args {job.kwargs}"
|
||||
)
|
||||
job_args = ExecutionJobArgs(**job.kwargs)
|
||||
if (
|
||||
job.next_run_time is not None
|
||||
and (graph_id is None or job_args.graph_id == graph_id)
|
||||
and (user_id is None or job_args.user_id == user_id)
|
||||
):
|
||||
schedules.append(JobInfo.from_db(job_args, job))
|
||||
schedules.append(ExecutionJobInfo.from_db(job_args, job))
|
||||
return schedules
|
||||
|
||||
@expose
|
||||
def add_batched_notification_schedule(
|
||||
self,
|
||||
notification_types: list[NotificationType],
|
||||
data: dict,
|
||||
cron: str,
|
||||
) -> NotificationJobInfo:
|
||||
job_args = NotificationJobArgs(
|
||||
notification_types=notification_types,
|
||||
cron=cron,
|
||||
)
|
||||
job = self.scheduler.add_job(
|
||||
process_existing_batches,
|
||||
CronTrigger.from_crontab(cron),
|
||||
kwargs=job_args.model_dump(),
|
||||
replace_existing=True,
|
||||
jobstore=Jobstores.BATCHED_NOTIFICATIONS.value,
|
||||
)
|
||||
log(f"Added job {job.id} with cron schedule '{cron}' input data: {data}")
|
||||
return NotificationJobInfo.from_db(job_args, job)
|
||||
|
||||
@expose
|
||||
def add_weekly_notification_schedule(self, cron: str) -> NotificationJobInfo:
|
||||
|
||||
job = self.scheduler.add_job(
|
||||
process_weekly_summary,
|
||||
CronTrigger.from_crontab(cron),
|
||||
kwargs={},
|
||||
replace_existing=True,
|
||||
jobstore=Jobstores.WEEKLY_NOTIFICATIONS.value,
|
||||
)
|
||||
log(f"Added job {job.id} with cron schedule '{cron}'")
|
||||
return NotificationJobInfo.from_db(
|
||||
NotificationJobArgs(
|
||||
cron=cron, notification_types=[NotificationType.WEEKLY_SUMMARY]
|
||||
),
|
||||
job,
|
||||
)
|
||||
|
||||
@@ -169,7 +169,16 @@ zerobounce_credentials = APIKeyCredentials(
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
example_credentials = APIKeyCredentials(
|
||||
id="a2b7f68f-aa6a-4995-99ec-b45b40d33498",
|
||||
provider="example-provider",
|
||||
api_key=SecretStr(settings.secrets.example_api_key),
|
||||
title="Use Credits for Example",
|
||||
expires_at=None,
|
||||
)
|
||||
|
||||
DEFAULT_CREDENTIALS = [
|
||||
example_credentials,
|
||||
ollama_credentials,
|
||||
revid_credentials,
|
||||
ideogram_credentials,
|
||||
@@ -225,6 +234,8 @@ class IntegrationCredentialsStore:
|
||||
all_credentials.append(ollama_credentials)
|
||||
|
||||
# These will only be added if the API key is set
|
||||
if settings.secrets.example_api_key:
|
||||
all_credentials.append(example_credentials)
|
||||
if settings.secrets.revid_api_key:
|
||||
all_credentials.append(revid_credentials)
|
||||
if settings.secrets.ideogram_api_key:
|
||||
|
||||
@@ -10,6 +10,7 @@ class ProviderName(str, Enum):
|
||||
D_ID = "d_id"
|
||||
E2B = "e2b"
|
||||
EXA = "exa"
|
||||
EXAMPLE_PROVIDER = "example-provider"
|
||||
FAL = "fal"
|
||||
GITHUB = "github"
|
||||
GOOGLE = "google"
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from typing import TYPE_CHECKING

from .compass import CompassWebhookManager
from .example import ExampleWebhookManager
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager

@@ -15,6 +16,7 @@ WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["BaseWebhooksManager"]] = {
CompassWebhookManager,
GithubWebhooksManager,
Slant3DWebhooksManager,
ExampleWebhookManager,
]
}
# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME]
@@ -0,0 +1,147 @@
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from fastapi import Request
|
||||
from strenum import StrEnum
|
||||
|
||||
from backend.data import integrations
|
||||
from backend.data.model import APIKeyCredentials, Credentials
|
||||
from backend.integrations.providers import ProviderName
|
||||
|
||||
from ._manual_base import ManualWebhookManagerBase
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExampleWebhookEventType(StrEnum):
|
||||
EXAMPLE_EVENT = "example_event"
|
||||
ANOTHER_EXAMPLE_EVENT = "another_example_event"
|
||||
|
||||
|
||||
# ExampleWebhookManager is a class that manages webhooks for a hypothetical provider.
|
||||
# It extends ManualWebhookManagerBase, which provides base functionality for manual webhook management.
|
||||
class ExampleWebhookManager(ManualWebhookManagerBase):
|
||||
# Define the provider name for this webhook manager.
|
||||
PROVIDER_NAME = ProviderName.EXAMPLE_PROVIDER
|
||||
# Define the types of webhooks this manager can handle.
|
||||
WebhookEventType = ExampleWebhookEventType
|
||||
|
||||
BASE_URL = "https://api.example.com"
|
||||
|
||||
@classmethod
|
||||
async def validate_payload(
|
||||
cls, webhook: integrations.Webhook, request: Request
|
||||
) -> tuple[dict, str]:
|
||||
"""
|
||||
Validate the incoming webhook payload.
|
||||
|
||||
Args:
|
||||
webhook (integrations.Webhook): The webhook object.
|
||||
request (Request): The incoming request object.
|
||||
|
||||
Returns:
|
||||
tuple: A tuple containing the payload as a dictionary and the event type as a string.
|
||||
"""
|
||||
# Extract the JSON payload from the request.
|
||||
payload = await request.json()
|
||||
# Set the event type based on the webhook type in the payload.
|
||||
event_type = payload.get("webhook_type", ExampleWebhookEventType.EXAMPLE_EVENT)
|
||||
|
||||
# For the payload it's better to return a Pydantic model
# rather than a weakly typed dict here
|
||||
return payload, event_type
|
||||
|
||||
async def _register_webhook(
|
||||
self,
|
||||
credentials: Credentials,
|
||||
webhook_type: str,
|
||||
resource: str,
|
||||
events: list[str],
|
||||
ingress_url: str,
|
||||
secret: str,
|
||||
) -> tuple[str, dict]:
|
||||
"""
|
||||
Register a new webhook with the provider.
|
||||
|
||||
Args:
|
||||
credentials (Credentials): The credentials required for authentication.
|
||||
webhook_type (str): The type of webhook to register.
|
||||
resource (str): The resource associated with the webhook.
|
||||
events (list[str]): The list of events to subscribe to.
|
||||
ingress_url (str): The URL where the webhook will send data.
|
||||
secret (str): A secret for securing the webhook.
|
||||
|
||||
Returns:
|
||||
tuple: A tuple containing the provider's webhook ID, if any, and the webhook configuration as a dictionary.
|
||||
"""
|
||||
# Ensure the credentials are of the correct type.
|
||||
if not isinstance(credentials, APIKeyCredentials):
|
||||
raise ValueError("API key is required to register a webhook")
|
||||
|
||||
# Prepare the headers for the request, including the API key.
|
||||
headers = {
|
||||
"api-key": credentials.api_key.get_secret_value(),
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
# Prepare the payload for the request. Note that the events list is not used;
# this is just an illustrative example.
|
||||
payload = {"endPoint": ingress_url}
|
||||
|
||||
# Send a POST request to register the webhook.
|
||||
response = requests.post(
|
||||
f"{self.BASE_URL}/example/webhookSubscribe", headers=headers, json=payload
|
||||
)
|
||||
|
||||
# Check if the response indicates a failure.
|
||||
if not response.ok:
|
||||
error = response.json().get("error", "Unknown error")
|
||||
raise RuntimeError(f"Failed to register webhook: {error}")
|
||||
|
||||
# Prepare the webhook configuration to return.
|
||||
webhook_config = {
|
||||
"endpoint": ingress_url,
|
||||
"provider": self.PROVIDER_NAME,
|
||||
"events": ["example_event"],
|
||||
"type": webhook_type,
|
||||
}
|
||||
|
||||
return "", webhook_config
|
||||
|
||||
async def _deregister_webhook(
|
||||
self, webhook: integrations.Webhook, credentials: Credentials
|
||||
) -> None:
|
||||
"""
|
||||
Deregister a webhook with the provider.
|
||||
|
||||
Args:
|
||||
webhook (integrations.Webhook): The webhook object to deregister.
|
||||
credentials (Credentials): The credentials associated with the webhook.
|
||||
|
||||
Raises:
|
||||
ValueError: If the webhook doesn't belong to the credentials or if deregistration fails.
|
||||
"""
|
||||
if webhook.credentials_id != credentials.id:
|
||||
raise ValueError(
|
||||
f"Webhook #{webhook.id} does not belong to credentials {credentials.id}"
|
||||
)
|
||||
|
||||
if not isinstance(credentials, APIKeyCredentials):
|
||||
raise ValueError("API key is required to deregister a webhook")
|
||||
|
||||
headers = {
|
||||
"api-key": credentials.api_key.get_secret_value(),
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
# Construct the delete URL based on the webhook information
|
||||
delete_url = f"{self.BASE_URL}/example/webhooks/{webhook.provider_webhook_id}"
|
||||
|
||||
response = requests.delete(delete_url, headers=headers)
|
||||
|
||||
if response.status_code not in [204, 404]:
|
||||
# 204 means successful deletion, 404 means the webhook was already deleted
|
||||
error = response.json().get("error", "Unknown error")
|
||||
raise ValueError(f"Failed to delete webhook: {error}")
|
||||
|
||||
# If we reach here, the webhook was successfully deleted or didn't exist
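As a quick, illustrative check of the event-type fallback in validate_payload above (the payloads are made up, not taken from any real provider), note that StrEnum members compare equal to their string values, so the lookup works directly on the incoming JSON:

# Illustrative only: made-up payloads exercising the fallback logic above.
incoming_payload = {"webhook_type": "another_example_event", "id": "123"}
event_type = incoming_payload.get(
    "webhook_type", ExampleWebhookEventType.EXAMPLE_EVENT
)
assert event_type == ExampleWebhookEventType.ANOTHER_EXAMPLE_EVENT

missing_type_payload = {"id": "456"}
fallback = missing_type_payload.get(
    "webhook_type", ExampleWebhookEventType.EXAMPLE_EVENT
)
assert fallback is ExampleWebhookEventType.EXAMPLE_EVENT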
@@ -7,9 +7,9 @@ from prisma.enums import NotificationType
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.notifications import (
|
||||
NotificationDataType_co,
|
||||
NotificationEventModel,
|
||||
NotificationTypeOverride,
|
||||
T_co,
|
||||
)
|
||||
from backend.util.settings import Settings
|
||||
from backend.util.text import TextFormatter
|
||||
@@ -48,7 +48,11 @@ class EmailSender:
|
||||
self,
|
||||
notification: NotificationType,
|
||||
user_email: str,
|
||||
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
|
||||
data: (
|
||||
NotificationEventModel[NotificationDataType_co]
|
||||
| list[NotificationEventModel[NotificationDataType_co]]
|
||||
),
|
||||
user_unsub_link: str | None = None,
|
||||
):
|
||||
"""Send an email to a user using a template pulled from the notification type"""
|
||||
if not self.postmark:
|
||||
@@ -56,20 +60,34 @@ class EmailSender:
|
||||
return
|
||||
template = self._get_template(notification)
|
||||
|
||||
base_url = (
|
||||
settings.config.frontend_base_url or settings.config.platform_base_url
|
||||
)
|
||||
|
||||
# Handle the case when data is a list
|
||||
template_data = data
|
||||
if isinstance(data, list):
|
||||
# Create a dictionary with a 'notifications' key containing the list
|
||||
template_data = {"notifications": data}
|
||||
|
||||
try:
|
||||
subject, full_message = self.formatter.format_email(
|
||||
base_template=template.base_template,
|
||||
subject_template=template.subject_template,
|
||||
content_template=template.body_template,
|
||||
data=data,
|
||||
unsubscribe_link="https://platform.agpt.co/profile/settings",
|
||||
data=template_data,
|
||||
unsubscribe_link=f"{base_url}/profile/settings",
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error formatting full message: {e}")
|
||||
raise e
|
||||
|
||||
self._send_email(user_email, subject, full_message)
|
||||
self._send_email(
|
||||
user_email=user_email,
|
||||
user_unsubscribe_link=user_unsub_link,
|
||||
subject=subject,
|
||||
body=full_message,
|
||||
)
|
||||
|
||||
def _get_template(self, notification: NotificationType):
|
||||
# convert the notification type to a notification type override
|
||||
@@ -90,7 +108,13 @@ class EmailSender:
|
||||
base_template=base_template,
|
||||
)
|
||||
|
||||
def _send_email(self, user_email: str, subject: str, body: str):
|
||||
def _send_email(
|
||||
self,
|
||||
user_email: str,
|
||||
subject: str,
|
||||
body: str,
|
||||
user_unsubscribe_link: str | None = None,
|
||||
):
|
||||
if not self.postmark:
|
||||
logger.warning("Email tried to send without postmark configured")
|
||||
return
|
||||
@@ -100,4 +124,13 @@ class EmailSender:
|
||||
To=user_email,
|
||||
Subject=subject,
|
||||
HtmlBody=body,
|
||||
# Headers default to None internally so this is fine
|
||||
Headers=(
|
||||
{
|
||||
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
|
||||
"List-Unsubscribe": f"<{user_unsubscribe_link}>",
|
||||
}
|
||||
if user_unsubscribe_link
|
||||
else None
|
||||
),
|
||||
)
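The conditional Headers argument above is what enables one-click unsubscribe; as a minimal standalone sketch (the helper name is ours, not part of the diff), the same construction reads:

def build_unsubscribe_headers(unsubscribe_link: str | None) -> dict[str, str] | None:
    # Postmark treats Headers=None as "no extra headers", so returning None is safe.
    if not unsubscribe_link:
        return None
    return {
        "List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
        "List-Unsubscribe": f"<{unsubscribe_link}>",
    }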
@@ -1,27 +1,36 @@
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Callable
|
||||
|
||||
import aio_pika
|
||||
from aio_pika.exceptions import QueueEmpty
|
||||
from autogpt_libs.utils.cache import thread_cached
|
||||
from prisma.enums import NotificationType
|
||||
from pydantic import BaseModel
|
||||
|
||||
from backend.data.notifications import (
|
||||
BaseSummaryData,
|
||||
BaseSummaryParams,
|
||||
DailySummaryData,
|
||||
DailySummaryParams,
|
||||
NotificationEventDTO,
|
||||
NotificationEventModel,
|
||||
NotificationResult,
|
||||
NotificationTypeOverride,
|
||||
QueueType,
|
||||
get_data_type,
|
||||
SummaryParamsEventDTO,
|
||||
SummaryParamsEventModel,
|
||||
WeeklySummaryData,
|
||||
WeeklySummaryParams,
|
||||
get_batch_delay,
|
||||
get_notif_data_type,
|
||||
get_summary_params_type,
|
||||
)
|
||||
from backend.data.rabbitmq import Exchange, ExchangeType, Queue, RabbitMQConfig
|
||||
from backend.data.user import (
|
||||
get_user_email_by_id,
|
||||
get_user_email_verification,
|
||||
get_user_notification_preference,
|
||||
)
|
||||
from backend.data.user import generate_unsubscribe_link
|
||||
from backend.notifications.email import EmailSender
|
||||
from backend.util.service import AppService, expose
|
||||
from backend.util.service import AppService, expose, get_service_client
|
||||
from backend.util.settings import Settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -59,6 +68,26 @@ def create_notification_config() -> RabbitMQConfig:
|
||||
"x-dead-letter-routing-key": "failed.admin",
|
||||
},
|
||||
),
|
||||
# Summary notification queues
|
||||
Queue(
|
||||
name="summary_notifications",
|
||||
exchange=notification_exchange,
|
||||
routing_key="notification.summary.#",
|
||||
arguments={
|
||||
"x-dead-letter-exchange": dead_letter_exchange.name,
|
||||
"x-dead-letter-routing-key": "failed.summary",
|
||||
},
|
||||
),
|
||||
# Batch Queue
|
||||
Queue(
|
||||
name="batch_notifications",
|
||||
exchange=notification_exchange,
|
||||
routing_key="notification.batch.#",
|
||||
arguments={
|
||||
"x-dead-letter-exchange": dead_letter_exchange.name,
|
||||
"x-dead-letter-routing-key": "failed.batch",
|
||||
},
|
||||
),
|
||||
# Failed notifications queue
|
||||
Queue(
|
||||
name="failed_notifications",
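The new summary and batch queues bind with topic patterns (notification.summary.#, notification.batch.#) that the routing keys produced by get_routing_key further down are meant to match. A rough illustrative sketch of that matching, using assumed routing-key values rather than anything taken from this diff:

import re

def topic_matches(binding: str, routing_key: str) -> bool:
    # Minimal approximation of RabbitMQ topic matching: '#' matches any number
    # of dot-separated words, '*' matches exactly one word.
    pattern = re.escape(binding).replace(r"\#", ".*").replace(r"\*", r"[^.]+")
    return re.fullmatch(pattern, routing_key) is not None

assert topic_matches("notification.summary.#", "notification.summary.WEEKLY_SUMMARY")
assert topic_matches("notification.batch.#", "notification.batch.AGENT_RUN")
assert not topic_matches("notification.batch.#", "notification.immediate.AGENT_RUN")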
@@ -76,12 +105,25 @@ def create_notification_config() -> RabbitMQConfig:
|
||||
)
|
||||
|
||||
|
||||
@thread_cached
|
||||
def get_scheduler():
|
||||
from backend.executor import Scheduler
|
||||
|
||||
return get_service_client(Scheduler)
|
||||
|
||||
|
||||
@thread_cached
|
||||
def get_db():
|
||||
from backend.executor.database import DatabaseManager
|
||||
|
||||
return get_service_client(DatabaseManager)
|
||||
|
||||
|
||||
class NotificationManager(AppService):
|
||||
"""Service for handling notifications with batching support"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.use_db = True
|
||||
self.rabbitmq_config = create_notification_config()
|
||||
self.running = True
|
||||
self.email_sender = EmailSender()
|
||||
@@ -90,19 +132,165 @@ class NotificationManager(AppService):
|
||||
def get_port(cls) -> int:
|
||||
return settings.config.notification_service_port
|
||||
|
||||
def get_routing_key(self, event: NotificationEventModel) -> str:
|
||||
def get_routing_key(self, event_type: NotificationType) -> str:
|
||||
strategy = NotificationTypeOverride(event_type).strategy
|
||||
"""Get the appropriate routing key for an event"""
|
||||
if event.strategy == QueueType.IMMEDIATE:
|
||||
return f"notification.immediate.{event.type.value}"
|
||||
elif event.strategy == QueueType.BACKOFF:
|
||||
return f"notification.backoff.{event.type.value}"
|
||||
elif event.strategy == QueueType.ADMIN:
|
||||
return f"notification.admin.{event.type.value}"
|
||||
elif event.strategy == QueueType.HOURLY:
|
||||
return f"notification.hourly.{event.type.value}"
|
||||
elif event.strategy == QueueType.DAILY:
|
||||
return f"notification.daily.{event.type.value}"
|
||||
return f"notification.{event.type.value}"
|
||||
if strategy == QueueType.IMMEDIATE:
|
||||
return f"notification.immediate.{event_type.value}"
|
||||
elif strategy == QueueType.BACKOFF:
|
||||
return f"notification.backoff.{event_type.value}"
|
||||
elif strategy == QueueType.ADMIN:
|
||||
return f"notification.admin.{event_type.value}"
|
||||
elif strategy == QueueType.BATCH:
|
||||
return f"notification.batch.{event_type.value}"
|
||||
elif strategy == QueueType.SUMMARY:
|
||||
return f"notification.summary.{event_type.value}"
|
||||
return f"notification.{event_type.value}"
|
||||
|
||||
@expose
|
||||
def queue_weekly_summary(self):
|
||||
"""Process weekly summary for specified notification types"""
|
||||
try:
|
||||
logger.info("Processing weekly summary queuing operation")
|
||||
processed_count = 0
|
||||
current_time = datetime.now(tz=timezone.utc)
|
||||
start_time = current_time - timedelta(days=7)
|
||||
users = get_db().get_active_user_ids_in_timerange(
|
||||
end_time=current_time.isoformat(),
|
||||
start_time=start_time.isoformat(),
|
||||
)
|
||||
for user in users:
|
||||
|
||||
self._queue_scheduled_notification(
|
||||
SummaryParamsEventDTO(
|
||||
user_id=user,
|
||||
type=NotificationType.WEEKLY_SUMMARY,
|
||||
data=WeeklySummaryParams(
|
||||
start_date=start_time,
|
||||
end_date=current_time,
|
||||
).model_dump(),
|
||||
),
|
||||
)
|
||||
processed_count += 1
|
||||
|
||||
logger.info(f"Processed {processed_count} weekly summaries into queue")
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing weekly summary: {e}")
|
||||
|
||||
@expose
|
||||
def process_existing_batches(self, notification_types: list[NotificationType]):
|
||||
"""Process existing batches for specified notification types"""
|
||||
try:
|
||||
processed_count = 0
|
||||
current_time = datetime.now(tz=timezone.utc)
|
||||
|
||||
for notification_type in notification_types:
|
||||
# Get all batches for this notification type
|
||||
batches = get_db().get_all_batches_by_type(notification_type)
|
||||
|
||||
for batch in batches:
|
||||
# Check if batch has aged out
|
||||
oldest_message = (
|
||||
get_db().get_user_notification_oldest_message_in_batch(
|
||||
batch.user_id, notification_type
|
||||
)
|
||||
)
|
||||
|
||||
if not oldest_message:
|
||||
# this should never happen
|
||||
logger.error(
|
||||
f"Batch for user {batch.user_id} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!"
|
||||
)
|
||||
continue
|
||||
|
||||
max_delay = get_batch_delay(notification_type)
|
||||
|
||||
# If batch has aged out, process it
|
||||
if oldest_message.created_at + max_delay < current_time:
|
||||
recipient_email = get_db().get_user_email_by_id(batch.user_id)
|
||||
|
||||
if not recipient_email:
|
||||
logger.error(
|
||||
f"User email not found for user {batch.user_id}"
|
||||
)
|
||||
continue
|
||||
|
||||
should_send = self._should_email_user_based_on_preference(
|
||||
batch.user_id, notification_type
|
||||
)
|
||||
|
||||
if not should_send:
|
||||
logger.debug(
|
||||
f"User {batch.user_id} does not want to receive {notification_type} notifications"
|
||||
)
|
||||
# Clear the batch
|
||||
get_db().empty_user_notification_batch(
|
||||
batch.user_id, notification_type
|
||||
)
|
||||
continue
|
||||
|
||||
batch_data = get_db().get_user_notification_batch(
|
||||
batch.user_id, notification_type
|
||||
)
|
||||
|
||||
if not batch_data or not batch_data.notifications:
|
||||
logger.error(
|
||||
f"Batch data not found for user {batch.user_id}"
|
||||
)
|
||||
# Clear the batch
|
||||
get_db().empty_user_notification_batch(
|
||||
batch.user_id, notification_type
|
||||
)
|
||||
continue
|
||||
|
||||
unsub_link = generate_unsubscribe_link(batch.user_id)
|
||||
|
||||
events = [
|
||||
NotificationEventModel[
|
||||
get_notif_data_type(db_event.type)
|
||||
].model_validate(
|
||||
{
|
||||
"user_id": batch.user_id,
|
||||
"type": db_event.type,
|
||||
"data": db_event.data,
|
||||
"created_at": db_event.created_at,
|
||||
}
|
||||
)
|
||||
for db_event in batch_data.notifications
|
||||
]
|
||||
logger.info(f"{events=}")
|
||||
|
||||
self.email_sender.send_templated(
|
||||
notification=notification_type,
|
||||
user_email=recipient_email,
|
||||
data=events,
|
||||
user_unsub_link=unsub_link,
|
||||
)
|
||||
|
||||
# Clear the batch
|
||||
get_db().empty_user_notification_batch(
|
||||
batch.user_id, notification_type
|
||||
)
|
||||
|
||||
processed_count += 1
|
||||
|
||||
logger.info(f"Processed {processed_count} aged batches")
|
||||
return {
|
||||
"success": True,
|
||||
"processed_count": processed_count,
|
||||
"notification_types": [nt.value for nt in notification_types],
|
||||
"timestamp": current_time.isoformat(),
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing batches: {e}")
|
||||
return {
|
||||
"success": False,
|
||||
"error": str(e),
|
||||
"notification_types": [nt.value for nt in notification_types],
|
||||
"timestamp": datetime.now(tz=timezone.utc).isoformat(),
|
||||
}
|
||||
|
||||
@expose
|
||||
def queue_notification(self, event: NotificationEventDTO) -> NotificationResult:
|
||||
@@ -111,9 +299,9 @@ class NotificationManager(AppService):
|
||||
logger.info(f"Received Request to queue {event=}")
|
||||
# Workaround for not being able to serialize generics over the expose bus
|
||||
parsed_event = NotificationEventModel[
|
||||
get_data_type(event.type)
|
||||
get_notif_data_type(event.type)
|
||||
].model_validate(event.model_dump())
|
||||
routing_key = self.get_routing_key(parsed_event)
|
||||
routing_key = self.get_routing_key(parsed_event.type)
|
||||
message = parsed_event.model_dump_json()
|
||||
|
||||
logger.info(f"Received Request to queue {message=}")
|
||||
@@ -140,22 +328,160 @@ class NotificationManager(AppService):
|
||||
logger.exception(f"Error queueing notification: {e}")
|
||||
return NotificationResult(success=False, message=str(e))
|
||||
|
||||
def _queue_scheduled_notification(self, event: SummaryParamsEventDTO):
|
||||
"""Queue a scheduled notification - exposed method for other services to call"""
|
||||
try:
|
||||
logger.info(f"Received Request to queue scheduled notification {event=}")
|
||||
|
||||
parsed_event = SummaryParamsEventModel[
|
||||
get_summary_params_type(event.type)
|
||||
].model_validate(event.model_dump())
|
||||
|
||||
routing_key = self.get_routing_key(event.type)
|
||||
message = parsed_event.model_dump_json()
|
||||
|
||||
logger.info(f"Received Request to queue {message=}")
|
||||
|
||||
exchange = "notifications"
|
||||
|
||||
# Publish to RabbitMQ
|
||||
self.run_and_wait(
|
||||
self.rabbit.publish_message(
|
||||
routing_key=routing_key,
|
||||
message=message,
|
||||
exchange=next(
|
||||
ex for ex in self.rabbit_config.exchanges if ex.name == exchange
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.exception(f"Error queueing notification: {e}")
|
||||
|
||||
def _should_email_user_based_on_preference(
|
||||
self, user_id: str, event_type: NotificationType
|
||||
) -> bool:
|
||||
"""Check if a user wants to receive a notification based on their preferences and email verification status"""
|
||||
validated_email = self.run_and_wait(get_user_email_verification(user_id))
|
||||
preference = self.run_and_wait(
|
||||
get_user_notification_preference(user_id)
|
||||
).preferences.get(event_type, True)
|
||||
validated_email = get_db().get_user_email_verification(user_id)
|
||||
preference = (
|
||||
get_db()
|
||||
.get_user_notification_preference(user_id)
|
||||
.preferences.get(event_type, True)
|
||||
)
|
||||
# only if both are true, should we email this person
|
||||
return validated_email and preference
|
||||
|
||||
def _gather_summary_data(
|
||||
self, user_id: str, event_type: NotificationType, params: BaseSummaryParams
|
||||
) -> BaseSummaryData:
|
||||
"""Gathers the data to build a summary notification"""
|
||||
|
||||
logger.info(
|
||||
f"Gathering summary data for {user_id} and {event_type} wiht {params=}"
|
||||
)
|
||||
|
||||
# total_credits_used = self.run_and_wait(
|
||||
# get_total_credits_used(user_id, start_time, end_time)
|
||||
# )
|
||||
|
||||
# total_executions = self.run_and_wait(
|
||||
# get_total_executions(user_id, start_time, end_time)
|
||||
# )
|
||||
|
||||
# most_used_agent = self.run_and_wait(
|
||||
# get_most_used_agent(user_id, start_time, end_time)
|
||||
# )
|
||||
|
||||
# execution_times = self.run_and_wait(
|
||||
# get_execution_time(user_id, start_time, end_time)
|
||||
# )
|
||||
|
||||
# runs = self.run_and_wait(
|
||||
# get_runs(user_id, start_time, end_time)
|
||||
# )
|
||||
total_credits_used = 3.0
|
||||
total_executions = 2
|
||||
most_used_agent = {"name": "Some"}
|
||||
execution_times = [1, 2, 3]
|
||||
runs = [{"status": "COMPLETED"}, {"status": "FAILED"}]
|
||||
|
||||
successful_runs = len([run for run in runs if run["status"] == "COMPLETED"])
|
||||
failed_runs = len([run for run in runs if run["status"] != "COMPLETED"])
|
||||
average_execution_time = (
|
||||
sum(execution_times) / len(execution_times) if execution_times else 0
|
||||
)
|
||||
# cost_breakdown = self.run_and_wait(
|
||||
# get_cost_breakdown(user_id, start_time, end_time)
|
||||
# )
|
||||
|
||||
cost_breakdown = {
|
||||
"agent1": 1.0,
|
||||
"agent2": 2.0,
|
||||
}
|
||||
|
||||
if event_type == NotificationType.DAILY_SUMMARY and isinstance(
|
||||
params, DailySummaryParams
|
||||
):
|
||||
return DailySummaryData(
|
||||
total_credits_used=total_credits_used,
|
||||
total_executions=total_executions,
|
||||
most_used_agent=most_used_agent["name"],
|
||||
total_execution_time=sum(execution_times),
|
||||
successful_runs=successful_runs,
|
||||
failed_runs=failed_runs,
|
||||
average_execution_time=average_execution_time,
|
||||
cost_breakdown=cost_breakdown,
|
||||
date=params.date,
|
||||
)
|
||||
elif event_type == NotificationType.WEEKLY_SUMMARY and isinstance(
|
||||
params, WeeklySummaryParams
|
||||
):
|
||||
return WeeklySummaryData(
|
||||
total_credits_used=total_credits_used,
|
||||
total_executions=total_executions,
|
||||
most_used_agent=most_used_agent["name"],
|
||||
total_execution_time=sum(execution_times),
|
||||
successful_runs=successful_runs,
|
||||
failed_runs=failed_runs,
|
||||
average_execution_time=average_execution_time,
|
||||
cost_breakdown=cost_breakdown,
|
||||
start_date=params.start_date,
|
||||
end_date=params.end_date,
|
||||
)
|
||||
else:
|
||||
raise ValueError("Invalid event type or params")
|
||||
|
||||
def _should_batch(
|
||||
self, user_id: str, event_type: NotificationType, event: NotificationEventModel
|
||||
) -> bool:
|
||||
|
||||
get_db().create_or_add_to_user_notification_batch(user_id, event_type, event)
|
||||
|
||||
oldest_message = get_db().get_user_notification_oldest_message_in_batch(
|
||||
user_id, event_type
|
||||
)
|
||||
if not oldest_message:
|
||||
logger.error(
|
||||
f"Batch for user {user_id} and type {event_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!"
|
||||
)
|
||||
return False
|
||||
oldest_age = oldest_message.created_at
|
||||
|
||||
max_delay = get_batch_delay(event_type)
|
||||
|
||||
if oldest_age + max_delay < datetime.now(tz=timezone.utc):
|
||||
logger.info(f"Batch for user {user_id} and type {event_type} is old enough")
|
||||
return True
|
||||
logger.info(
|
||||
f"Batch for user {user_id} and type {event_type} is not old enough: {oldest_age + max_delay} < {datetime.now(tz=timezone.utc)} max_delay={max_delay}"
|
||||
)
|
||||
return False
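The age-out rule above also drives process_existing_batches earlier in this file; as a minimal sketch, using only names already imported in this module (the helper name itself is ours), it boils down to a single predicate:

def batch_is_old_enough(
    oldest_created_at: datetime, event_type: NotificationType
) -> bool:
    # A batch is flushed once its oldest message has waited longer than the
    # per-type delay returned by get_batch_delay.
    return oldest_created_at + get_batch_delay(event_type) < datetime.now(tz=timezone.utc)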
def _parse_message(self, message: str) -> NotificationEvent | None:
|
||||
try:
|
||||
event = NotificationEventDTO.model_validate_json(message)
|
||||
model = NotificationEventModel[
|
||||
get_data_type(event.type)
|
||||
get_notif_data_type(event.type)
|
||||
].model_validate_json(message)
|
||||
return NotificationEvent(event=event, model=model)
|
||||
except Exception as e:
|
||||
@@ -175,7 +501,7 @@ class NotificationManager(AppService):
|
||||
self.email_sender.send_templated(event.type, recipient_email, model)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing notification: {e}")
|
||||
logger.exception(f"Error processing notification for admin queue: {e}")
|
||||
return False
|
||||
|
||||
def _process_immediate(self, message: str) -> bool:
|
||||
@@ -188,7 +514,7 @@ class NotificationManager(AppService):
|
||||
model = parsed.model
|
||||
logger.debug(f"Processing immediate notification: {model}")
|
||||
|
||||
recipient_email = self.run_and_wait(get_user_email_by_id(event.user_id))
|
||||
recipient_email = get_db().get_user_email_by_id(event.user_id)
|
||||
if not recipient_email:
|
||||
logger.error(f"User email not found for user {event.user_id}")
|
||||
return False
|
||||
@@ -202,10 +528,126 @@ class NotificationManager(AppService):
|
||||
)
|
||||
return True
|
||||
|
||||
self.email_sender.send_templated(event.type, recipient_email, model)
|
||||
unsub_link = generate_unsubscribe_link(event.user_id)
|
||||
|
||||
self.email_sender.send_templated(
|
||||
notification=event.type,
|
||||
user_email=recipient_email,
|
||||
data=model,
|
||||
user_unsub_link=unsub_link,
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing notification: {e}")
|
||||
logger.exception(f"Error processing notification for immediate queue: {e}")
|
||||
return False
|
||||
|
||||
def _process_batch(self, message: str) -> bool:
|
||||
"""Process a single notification with a batching strategy, returning whether to put into the failed queue"""
|
||||
try:
|
||||
parsed = self._parse_message(message)
|
||||
if not parsed:
|
||||
return False
|
||||
event = parsed.event
|
||||
model = parsed.model
|
||||
logger.info(f"Processing batch notification: {model}")
|
||||
|
||||
recipient_email = get_db().get_user_email_by_id(event.user_id)
|
||||
if not recipient_email:
|
||||
logger.error(f"User email not found for user {event.user_id}")
|
||||
return False
|
||||
|
||||
should_send = self._should_email_user_based_on_preference(
|
||||
event.user_id, event.type
|
||||
)
|
||||
if not should_send:
|
||||
logger.info(
|
||||
f"User {event.user_id} does not want to receive {event.type} notifications"
|
||||
)
|
||||
return True
|
||||
|
||||
should_send = self._should_batch(event.user_id, event.type, model)
|
||||
|
||||
if not should_send:
|
||||
logger.info("Batch not old enough to send")
|
||||
return False
|
||||
batch = get_db().get_user_notification_batch(event.user_id, event.type)
|
||||
if not batch or not batch.notifications:
|
||||
logger.error(f"Batch not found for user {event.user_id}")
|
||||
return False
|
||||
unsub_link = generate_unsubscribe_link(event.user_id)
|
||||
|
||||
batch_messages = [
|
||||
NotificationEventModel[
|
||||
get_notif_data_type(db_event.type)
|
||||
].model_validate(
|
||||
{
|
||||
"user_id": event.user_id,
|
||||
"type": db_event.type,
|
||||
"data": db_event.data,
|
||||
"created_at": db_event.created_at,
|
||||
}
|
||||
)
|
||||
for db_event in batch.notifications
|
||||
]
|
||||
|
||||
self.email_sender.send_templated(
|
||||
notification=event.type,
|
||||
user_email=recipient_email,
|
||||
data=batch_messages,
|
||||
user_unsub_link=unsub_link,
|
||||
)
|
||||
# only empty the batch if we sent the email successfully
|
||||
get_db().empty_user_notification_batch(event.user_id, event.type)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing notification for batch queue: {e}")
|
||||
return False
|
||||
|
||||
def _process_summary(self, message: str) -> bool:
|
||||
"""Process a single notification with a summary strategy, returning whether to put into the failed queue"""
|
||||
try:
|
||||
logger.info(f"Processing summary notification: {message}")
|
||||
event = SummaryParamsEventDTO.model_validate_json(message)
|
||||
model = SummaryParamsEventModel[
|
||||
get_summary_params_type(event.type)
|
||||
].model_validate_json(message)
|
||||
|
||||
logger.info(f"Processing summary notification: {model}")
|
||||
|
||||
recipient_email = get_db().get_user_email_by_id(event.user_id)
|
||||
if not recipient_email:
|
||||
logger.error(f"User email not found for user {event.user_id}")
|
||||
return False
|
||||
should_send = self._should_email_user_based_on_preference(
|
||||
event.user_id, event.type
|
||||
)
|
||||
if not should_send:
|
||||
logger.info(
|
||||
f"User {event.user_id} does not want to receive {event.type} notifications"
|
||||
)
|
||||
return True
|
||||
|
||||
summary_data = self._gather_summary_data(
|
||||
event.user_id, event.type, model.data
|
||||
)
|
||||
|
||||
unsub_link = generate_unsubscribe_link(event.user_id)
|
||||
|
||||
data = NotificationEventModel(
|
||||
user_id=event.user_id,
|
||||
type=event.type,
|
||||
data=summary_data,
|
||||
)
|
||||
|
||||
self.email_sender.send_templated(
|
||||
notification=event.type,
|
||||
user_email=recipient_email,
|
||||
data=data,
|
||||
user_unsub_link=unsub_link,
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.exception(f"Error processing notification for summary queue: {e}")
|
||||
return False
|
||||
|
||||
def _run_queue(
|
||||
@@ -240,15 +682,34 @@ class NotificationManager(AppService):
|
||||
def run_service(self):
|
||||
logger.info(f"[{self.service_name}] Started notification service")
|
||||
|
||||
# Set up scheduler for batch processing of all notification types
|
||||
# this can be changed later to spawn different cleanups on different schedules
|
||||
try:
|
||||
get_scheduler().add_batched_notification_schedule(
|
||||
notification_types=list(NotificationType),
|
||||
data={},
|
||||
cron="0 * * * *",
|
||||
)
|
||||
# get_scheduler().add_weekly_notification_schedule(
|
||||
# # weekly on Friday at 12pm
|
||||
# cron="0 12 * * 5",
|
||||
# )
|
||||
logger.info("Scheduled notification cleanup")
|
||||
except Exception as e:
|
||||
logger.error(f"Error scheduling notification cleanup: {e}")
|
||||
|
||||
# Set up queue consumers
|
||||
channel = self.run_and_wait(self.rabbit.get_channel())
|
||||
|
||||
immediate_queue = self.run_and_wait(
|
||||
channel.get_queue("immediate_notifications")
|
||||
)
|
||||
batch_queue = self.run_and_wait(channel.get_queue("batch_notifications"))
|
||||
|
||||
admin_queue = self.run_and_wait(channel.get_queue("admin_notifications"))
|
||||
|
||||
summary_queue = self.run_and_wait(channel.get_queue("summary_notifications"))
|
||||
|
||||
while self.running:
|
||||
try:
|
||||
self._run_queue(
|
||||
@@ -261,6 +722,17 @@ class NotificationManager(AppService):
|
||||
process_func=self._process_admin_message,
|
||||
error_queue_name="admin_notifications",
|
||||
)
|
||||
self._run_queue(
|
||||
queue=batch_queue,
|
||||
process_func=self._process_batch,
|
||||
error_queue_name="batch_notifications",
|
||||
)
|
||||
|
||||
self._run_queue(
|
||||
queue=summary_queue,
|
||||
process_func=self._process_summary,
|
||||
error_queue_name="summary_notifications",
|
||||
)
|
||||
|
||||
time.sleep(0.1)
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
{# Agent Run #}
{# Template variables:
notifications: in batch mode, a list of notification objects; each notification.data has the fields below
data.agent_name: the name of the agent
data.credits_used: the number of credits used by the agent
data.node_count: the number of nodes the agent ran on
@@ -7,90 +8,135 @@ data.execution_time: the time it took to run the agent
data.graph_id: the id of the graph the agent ran on
data.outputs: the list of outputs of the agent
#}
<p style="
|
||||
font-family: 'Poppins', sans-serif;
|
||||
color: #070629;
|
||||
font-size: 16px;
|
||||
line-height: 165%;
|
||||
margin-top: 0;
|
||||
margin-bottom: 10px;
|
||||
">
|
||||
Your agent, <strong>{{ data.agent_name }}</strong>, has completed its run!
|
||||
</p>
|
||||
<p style="
|
||||
font-family: 'Poppins', sans-serif;
|
||||
color: #070629;
|
||||
font-size: 16px;
|
||||
line-height: 165%;
|
||||
margin-top: 0;
|
||||
margin-bottom: 20px;
|
||||
padding-left: 20px;
|
||||
">
|
||||
<p style="margin-bottom: 10px;"><strong>Time Taken:</strong> {{ data.execution_time | int }} seconds</p>
|
||||
<p style="margin-bottom: 10px;"><strong>Nodes Used:</strong> {{ data.node_count }}</p>
|
||||
<p style="margin-bottom: 10px;"><strong>Cost:</strong> ${{ "{:.2f}".format((data.credits_used|float)/100) }}</p>
|
||||
</p>
|
||||
{% if data.outputs and data.outputs|length > 0 %}
|
||||
<div style="
|
||||
margin-left: 15px;
|
||||
margin-bottom: 20px;
|
||||
">
|
||||
<p style="
|
||||
font-family: 'Poppins', sans-serif;
|
||||
color: #070629;
|
||||
font-weight: 600;
|
||||
font-size: 16px;
|
||||
margin-bottom: 10px;
|
||||
">
|
||||
Results:
|
||||
</p>
|
||||
|
||||
{% for output in data.outputs %}
|
||||
<div style="
|
||||
margin-left: 15px;
|
||||
margin-bottom: 15px;
|
||||
">
|
||||
<p style="
|
||||
font-family: 'Poppins', sans-serif;
|
||||
color: #5D23BB;
|
||||
font-weight: 500;
|
||||
font-size: 16px;
|
||||
margin-top: 0;
|
||||
margin-bottom: 8px;
|
||||
">
|
||||
{{ output.name }}
|
||||
{% if notifications is defined %}
|
||||
{# BATCH MODE #}
|
||||
<div style="font-family: 'Poppins', sans-serif; color: #070629;">
|
||||
<h2 style="color: #5D23BB; margin-bottom: 15px;">Agent Run Summary</h2>
|
||||
<p style="font-size: 16px; line-height: 165%; margin-top: 0; margin-bottom: 15px;">
|
||||
<strong>{{ notifications|length }}</strong> agent runs have completed!
|
||||
</p>
|
||||
|
||||
{% for key, value in output.items() %}
|
||||
{% if key != 'name' %}
|
||||
<div style="
|
||||
margin-left: 15px;
|
||||
background-color: #f5f5ff;
|
||||
padding: 8px 12px;
|
||||
border-radius: 4px;
|
||||
font-family: 'Roboto Mono', monospace;
|
||||
white-space: pre-wrap;
|
||||
word-break: break-word;
|
||||
overflow-wrap: break-word;
|
||||
max-width: 100%;
|
||||
overflow-x: auto;
|
||||
margin-top: 5px;
|
||||
margin-bottom: 10px;
|
||||
line-height: 1.4;
|
||||
">
|
||||
{% if value is iterable and value is not string %}
|
||||
{% if value|length == 1 %}
|
||||
{{ value[0] }}
|
||||
{% else %}
|
||||
[{% for item in value %}{{ item }}{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{{ value }}
|
||||
{% endif %}
|
||||
</div>
|
||||
{# Calculate summary stats #}
|
||||
{% set total_time = 0 %}
|
||||
{% set total_nodes = 0 %}
|
||||
{% set total_credits = 0 %}
|
||||
{% set agent_names = [] %}
|
||||
|
||||
{% for notification in notifications %}
|
||||
{% set total_time = total_time + notification.data.execution_time %}
|
||||
{% set total_nodes = total_nodes + notification.data.node_count %}
|
||||
{% set total_credits = total_credits + notification.data.credits_used %}
|
||||
{% if notification.data.agent_name not in agent_names %}
|
||||
{% set agent_names = agent_names + [notification.data.agent_name] %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
<div style="background-color: #f8f7ff; border-radius: 8px; padding: 15px; margin-bottom: 25px;">
|
||||
<h3 style="margin-top: 0; margin-bottom: 10px; color: #5D23BB;">Summary</h3>
|
||||
<p style="margin: 5px 0;"><strong>Agents:</strong> {{ agent_names|join(", ") }}</p>
|
||||
<p style="margin: 5px 0;"><strong>Total Time:</strong> {{ total_time | int }} seconds</p>
|
||||
<p style="margin: 5px 0;"><strong>Total Nodes:</strong> {{ total_nodes }}</p>
|
||||
<p style="margin: 5px 0;"><strong>Total Cost:</strong> ${{ "{:.2f}".format((total_credits|float)/100) }}</p>
|
||||
</div>
|
||||
|
||||
<h3 style="margin-top: 25px; margin-bottom: 15px; color: #5D23BB;">Individual Runs</h3>
|
||||
|
||||
{% for notification in notifications %}
|
||||
<div style="margin-bottom: 30px; border-left: 3px solid #5D23BB; padding-left: 15px;">
|
||||
<p style="font-size: 16px; font-weight: 600; margin-top: 0; margin-bottom: 10px;">
|
||||
Agent: <strong>{{ notification.data.agent_name }}</strong>
|
||||
</p>
|
||||
|
||||
<div style="margin-left: 10px;">
|
||||
<p style="margin: 5px 0;"><strong>Time:</strong> {{ notification.data.execution_time | int }} seconds</p>
|
||||
<p style="margin: 5px 0;"><strong>Nodes:</strong> {{ notification.data.node_count }}</p>
|
||||
<p style="margin: 5px 0;"><strong>Cost:</strong> ${{ "{:.2f}".format((notification.data.credits_used|float)/100) }}</p>
|
||||
</div>
|
||||
|
||||
{% if notification.data.outputs and notification.data.outputs|length > 0 %}
|
||||
<div style="margin-left: 10px; margin-top: 15px;">
|
||||
<p style="font-weight: 600; margin-bottom: 10px;">Results:</p>
|
||||
|
||||
{% for output in notification.data.outputs %}
|
||||
<div style="margin-left: 10px; margin-bottom: 12px;">
|
||||
<p style="color: #5D23BB; font-weight: 500; margin-top: 0; margin-bottom: 5px;">
|
||||
{{ output.name }}
|
||||
</p>
|
||||
|
||||
{% for key, value in output.items() %}
|
||||
{% if key != 'name' %}
|
||||
<div style="margin-left: 10px; background-color: #f5f5ff; padding: 8px 12px; border-radius: 4px;
|
||||
font-family: 'Roboto Mono', monospace; white-space: pre-wrap; word-break: break-word;
|
||||
overflow-wrap: break-word; max-width: 100%; overflow-x: auto; margin-top: 3px;
|
||||
margin-bottom: 8px; line-height: 1.4;">
|
||||
{% if value is iterable and value is not string %}
|
||||
{% if value|length == 1 %}
|
||||
{{ value[0] }}
|
||||
{% else %}
|
||||
[{% for item in value %}{{ item }}{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{{ value }}
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
|
||||
{% else %}
|
||||
{# SINGLE NOTIFICATION MODE - Original template #}
|
||||
<p style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 16px; line-height: 165%;
|
||||
margin-top: 0; margin-bottom: 10px;">
|
||||
Your agent, <strong>{{ data.agent_name }}</strong>, has completed its run!
|
||||
</p>
|
||||
|
||||
<p style="font-family: 'Poppins', sans-serif; color: #070629; font-size: 16px; line-height: 165%;
|
||||
margin-top: 0; margin-bottom: 20px; padding-left: 20px;">
|
||||
<p style="margin-bottom: 10px;"><strong>Time Taken:</strong> {{ data.execution_time | int }} seconds</p>
|
||||
<p style="margin-bottom: 10px;"><strong>Nodes Used:</strong> {{ data.node_count }}</p>
|
||||
<p style="margin-bottom: 10px;"><strong>Cost:</strong> ${{ "{:.2f}".format((data.credits_used|float)/100) }}</p>
|
||||
</p>
|
||||
|
||||
{% if data.outputs and data.outputs|length > 0 %}
|
||||
<div style="margin-left: 15px; margin-bottom: 20px;">
|
||||
<p style="font-family: 'Poppins', sans-serif; color: #070629; font-weight: 600;
|
||||
font-size: 16px; margin-bottom: 10px;">
|
||||
Results:
|
||||
</p>
|
||||
|
||||
{% for output in data.outputs %}
|
||||
<div style="margin-left: 15px; margin-bottom: 15px;">
|
||||
<p style="font-family: 'Poppins', sans-serif; color: #5D23BB; font-weight: 500;
|
||||
font-size: 16px; margin-top: 0; margin-bottom: 8px;">
|
||||
{{ output.name }}
|
||||
</p>
|
||||
|
||||
{% for key, value in output.items() %}
|
||||
{% if key != 'name' %}
|
||||
<div style="margin-left: 15px; background-color: #f5f5ff; padding: 8px 12px; border-radius: 4px;
|
||||
font-family: 'Roboto Mono', monospace; white-space: pre-wrap; word-break: break-word;
|
||||
overflow-wrap: break-word; max-width: 100%; overflow-x: auto; margin-top: 5px;
|
||||
margin-bottom: 10px; line-height: 1.4;">
|
||||
{% if value is iterable and value is not string %}
|
||||
{% if value|length == 1 %}
|
||||
{{ value[0] }}
|
||||
{% else %}
|
||||
[{% for item in value %}{{ item }}{% if not loop.last %}, {% endif %}{% endfor %}]
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{{ value }}
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
@@ -0,0 +1,27 @@
{# Weekly Summary #}
{# Template variables:
data: the summary fields below
data.start_date: the start date of the summary
data.end_date: the end date of the summary
data.total_credits_used: the total credits used during the summary
data.total_executions: the total number of executions during the summary
data.most_used_agent: the most used agent's name during the summary
data.total_execution_time: the total execution time during the summary
data.successful_runs: the total number of successful runs during the summary
data.failed_runs: the total number of failed runs during the summary
data.average_execution_time: the average execution time during the summary
data.cost_breakdown: the cost breakdown during the summary
#}

<h1>Weekly Summary</h1>

<p>Start Date: {{ data.start_date }}</p>
<p>End Date: {{ data.end_date }}</p>
<p>Total Credits Used: {{ data.total_credits_used }}</p>
<p>Total Executions: {{ data.total_executions }}</p>
<p>Most Used Agent: {{ data.most_used_agent }}</p>
<p>Total Execution Time: {{ data.total_execution_time }}</p>
<p>Successful Runs: {{ data.successful_runs }}</p>
<p>Failed Runs: {{ data.failed_runs }}</p>
<p>Average Execution Time: {{ data.average_execution_time }}</p>
<p>Cost Breakdown: {{ data.cost_breakdown }}</p>
@@ -1,5 +1,5 @@
from backend.app import run_processes
from backend.executor import DatabaseManager, ExecutionScheduler
from backend.executor import DatabaseManager, Scheduler
from backend.notifications.notifications import NotificationManager
from backend.server.rest_api import AgentServer

@@ -11,7 +11,7 @@ def main():
run_processes(
NotificationManager(),
DatabaseManager(),
ExecutionScheduler(),
Scheduler(),
AgentServer(),
)
@@ -21,6 +21,7 @@ import backend.server.routers.v1
|
||||
import backend.server.v2.library.db
|
||||
import backend.server.v2.library.model
|
||||
import backend.server.v2.library.routes
|
||||
import backend.server.v2.otto.routes
|
||||
import backend.server.v2.postmark.postmark
|
||||
import backend.server.v2.store.model
|
||||
import backend.server.v2.store.routes
|
||||
@@ -68,8 +69,7 @@ docs_url = (
|
||||
app = fastapi.FastAPI(
|
||||
title="AutoGPT Agent Server",
|
||||
description=(
|
||||
"This server is used to execute agents that are created by the "
|
||||
"AutoGPT system."
|
||||
"This server is used to execute agents that are created by the AutoGPT system."
|
||||
),
|
||||
summary="AutoGPT Agent Server",
|
||||
version="0.1",
|
||||
@@ -102,6 +102,10 @@ app.include_router(
|
||||
app.include_router(
|
||||
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
|
||||
)
|
||||
app.include_router(
|
||||
backend.server.v2.otto.routes.router, tags=["v2"], prefix="/api/otto"
|
||||
)
|
||||
|
||||
app.include_router(
|
||||
backend.server.v2.postmark.postmark.router,
|
||||
tags=["v2", "email"],
|
||||
@@ -150,9 +154,10 @@ class AgentServer(backend.util.service.AppProcess):
|
||||
graph_id: str,
|
||||
graph_version: int,
|
||||
user_id: str,
|
||||
for_export: bool = False,
|
||||
):
|
||||
return await backend.server.routers.v1.get_graph(
|
||||
graph_id, user_id, graph_version
|
||||
graph_id, user_id, graph_version, for_export
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -38,18 +38,23 @@ from backend.data.credit import (
|
||||
TransactionHistory,
|
||||
get_auto_top_up,
|
||||
get_block_costs,
|
||||
get_stripe_customer_id,
|
||||
get_user_credit_model,
|
||||
set_auto_top_up,
|
||||
)
|
||||
from backend.data.notifications import NotificationPreference, NotificationPreferenceDTO
|
||||
from backend.data.onboarding import (
|
||||
UserOnboardingUpdate,
|
||||
get_recommended_agents,
|
||||
get_user_onboarding,
|
||||
update_user_onboarding,
|
||||
)
|
||||
from backend.data.user import (
|
||||
get_or_create_user,
|
||||
get_user_notification_preference,
|
||||
update_user_email,
|
||||
update_user_notification_preference,
|
||||
)
|
||||
from backend.executor import ExecutionManager, ExecutionScheduler, scheduler
|
||||
from backend.executor import ExecutionManager, Scheduler, scheduler
|
||||
from backend.integrations.creds_manager import IntegrationCredentialsManager
|
||||
from backend.integrations.webhooks.graph_lifecycle_hooks import (
|
||||
on_graph_activate,
|
||||
@@ -78,8 +83,8 @@ def execution_manager_client() -> ExecutionManager:
|
||||
|
||||
|
||||
@thread_cached
|
||||
def execution_scheduler_client() -> ExecutionScheduler:
|
||||
return get_service_client(ExecutionScheduler)
|
||||
def execution_scheduler_client() -> Scheduler:
|
||||
return get_service_client(Scheduler)
|
||||
|
||||
|
||||
settings = Settings()
|
||||
@@ -152,6 +157,38 @@ async def update_preferences(
|
||||
return output
|
||||
|
||||
|
||||
########################################################
|
||||
##################### Onboarding #######################
|
||||
########################################################
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
"/onboarding", tags=["onboarding"], dependencies=[Depends(auth_middleware)]
|
||||
)
|
||||
async def get_onboarding(user_id: Annotated[str, Depends(get_user_id)]):
|
||||
return await get_user_onboarding(user_id)
|
||||
|
||||
|
||||
@v1_router.patch(
|
||||
"/onboarding", tags=["onboarding"], dependencies=[Depends(auth_middleware)]
|
||||
)
|
||||
async def update_onboarding(
|
||||
user_id: Annotated[str, Depends(get_user_id)], data: UserOnboardingUpdate
|
||||
):
|
||||
return await update_user_onboarding(user_id, data)
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
"/onboarding/agents",
|
||||
tags=["onboarding"],
|
||||
dependencies=[Depends(auth_middleware)],
|
||||
)
|
||||
async def get_onboarding_agents(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
):
|
||||
return await get_recommended_agents(user_id)
|
||||
|
||||
|
||||
########################################################
|
||||
##################### Blocks ###########################
|
||||
########################################################
|
||||
@@ -303,15 +340,7 @@ async def stripe_webhook(request: Request):
|
||||
async def manage_payment_method(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> dict[str, str]:
|
||||
session = stripe.billing_portal.Session.create(
|
||||
customer=await get_stripe_customer_id(user_id),
|
||||
return_url=settings.config.frontend_base_url + "/profile/credits",
|
||||
)
|
||||
if not session:
|
||||
raise HTTPException(
|
||||
status_code=400, detail="Failed to create billing portal session"
|
||||
)
|
||||
return {"url": session.url}
|
||||
return {"url": await _user_credit_model.create_billing_portal_session(user_id)}
|
||||
|
||||
|
||||
@v1_router.get(path="/credits/transactions", dependencies=[Depends(auth_middleware)])
|
||||
@@ -367,10 +396,10 @@ async def get_graph(
|
||||
graph_id: str,
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
version: int | None = None,
|
||||
hide_credentials: bool = False,
|
||||
for_export: bool = False,
|
||||
) -> graph_db.GraphModel:
|
||||
graph = await graph_db.get_graph(
|
||||
graph_id, version, user_id=user_id, for_export=hide_credentials
|
||||
graph_id, version, user_id=user_id, for_export=for_export
|
||||
)
|
||||
if not graph:
|
||||
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
|
||||
@@ -400,6 +429,7 @@ async def create_new_graph(
|
||||
) -> graph_db.GraphModel:
|
||||
graph = graph_db.make_graph_model(create_graph.graph, user_id)
|
||||
graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
|
||||
graph.validate_graph(for_run=False)
|
||||
|
||||
graph = await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
@@ -451,17 +481,10 @@ async def update_graph(
|
||||
latest_version_number = max(g.version for g in existing_versions)
|
||||
graph.version = latest_version_number + 1
|
||||
|
||||
latest_version_graph = next(
|
||||
v for v in existing_versions if v.version == latest_version_number
|
||||
)
|
||||
current_active_version = next((v for v in existing_versions if v.is_active), None)
|
||||
if latest_version_graph.is_template != graph.is_template:
|
||||
raise HTTPException(
|
||||
400, detail="Changing is_template on an existing graph is forbidden"
|
||||
)
|
||||
graph.is_active = not graph.is_template
|
||||
graph = graph_db.make_graph_model(graph, user_id)
|
||||
graph.reassign_ids(user_id=user_id)
|
||||
graph.reassign_ids(user_id=user_id, reassign_graph_id=False)
|
||||
graph.validate_graph(for_run=False)
|
||||
|
||||
new_graph_version = await graph_db.create_graph(graph, user_id=user_id)
|
||||
|
||||
@@ -592,7 +615,7 @@ async def stop_graph_run(
|
||||
async def get_graphs_executions(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
) -> list[graph_db.GraphExecutionMeta]:
|
||||
return await graph_db.get_graphs_executions(user_id=user_id)
|
||||
return await graph_db.get_graph_executions(user_id=user_id)
|
||||
|
||||
|
||||
@v1_router.get(
|
||||
@@ -663,7 +686,7 @@ class ScheduleCreationRequest(pydantic.BaseModel):
|
||||
async def create_schedule(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
schedule: ScheduleCreationRequest,
|
||||
) -> scheduler.JobInfo:
|
||||
) -> scheduler.ExecutionJobInfo:
|
||||
graph = await graph_db.get_graph(
|
||||
schedule.graph_id, schedule.graph_version, user_id=user_id
|
||||
)
|
||||
@@ -705,7 +728,7 @@ def delete_schedule(
|
||||
def get_execution_schedules(
|
||||
user_id: Annotated[str, Depends(get_user_id)],
|
||||
graph_id: str | None = None,
|
||||
) -> list[scheduler.JobInfo]:
|
||||
) -> list[scheduler.ExecutionJobInfo]:
|
||||
return execution_scheduler_client().get_execution_schedules(
|
||||
user_id=user_id,
|
||||
graph_id=graph_id,
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
@@ -9,12 +8,13 @@ import prisma.models
|
||||
import prisma.types
|
||||
|
||||
import backend.data.graph
|
||||
import backend.data.includes
|
||||
import backend.server.model
|
||||
import backend.server.v2.library.model as library_model
|
||||
import backend.server.v2.store.exceptions as store_exceptions
|
||||
import backend.server.v2.store.image_gen as store_image_gen
|
||||
import backend.server.v2.store.media as store_media
|
||||
from backend.data.db import locked_transaction
|
||||
from backend.data.includes import library_agent_include
|
||||
from backend.util.settings import Config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -92,15 +92,7 @@ async def list_library_agents(
|
||||
try:
|
||||
library_agents = await prisma.models.LibraryAgent.prisma().find_many(
|
||||
where=where_clause,
|
||||
include={
|
||||
"Agent": {
|
||||
"include": {
|
||||
**backend.data.includes.AGENT_GRAPH_INCLUDE,
|
||||
"AgentGraphExecution": {"where": {"userId": user_id}},
|
||||
}
|
||||
},
|
||||
"Creator": True,
|
||||
},
|
||||
include=library_agent_include(user_id),
|
||||
order=order_by,
|
||||
skip=(page - 1) * page_size,
|
||||
take=page_size,
|
||||
@@ -112,10 +104,24 @@ async def list_library_agents(
|
||||
logger.debug(
|
||||
f"Retrieved {len(library_agents)} library agents for user #{user_id}"
|
||||
)
|
||||
|
||||
# Only pass valid agents to the response
|
||||
valid_library_agents: list[library_model.LibraryAgent] = []
|
||||
|
||||
for agent in library_agents:
|
||||
try:
|
||||
library_agent = library_model.LibraryAgent.from_db(agent)
|
||||
valid_library_agents.append(library_agent)
|
||||
except Exception as e:
|
||||
# Skip this agent if there was an error
|
||||
logger.error(
|
||||
f"Error parsing LibraryAgent when getting library agents from db: {e}"
|
||||
)
|
||||
continue
|
||||
|
||||
# Return the response with only valid agents
|
||||
return library_model.LibraryAgentResponse(
|
||||
agents=[
|
||||
library_model.LibraryAgent.from_db(agent) for agent in library_agents
|
||||
],
|
||||
agents=valid_library_agents,
|
||||
pagination=backend.server.model.Pagination(
total_items=agent_count,
total_pages=(agent_count + page_size - 1) // page_size,
@@ -151,15 +157,7 @@ async def get_library_agent(id: str, user_id: str) -> library_model.LibraryAgent
"userId": user_id,
"isDeleted": False,
},
include={
"Agent": {
"include": {
**backend.data.includes.AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
},
include=library_agent_include(user_id),
)

if not library_agent:
@@ -187,12 +185,7 @@ async def add_generated_agent_image(
try:
if not (image_url := await store_media.check_media_exists(user_id, filename)):
# Generate agent image as JPEG
if config.use_agent_image_generation_v2:
image = await asyncio.to_thread(
store_image_gen.generate_agent_image_v2, graph=graph
)
else:
image = await store_image_gen.generate_agent_image(agent=graph)
image = await store_image_gen.generate_agent_image(graph)

# Create UploadFile with the correct filename and content_type
image_file = fastapi.UploadFile(file=image, filename=filename)
@@ -378,51 +371,67 @@ async def add_store_agent_to_library(
)

try:
store_listing_version = (
await prisma.models.StoreListingVersion.prisma().find_unique(
where={"id": store_listing_version_id}, include={"Agent": True}
)
)
if not store_listing_version or not store_listing_version.Agent:
logger.warning(
f"Store listing version not found: {store_listing_version_id}"
)
raise store_exceptions.AgentNotFoundError(
f"Store listing version {store_listing_version_id} not found or invalid"
async with locked_transaction(f"user_trx_{user_id}"):
store_listing_version = (
await prisma.models.StoreListingVersion.prisma().find_unique(
where={"id": store_listing_version_id}, include={"Agent": True}
)
)
if not store_listing_version or not store_listing_version.Agent:
logger.warning(
f"Store listing version not found: {store_listing_version_id}"
)
raise store_exceptions.AgentNotFoundError(
f"Store listing version {store_listing_version_id} not found or invalid"
)

store_agent = store_listing_version.Agent
if store_agent.userId == user_id:
logger.warning(
f"User #{user_id} attempted to add their own agent to their library"
)
raise store_exceptions.DatabaseError("Cannot add own agent to library")
graph = store_listing_version.Agent
if graph.userId == user_id:
logger.warning(
f"User #{user_id} attempted to add their own agent to their library"
)
raise store_exceptions.DatabaseError("Cannot add own agent to library")

# Check if user already has this agent
existing_library_agent = await prisma.models.LibraryAgent.prisma().find_first(
where={
"userId": user_id,
"agentId": store_agent.id,
"agentVersion": store_agent.version,
}
)
if existing_library_agent:
# Check if user already has this agent
existing_library_agent = (
await prisma.models.LibraryAgent.prisma().find_first(
where={
"userId": user_id,
"agentId": graph.id,
"agentVersion": graph.version,
},
include=library_agent_include(user_id),
)
)
if existing_library_agent:
if existing_library_agent.isDeleted:
# Even if agent exists it needs to be marked as not deleted
await set_is_deleted_for_library_agent(
user_id, graph.id, graph.version, False
)
else:
logger.debug(
f"User #{user_id} already has graph #{graph.id} "
"in their library"
)
return library_model.LibraryAgent.from_db(existing_library_agent)

# Create LibraryAgent entry
added_agent = await prisma.models.LibraryAgent.prisma().create(
data={
"userId": user_id,
"agentId": graph.id,
"agentVersion": graph.version,
"isCreatedByUser": False,
},
include=library_agent_include(user_id),
)
logger.debug(
f"User #{user_id} already has agent #{store_agent.id} in their library"
f"Added graph #{graph.id} "
f"for store listing #{store_listing_version.id} "
f"to library for user #{user_id}"
)
return library_model.LibraryAgent.from_db(existing_library_agent)

# Create LibraryAgent entry
added_agent = await prisma.models.LibraryAgent.prisma().create(
data={
"userId": user_id,
"agentId": store_agent.id,
"agentVersion": store_agent.version,
"isCreatedByUser": False,
}
)
logger.debug(f"Added agent #{store_agent.id} to library for user #{user_id}")
return library_model.LibraryAgent.from_db(added_agent)
return library_model.LibraryAgent.from_db(added_agent)

except store_exceptions.AgentNotFoundError:
# Reraise for external handling.
@@ -432,6 +441,45 @@ async def add_store_agent_to_library(
raise store_exceptions.DatabaseError("Failed to add agent to library") from e


async def set_is_deleted_for_library_agent(
user_id: str, agent_id: str, agent_version: int, is_deleted: bool
) -> None:
"""
Changes the isDeleted flag for a library agent.

Args:
user_id: The user's library from which the agent is being removed.
agent_id: The ID of the agent to remove.
agent_version: The version of the agent to remove.
is_deleted: Whether the agent is being marked as deleted.

Raises:
DatabaseError: If there's an issue updating the Library
"""
logger.debug(
f"Setting isDeleted={is_deleted} for agent {agent_id} v{agent_version} "
f"in library for user {user_id}"
)
try:
logger.warning(
f"Setting isDeleted={is_deleted} for agent {agent_id} v{agent_version} in library for user {user_id}"
)
count = await prisma.models.LibraryAgent.prisma().update_many(
where={
"userId": user_id,
"agentId": agent_id,
"agentVersion": agent_version,
},
data={"isDeleted": is_deleted},
)
logger.warning(f"Updated {count} isDeleted library agents")
except prisma.errors.PrismaError as e:
logger.error(f"Database error setting agent isDeleted: {e}")
raise store_exceptions.DatabaseError(
"Failed to set agent isDeleted in library"
) from e


##############################################
########### Presets DB Functions #############
##############################################

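In the rewritten add_store_agent_to_library above, the lookup, un-delete, and create steps all run inside locked_transaction(f"user_trx_{user_id}"), so concurrent requests from the same user cannot race each other into duplicate LibraryAgent rows. Below is a minimal, self-contained sketch of that check-then-undelete-or-create pattern; the asyncio.Lock and in-memory dict are stand-ins for the project's advisory lock and Prisma table, and FakeLibrary is an illustrative name, not part of the codebase.

import asyncio
from dataclasses import dataclass, field


@dataclass
class FakeLibrary:
    rows: dict = field(default_factory=dict)   # (user_id, agent_id, version) -> row
    locks: dict = field(default_factory=dict)  # user_id -> asyncio.Lock

    def _lock(self, user_id: str) -> asyncio.Lock:
        return self.locks.setdefault(user_id, asyncio.Lock())

    async def add_agent(self, user_id: str, agent_id: str, version: int) -> dict:
        # Mirrors locked_transaction(f"user_trx_{user_id}") in spirit only.
        async with self._lock(user_id):
            key = (user_id, agent_id, version)
            row = self.rows.get(key)
            if row is not None:
                if row["isDeleted"]:
                    # Un-delete instead of inserting a duplicate.
                    row["isDeleted"] = False
                return row
            # Corresponds to LibraryAgent.prisma().create(...).
            row = {"userId": user_id, "agentId": agent_id,
                   "agentVersion": version, "isDeleted": False}
            self.rows[key] = row
            return row


async def _demo() -> None:
    lib = FakeLibrary()
    first = await lib.add_agent("user-1", "agent-1", 1)
    second = await lib.add_agent("user-1", "agent-1", 1)  # idempotent on repeat
    assert first is second


asyncio.run(_demo())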
@@ -3,20 +3,11 @@ from datetime import datetime
import prisma.errors
import prisma.models
import pytest
from prisma import Prisma

import backend.server.v2.library.db as db
import backend.server.v2.store.exceptions


@pytest.fixture(autouse=True)
async def setup_prisma():
# Don't register client if already registered
try:
Prisma()
except prisma.errors.ClientAlreadyRegisteredError:
pass
yield
from backend.data.db import connect
from backend.data.includes import library_agent_include


@pytest.mark.asyncio
@@ -31,7 +22,6 @@ async def test_get_library_agents(mocker):
userId="test-user",
isActive=True,
createdAt=datetime.now(),
isTemplate=False,
)
]

@@ -56,7 +46,6 @@ async def test_get_library_agents(mocker):
userId="other-user",
isActive=True,
createdAt=datetime.now(),
isTemplate=False,
),
)
]
@@ -91,10 +80,11 @@ async def test_get_library_agents(mocker):
assert result.pagination.page_size == 50


@pytest.mark.asyncio
@pytest.mark.asyncio(scope="session")
async def test_add_agent_to_library(mocker):
await connect()
# Mock data
mock_store_listing = prisma.models.StoreListingVersion(
mock_store_listing_data = prisma.models.StoreListingVersion(
id="version123",
version=1,
createdAt=datetime.now(),
@@ -119,21 +109,37 @@ async def test_add_agent_to_library(mocker):
userId="creator",
isActive=True,
createdAt=datetime.now(),
isTemplate=False,
),
)

mock_library_agent_data = prisma.models.LibraryAgent(
id="ua1",
userId="test-user",
agentId=mock_store_listing_data.agentId,
agentVersion=1,
isCreatedByUser=False,
isDeleted=False,
isArchived=False,
createdAt=datetime.now(),
updatedAt=datetime.now(),
isFavorite=False,
useGraphIsActiveVersion=True,
Agent=mock_store_listing_data.Agent,
)

# Mock prisma calls
mock_store_listing_version = mocker.patch(
"prisma.models.StoreListingVersion.prisma"
)
mock_store_listing_version.return_value.find_unique = mocker.AsyncMock(
return_value=mock_store_listing
return_value=mock_store_listing_data
)

mock_library_agent = mocker.patch("prisma.models.LibraryAgent.prisma")
mock_library_agent.return_value.find_first = mocker.AsyncMock(return_value=None)
mock_library_agent.return_value.create = mocker.AsyncMock()
mock_library_agent.return_value.create = mocker.AsyncMock(
return_value=mock_library_agent_data
)

# Call function
await db.add_store_agent_to_library("version123", "test-user")
@@ -147,17 +153,20 @@ async def test_add_agent_to_library(mocker):
"userId": "test-user",
"agentId": "agent1",
"agentVersion": 1,
}
},
include=library_agent_include("test-user"),
)
mock_library_agent.return_value.create.assert_called_once_with(
data=prisma.types.LibraryAgentCreateInput(
userId="test-user", agentId="agent1", agentVersion=1, isCreatedByUser=False
)
),
include=library_agent_include("test-user"),
)


@pytest.mark.asyncio
@pytest.mark.asyncio(scope="session")
async def test_add_agent_to_library_not_found(mocker):
await connect()
# Mock prisma calls
mock_store_listing_version = mocker.patch(
"prisma.models.StoreListingVersion.prisma"

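The db_test.py changes above drop the per-test Prisma() registration fixture in favour of awaiting connect() inside session-scoped asyncio tests, and they assert that find_first and create now receive the include built by library_agent_include(user_id). A hedged, self-contained sketch of that assertion style follows; the stand-in library_agent_include and add_agent defined here are illustrative only and do not mirror the repository's exact signatures.

import asyncio
from unittest import mock


def library_agent_include(user_id: str) -> dict:
    # Stand-in for backend.data.includes.library_agent_include.
    return {"Agent": {"include": {"AgentGraphExecution": {"where": {"userId": user_id}}}}}


async def add_agent(client, user_id: str, agent_id: str, version: int):
    # Toy version of the code under test: forwards the include to the client.
    return await client.find_first(
        where={"userId": user_id, "agentId": agent_id, "agentVersion": version},
        include=library_agent_include(user_id),
    )


async def test_forwards_library_agent_include() -> None:
    client = mock.AsyncMock()
    await add_agent(client, "test-user", "agent1", 1)
    client.find_first.assert_called_once_with(
        where={"userId": "test-user", "agentId": "agent1", "agentVersion": 1},
        include=library_agent_include("test-user"),
    )


if __name__ == "__main__":
    asyncio.run(test_forwards_library_agent_include())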
@@ -2,11 +2,14 @@ import datetime

import prisma.fields
import prisma.models
import pytest

import backend.server.v2.library.model as library_model
from backend.util import json


def test_agent_preset_from_db():
@pytest.mark.asyncio
async def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
@@ -24,7 +27,7 @@ def test_agent_preset_from_db():
id="input-123",
time=datetime.datetime.now(),
name="input1",
data=prisma.fields.Json({"type": "string", "value": "test value"}),
data=json.dumps({"type": "string", "value": "test value"}),  # type: ignore
)
],
)

@@ -106,7 +106,7 @@ async def add_marketplace_agent_to_library(
user_id: ID of the authenticated user.

Returns:
201 (Created) on success.
library_model.LibraryAgent: Agent added to the library

Raises:
HTTPException(404): If the listing version is not found.

@@ -1,7 +1,6 @@
import datetime

import autogpt_libs.auth as autogpt_auth_lib
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
@@ -30,49 +29,48 @@ app.dependency_overrides[autogpt_auth_lib.auth_middleware] = override_auth_middl
app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_user_id


def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = [
library_model.LibraryAgentResponse(
agents=[
library_model.LibraryAgent(
id="test-agent-1",
agent_id="test-agent-1",
agent_version=1,
name="Test Agent 1",
description="Test Description 1",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=True,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
library_model.LibraryAgent(
id="test-agent-2",
agent_id="test-agent-2",
agent_version=1,
name="Test Agent 2",
description="Test Description 2",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=False,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
pagination=server_model.Pagination(
total_items=2, total_pages=1, current_page=1, page_size=50
@pytest.mark.asyncio
async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = library_model.LibraryAgentResponse(
agents=[
library_model.LibraryAgent(
id="test-agent-1",
agent_id="test-agent-1",
agent_version=1,
name="Test Agent 1",
description="Test Description 1",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=True,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
library_model.LibraryAgent(
id="test-agent-2",
agent_id="test-agent-2",
agent_version=1,
name="Test Agent 2",
description="Test Description 2",
image_url=None,
creator_name="Test Creator",
creator_image_url="",
input_schema={"type": "object", "properties": {}},
status=library_model.LibraryAgentStatus.COMPLETED,
new_output=False,
can_access_graph=False,
is_latest_version=True,
updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
),
],
pagination=server_model.Pagination(
total_items=2, total_pages=1, current_page=1, page_size=50
),
]
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
)
mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
mock_db_call.return_value = mocked_value

response = client.get("/agents?search_term=test")
@@ -94,7 +92,7 @@ def test_get_library_agents_success(mocker: pytest_mock.MockFixture):


def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
mock_db_call.side_effect = Exception("Test error")

response = client.get("/agents?search_term=test")

34 autogpt_platform/backend/backend/server/v2/otto/models.py Normal file
@@ -0,0 +1,34 @@
from typing import Any, Dict, Optional

from pydantic import BaseModel


class Document(BaseModel):
url: str
relevance_score: float


class ApiResponse(BaseModel):
answer: str
documents: list[Document]
success: bool


class GraphData(BaseModel):
nodes: list[Dict[str, Any]]
edges: list[Dict[str, Any]]
graph_name: Optional[str] = None
graph_description: Optional[str] = None


class Message(BaseModel):
query: str
response: str


class ChatRequest(BaseModel):
query: str
conversation_history: list[Message]
message_id: str
include_graph_data: bool = False
graph_id: Optional[str] = None
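The models above define the contract the Otto proxy speaks. A short usage sketch follows, assuming the backend package is importable as laid out in the repository; the query text and IDs below are made up for illustration.

from backend.server.v2.otto.models import ChatRequest, Message

request = ChatRequest(
    query="How do I add a delay between two blocks?",
    conversation_history=[
        Message(query="What is a block?", response="A block is one step in an agent graph."),
    ],
    message_id="msg-123",
    include_graph_data=False,
)
# Shape of the JSON body that the service forwards to the /ask endpoint.
print(request.model_dump())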
26 autogpt_platform/backend/backend/server/v2/otto/routes.py Normal file
@@ -0,0 +1,26 @@
import logging

from autogpt_libs.auth.middleware import auth_middleware
from fastapi import APIRouter, Depends

from backend.server.utils import get_user_id

from .models import ApiResponse, ChatRequest
from .service import OttoService

logger = logging.getLogger(__name__)

router = APIRouter()


@router.post(
"/ask", response_model=ApiResponse, dependencies=[Depends(auth_middleware)]
)
async def proxy_otto_request(
request: ChatRequest, user_id: str = Depends(get_user_id)
) -> ApiResponse:
"""
Proxy requests to Otto API while adding necessary security headers and logging.
Requires an authenticated user.
"""
return await OttoService.ask(request, user_id)
138 autogpt_platform/backend/backend/server/v2/otto/service.py Normal file
@@ -0,0 +1,138 @@
import asyncio
import logging
from typing import Optional

import aiohttp
from fastapi import HTTPException

from backend.data import graph as graph_db
from backend.data.block import get_block
from backend.util.settings import Settings

from .models import ApiResponse, ChatRequest, GraphData

logger = logging.getLogger(__name__)
settings = Settings()

OTTO_API_URL = settings.config.otto_api_url


class OttoService:
@staticmethod
async def _fetch_graph_data(
request: ChatRequest, user_id: str
) -> Optional[GraphData]:
"""Fetch graph data if requested and available."""
if not (request.include_graph_data and request.graph_id):
return None

try:
graph = await graph_db.get_graph(request.graph_id, user_id=user_id)
if not graph:
return None

nodes_data = []
for node in graph.nodes:
block = get_block(node.block_id)
if not block:
continue

node_data = {
"id": node.id,
"block_id": node.block_id,
"block_name": block.name,
"block_type": (
block.block_type.value if hasattr(block, "block_type") else None
),
"data": {
k: v
for k, v in (node.input_default or {}).items()
if k not in ["credentials"]  # Exclude sensitive data
},
}
nodes_data.append(node_data)

# Create a GraphData object with the required fields
return GraphData(
nodes=nodes_data,
edges=[],
graph_name=graph.name,
graph_description=graph.description,
)
except Exception as e:
logger.error(f"Failed to fetch graph data: {str(e)}")
return None

@staticmethod
async def ask(request: ChatRequest, user_id: str) -> ApiResponse:
"""
Send request to Otto API and handle the response.
"""
# Check if Otto API URL is configured
if not OTTO_API_URL:
logger.error("Otto API URL is not configured")
raise HTTPException(
status_code=503, detail="Otto service is not configured"
)

try:
async with aiohttp.ClientSession() as session:
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}

# If graph data is requested, fetch it
graph_data = await OttoService._fetch_graph_data(request, user_id)

# Prepare the payload with optional graph data
payload = {
"query": request.query,
"conversation_history": [
msg.model_dump() for msg in request.conversation_history
],
"user_id": user_id,
"message_id": request.message_id,
}

if graph_data:
payload["graph_data"] = graph_data.model_dump()

logger.info(f"Sending request to Otto API for user {user_id}")
logger.debug(f"Request payload: {payload}")

async with session.post(
OTTO_API_URL,
json=payload,
headers=headers,
timeout=aiohttp.ClientTimeout(total=60),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Otto API error: {error_text}")
raise HTTPException(
status_code=response.status,
detail=f"Otto API request failed: {error_text}",
)

data = await response.json()
logger.info(
f"Successfully received response from Otto API for user {user_id}"
)
return ApiResponse(**data)

except aiohttp.ClientError as e:
logger.error(f"Connection error to Otto API: {str(e)}")
raise HTTPException(
status_code=503, detail="Failed to connect to Otto service"
)
except asyncio.TimeoutError:
logger.error("Timeout error connecting to Otto API after 60 seconds")
raise HTTPException(
status_code=504, detail="Request to Otto service timed out"
)
except Exception as e:
logger.error(f"Unexpected error in Otto API proxy: {str(e)}")
raise HTTPException(
status_code=500, detail="Internal server error in Otto proxy"
)
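One detail worth noting in _fetch_graph_data above is that node input defaults are filtered so credential values never leave the platform. Below is a standalone sketch of that sanitisation step, independent of the Prisma and graph types and using nothing beyond the standard library; the example node inputs are invented for illustration.

from typing import Any


def sanitize_node_inputs(input_default: dict[str, Any] | None) -> dict[str, Any]:
    """Drop credential-like keys before node data is forwarded to Otto."""
    return {k: v for k, v in (input_default or {}).items() if k != "credentials"}


node_inputs = {"prompt": "Summarise {{text}}", "credentials": {"api_key": "secret"}}
assert sanitize_node_inputs(node_inputs) == {"prompt": "Summarise {{text}}"}
assert sanitize_node_inputs(None) == {}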
@@ -2,9 +2,14 @@ import logging
from typing import Annotated

from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends
from fastapi import APIRouter, Body, Depends, Query
from fastapi.responses import JSONResponse

from backend.data.user import get_user_by_email, set_user_email_verification
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_user_by_token,
)
from backend.server.v2.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
@@ -23,13 +28,24 @@ postmark_validator = APIKeyValidator(
settings.secrets.postmark_webhook_token,
)

router = APIRouter(dependencies=[Depends(postmark_validator.get_dependency())])
router = APIRouter()


logger = logging.getLogger(__name__)


@router.post("/")
@router.post("/unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
logger.info(f"Received unsubscribe request from One Click Unsubscribe: {token}")
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.error(f"Failed to unsubscribe user by token {token}: {e}")
raise e
return JSONResponse(status_code=200, content={"status": "ok"})


@router.post("/", dependencies=[Depends(postmark_validator.get_dependency())])
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,

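The postmark router change above moves the APIKeyValidator dependency off the router and onto the webhook route only, so the new /unsubscribe endpoint can accept a bare token query parameter from one-click unsubscribe links. A minimal, self-contained illustration of that route shape follows, assuming fastapi is installed; the handler body is a stub and does not call the real unsubscribe_user_by_token.

from typing import Annotated

from fastapi import FastAPI, Query
from fastapi.responses import JSONResponse
from fastapi.testclient import TestClient

app = FastAPI()


@app.post("/unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
    # Stub: the real route resolves the token and flags the user as unsubscribed.
    return JSONResponse(status_code=200, content={"status": "ok"})


client = TestClient(app)
assert client.post("/unsubscribe?token=example-token").json() == {"status": "ok"}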
@@ -1,6 +1,5 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
import fastapi
|
||||
import prisma.enums
|
||||
@@ -84,20 +83,30 @@ async def get_store_agents(
|
||||
)
|
||||
total_pages = (total + page_size - 1) // page_size
|
||||
|
||||
store_agents = [
|
||||
backend.server.v2.store.model.StoreAgent(
|
||||
slug=agent.slug,
|
||||
agent_name=agent.agent_name,
|
||||
agent_image=agent.agent_image[0] if agent.agent_image else "",
|
||||
creator=agent.creator_username or "Needs Profile",
|
||||
creator_avatar=agent.creator_avatar or "",
|
||||
sub_heading=agent.sub_heading,
|
||||
description=agent.description,
|
||||
runs=agent.runs,
|
||||
rating=agent.rating,
|
||||
)
|
||||
for agent in agents
|
||||
]
|
||||
store_agents: list[backend.server.v2.store.model.StoreAgent] = []
|
||||
for agent in agents:
|
||||
try:
|
||||
# Create the StoreAgent object safely
|
||||
store_agent = backend.server.v2.store.model.StoreAgent(
|
||||
slug=agent.slug,
|
||||
agent_name=agent.agent_name,
|
||||
agent_image=agent.agent_image[0] if agent.agent_image else "",
|
||||
creator=agent.creator_username or "Needs Profile",
|
||||
creator_avatar=agent.creator_avatar or "",
|
||||
sub_heading=agent.sub_heading,
|
||||
description=agent.description,
|
||||
runs=agent.runs,
|
||||
rating=agent.rating,
|
||||
)
|
||||
# Add to the list only if creation was successful
|
||||
store_agents.append(store_agent)
|
||||
except Exception as e:
|
||||
# Skip this agent if there was an error
|
||||
# You could log the error here if needed
|
||||
logger.error(
|
||||
f"Error parsing Store agent when getting store agents from db: {e}"
|
||||
)
|
||||
continue
|
||||
|
||||
logger.debug(f"Found {len(store_agents)} agents")
|
||||
return backend.server.v2.store.model.StoreAgentsResponse(
|
||||
@@ -696,45 +705,35 @@ async def get_my_agents(
|
||||
logger.debug(f"Getting my agents for user {user_id}, page={page}")
|
||||
|
||||
try:
|
||||
agents_with_max_version = await prisma.models.AgentGraph.prisma().find_many(
|
||||
where=prisma.types.AgentGraphWhereInput(
|
||||
userId=user_id, StoreListing={"none": {"isDeleted": False}}
|
||||
),
|
||||
order=[{"version": "desc"}],
|
||||
distinct=["id"],
|
||||
search_filter: prisma.types.LibraryAgentWhereInput = {
|
||||
"userId": user_id,
|
||||
"Agent": {"is": {"StoreListing": {"none": {"isDeleted": False}}}},
|
||||
"isArchived": False,
|
||||
"isDeleted": False,
|
||||
}
|
||||
|
||||
library_agents = await prisma.models.LibraryAgent.prisma().find_many(
|
||||
where=search_filter,
|
||||
order=[{"agentVersion": "desc"}],
|
||||
skip=(page - 1) * page_size,
|
||||
take=page_size,
|
||||
include={"Agent": True},
|
||||
)
|
||||
|
||||
# store_listings = await prisma.models.StoreListing.prisma().find_many(
|
||||
# where=prisma.types.StoreListingWhereInput(
|
||||
# isDeleted=False,
|
||||
# ),
|
||||
# )
|
||||
|
||||
total = len(
|
||||
await prisma.models.AgentGraph.prisma().find_many(
|
||||
where=prisma.types.AgentGraphWhereInput(
|
||||
userId=user_id, StoreListing={"none": {"isDeleted": False}}
|
||||
),
|
||||
order=[{"version": "desc"}],
|
||||
distinct=["id"],
|
||||
)
|
||||
)
|
||||
|
||||
total = await prisma.models.LibraryAgent.prisma().count(where=search_filter)
|
||||
total_pages = (total + page_size - 1) // page_size
|
||||
|
||||
agents = agents_with_max_version
|
||||
|
||||
my_agents = [
|
||||
backend.server.v2.store.model.MyAgent(
|
||||
agent_id=agent.id,
|
||||
agent_version=agent.version,
|
||||
agent_name=agent.name or "",
|
||||
last_edited=agent.updatedAt or agent.createdAt,
|
||||
description=agent.description or "",
|
||||
agent_id=graph.id,
|
||||
agent_version=graph.version,
|
||||
agent_name=graph.name or "",
|
||||
last_edited=graph.updatedAt or graph.createdAt,
|
||||
description=graph.description or "",
|
||||
agent_image=library_agent.imageUrl,
|
||||
)
|
||||
for agent in agents
|
||||
for library_agent in library_agents
|
||||
if (graph := library_agent.Agent)
|
||||
]
|
||||
|
||||
return backend.server.v2.store.model.MyAgentsResponse(
|
||||
@@ -754,47 +753,31 @@ async def get_my_agents(
|
||||
|
||||
|
||||
async def get_agent(
|
||||
store_listing_version_id: str, version_id: Optional[int]
|
||||
user_id: str,
|
||||
store_listing_version_id: str,
|
||||
) -> GraphModel:
|
||||
"""Get agent using the version ID and store listing version ID."""
|
||||
try:
|
||||
store_listing_version = (
|
||||
await prisma.models.StoreListingVersion.prisma().find_unique(
|
||||
where={"id": store_listing_version_id}, include={"Agent": True}
|
||||
)
|
||||
store_listing_version = (
|
||||
await prisma.models.StoreListingVersion.prisma().find_unique(
|
||||
where={"id": store_listing_version_id}
|
||||
)
|
||||
)
|
||||
|
||||
if not store_listing_version:
|
||||
raise ValueError(f"Store listing version {store_listing_version_id} not found")
|
||||
|
||||
graph = await backend.data.graph.get_graph(
|
||||
user_id=user_id,
|
||||
graph_id=store_listing_version.agentId,
|
||||
version=store_listing_version.agentVersion,
|
||||
for_export=True,
|
||||
)
|
||||
if not graph:
|
||||
raise ValueError(
|
||||
f"Agent {store_listing_version.agentId} v{store_listing_version.agentVersion} not found"
|
||||
)
|
||||
|
||||
if not store_listing_version or not store_listing_version.Agent:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=404,
|
||||
detail=f"Store listing version {store_listing_version_id} not found",
|
||||
)
|
||||
|
||||
graph_id = store_listing_version.agentId
|
||||
graph_version = store_listing_version.agentVersion
|
||||
graph = await backend.data.graph.get_graph(graph_id, graph_version)
|
||||
|
||||
if not graph:
|
||||
raise fastapi.HTTPException(
|
||||
status_code=404,
|
||||
detail=(
|
||||
f"Agent #{graph_id} not found "
|
||||
f"for store listing version #{store_listing_version_id}"
|
||||
),
|
||||
)
|
||||
|
||||
graph.version = 1
|
||||
graph.is_template = False
|
||||
graph.is_active = True
|
||||
delattr(graph, "user_id")
|
||||
|
||||
return graph
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting agent: {e}")
|
||||
raise backend.server.v2.store.exceptions.DatabaseError(
|
||||
"Failed to fetch agent"
|
||||
) from e
|
||||
return graph
|
||||
|
||||
|
||||
async def review_store_submission(
|
||||
|
||||
@@ -146,7 +146,6 @@ async def test_create_store_submission(mocker):
|
||||
userId="user-id",
|
||||
createdAt=datetime.now(),
|
||||
isActive=True,
|
||||
isTemplate=False,
|
||||
)
|
||||
|
||||
mock_listing = prisma.models.StoreListing(
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import asyncio
|
||||
import io
|
||||
import logging
|
||||
from enum import Enum
|
||||
@@ -34,6 +35,13 @@ class ImageStyle(str, Enum):
|
||||
DIGITAL_ART = "digital art"
|
||||
|
||||
|
||||
async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO:
|
||||
if settings.config.use_agent_image_generation_v2:
|
||||
return await asyncio.to_thread(generate_agent_image_v2, graph=agent)
|
||||
else:
|
||||
return await generate_agent_image_v1(agent=agent)
|
||||
|
||||
|
||||
def generate_agent_image_v2(graph: Graph | AgentGraph) -> io.BytesIO:
|
||||
"""
|
||||
Generate an image for an agent using Ideogram model.
|
||||
@@ -76,7 +84,7 @@ def generate_agent_image_v2(graph: Graph | AgentGraph) -> io.BytesIO:
|
||||
),
|
||||
prompt=prompt,
|
||||
ideogram_model_name=IdeogramModelName.V2,
|
||||
aspect_ratio=AspectRatio.ASPECT_4_3,
|
||||
aspect_ratio=AspectRatio.ASPECT_16_9,
|
||||
magic_prompt_option=MagicPromptOption.OFF,
|
||||
style_type=StyleType.AUTO,
|
||||
upscale=UpscaleOption.NO_UPSCALE,
|
||||
@@ -91,7 +99,7 @@ def generate_agent_image_v2(graph: Graph | AgentGraph) -> io.BytesIO:
|
||||
return io.BytesIO(requests.get(url).content)
|
||||
|
||||
|
||||
async def generate_agent_image(agent: Graph | AgentGraph) -> io.BytesIO:
|
||||
async def generate_agent_image_v1(agent: Graph | AgentGraph) -> io.BytesIO:
|
||||
"""
|
||||
Generate an image for an agent using Flux model via Replicate API.
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ class MyAgent(pydantic.BaseModel):
|
||||
agent_id: str
|
||||
agent_version: int
|
||||
agent_name: str
|
||||
agent_image: str | None = None
|
||||
description: str
|
||||
last_edited: datetime.datetime
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import json
|
||||
import logging
|
||||
import tempfile
|
||||
import typing
|
||||
@@ -8,7 +7,6 @@ import autogpt_libs.auth.depends
|
||||
import autogpt_libs.auth.middleware
|
||||
import fastapi
|
||||
import fastapi.responses
|
||||
from fastapi.encoders import jsonable_encoder
|
||||
|
||||
import backend.data.block
|
||||
import backend.data.graph
|
||||
@@ -16,6 +14,7 @@ import backend.server.v2.store.db
|
||||
import backend.server.v2.store.image_gen
|
||||
import backend.server.v2.store.media
|
||||
import backend.server.v2.store.model
|
||||
import backend.util.json
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -591,19 +590,18 @@ async def generate_image(
|
||||
tags=["store", "public"],
|
||||
)
|
||||
async def download_agent_file(
|
||||
user_id: typing.Annotated[
|
||||
str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
|
||||
],
|
||||
store_listing_version_id: str = fastapi.Path(
|
||||
..., description="The ID of the agent to download"
|
||||
),
|
||||
version: typing.Optional[int] = fastapi.Query(
|
||||
None, description="Specific version of the agent"
|
||||
),
|
||||
) -> fastapi.responses.FileResponse:
|
||||
"""
|
||||
Download the agent file by streaming its content.
|
||||
|
||||
Args:
|
||||
agent_id (str): The ID of the agent to download.
|
||||
version (Optional[int]): Specific version of the agent to download.
|
||||
store_listing_version_id (str): The ID of the agent to download
|
||||
|
||||
Returns:
|
||||
StreamingResponse: A streaming response containing the agent's graph data.
|
||||
@@ -613,35 +611,16 @@ async def download_agent_file(
|
||||
"""
|
||||
|
||||
graph_data = await backend.server.v2.store.db.get_agent(
|
||||
store_listing_version_id=store_listing_version_id, version_id=version
|
||||
user_id=user_id,
|
||||
store_listing_version_id=store_listing_version_id,
|
||||
)
|
||||
|
||||
graph_data.clean_graph()
|
||||
graph_date_dict = jsonable_encoder(graph_data)
|
||||
|
||||
def remove_credentials(obj):
|
||||
if obj and isinstance(obj, dict):
|
||||
if "credentials" in obj:
|
||||
del obj["credentials"]
|
||||
if "creds" in obj:
|
||||
del obj["creds"]
|
||||
|
||||
for value in obj.values():
|
||||
remove_credentials(value)
|
||||
elif isinstance(obj, list):
|
||||
for item in obj:
|
||||
remove_credentials(item)
|
||||
return obj
|
||||
|
||||
graph_date_dict = remove_credentials(graph_date_dict)
|
||||
|
||||
file_name = f"agent_{store_listing_version_id}_v{version or 'latest'}.json"
|
||||
file_name = f"agent_{graph_data.id}_v{graph_data.version or 'latest'}.json"
|
||||
|
||||
# Sending graph as a stream (similar to marketplace v1)
|
||||
with tempfile.NamedTemporaryFile(
|
||||
mode="w", suffix=".json", delete=False
|
||||
) as tmp_file:
|
||||
tmp_file.write(json.dumps(graph_date_dict))
|
||||
tmp_file.write(backend.util.json.dumps(graph_data))
|
||||
tmp_file.flush()
|
||||
|
||||
return fastapi.responses.FileResponse(
|
||||
|
||||
@@ -56,6 +56,7 @@ config = Config()
|
||||
api_host = config.pyro_host
|
||||
api_comm_retry = config.pyro_client_comm_retry
|
||||
api_comm_timeout = config.pyro_client_comm_timeout
|
||||
api_call_timeout = config.rpc_client_call_timeout
|
||||
pyro_config.MAX_RETRIES = api_comm_retry # type: ignore
|
||||
pyro_config.COMMTIMEOUT = api_comm_timeout # type: ignore
|
||||
|
||||
@@ -264,7 +265,11 @@ class FastApiAppService(BaseAppService, ABC):
|
||||
def _handle_internal_http_error(status_code: int = 500, log_error: bool = True):
|
||||
def handler(request: Request, exc: Exception):
|
||||
if log_error:
|
||||
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
|
||||
if status_code == 500:
|
||||
log = logger.exception
|
||||
else:
|
||||
log = logger.error
|
||||
log(f"{request.method} {request.url.path} failed: {exc}")
|
||||
return responses.JSONResponse(
|
||||
status_code=status_code,
|
||||
content=RemoteCallError(
|
||||
@@ -429,7 +434,10 @@ def fastapi_close_service_client(client: Any) -> None:
|
||||
|
||||
|
||||
@conn_retry("FastAPI client", "Creating service client", max_retry=api_comm_retry)
|
||||
def fastapi_get_service_client(service_type: Type[AS]) -> AS:
|
||||
def fastapi_get_service_client(
|
||||
service_type: Type[AS],
|
||||
call_timeout: int | None = api_call_timeout,
|
||||
) -> AS:
|
||||
class DynamicClient:
|
||||
def __init__(self):
|
||||
host = service_type.get_host()
|
||||
@@ -437,7 +445,7 @@ def fastapi_get_service_client(service_type: Type[AS]) -> AS:
|
||||
self.base_url = f"http://{host}:{port}".rstrip("/")
|
||||
self.client = httpx.Client(
|
||||
base_url=self.base_url,
|
||||
timeout=api_comm_timeout,
|
||||
timeout=call_timeout,
|
||||
)
|
||||
|
||||
def _call_method(self, method_name: str, **kwargs) -> Any:
|
||||
@@ -447,7 +455,7 @@ def fastapi_get_service_client(service_type: Type[AS]) -> AS:
|
||||
return response.json()
|
||||
except httpx.HTTPStatusError as e:
|
||||
logger.error(f"HTTP error in {method_name}: {e.response.text}")
|
||||
error = RemoteCallError.model_validate(e.response.json(), strict=False)
|
||||
error = RemoteCallError.model_validate(e.response.json())
|
||||
# DEBUG HELP: if you made a custom exception, make sure you override self.args to be how to make your exception
|
||||
raise EXCEPTION_MAPPING.get(error.type, Exception)(
|
||||
*(error.args or [str(e)])
|
||||
|
||||
@@ -81,6 +81,10 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
|
||||
default=3,
|
||||
description="The default number of retries for Pyro client connections.",
|
||||
)
|
||||
rpc_client_call_timeout: int = Field(
|
||||
default=300,
|
||||
description="The default timeout in seconds, for RPC client calls.",
|
||||
)
|
||||
enable_auth: bool = Field(
|
||||
default=True,
|
||||
description="If authentication is enabled or not",
|
||||
@@ -160,6 +164,11 @@ class Config(UpdateTrackingModel["Config"], BaseSettings):
|
||||
description="The port for notification service daemon to run on",
|
||||
)
|
||||
|
||||
otto_api_url: str = Field(
|
||||
default="",
|
||||
description="The URL for the Otto API service",
|
||||
)
|
||||
|
||||
platform_base_url: str = Field(
|
||||
default="",
|
||||
description="Must be set so the application knows where it's hosted at. "
|
||||
@@ -316,6 +325,11 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
|
||||
description="The token to use for the Postmark webhook",
|
||||
)
|
||||
|
||||
unsubscribe_secret_key: str = Field(
|
||||
default="",
|
||||
description="The secret key to use for the unsubscribe user by token",
|
||||
)
|
||||
|
||||
# OAuth server credentials for integrations
|
||||
# --8<-- [start:OAuthServerCredentialsExample]
|
||||
github_client_id: str = Field(default="", description="GitHub OAuth client ID")
|
||||
@@ -390,6 +404,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
|
||||
smartlead_api_key: str = Field(default="", description="SmartLead API Key")
|
||||
zerobounce_api_key: str = Field(default="", description="ZeroBounce API Key")
|
||||
|
||||
example_api_key: str = Field(default="", description="Example API Key")
|
||||
# Add more secret fields as needed
|
||||
|
||||
model_config = SettingsConfigDict(
|
||||
|
||||
@@ -8,7 +8,7 @@ from backend.data.block import Block, BlockSchema, initialize_blocks
|
||||
from backend.data.execution import ExecutionResult, ExecutionStatus
|
||||
from backend.data.model import _BaseCredentials
|
||||
from backend.data.user import create_default_user
|
||||
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
|
||||
from backend.executor import DatabaseManager, ExecutionManager, Scheduler
|
||||
from backend.notifications.notifications import NotificationManager
|
||||
from backend.server.rest_api import AgentServer
|
||||
from backend.server.utils import get_user_id
|
||||
@@ -21,7 +21,7 @@ class SpinTestServer:
|
||||
self.db_api = DatabaseManager()
|
||||
self.exec_manager = ExecutionManager()
|
||||
self.agent_server = AgentServer()
|
||||
self.scheduler = ExecutionScheduler()
|
||||
self.scheduler = Scheduler()
|
||||
self.notif_manager = NotificationManager()
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
-- Create UserOnboarding table
|
||||
CREATE TABLE "UserOnboarding" (
|
||||
"id" TEXT NOT NULL,
|
||||
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
"step" INTEGER NOT NULL DEFAULT 0,
|
||||
"usageReason" TEXT,
|
||||
"integrations" TEXT[] DEFAULT ARRAY[]::TEXT[],
|
||||
"otherIntegrations" TEXT,
|
||||
"selectedAgentCreator" TEXT,
|
||||
"selectedAgentSlug" TEXT,
|
||||
"agentInput" JSONB,
|
||||
"isCompleted" BOOLEAN NOT NULL DEFAULT false,
|
||||
"userId" TEXT NOT NULL,
|
||||
|
||||
CONSTRAINT "UserOnboarding_pkey" PRIMARY KEY ("id")
|
||||
);
|
||||
|
||||
-- Create unique constraint on userId
|
||||
ALTER TABLE "UserOnboarding" ADD CONSTRAINT "UserOnboarding_userId_key" UNIQUE ("userId");
|
||||
|
||||
-- Create index on userId
|
||||
CREATE INDEX "UserOnboarding_userId_idx" ON "UserOnboarding"("userId");
|
||||
|
||||
-- Add foreign key constraint
|
||||
ALTER TABLE "UserOnboarding" ADD CONSTRAINT "UserOnboarding_userId_fkey"
|
||||
FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE CASCADE ON UPDATE CASCADE;
|
||||
@@ -0,0 +1,11 @@
|
||||
-- DropIndex
|
||||
DROP INDEX "APIKey_userId_idx";
|
||||
|
||||
-- DropIndex
|
||||
DROP INDEX "StoreListing_agentId_owningUserId_idx";
|
||||
|
||||
-- DropIndex
|
||||
DROP INDEX "StoreListing_isDeleted_idx";
|
||||
|
||||
-- DropIndex
|
||||
DROP INDEX "StoreListingVersion_agentId_agentVersion_isDeleted_idx";
|
||||
@@ -0,0 +1,8 @@
|
||||
/*
|
||||
Warnings:
|
||||
|
||||
- You are about to drop the column `isTemplate` on the `AgentGraph` table. All the data in the column will be lost.
|
||||
|
||||
*/
|
||||
-- AlterTable
|
||||
ALTER TABLE "AgentGraph" DROP COLUMN "isTemplate";
|
||||
@@ -1,4 +1,3 @@
|
||||
// THIS FILE IS AUTO-GENERATED, RUN `poetry run schema` TO UPDATE
|
||||
datasource db {
|
||||
provider = "postgresql"
|
||||
url = env("DATABASE_URL")
|
||||
@@ -46,6 +45,7 @@ model User {
|
||||
LibraryAgent LibraryAgent[]
|
||||
|
||||
Profile Profile[]
|
||||
UserOnboarding UserOnboarding?
|
||||
StoreListing StoreListing[]
|
||||
StoreListingReview StoreListingReview[]
|
||||
StoreListingSubmission StoreListingSubmission[]
|
||||
@@ -57,6 +57,25 @@ model User {
|
||||
@@index([email])
|
||||
}
|
||||
|
||||
model UserOnboarding {
|
||||
id String @id @default(uuid())
|
||||
createdAt DateTime @default(now())
|
||||
|
||||
step Int @default(0)
|
||||
usageReason String?
|
||||
integrations String[] @default([])
|
||||
otherIntegrations String?
|
||||
selectedAgentCreator String?
|
||||
selectedAgentSlug String?
|
||||
agentInput Json?
|
||||
isCompleted Boolean @default(false)
|
||||
|
||||
userId String @unique
|
||||
user User @relation(fields: [userId], references: [id], onDelete: Cascade)
|
||||
|
||||
@@index([userId])
|
||||
}
|
||||
|
||||
// This model describes the Agent Graph/Flow (Multi Agent System).
|
||||
model AgentGraph {
|
||||
id String @default(uuid())
|
||||
@@ -68,7 +87,6 @@ model AgentGraph {
|
||||
description String?
|
||||
|
||||
isActive Boolean @default(true)
|
||||
isTemplate Boolean @default(false)
|
||||
|
||||
// Link to User model
|
||||
userId String
|
||||
@@ -600,11 +618,9 @@ model StoreListing {
|
||||
|
||||
// Unique index on agentId to ensure only one listing per agent, regardless of number of versions the agent has.
|
||||
@@unique([agentId])
|
||||
@@index([agentId, owningUserId])
|
||||
@@index([owningUserId])
|
||||
// Used in the view query
|
||||
@@index([isDeleted, isApproved])
|
||||
@@index([isDeleted])
|
||||
}
|
||||
|
||||
model StoreListingVersion {
|
||||
@@ -645,7 +661,6 @@ model StoreListingVersion {
|
||||
StoreListingReview StoreListingReview[]
|
||||
|
||||
@@unique([agentId, agentVersion])
|
||||
@@index([agentId, agentVersion, isDeleted])
|
||||
}
|
||||
|
||||
model StoreListingReview {
|
||||
@@ -721,7 +736,6 @@ model APIKey {
|
||||
|
||||
@@index([key])
|
||||
@@index([prefix])
|
||||
@@index([userId])
|
||||
@@index([status])
|
||||
@@index([userId, status])
|
||||
}
|
||||
|
||||
@@ -199,7 +199,9 @@ async def test_clean_graph(server: SpinTestServer):
|
||||
)
|
||||
|
||||
# Clean the graph
|
||||
created_graph.clean_graph()
|
||||
created_graph = await server.agent_server.test_get_graph(
|
||||
created_graph.id, created_graph.version, DEFAULT_USER_ID, for_export=True
|
||||
)
|
||||
|
||||
# # Verify input block value is cleared
|
||||
input_node = next(
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import pytest
|
||||
|
||||
from backend.data import db
|
||||
from backend.executor import ExecutionScheduler
|
||||
from backend.executor import Scheduler
|
||||
from backend.server.model import CreateGraph
|
||||
from backend.usecases.sample import create_test_graph, create_test_user
|
||||
from backend.util.service import get_service_client
|
||||
@@ -17,7 +17,7 @@ async def test_agent_schedule(server: SpinTestServer):
|
||||
user_id=test_user.id,
|
||||
)
|
||||
|
||||
scheduler = get_service_client(ExecutionScheduler)
|
||||
scheduler = get_service_client(Scheduler)
|
||||
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
|
||||
assert len(schedules) == 0
|
||||
|
||||
|
||||
@@ -91,7 +91,6 @@ async def main():
|
||||
"description": faker.text(max_nb_chars=200),
|
||||
"userId": user.id,
|
||||
"isActive": True,
|
||||
"isTemplate": False,
|
||||
}
|
||||
)
|
||||
agent_graphs.append(graph)
|
||||
|
||||
123
autogpt_platform/db/docker/.env.example
Normal file
123
autogpt_platform/db/docker/.env.example
Normal file
@@ -0,0 +1,123 @@
|
||||
############
|
||||
# Secrets
|
||||
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
|
||||
############
|
||||
|
||||
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
|
||||
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
|
||||
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
|
||||
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
|
||||
DASHBOARD_USERNAME=supabase
|
||||
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
|
||||
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
||||
VAULT_ENC_KEY=your-encryption-key-32-chars-min
|
||||
|
||||
|
||||
############
|
||||
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
|
||||
############
|
||||
|
||||
POSTGRES_HOST=db
|
||||
POSTGRES_DB=postgres
|
||||
POSTGRES_PORT=5432
|
||||
# default user is postgres
|
||||
|
||||
|
||||
############
|
||||
# Supavisor -- Database pooler
|
||||
############
|
||||
POOLER_PROXY_PORT_TRANSACTION=6543
|
||||
POOLER_DEFAULT_POOL_SIZE=20
|
||||
POOLER_MAX_CLIENT_CONN=100
|
||||
POOLER_TENANT_ID=your-tenant-id
|
||||
|
||||
|
||||
############
|
||||
# API Proxy - Configuration for the Kong Reverse proxy.
|
||||
############
|
||||
|
||||
KONG_HTTP_PORT=8000
|
||||
KONG_HTTPS_PORT=8443
|
||||
|
||||
|
||||
############
|
||||
# API - Configuration for PostgREST.
|
||||
############
|
||||
|
||||
PGRST_DB_SCHEMAS=public,storage,graphql_public
|
||||
|
||||
|
||||
############
|
||||
# Auth - Configuration for the GoTrue authentication server.
|
||||
############
|
||||
|
||||
## General
|
||||
SITE_URL=http://localhost:3000
|
||||
ADDITIONAL_REDIRECT_URLS=
|
||||
JWT_EXPIRY=3600
|
||||
DISABLE_SIGNUP=false
|
||||
API_EXTERNAL_URL=http://localhost:8000
|
||||
|
||||
## Mailer Config
|
||||
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
|
||||
MAILER_URLPATHS_INVITE="/auth/v1/verify"
|
||||
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
|
||||
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
|
||||
|
||||
## Email auth
|
||||
ENABLE_EMAIL_SIGNUP=true
|
||||
ENABLE_EMAIL_AUTOCONFIRM=false
|
||||
SMTP_ADMIN_EMAIL=admin@example.com
|
||||
SMTP_HOST=supabase-mail
|
||||
SMTP_PORT=2500
|
||||
SMTP_USER=fake_mail_user
|
||||
SMTP_PASS=fake_mail_password
|
||||
SMTP_SENDER_NAME=fake_sender
|
||||
ENABLE_ANONYMOUS_USERS=false
|
||||
|
||||
## Phone auth
|
||||
ENABLE_PHONE_SIGNUP=true
|
||||
ENABLE_PHONE_AUTOCONFIRM=true
|
||||
|
||||
|
||||
############
|
||||
# Studio - Configuration for the Dashboard
|
||||
############
|
||||
|
||||
STUDIO_DEFAULT_ORGANIZATION=Default Organization
|
||||
STUDIO_DEFAULT_PROJECT=Default Project
|
||||
|
||||
STUDIO_PORT=3000
|
||||
# replace if you intend to use Studio outside of localhost
|
||||
SUPABASE_PUBLIC_URL=http://localhost:8000
|
||||
|
||||
# Enable webp support
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION=true
|
||||
|
||||
# Add your OpenAI API key to enable SQL Editor Assistant
|
||||
OPENAI_API_KEY=
|
||||
|
||||
|
||||
############
|
||||
# Functions - Configuration for Functions
|
||||
############
|
||||
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
|
||||
FUNCTIONS_VERIFY_JWT=false
|
||||
|
||||
|
||||
############
|
||||
# Logs - Configuration for Logflare
|
||||
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
|
||||
############
|
||||
|
||||
LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Change vector.toml sinks to reflect this change
|
||||
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
|
||||
|
||||
# Docker socket location - this value will differ depending on your OS
|
||||
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
|
||||
|
||||
# Google Cloud Project details
|
||||
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
|
||||
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
|
||||
5
autogpt_platform/db/docker/.gitignore
vendored
Normal file
5
autogpt_platform/db/docker/.gitignore
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
volumes/db/data
|
||||
volumes/storage
|
||||
.env
|
||||
test.http
|
||||
docker-compose.override.yml
|
||||
3
autogpt_platform/db/docker/README.md
Normal file
3
autogpt_platform/db/docker/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Supabase Docker
|
||||
|
||||
This is a minimal Docker Compose setup for self-hosting Supabase. Follow the steps [here](https://supabase.com/docs/guides/hosting/docker) to get started.
|
||||
48
autogpt_platform/db/docker/dev/data.sql
Normal file
48
autogpt_platform/db/docker/dev/data.sql
Normal file
@@ -0,0 +1,48 @@
|
||||
create table profiles (
|
||||
id uuid references auth.users not null,
|
||||
updated_at timestamp with time zone,
|
||||
username text unique,
|
||||
avatar_url text,
|
||||
website text,
|
||||
|
||||
primary key (id),
|
||||
unique(username),
|
||||
constraint username_length check (char_length(username) >= 3)
|
||||
);
|
||||
|
||||
alter table profiles enable row level security;
|
||||
|
||||
create policy "Public profiles are viewable by the owner."
|
||||
on profiles for select
|
||||
using ( auth.uid() = id );
|
||||
|
||||
create policy "Users can insert their own profile."
|
||||
on profiles for insert
|
||||
with check ( auth.uid() = id );
|
||||
|
||||
create policy "Users can update own profile."
|
||||
on profiles for update
|
||||
using ( auth.uid() = id );
|
||||
|
||||
-- Set up Realtime
|
||||
begin;
|
||||
drop publication if exists supabase_realtime;
|
||||
create publication supabase_realtime;
|
||||
commit;
|
||||
alter publication supabase_realtime add table profiles;
|
||||
|
||||
-- Set up Storage
|
||||
insert into storage.buckets (id, name)
|
||||
values ('avatars', 'avatars');
|
||||
|
||||
create policy "Avatar images are publicly accessible."
|
||||
on storage.objects for select
|
||||
using ( bucket_id = 'avatars' );
|
||||
|
||||
create policy "Anyone can upload an avatar."
|
||||
on storage.objects for insert
|
||||
with check ( bucket_id = 'avatars' );
|
||||
|
||||
create policy "Anyone can update an avatar."
|
||||
on storage.objects for update
|
||||
with check ( bucket_id = 'avatars' );
|
||||
34
autogpt_platform/db/docker/dev/docker-compose.dev.yml
Normal file
34
autogpt_platform/db/docker/dev/docker-compose.dev.yml
Normal file
@@ -0,0 +1,34 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
studio:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: studio/Dockerfile
|
||||
target: dev
|
||||
ports:
|
||||
- 8082:8082
|
||||
mail:
|
||||
container_name: supabase-mail
|
||||
image: inbucket/inbucket:3.0.3
|
||||
ports:
|
||||
- '2500:2500' # SMTP
|
||||
- '9000:9000' # web interface
|
||||
- '1100:1100' # POP3
|
||||
auth:
|
||||
environment:
|
||||
- GOTRUE_SMTP_USER=
|
||||
- GOTRUE_SMTP_PASS=
|
||||
meta:
|
||||
ports:
|
||||
- 5555:8080
|
||||
db:
|
||||
restart: 'no'
|
||||
volumes:
|
||||
# Always use a fresh database when developing
|
||||
- /var/lib/postgresql/data
|
||||
# Seed data should be inserted last (alphabetical order)
|
||||
- ./dev/data.sql:/docker-entrypoint-initdb.d/seed.sql
|
||||
storage:
|
||||
volumes:
|
||||
- /var/lib/storage
|
||||
94
autogpt_platform/db/docker/docker-compose.s3.yml
Normal file
94
autogpt_platform/db/docker/docker-compose.s3.yml
Normal file
@@ -0,0 +1,94 @@
|
||||
services:
|
||||
|
||||
minio:
|
||||
image: minio/minio
|
||||
ports:
|
||||
- '9000:9000'
|
||||
- '9001:9001'
|
||||
environment:
|
||||
MINIO_ROOT_USER: supa-storage
|
||||
MINIO_ROOT_PASSWORD: secret1234
|
||||
command: server --console-address ":9001" /data
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "-f", "http://minio:9000/minio/health/live" ]
|
||||
interval: 2s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
volumes:
|
||||
- ./volumes/storage:/data:z
|
||||
|
||||
minio-createbucket:
|
||||
image: minio/mc
|
||||
depends_on:
|
||||
minio:
|
||||
condition: service_healthy
|
||||
entrypoint: >
|
||||
/bin/sh -c "
|
||||
/usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234;
|
||||
/usr/bin/mc mb supa-minio/stub;
|
||||
exit 0;
|
||||
"
|
||||
|
||||
storage:
|
||||
container_name: supabase-storage
|
||||
image: supabase/storage-api:v1.11.13
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
rest:
|
||||
condition: service_started
|
||||
imgproxy:
|
||||
condition: service_started
|
||||
minio:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:5000/status"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
ANON_KEY: ${ANON_KEY}
|
||||
SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
POSTGREST_URL: http://rest:3000
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
FILE_SIZE_LIMIT: 52428800
|
||||
STORAGE_BACKEND: s3
|
||||
GLOBAL_S3_BUCKET: stub
|
||||
GLOBAL_S3_ENDPOINT: http://minio:9000
|
||||
GLOBAL_S3_PROTOCOL: http
|
||||
GLOBAL_S3_FORCE_PATH_STYLE: true
|
||||
AWS_ACCESS_KEY_ID: supa-storage
|
||||
AWS_SECRET_ACCESS_KEY: secret1234
|
||||
AWS_DEFAULT_REGION: stub
|
||||
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
|
||||
TENANT_ID: stub
|
||||
# TODO: https://github.com/supabase/storage-api/issues/55
|
||||
REGION: stub
|
||||
ENABLE_IMAGE_TRANSFORMATION: "true"
|
||||
IMGPROXY_URL: http://imgproxy:5001
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
|
||||
imgproxy:
|
||||
container_name: supabase-imgproxy
|
||||
image: darthsim/imgproxy:v3.8.0
|
||||
healthcheck:
|
||||
test: [ "CMD", "imgproxy", "health" ]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
environment:
|
||||
IMGPROXY_BIND: ":5001"
|
||||
IMGPROXY_USE_ETAG: "true"
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
|
||||
526
autogpt_platform/db/docker/docker-compose.yml
Normal file
526
autogpt_platform/db/docker/docker-compose.yml
Normal file
@@ -0,0 +1,526 @@
|
||||
# Usage
|
||||
# Start: docker compose up
|
||||
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
|
||||
# Stop: docker compose down
|
||||
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
|
||||
# Reset everything: ./reset.sh
|
||||
|
||||
name: supabase
|
||||
|
||||
services:
|
||||
|
||||
studio:
|
||||
container_name: supabase-studio
|
||||
image: supabase/studio:20250224-d10db0f
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"node",
|
||||
"-e",
|
||||
"fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
|
||||
]
|
||||
timeout: 10s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
STUDIO_PG_META_URL: http://meta:8080
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
|
||||
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
|
||||
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
|
||||
OPENAI_API_KEY: ${OPENAI_API_KEY:-}
|
||||
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
AUTH_JWT_SECRET: ${JWT_SECRET}
|
||||
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
LOGFLARE_URL: http://analytics:4000
|
||||
NEXT_PUBLIC_ENABLE_LOGS: true
|
||||
# Comment to use Big Query backend for analytics
|
||||
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
|
||||
|
||||
kong:
|
||||
container_name: supabase-kong
|
||||
image: kong:2.8.1
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- ${KONG_HTTP_PORT}:8000/tcp
|
||||
- ${KONG_HTTPS_PORT}:8443/tcp
|
||||
volumes:
|
||||
# https://github.com/supabase/supabase/issues/12661
|
||||
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
KONG_DATABASE: "off"
|
||||
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
|
||||
# https://github.com/supabase/cli/issues/14
|
||||
KONG_DNS_ORDER: LAST,A,CNAME
|
||||
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
|
||||
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
|
||||
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
|
||||
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
|
||||
# https://unix.stackexchange.com/a/294837
|
||||
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
|
||||
|
||||
auth:
|
||||
container_name: supabase-auth
|
||||
image: supabase/gotrue:v2.170.0
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:9999/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
GOTRUE_API_HOST: 0.0.0.0
|
||||
GOTRUE_API_PORT: 9999
|
||||
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
|
||||
|
||||
GOTRUE_DB_DRIVER: postgres
|
||||
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
|
||||
GOTRUE_SITE_URL: ${SITE_URL}
|
||||
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
|
||||
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
|
||||
|
||||
GOTRUE_JWT_ADMIN_ROLES: service_role
|
||||
GOTRUE_JWT_AUD: authenticated
|
||||
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
|
||||
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
|
||||
GOTRUE_JWT_SECRET: ${JWT_SECRET}
|
||||
|
||||
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
|
||||
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
|
||||
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
|
||||
|
||||
# Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
|
||||
# GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
|
||||
|
||||
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
|
||||
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
|
||||
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
|
||||
GOTRUE_SMTP_HOST: ${SMTP_HOST}
|
||||
GOTRUE_SMTP_PORT: ${SMTP_PORT}
|
||||
GOTRUE_SMTP_USER: ${SMTP_USER}
|
||||
GOTRUE_SMTP_PASS: ${SMTP_PASS}
|
||||
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
|
||||
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
|
||||
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
|
||||
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
|
||||
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
|
||||
|
||||
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
|
||||
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
|
||||
# Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook
|
||||
|
||||
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
|
||||
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
|
||||
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"
|
||||
|
||||
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
|
||||
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"
|
||||
|
||||
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
|
||||
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"
|
||||
|
||||
# GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
|
||||
# GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
|
||||
# GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
|
||||
|
||||
# GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
|
||||
# GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
|
||||
# GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
|
||||
|
||||
rest:
|
||||
container_name: supabase-rest
|
||||
image: postgrest/postgrest:v12.2.8
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
|
||||
PGRST_DB_ANON_ROLE: anon
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
PGRST_DB_USE_LEGACY_GUCS: "false"
|
||||
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
|
||||
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
|
||||
command:
|
||||
[
|
||||
"postgrest"
|
||||
]
|
||||
|
||||
realtime:
|
||||
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
|
||||
container_name: realtime-dev.supabase-realtime
|
||||
image: supabase/realtime:v2.34.40
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"curl",
|
||||
"-sSfL",
|
||||
"--head",
|
||||
"-o",
|
||||
"/dev/null",
|
||||
"-H",
|
||||
"Authorization: Bearer ${ANON_KEY}",
|
||||
"http://localhost:4000/api/tenants/realtime-dev/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
environment:
|
||||
PORT: 4000
|
||||
DB_HOST: ${POSTGRES_HOST}
|
||||
DB_PORT: ${POSTGRES_PORT}
|
||||
DB_USER: supabase_admin
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_NAME: ${POSTGRES_DB}
|
||||
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
|
||||
DB_ENC_KEY: supabaserealtime
|
||||
API_JWT_SECRET: ${JWT_SECRET}
|
||||
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
|
||||
ERL_AFLAGS: -proto_dist inet_tcp
|
||||
DNS_NODES: "''"
|
||||
RLIMIT_NOFILE: "10000"
|
||||
APP_NAME: realtime
|
||||
SEED_SELF_HOST: true
|
||||
RUN_JANITOR: true
|
||||
|
||||
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
|
||||
storage:
|
||||
container_name: supabase-storage
|
||||
image: supabase/storage-api:v1.19.3
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://storage:5000/status"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
rest:
|
||||
condition: service_started
|
||||
imgproxy:
|
||||
condition: service_started
|
||||
environment:
|
||||
ANON_KEY: ${ANON_KEY}
|
||||
SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
POSTGREST_URL: http://rest:3000
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
FILE_SIZE_LIMIT: 52428800
|
||||
STORAGE_BACKEND: file
|
||||
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
|
||||
TENANT_ID: stub
|
||||
# TODO: https://github.com/supabase/storage-api/issues/55
|
||||
REGION: stub
|
||||
GLOBAL_S3_BUCKET: stub
|
||||
ENABLE_IMAGE_TRANSFORMATION: "true"
|
||||
IMGPROXY_URL: http://imgproxy:5001
|
||||
|
||||
imgproxy:
|
||||
container_name: supabase-imgproxy
|
||||
image: darthsim/imgproxy:v3.8.0
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"imgproxy",
|
||||
"health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
environment:
|
||||
IMGPROXY_BIND: ":5001"
|
||||
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
|
||||
IMGPROXY_USE_ETAG: "true"
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
|
||||
|
||||
meta:
|
||||
container_name: supabase-meta
|
||||
image: supabase/postgres-meta:v0.86.1
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
PG_META_PORT: 8080
|
||||
PG_META_DB_HOST: ${POSTGRES_HOST}
|
||||
PG_META_DB_PORT: ${POSTGRES_PORT}
|
||||
PG_META_DB_NAME: ${POSTGRES_DB}
|
||||
PG_META_DB_USER: supabase_admin
|
||||
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
|
||||
functions:
|
||||
container_name: supabase-edge-functions
|
||||
image: supabase/edge-runtime:v1.67.2
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./volumes/functions:/home/deno/functions:Z
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
JWT_SECRET: ${JWT_SECRET}
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
|
||||
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
|
||||
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
|
||||
command:
|
||||
[
|
||||
"start",
|
||||
"--main-service",
|
||||
"/home/deno/functions/main"
|
||||
]
|
||||
|
||||
analytics:
|
||||
container_name: supabase-analytics
|
||||
image: supabase/logflare:1.12.5
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- 4000:4000
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# volumes:
|
||||
# - type: bind
|
||||
# source: ${PWD}/gcloud.json
|
||||
# target: /opt/app/rel/logflare/bin/gcloud.json
|
||||
# read_only: true
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"curl",
|
||||
"http://localhost:4000/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 10
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
environment:
|
||||
LOGFLARE_NODE_HOST: 127.0.0.1
|
||||
DB_USERNAME: supabase_admin
|
||||
DB_DATABASE: _supabase
|
||||
DB_HOSTNAME: ${POSTGRES_HOST}
|
||||
DB_PORT: ${POSTGRES_PORT}
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_SCHEMA: _analytics
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
LOGFLARE_SINGLE_TENANT: true
|
||||
LOGFLARE_SUPABASE_MODE: true
|
||||
LOGFLARE_MIN_CLUSTER_SIZE: 1
|
||||
|
||||
# Comment variables to use Big Query backend for analytics
|
||||
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
|
||||
POSTGRES_BACKEND_SCHEMA: _analytics
|
||||
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
|
||||
# GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
|
||||
|
||||
# Comment out everything below this point if you are using an external Postgres database
|
||||
db:
|
||||
container_name: supabase-db
|
||||
image: supabase/postgres:15.8.1.049
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
|
||||
# Must be superuser to create event trigger
|
||||
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
|
||||
# Must be superuser to alter reserved role
|
||||
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
|
||||
# Initialize the database settings with JWT_SECRET and JWT_EXP
|
||||
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
|
||||
# PGDATA directory is persisted between restarts
|
||||
- ./volumes/db/data:/var/lib/postgresql/data:Z
|
||||
# Changes required for internal supabase data such as _analytics
|
||||
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
|
||||
# Changes required for Analytics support
|
||||
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
|
||||
# Changes required for Pooler support
|
||||
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
|
||||
# Use named volume to persist pgsodium decryption key between restarts
|
||||
- supabase-config:/etc/postgresql-custom
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"pg_isready",
|
||||
"-U",
|
||||
"postgres",
|
||||
"-h",
|
||||
"localhost"
|
||||
]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
depends_on:
|
||||
vector:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
POSTGRES_HOST: /var/run/postgresql
|
||||
PGPORT: ${POSTGRES_PORT}
|
||||
POSTGRES_PORT: ${POSTGRES_PORT}
|
||||
PGPASSWORD: ${POSTGRES_PASSWORD}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
PGDATABASE: ${POSTGRES_DB}
|
||||
POSTGRES_DB: ${POSTGRES_DB}
|
||||
JWT_SECRET: ${JWT_SECRET}
|
||||
JWT_EXP: ${JWT_EXPIRY}
|
||||
command:
|
||||
[
|
||||
"postgres",
|
||||
"-c",
|
||||
"config_file=/etc/postgresql/postgresql.conf",
|
||||
"-c",
|
||||
"log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
|
||||
]
|
||||
|
||||
vector:
|
||||
container_name: supabase-vector
|
||||
image: timberio/vector:0.28.1-alpine
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
|
||||
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://vector:9001/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
environment:
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
command:
|
||||
[
|
||||
"--config",
|
||||
"/etc/vector/vector.yml"
|
||||
]
|
||||
|
||||
# Update the DATABASE_URL if you are using an external Postgres database
|
||||
supavisor:
|
||||
container_name: supabase-pooler
|
||||
image: supabase/supavisor:2.4.12
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- ${POSTGRES_PORT}:5432
|
||||
- ${POOLER_PROXY_PORT_TRANSACTION}:6543
|
||||
volumes:
|
||||
- ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"curl",
|
||||
"-sSfL",
|
||||
"--head",
|
||||
"-o",
|
||||
"/dev/null",
|
||||
"http://127.0.0.1:4000/api/health"
|
||||
]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
PORT: 4000
|
||||
POSTGRES_PORT: ${POSTGRES_PORT}
|
||||
POSTGRES_DB: ${POSTGRES_DB}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
|
||||
CLUSTER_POSTGRES: true
|
||||
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
|
||||
VAULT_ENC_KEY: ${VAULT_ENC_KEY}
|
||||
API_JWT_SECRET: ${JWT_SECRET}
|
||||
METRICS_JWT_SECRET: ${JWT_SECRET}
|
||||
REGION: local
|
||||
ERL_AFLAGS: -proto_dist inet_tcp
|
||||
POOLER_TENANT_ID: ${POOLER_TENANT_ID}
|
||||
POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
|
||||
POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
|
||||
POOLER_POOL_MODE: transaction
|
||||
command:
|
||||
[
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
|
||||
]
|
||||
|
||||
volumes:
|
||||
supabase-config:
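
That is the full docker-compose.yml for the bundled Supabase stack. A minimal sketch of driving it from autogpt_platform/db/docker (assuming an .env has been created from .env.example, as reset.sh does):

# one-time: create the environment file the compose file reads its variables from
cp .env.example .env
# start the full Supabase stack in the background
docker compose up -d
# tail the logs of a single service, e.g. the database
docker compose logs -f db
# stop the stack without deleting data
docker compose down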
44
autogpt_platform/db/docker/reset.sh
Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

echo "WARNING: This will remove all containers and container data, and will reset the .env file. This action cannot be undone!"
read -p "Are you sure you want to proceed? (y/N) " -n 1 -r
echo # Move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
    echo "Operation cancelled."
    exit 1
fi

echo "Stopping and removing all containers..."
docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans

echo "Cleaning up bind-mounted directories..."
BIND_MOUNTS=(
    "./volumes/db/data"
)

for DIR in "${BIND_MOUNTS[@]}"; do
    if [ -d "$DIR" ]; then
        echo "Deleting $DIR..."
        rm -rf "$DIR"
    else
        echo "Directory $DIR does not exist. Skipping bind mount deletion step..."
    fi
done

echo "Resetting .env file..."
if [ -f ".env" ]; then
    echo "Removing existing .env file..."
    rm -f .env
else
    echo "No .env file found. Skipping .env removal step..."
fi

if [ -f ".env.example" ]; then
    echo "Copying .env.example to .env..."
    cp .env.example .env
else
    echo ".env.example file not found. Skipping .env reset step..."
fi

echo "Cleanup complete!"
241
autogpt_platform/db/docker/volumes/api/kong.yml
Normal file
@@ -0,0 +1,241 @@
|
||||
_format_version: '2.1'
|
||||
_transform: true
|
||||
|
||||
###
|
||||
### Consumers / Users
|
||||
###
|
||||
consumers:
|
||||
- username: DASHBOARD
|
||||
- username: anon
|
||||
keyauth_credentials:
|
||||
- key: $SUPABASE_ANON_KEY
|
||||
- username: service_role
|
||||
keyauth_credentials:
|
||||
- key: $SUPABASE_SERVICE_KEY
|
||||
|
||||
###
|
||||
### Access Control List
|
||||
###
|
||||
acls:
|
||||
- consumer: anon
|
||||
group: anon
|
||||
- consumer: service_role
|
||||
group: admin
|
||||
|
||||
###
|
||||
### Dashboard credentials
|
||||
###
|
||||
basicauth_credentials:
|
||||
- consumer: DASHBOARD
|
||||
username: $DASHBOARD_USERNAME
|
||||
password: $DASHBOARD_PASSWORD
|
||||
|
||||
###
|
||||
### API Routes
|
||||
###
|
||||
services:
|
||||
## Open Auth routes
|
||||
- name: auth-v1-open
|
||||
url: http://auth:9999/verify
|
||||
routes:
|
||||
- name: auth-v1-open
|
||||
strip_path: true
|
||||
paths:
|
||||
- /auth/v1/verify
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: auth-v1-open-callback
|
||||
url: http://auth:9999/callback
|
||||
routes:
|
||||
- name: auth-v1-open-callback
|
||||
strip_path: true
|
||||
paths:
|
||||
- /auth/v1/callback
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: auth-v1-open-authorize
|
||||
url: http://auth:9999/authorize
|
||||
routes:
|
||||
- name: auth-v1-open-authorize
|
||||
strip_path: true
|
||||
paths:
|
||||
- /auth/v1/authorize
|
||||
plugins:
|
||||
- name: cors
|
||||
|
||||
## Secure Auth routes
|
||||
- name: auth-v1
|
||||
_comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
|
||||
url: http://auth:9999/
|
||||
routes:
|
||||
- name: auth-v1-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /auth/v1/
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: key-auth
|
||||
config:
|
||||
hide_credentials: false
|
||||
- name: acl
|
||||
config:
|
||||
hide_groups_header: true
|
||||
allow:
|
||||
- admin
|
||||
- anon
|
||||
|
||||
## Secure REST routes
|
||||
- name: rest-v1
|
||||
_comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
|
||||
url: http://rest:3000/
|
||||
routes:
|
||||
- name: rest-v1-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /rest/v1/
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: key-auth
|
||||
config:
|
||||
hide_credentials: true
|
||||
- name: acl
|
||||
config:
|
||||
hide_groups_header: true
|
||||
allow:
|
||||
- admin
|
||||
- anon
|
||||
|
||||
## Secure GraphQL routes
|
||||
- name: graphql-v1
|
||||
_comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
|
||||
url: http://rest:3000/rpc/graphql
|
||||
routes:
|
||||
- name: graphql-v1-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /graphql/v1
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: key-auth
|
||||
config:
|
||||
hide_credentials: true
|
||||
- name: request-transformer
|
||||
config:
|
||||
add:
|
||||
headers:
|
||||
- Content-Profile:graphql_public
|
||||
- name: acl
|
||||
config:
|
||||
hide_groups_header: true
|
||||
allow:
|
||||
- admin
|
||||
- anon
|
||||
|
||||
## Secure Realtime routes
|
||||
- name: realtime-v1-ws
|
||||
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
|
||||
url: http://realtime-dev.supabase-realtime:4000/socket
|
||||
protocol: ws
|
||||
routes:
|
||||
- name: realtime-v1-ws
|
||||
strip_path: true
|
||||
paths:
|
||||
- /realtime/v1/
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: key-auth
|
||||
config:
|
||||
hide_credentials: false
|
||||
- name: acl
|
||||
config:
|
||||
hide_groups_header: true
|
||||
allow:
|
||||
- admin
|
||||
- anon
|
||||
- name: realtime-v1-rest
|
||||
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
|
||||
url: http://realtime-dev.supabase-realtime:4000/api
|
||||
protocol: http
|
||||
routes:
|
||||
- name: realtime-v1-rest
|
||||
strip_path: true
|
||||
paths:
|
||||
- /realtime/v1/api
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: key-auth
|
||||
config:
|
||||
hide_credentials: false
|
||||
- name: acl
|
||||
config:
|
||||
hide_groups_header: true
|
||||
allow:
|
||||
- admin
|
||||
- anon
|
||||
## Storage routes: the storage server manages its own auth
|
||||
- name: storage-v1
|
||||
_comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
|
||||
url: http://storage:5000/
|
||||
routes:
|
||||
- name: storage-v1-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /storage/v1/
|
||||
plugins:
|
||||
- name: cors
|
||||
|
||||
## Edge Functions routes
|
||||
- name: functions-v1
|
||||
_comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
|
||||
url: http://functions:9000/
|
||||
routes:
|
||||
- name: functions-v1-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /functions/v1/
|
||||
plugins:
|
||||
- name: cors
|
||||
|
||||
## Analytics routes
|
||||
- name: analytics-v1
|
||||
_comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
|
||||
url: http://analytics:4000/
|
||||
routes:
|
||||
- name: analytics-v1-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /analytics/v1/
|
||||
|
||||
## Secure Database routes
|
||||
- name: meta
|
||||
_comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
|
||||
url: http://meta:8080/
|
||||
routes:
|
||||
- name: meta-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /pg/
|
||||
plugins:
|
||||
- name: key-auth
|
||||
config:
|
||||
hide_credentials: false
|
||||
- name: acl
|
||||
config:
|
||||
hide_groups_header: true
|
||||
allow:
|
||||
- admin
|
||||
|
||||
## Protected Dashboard - catch all remaining routes
|
||||
- name: dashboard
|
||||
_comment: 'Studio: /* -> http://studio:3000/*'
|
||||
url: http://studio:3000/
|
||||
routes:
|
||||
- name: dashboard-all
|
||||
strip_path: true
|
||||
paths:
|
||||
- /
|
||||
plugins:
|
||||
- name: cors
|
||||
- name: basic-auth
|
||||
config:
|
||||
hide_credentials: true
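
The declarative config above protects the Auth, REST, and GraphQL routes with key-auth plus ACLs, so requests have to carry the anon or service_role key. A rough sketch of exercising two of these routes (assumes the stack is running and that ANON_KEY and KONG_HTTP_PORT are exported from .env):

# PostgREST through Kong; key-auth accepts the key via the apikey header
curl "http://localhost:${KONG_HTTP_PORT:-8000}/rest/v1/" \
  -H "apikey: ${ANON_KEY}" \
  -H "Authorization: Bearer ${ANON_KEY}"
# GoTrue health endpoint through the same gateway
curl "http://localhost:${KONG_HTTP_PORT:-8000}/auth/v1/health" -H "apikey: ${ANON_KEY}"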
3
autogpt_platform/db/docker/volumes/db/_supabase.sql
Normal file
@@ -0,0 +1,3 @@
\set pguser `echo "$POSTGRES_USER"`

CREATE DATABASE _supabase WITH OWNER :pguser;
0
autogpt_platform/db/docker/volumes/db/init/data.sql
Executable file
5
autogpt_platform/db/docker/volumes/db/jwt.sql
Normal file
@@ -0,0 +1,5 @@
\set jwt_secret `echo "$JWT_SECRET"`
\set jwt_exp `echo "$JWT_EXP"`

ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
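
These ALTER DATABASE statements persist the JWT settings as custom GUCs, so any new session on the postgres database can read them back. A quick verification sketch (assumes the default supabase-db container name from docker-compose.yml):

docker exec supabase-db psql -U postgres -d postgres -c "SHOW app.settings.jwt_exp;"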
6
autogpt_platform/db/docker/volumes/db/logs.sql
Normal file
@@ -0,0 +1,6 @@
\set pguser `echo "$POSTGRES_USER"`

\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
\c postgres
6
autogpt_platform/db/docker/volumes/db/pooler.sql
Normal file
@@ -0,0 +1,6 @@
\set pguser `echo "$POSTGRES_USER"`

\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;
\c postgres
4
autogpt_platform/db/docker/volumes/db/realtime.sql
Normal file
@@ -0,0 +1,4 @@
\set pguser `echo "$POSTGRES_USER"`

create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
8
autogpt_platform/db/docker/volumes/db/roles.sql
Normal file
@@ -0,0 +1,8 @@
-- NOTE: change to your own passwords for production environments
\set pgpass `echo "$POSTGRES_PASSWORD"`

ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
208
autogpt_platform/db/docker/volumes/db/webhooks.sql
Normal file
@@ -0,0 +1,208 @@
|
||||
BEGIN;
|
||||
-- Create pg_net extension
|
||||
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
|
||||
-- Create supabase_functions schema
|
||||
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
|
||||
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
|
||||
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
|
||||
-- supabase_functions.migrations definition
|
||||
CREATE TABLE supabase_functions.migrations (
|
||||
version text PRIMARY KEY,
|
||||
inserted_at timestamptz NOT NULL DEFAULT NOW()
|
||||
);
|
||||
-- Initial supabase_functions migration
|
||||
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
|
||||
-- supabase_functions.hooks definition
|
||||
CREATE TABLE supabase_functions.hooks (
|
||||
id bigserial PRIMARY KEY,
|
||||
hook_table_id integer NOT NULL,
|
||||
hook_name text NOT NULL,
|
||||
created_at timestamptz NOT NULL DEFAULT NOW(),
|
||||
request_id bigint
|
||||
);
|
||||
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
|
||||
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
|
||||
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
|
||||
CREATE FUNCTION supabase_functions.http_request()
|
||||
RETURNS trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $function$
|
||||
DECLARE
|
||||
request_id bigint;
|
||||
payload jsonb;
|
||||
url text := TG_ARGV[0]::text;
|
||||
method text := TG_ARGV[1]::text;
|
||||
headers jsonb DEFAULT '{}'::jsonb;
|
||||
params jsonb DEFAULT '{}'::jsonb;
|
||||
timeout_ms integer DEFAULT 1000;
|
||||
BEGIN
|
||||
IF url IS NULL OR url = 'null' THEN
|
||||
RAISE EXCEPTION 'url argument is missing';
|
||||
END IF;
|
||||
|
||||
IF method IS NULL OR method = 'null' THEN
|
||||
RAISE EXCEPTION 'method argument is missing';
|
||||
END IF;
|
||||
|
||||
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
|
||||
headers = '{"Content-Type": "application/json"}'::jsonb;
|
||||
ELSE
|
||||
headers = TG_ARGV[2]::jsonb;
|
||||
END IF;
|
||||
|
||||
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
|
||||
params = '{}'::jsonb;
|
||||
ELSE
|
||||
params = TG_ARGV[3]::jsonb;
|
||||
END IF;
|
||||
|
||||
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
|
||||
timeout_ms = 1000;
|
||||
ELSE
|
||||
timeout_ms = TG_ARGV[4]::integer;
|
||||
END IF;
|
||||
|
||||
CASE
|
||||
WHEN method = 'GET' THEN
|
||||
SELECT http_get INTO request_id FROM net.http_get(
|
||||
url,
|
||||
params,
|
||||
headers,
|
||||
timeout_ms
|
||||
);
|
||||
WHEN method = 'POST' THEN
|
||||
payload = jsonb_build_object(
|
||||
'old_record', OLD,
|
||||
'record', NEW,
|
||||
'type', TG_OP,
|
||||
'table', TG_TABLE_NAME,
|
||||
'schema', TG_TABLE_SCHEMA
|
||||
);
|
||||
|
||||
SELECT http_post INTO request_id FROM net.http_post(
|
||||
url,
|
||||
payload,
|
||||
params,
|
||||
headers,
|
||||
timeout_ms
|
||||
);
|
||||
ELSE
|
||||
RAISE EXCEPTION 'method argument % is invalid', method;
|
||||
END CASE;
|
||||
|
||||
INSERT INTO supabase_functions.hooks
|
||||
(hook_table_id, hook_name, request_id)
|
||||
VALUES
|
||||
(TG_RELID, TG_NAME, request_id);
|
||||
|
||||
RETURN NEW;
|
||||
END
|
||||
$function$;
|
||||
-- Supabase super admin
|
||||
DO
|
||||
$$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_roles
|
||||
WHERE rolname = 'supabase_functions_admin'
|
||||
)
|
||||
THEN
|
||||
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
|
||||
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
|
||||
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
|
||||
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
|
||||
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
|
||||
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
|
||||
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
|
||||
GRANT supabase_functions_admin TO postgres;
|
||||
-- Remove unused supabase_pg_net_admin role
|
||||
DO
|
||||
$$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_roles
|
||||
WHERE rolname = 'supabase_pg_net_admin'
|
||||
)
|
||||
THEN
|
||||
REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
|
||||
DROP OWNED BY supabase_pg_net_admin;
|
||||
DROP ROLE supabase_pg_net_admin;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
-- pg_net grants when extension is already enabled
|
||||
DO
|
||||
$$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_extension
|
||||
WHERE extname = 'pg_net'
|
||||
)
|
||||
THEN
|
||||
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
-- Event trigger for pg_net
|
||||
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
|
||||
RETURNS event_trigger
|
||||
LANGUAGE plpgsql
|
||||
AS $$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_event_trigger_ddl_commands() AS ev
|
||||
JOIN pg_extension AS ext
|
||||
ON ev.objid = ext.oid
|
||||
WHERE ext.extname = 'pg_net'
|
||||
)
|
||||
THEN
|
||||
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
|
||||
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
|
||||
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
|
||||
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
|
||||
END IF;
|
||||
END;
|
||||
$$;
|
||||
COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
|
||||
DO
|
||||
$$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM pg_event_trigger
|
||||
WHERE evtname = 'issue_pg_net_access'
|
||||
) THEN
|
||||
CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
|
||||
EXECUTE PROCEDURE extensions.grant_pg_net_access();
|
||||
END IF;
|
||||
END
|
||||
$$;
|
||||
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
|
||||
ALTER function supabase_functions.http_request() SECURITY DEFINER;
|
||||
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
|
||||
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
|
||||
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
|
||||
COMMIT;
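
The http_request() trigger function above is driven entirely by its trigger arguments (url, method, headers, params, timeout_ms). A hypothetical sketch of wiring it to a table named "orders" (the table name and target URL are made up for illustration; run against the supabase-db container):

docker exec -i supabase-db psql -U postgres -d postgres <<'SQL'
CREATE TRIGGER orders_webhook
  AFTER INSERT ON public.orders
  FOR EACH ROW
  EXECUTE FUNCTION supabase_functions.http_request(
    'http://host.docker.internal:3000/hooks/orders',  -- url (TG_ARGV[0])
    'POST',                                           -- method (TG_ARGV[1])
    '{"Content-Type":"application/json"}',            -- headers (TG_ARGV[2])
    '{}',                                             -- params (TG_ARGV[3])
    '1000'                                            -- timeout_ms (TG_ARGV[4])
  );
SQL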
16
autogpt_platform/db/docker/volumes/functions/hello/index.ts
Normal file
@@ -0,0 +1,16 @@
// Follow this setup guide to integrate the Deno language server with your editor:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.

import { serve } from "https://deno.land/std@0.177.1/http/server.ts"

serve(async () => {
  return new Response(
    `"Hello from Edge Functions!"`,
    { headers: { "Content-Type": "application/json" } },
  )
})

// To invoke:
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
//   --header 'Authorization: Bearer <anon/service_role API key>'
94
autogpt_platform/db/docker/volumes/functions/main/index.ts
Normal file
@@ -0,0 +1,94 @@
|
||||
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
|
||||
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
|
||||
|
||||
console.log('main function started')
|
||||
|
||||
const JWT_SECRET = Deno.env.get('JWT_SECRET')
|
||||
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
|
||||
|
||||
function getAuthToken(req: Request) {
|
||||
const authHeader = req.headers.get('authorization')
|
||||
if (!authHeader) {
|
||||
throw new Error('Missing authorization header')
|
||||
}
|
||||
const [bearer, token] = authHeader.split(' ')
|
||||
if (bearer !== 'Bearer') {
|
||||
throw new Error(`Auth header is not 'Bearer {token}'`)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
async function verifyJWT(jwt: string): Promise<boolean> {
|
||||
const encoder = new TextEncoder()
|
||||
const secretKey = encoder.encode(JWT_SECRET)
|
||||
try {
|
||||
await jose.jwtVerify(jwt, secretKey)
|
||||
} catch (err) {
|
||||
console.error(err)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
serve(async (req: Request) => {
|
||||
if (req.method !== 'OPTIONS' && VERIFY_JWT) {
|
||||
try {
|
||||
const token = getAuthToken(req)
|
||||
const isValidJWT = await verifyJWT(token)
|
||||
|
||||
if (!isValidJWT) {
|
||||
return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
|
||||
status: 401,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
})
|
||||
}
|
||||
} catch (e) {
|
||||
console.error(e)
|
||||
return new Response(JSON.stringify({ msg: e.toString() }), {
|
||||
status: 401,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const url = new URL(req.url)
|
||||
const { pathname } = url
|
||||
const path_parts = pathname.split('/')
|
||||
const service_name = path_parts[1]
|
||||
|
||||
if (!service_name || service_name === '') {
|
||||
const error = { msg: 'missing function name in request' }
|
||||
return new Response(JSON.stringify(error), {
|
||||
status: 400,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
})
|
||||
}
|
||||
|
||||
const servicePath = `/home/deno/functions/${service_name}`
|
||||
console.error(`serving the request with ${servicePath}`)
|
||||
|
||||
const memoryLimitMb = 150
|
||||
const workerTimeoutMs = 1 * 60 * 1000
|
||||
const noModuleCache = false
|
||||
const importMapPath = null
|
||||
const envVarsObj = Deno.env.toObject()
|
||||
const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
|
||||
|
||||
try {
|
||||
const worker = await EdgeRuntime.userWorkers.create({
|
||||
servicePath,
|
||||
memoryLimitMb,
|
||||
workerTimeoutMs,
|
||||
noModuleCache,
|
||||
importMapPath,
|
||||
envVars,
|
||||
})
|
||||
return await worker.fetch(req)
|
||||
} catch (e) {
|
||||
const error = { msg: e.toString() }
|
||||
return new Response(JSON.stringify(error), {
|
||||
status: 500,
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
})
|
||||
}
|
||||
})
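
Since this main service rejects requests that lack a valid Bearer token whenever VERIFY_JWT is enabled, a rough smoke test looks like the following (a sketch; assumes the stack is up and ANON_KEY / KONG_HTTP_PORT are exported from .env):

# expect 401 when FUNCTIONS_VERIFY_JWT=true and no token is sent
curl -i "http://localhost:${KONG_HTTP_PORT:-8000}/functions/v1/hello"
# expect 200 with a valid anon key
curl -i "http://localhost:${KONG_HTTP_PORT:-8000}/functions/v1/hello" \
  -H "Authorization: Bearer ${ANON_KEY}"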
|
||||
232
autogpt_platform/db/docker/volumes/logs/vector.yml
Normal file
@@ -0,0 +1,232 @@
|
||||
api:
|
||||
enabled: true
|
||||
address: 0.0.0.0:9001
|
||||
|
||||
sources:
|
||||
docker_host:
|
||||
type: docker_logs
|
||||
exclude_containers:
|
||||
- supabase-vector
|
||||
|
||||
transforms:
|
||||
project_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- docker_host
|
||||
source: |-
|
||||
.project = "default"
|
||||
.event_message = del(.message)
|
||||
.appname = del(.container_name)
|
||||
del(.container_created_at)
|
||||
del(.container_id)
|
||||
del(.source_type)
|
||||
del(.stream)
|
||||
del(.label)
|
||||
del(.image)
|
||||
del(.host)
|
||||
del(.stream)
|
||||
router:
|
||||
type: route
|
||||
inputs:
|
||||
- project_logs
|
||||
route:
|
||||
kong: '.appname == "supabase-kong"'
|
||||
auth: '.appname == "supabase-auth"'
|
||||
rest: '.appname == "supabase-rest"'
|
||||
realtime: '.appname == "supabase-realtime"'
|
||||
storage: '.appname == "supabase-storage"'
|
||||
functions: '.appname == "supabase-functions"'
|
||||
db: '.appname == "supabase-db"'
|
||||
# Ignores non nginx errors since they are related with kong booting up
|
||||
kong_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.kong
|
||||
source: |-
|
||||
req, err = parse_nginx_log(.event_message, "combined")
|
||||
if err == null {
|
||||
.timestamp = req.timestamp
|
||||
.metadata.request.headers.referer = req.referer
|
||||
.metadata.request.headers.user_agent = req.agent
|
||||
.metadata.request.headers.cf_connecting_ip = req.client
|
||||
.metadata.request.method = req.method
|
||||
.metadata.request.path = req.path
|
||||
.metadata.request.protocol = req.protocol
|
||||
.metadata.response.status_code = req.status
|
||||
}
|
||||
if err != null {
|
||||
abort
|
||||
}
|
||||
# Ignores non nginx errors since they are related with kong booting up
|
||||
kong_err:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.kong
|
||||
source: |-
|
||||
.metadata.request.method = "GET"
|
||||
.metadata.response.status_code = 200
|
||||
parsed, err = parse_nginx_log(.event_message, "error")
|
||||
if err == null {
|
||||
.timestamp = parsed.timestamp
|
||||
.severity = parsed.severity
|
||||
.metadata.request.host = parsed.host
|
||||
.metadata.request.headers.cf_connecting_ip = parsed.client
|
||||
url, err = split(parsed.request, " ")
|
||||
if err == null {
|
||||
.metadata.request.method = url[0]
|
||||
.metadata.request.path = url[1]
|
||||
.metadata.request.protocol = url[2]
|
||||
}
|
||||
}
|
||||
if err != null {
|
||||
abort
|
||||
}
|
||||
# Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
|
||||
auth_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.auth
|
||||
source: |-
|
||||
parsed, err = parse_json(.event_message)
|
||||
if err == null {
|
||||
.metadata.timestamp = parsed.time
|
||||
.metadata = merge!(.metadata, parsed)
|
||||
}
|
||||
# PostgREST logs are structured so we separate timestamp from message using regex
|
||||
rest_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.rest
|
||||
source: |-
|
||||
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
|
||||
if err == null {
|
||||
.event_message = parsed.msg
|
||||
.timestamp = to_timestamp!(parsed.time)
|
||||
.metadata.host = .project
|
||||
}
|
||||
# Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
|
||||
realtime_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.realtime
|
||||
source: |-
|
||||
.metadata.project = del(.project)
|
||||
.metadata.external_id = .metadata.project
|
||||
parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
|
||||
if err == null {
|
||||
.event_message = parsed.msg
|
||||
.metadata.level = parsed.level
|
||||
}
|
||||
# Storage logs may contain json objects so we parse them for completeness
|
||||
storage_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.storage
|
||||
source: |-
|
||||
.metadata.project = del(.project)
|
||||
.metadata.tenantId = .metadata.project
|
||||
parsed, err = parse_json(.event_message)
|
||||
if err == null {
|
||||
.event_message = parsed.msg
|
||||
.metadata.level = parsed.level
|
||||
.metadata.timestamp = parsed.time
|
||||
.metadata.context[0].host = parsed.hostname
|
||||
.metadata.context[0].pid = parsed.pid
|
||||
}
|
||||
# Postgres logs some messages to stderr which we map to warning severity level
|
||||
db_logs:
|
||||
type: remap
|
||||
inputs:
|
||||
- router.db
|
||||
source: |-
|
||||
.metadata.host = "db-default"
|
||||
.metadata.parsed.timestamp = .timestamp
|
||||
|
||||
parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)
|
||||
|
||||
if err != null || parsed == null {
|
||||
.metadata.parsed.error_severity = "info"
|
||||
}
|
||||
if parsed != null {
|
||||
.metadata.parsed.error_severity = parsed.level
|
||||
}
|
||||
if .metadata.parsed.error_severity == "info" {
|
||||
.metadata.parsed.error_severity = "log"
|
||||
}
|
||||
.metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
|
||||
|
||||
sinks:
|
||||
logflare_auth:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- auth_logs
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
|
||||
logflare_realtime:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- realtime_logs
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
|
||||
logflare_rest:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- rest_logs
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
|
||||
logflare_db:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- db_logs
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
# We must route the sink through kong because ingesting logs before logflare is fully initialised will
|
||||
# lead to broken queries from studio. This works by the assumption that containers are started in the
|
||||
# following order: vector > db > logflare > kong
|
||||
uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
|
||||
logflare_functions:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- router.functions
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
|
||||
logflare_storage:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- storage_logs
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
|
||||
logflare_kong:
|
||||
type: 'http'
|
||||
inputs:
|
||||
- kong_logs
|
||||
- kong_err
|
||||
encoding:
|
||||
codec: 'json'
|
||||
method: 'post'
|
||||
request:
|
||||
retry_max_duration_secs: 10
|
||||
uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
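
A quick way to confirm Vector itself is up (a sketch; the API on port 9001 is only reachable inside the compose network, so this runs the check from within the container, the same way its healthcheck does):

docker exec supabase-vector wget -qO- http://localhost:9001/health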
30
autogpt_platform/db/docker/volumes/pooler/pooler.exs
Normal file
@@ -0,0 +1,30 @@
{:ok, _} = Application.ensure_all_started(:supavisor)

{:ok, version} =
  case Supavisor.Repo.query!("select version()") do
    %{rows: [[ver]]} -> Supavisor.Helpers.parse_pg_version(ver)
    _ -> nil
  end

params = %{
  "external_id" => System.get_env("POOLER_TENANT_ID"),
  "db_host" => "db",
  "db_port" => System.get_env("POSTGRES_PORT"),
  "db_database" => System.get_env("POSTGRES_DB"),
  "require_user" => false,
  "auth_query" => "SELECT * FROM pgbouncer.get_auth($1)",
  "default_max_clients" => System.get_env("POOLER_MAX_CLIENT_CONN"),
  "default_pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
  "default_parameter_status" => %{"server_version" => version},
  "users" => [%{
    "db_user" => "pgbouncer",
    "db_password" => System.get_env("POSTGRES_PASSWORD"),
    "mode_type" => System.get_env("POOLER_POOL_MODE"),
    "pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
    "is_manager" => true
  }]
}

if !Supavisor.Tenants.get_tenant_by_external_id(params["external_id"]) do
  {:ok, _} = Supavisor.Tenants.create_tenant(params)
end
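
Once this seed script has created the tenant, a sketch of connecting through the pooler's transaction port (assumes the supavisor service from docker-compose.yml is used; the "postgres.<tenant>" username form is how Supavisor identifies the tenant, per the Supabase self-hosting docs):

psql "postgres://postgres.${POOLER_TENANT_ID}:${POSTGRES_PASSWORD}@localhost:${POOLER_PROXY_PORT_TRANSACTION}/postgres"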
@@ -92,6 +92,7 @@ services:
|
||||
- FRONTEND_BASE_URL=http://localhost:3000
|
||||
- BACKEND_CORS_ALLOW_ORIGINS=["http://localhost:3000"]
|
||||
- ENCRYPTION_KEY=dvziYgz0KSK8FENhju0ZYi8-fRTfAdlz6YLhdB_jhNw= # DO NOT USE IN PRODUCTION!!
|
||||
- UNSUBSCRIBE_SECRET_KEY=HlP8ivStJjmbf6NKi78m_3FnOogut0t5ckzjsIqeaio= # DO NOT USE IN PRODUCTION!!
|
||||
ports:
|
||||
- "8006:8006"
|
||||
- "8007:8007"
|
||||
|
||||
@@ -5,7 +5,7 @@ networks:
|
||||
name: shared-network
|
||||
|
||||
volumes:
|
||||
db-config:
|
||||
supabase-config:
|
||||
|
||||
x-agpt-services:
|
||||
&agpt-services
|
||||
@@ -67,19 +67,19 @@ services:
|
||||
studio:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: studio
|
||||
|
||||
kong:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: kong
|
||||
|
||||
auth:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: auth
|
||||
environment:
|
||||
GOTRUE_MAILER_AUTOCONFIRM: true
|
||||
@@ -87,54 +87,57 @@ services:
|
||||
rest:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: rest
|
||||
|
||||
realtime:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: realtime
|
||||
|
||||
storage:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: storage
|
||||
|
||||
imgproxy:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: imgproxy
|
||||
|
||||
meta:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: meta
|
||||
|
||||
functions:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: functions
|
||||
|
||||
analytics:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: analytics
|
||||
|
||||
db:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: db
|
||||
ports:
|
||||
- ${POSTGRES_PORT}:5432 # We don't use Supavisor locally, so we expose the db directly.
|
||||
|
||||
vector:
|
||||
<<: *supabase-services
|
||||
extends:
|
||||
file: ./supabase/docker/docker-compose.yml
|
||||
file: ./db/docker/docker-compose.yml
|
||||
service: vector
|
||||
|
||||
deps:
|
||||
|
||||
@@ -23,9 +23,9 @@
|
||||
"defaults"
|
||||
],
|
||||
"dependencies": {
|
||||
"@faker-js/faker": "^9.4.0",
|
||||
"@faker-js/faker": "^9.6.0",
|
||||
"@hookform/resolvers": "^3.10.0",
|
||||
"@next/third-parties": "^15.1.6",
|
||||
"@next/third-parties": "^15.2.1",
|
||||
"@radix-ui/react-alert-dialog": "^1.1.5",
|
||||
"@radix-ui/react-avatar": "^1.1.1",
|
||||
"@radix-ui/react-checkbox": "^1.1.2",
|
||||
@@ -46,11 +46,12 @@
|
||||
"@radix-ui/react-tooltip": "^1.1.7",
|
||||
"@sentry/nextjs": "^8",
|
||||
"@supabase/ssr": "^0.5.2",
|
||||
"@supabase/supabase-js": "^2.48.1",
|
||||
"@tanstack/react-table": "^8.20.6",
|
||||
"@xyflow/react": "^12.4.2",
|
||||
"@supabase/supabase-js": "^2.49.1",
|
||||
"@tanstack/react-table": "^8.21.2",
|
||||
"@xyflow/react": "12.4.2",
|
||||
"ajv": "^8.17.1",
|
||||
"boring-avatars": "^1.11.2",
|
||||
"canvas-confetti": "^1.9.3",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "1.0.4",
|
||||
@@ -59,28 +60,28 @@
|
||||
"dotenv": "^16.4.7",
|
||||
"elliptic": "6.6.1",
|
||||
"embla-carousel-react": "^8.5.2",
|
||||
"framer-motion": "^12.0.11",
|
||||
"framer-motion": "^12.4.11",
|
||||
"geist": "^1.3.1",
|
||||
"launchdarkly-react-client-sdk": "^3.6.1",
|
||||
"lodash.debounce": "^4.0.8",
|
||||
"lucide-react": "^0.474.0",
|
||||
"lucide-react": "^0.479.0",
|
||||
"moment": "^2.30.1",
|
||||
"next": "^14.2.21",
|
||||
"next-themes": "^0.4.4",
|
||||
"next-themes": "^0.4.5",
|
||||
"react": "^18",
|
||||
"react-day-picker": "^9.5.1",
|
||||
"react-day-picker": "^9.6.1",
|
||||
"react-dom": "^18",
|
||||
"react-drag-drop-files": "^2.4.0",
|
||||
"react-hook-form": "^7.54.0",
|
||||
"react-icons": "^5.4.0",
|
||||
"react-icons": "^5.5.0",
|
||||
"react-markdown": "^9.0.3",
|
||||
"react-modal": "^3.16.3",
|
||||
"react-shepherd": "^6.1.7",
|
||||
"react-shepherd": "^6.1.8",
|
||||
"recharts": "^2.15.1",
|
||||
"tailwind-merge": "^2.6.0",
|
||||
"tailwindcss-animate": "^1.0.7",
|
||||
"uuid": "^11.0.5",
|
||||
"zod": "^3.23.8"
|
||||
"uuid": "^11.1.0",
|
||||
"zod": "^3.24.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@chromatic-com/storybook": "^3.2.4",
|
||||
@@ -94,7 +95,8 @@
|
||||
"@storybook/nextjs": "^8.5.3",
|
||||
"@storybook/react": "^8.3.5",
|
||||
"@storybook/test": "^8.3.5",
|
||||
"@storybook/test-runner": "^0.20.1",
|
||||
"@storybook/test-runner": "^0.21.0",
|
||||
"@types/canvas-confetti": "^1.9.0",
|
||||
"@types/lodash": "^4.17.13",
|
||||
"@types/negotiator": "^0.6.3",
|
||||
"@types/node": "^22.13.0",
|
||||
|
||||
@@ -1,15 +1,29 @@
|
||||
"use client";
|
||||
import { ShoppingBag } from "lucide-react";
|
||||
import { Sidebar } from "@/components/agptui/Sidebar";
|
||||
import { Users, DollarSign, LogOut } from "lucide-react";
|
||||
|
||||
import { useState } from "react";
|
||||
import Link from "next/link";
|
||||
import { BinaryIcon, XIcon } from "lucide-react";
|
||||
import { usePathname } from "next/navigation"; // Add this import
|
||||
import { IconSliders } from "@/components/ui/icons";
|
||||
|
||||
const tabs = [
|
||||
{ name: "Dashboard", href: "/admin/dashboard" },
|
||||
{ name: "Marketplace", href: "/admin/marketplace" },
|
||||
{ name: "Users", href: "/admin/users" },
|
||||
{ name: "Settings", href: "/admin/settings" },
|
||||
const sidebarLinkGroups = [
|
||||
{
|
||||
links: [
|
||||
{
|
||||
text: "Agent Management",
|
||||
href: "/admin/agents",
|
||||
icon: <Users className="h-6 w-6" />,
|
||||
},
|
||||
{
|
||||
text: "User Spending",
|
||||
href: "/admin/spending",
|
||||
icon: <DollarSign className="h-6 w-6" />,
|
||||
},
|
||||
{
|
||||
text: "Admin User Management",
|
||||
href: "/admin/settings",
|
||||
icon: <IconSliders className="h-6 w-6" />,
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
export default function AdminLayout({
|
||||
@@ -17,84 +31,10 @@ export default function AdminLayout({
|
||||
}: {
|
||||
children: React.ReactNode;
|
||||
}) {
|
||||
const pathname = usePathname(); // Get the current pathname
|
||||
const [activeTab, setActiveTab] = useState(() => {
|
||||
// Set active tab based on the current route
|
||||
return tabs.find((tab) => tab.href === pathname)?.name || tabs[0].name;
|
||||
});
|
||||
const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
|
||||
|
||||
return (
|
||||
<div className="min-h-screen bg-gray-100">
|
||||
<nav className="bg-white shadow-sm">
|
||||
<div className="max-w-10xl mx-auto px-4 sm:px-6 lg:px-8">
|
||||
<div className="flex h-16 items-center justify-between">
|
||||
<div className="flex items-center">
|
||||
<div className="flex-shrink-0">
|
||||
<h1 className="text-xl font-bold">Admin Panel</h1>
|
||||
</div>
|
||||
<div className="hidden sm:ml-6 sm:flex sm:space-x-8">
|
||||
{tabs.map((tab) => (
|
||||
<Link
|
||||
key={tab.name}
|
||||
href={tab.href}
|
||||
className={`${
|
||||
activeTab === tab.name
|
||||
? "border-indigo-500 text-indigo-600"
|
||||
: "border-transparent text-gray-500 hover:border-gray-300 hover:text-gray-700"
|
||||
} inline-flex items-center border-b-2 px-1 pt-1 text-sm font-medium`}
|
||||
onClick={() => setActiveTab(tab.name)}
|
||||
>
|
||||
{tab.name}
|
||||
</Link>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
<div className="sm:hidden">
|
||||
<button
|
||||
type="button"
|
||||
className="inline-flex items-center justify-center rounded-md p-2 text-gray-400 hover:bg-gray-100 hover:text-gray-500 focus:outline-none focus:ring-2 focus:ring-inset focus:ring-indigo-500"
|
||||
onClick={() => setMobileMenuOpen(!mobileMenuOpen)}
|
||||
>
|
||||
<span className="sr-only">Open main menu</span>
|
||||
{mobileMenuOpen ? (
|
||||
<XIcon className="block h-6 w-6" aria-hidden="true" />
|
||||
) : (
|
||||
<BinaryIcon className="block h-6 w-6" aria-hidden="true" />
|
||||
)}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{mobileMenuOpen && (
|
||||
<div className="sm:hidden">
|
||||
<div className="space-y-1 pb-3 pt-2">
|
||||
{tabs.map((tab) => (
|
||||
<Link
|
||||
key={tab.name}
|
||||
href={tab.href}
|
||||
className={`${
|
||||
activeTab === tab.name
|
||||
? "border-indigo-500 bg-indigo-50 text-indigo-700"
|
||||
: "border-transparent text-gray-600 hover:border-gray-300 hover:bg-gray-50 hover:text-gray-800"
|
||||
} block border-l-4 py-2 pl-3 pr-4 text-base font-medium`}
|
||||
onClick={() => {
|
||||
setActiveTab(tab.name);
|
||||
setMobileMenuOpen(false);
|
||||
}}
|
||||
>
|
||||
{tab.name}
|
||||
</Link>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</nav>
|
||||
|
||||
<main className="py-10">
|
||||
<div className="mx-auto max-w-7xl px-4 sm:px-6 lg:px-8">{children}</div>
|
||||
</main>
|
||||
<div className="flex min-h-screen w-screen max-w-[1360px] flex-col lg:flex-row">
|
||||
<Sidebar linkGroups={sidebarLinkGroups} />
|
||||
<div className="flex-1 pl-4">{children}</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
37 autogpt_platform/frontend/src/app/build/actions.ts Normal file
@@ -0,0 +1,37 @@
"use server";
|
||||
|
||||
import { revalidatePath } from "next/cache";
|
||||
import BackendAPI from "@/lib/autogpt-server-api/client";
|
||||
import { OttoQuery, OttoResponse } from "@/lib/autogpt-server-api/types";
|
||||
|
||||
const api = new BackendAPI();
|
||||
|
||||
export async function askOtto(
|
||||
query: string,
|
||||
conversationHistory: { query: string; response: string }[],
|
||||
includeGraphData: boolean,
|
||||
graphId?: string,
|
||||
): Promise<OttoResponse> {
|
||||
const messageId = `${Date.now()}-web`;
|
||||
|
||||
const ottoQuery: OttoQuery = {
|
||||
query,
|
||||
conversation_history: conversationHistory,
|
||||
message_id: messageId,
|
||||
include_graph_data: includeGraphData,
|
||||
graph_id: graphId,
|
||||
};
|
||||
|
||||
try {
|
||||
const response = await api.askOtto(ottoQuery);
|
||||
return response;
|
||||
} catch (error) {
|
||||
console.error("Error in askOtto server action:", error);
|
||||
return {
|
||||
answer: error instanceof Error ? error.message : "Unknown error occurred",
|
||||
documents: [],
|
||||
success: false,
|
||||
error: true,
|
||||
};
|
||||
}
|
||||
}
|
||||
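askOtto above is a Next.js server action, so client components can import and call it directly. A minimal usage sketch, assuming a hypothetical client component (the real OttoChatWidget may wire this up differently):

"use client";

import { useState } from "react";
import { askOtto } from "@/app/build/actions";

// Illustrative only: keeps a running query/response history and sends one question.
export function OttoAskExample() {
  const [history, setHistory] = useState<{ query: string; response: string }[]>([]);

  async function send(query: string) {
    // Runs on the server; failures come back as a response with error: true.
    const res = await askOtto(query, history, false);
    setHistory((prev) => [...prev, { query, response: res.answer }]);
  }

  return <button onClick={() => send("How do I add a block?")}>Ask Otto</button>;
}
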
@@ -14,6 +14,7 @@ import { Toaster } from "@/components/ui/toaster";
import { IconType } from "@/components/ui/icons";
import { Providers } from "@/app/providers";
import TallyPopupSimple from "@/components/TallyPopup";
import OttoChatWidget from "@/components/OttoChatWidget";

const inter = Inter({ subsets: ["latin"], variable: "--font-inter" });

@@ -116,6 +117,7 @@ export default async function RootLayout({
            )}
            <main className="w-full flex-grow">{children}</main>
            <TallyPopupSimple />
            <OttoChatWidget />
          </div>
          <Toaster />
        </Providers>

@@ -2,11 +2,13 @@
import React, { useCallback, useEffect, useMemo, useState } from "react";
import { useParams, useRouter } from "next/navigation";

import { exportAsJSONFile } from "@/lib/utils";
import { useBackendAPI } from "@/lib/autogpt-server-api/context";
import {
  GraphExecution,
  GraphExecutionID,
  GraphExecutionMeta,
  GraphID,
  GraphMeta,
  LibraryAgent,
  LibraryAgentID,
@@ -61,22 +63,46 @@ export default function AgentRunsPage(): React.ReactElement {
    setSelectedSchedule(schedule);
  }, []);

  const [graphVersions, setGraphVersions] = useState<Record<number, GraphMeta>>(
    {},
  );
  const getGraphVersion = useCallback(
    async (graphID: GraphID, version: number) => {
      if (graphVersions[version]) return graphVersions[version];

      const graphVersion = await api.getGraph(graphID, version);
      setGraphVersions((prev) => ({
        ...prev,
        [version]: graphVersion,
      }));
      return graphVersion;
    },
    [api, graphVersions],
  );

  const fetchAgents = useCallback(() => {
    api.getLibraryAgent(agentID).then((agent) => {
      setAgent(agent);

      api.getGraph(agent.agent_id).then(setGraph);
      getGraphVersion(agent.agent_id, agent.agent_version).then(
        (_graph) =>
          (graph && graph.version == _graph.version) || setGraph(_graph),
      );
      api.getGraphExecutions(agent.agent_id).then((agentRuns) => {
        const sortedRuns = agentRuns.toSorted(
          (a, b) => b.started_at - a.started_at,
        );
        setAgentRuns(sortedRuns);

        // Preload the corresponding graph versions
        new Set(sortedRuns.map((run) => run.graph_version)).forEach((version) =>
          getGraphVersion(agent.agent_id, version),
        );

        if (!selectedView.id && isFirstLoad && sortedRuns.length > 0) {
          // only for first load or first execution
          setIsFirstLoad(false);
          selectView({ type: "run", id: sortedRuns[0].execution_id });
          setSelectedRun(sortedRuns[0]);
        }
      });
    });

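getGraphVersion above memoizes each fetched graph version in component state, so rendering older runs does not refetch the same version. Outside of React, the same pattern is a keyed async cache; a small framework-free sketch (helper name is illustrative, and it keys on version only, exactly like the code above):

// Minimal keyed async cache: fetch each version at most once, then serve from memory.
function makeVersionCache<T>(
  fetchVersion: (id: string, version: number) => Promise<T>,
) {
  const cache = new Map<number, T>();
  return async (id: string, version: number): Promise<T> => {
    const hit = cache.get(version);
    if (hit !== undefined) return hit;
    const value = await fetchVersion(id, version);
    cache.set(version, value);
    return value;
  };
}

// Usage sketch: const getGraphVersion = makeVersionCache((id, v) => api.getGraph(id, v));
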
@@ -85,7 +111,7 @@ export default function AgentRunsPage(): React.ReactElement {
        .getGraphExecutionInfo(agent.agent_id, selectedView.id)
        .then(setSelectedRun);
    }
  }, [api, agentID, selectedView, isFirstLoad]);
  }, [api, agentID, getGraphVersion, graph, selectedView, isFirstLoad, agent]);

  useEffect(() => {
    fetchAgents();
@@ -95,17 +121,29 @@ export default function AgentRunsPage(): React.ReactElement {
  useEffect(() => {
    if (selectedView.type != "run" || !selectedView.id || !agent) return;

    // pull partial data from "cache" while waiting for the rest to load
    const newSelectedRun = agentRuns.find(
      (run) => run.execution_id == selectedView.id,
    );
    if (selectedView.id !== selectedRun?.execution_id) {
      setSelectedRun(
        agentRuns.find((r) => r.execution_id == selectedView.id) ?? null,
      );
    }
    // Pull partial data from "cache" while waiting for the rest to load
    setSelectedRun(newSelectedRun ?? null);

    api
      .getGraphExecutionInfo(agent.agent_id, selectedView.id)
      .then(setSelectedRun);
  }, [api, selectedView, agentID]);
    // Ensure corresponding graph version is available before rendering I/O
    api
      .getGraphExecutionInfo(agent.agent_id, selectedView.id)
      .then(async (run) => {
        await getGraphVersion(run.graph_id, run.graph_version);
        setSelectedRun(run);
      });
    }
  }, [
    api,
    selectedView,
    agent,
    agentRuns,
    selectedRun?.execution_id,
    getGraphVersion,
  ]);

  const fetchSchedules = useCallback(async () => {
    if (!agent) return;
@@ -154,19 +192,40 @@ export default function AgentRunsPage(): React.ReactElement {
    [schedules, api],
  );

  const downloadGraph = useCallback(
    async () =>
      agent &&
      // Export sanitized graph from backend
      api
        .getGraph(agent.agent_id, agent.agent_version, true)
        .then((graph) =>
          exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`),
        ),
    [api, agent],
  );

  const agentActions: ButtonAction[] = useMemo(
    () => [
      {
        label: "Open in builder",
        callback: () => agent && router.push(`/build?flowID=${agent.agent_id}`),
      },
      ...(agent?.can_access_graph
        ? [
            {
              label: "Open in builder",
              callback: () =>
                agent &&
                router.push(
                  `/build?flowID=${agent.agent_id}&flowVersion=${agent.agent_version}`,
                ),
            },
            { label: "Export agent to file", callback: downloadGraph },
          ]
        : []),
      {
        label: "Delete agent",
        variant: "destructive",
        callback: () => setAgentDeleteDialogOpen(true),
      },
    ],
    [agent, router],
    [agent, router, downloadGraph],
  );

  if (!agent || !graph) {
@@ -205,7 +264,7 @@ export default function AgentRunsPage(): React.ReactElement {
          {(selectedView.type == "run" && selectedView.id ? (
            selectedRun && (
              <AgentRunDetailsView
                graph={graph}
                graph={graphVersions[selectedRun.graph_version] ?? graph}
                run={selectedRun}
                agentActions={agentActions}
                deleteRun={() => setConfirmingDeleteAgentRun(selectedRun)}

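downloadGraph above asks the backend for the sanitized graph (getGraph(..., true)) and passes it to exportAsJSONFile. That helper is imported from "@/lib/utils" and is not shown in this diff; a typical browser-side implementation of such a helper would look roughly like the sketch below (an assumption about its behaviour, not the project's actual code):

// Hypothetical stand-in for exportAsJSONFile: serialize to JSON and trigger a download.
function exportAsJSONFile(obj: unknown, filename: string): void {
  const blob = new Blob([JSON.stringify(obj, null, 2)], {
    type: "application/json",
  });
  const url = URL.createObjectURL(blob);
  const anchor = document.createElement("a");
  anchor.href = url;
  anchor.download = filename;
  anchor.click();
  URL.revokeObjectURL(url);
}
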
@@ -49,11 +49,14 @@ export async function login(values: z.infer<typeof loginFormSchema>) {
    }

    await api.createUser();
    if (!(await api.getUserOnboarding()).isCompleted) {
      revalidatePath("/onboarding", "layout");
      redirect("/onboarding");
    }

    if (data.session) {
      await supabase.auth.setSession(data.session);
    }
    console.log("Logged in");
    revalidatePath("/", "layout");
    redirect("/");
  });
@@ -86,7 +89,10 @@ export async function providerLogin(provider: LoginProvider) {
      }

      await api.createUser();
      console.log("Logged in");
      if (!(await api.getUserOnboarding()).isCompleted) {
        revalidatePath("/onboarding", "layout");
        redirect("/onboarding");
      }
    },
  );
}

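Both login and providerLogin now check getUserOnboarding().isCompleted right after createUser() and redirect unfinished users to /onboarding. If this gate is needed in more places, it could be factored into a helper; a sketch reusing only the calls visible above (the helper name is hypothetical):

import { redirect } from "next/navigation";
import { revalidatePath } from "next/cache";
import BackendAPI from "@/lib/autogpt-server-api/client";

// Hypothetical helper: send users who have not finished onboarding to /onboarding.
async function redirectIfOnboardingIncomplete(api: BackendAPI): Promise<void> {
  if (!(await api.getUserOnboarding()).isCompleted) {
    revalidatePath("/onboarding", "layout");
    redirect("/onboarding");
  }
}
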
Some files were not shown because too many files have changed in this diff.