mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-04-08 03:00:28 -04:00)
Merge branch 'dev' of github.com:Significant-Gravitas/AutoGPT into zamilmajdy/fix-add-list-self-loop-behavior
24 .github/dependabot.yml vendored

@@ -129,30 +129,6 @@ updates:
           - "minor"
           - "patch"
-
-  # Submodules
-  - package-ecosystem: "gitsubmodule"
-    directory: "autogpt_platform/supabase"
-    schedule:
-      interval: "weekly"
-    open-pull-requests-limit: 1
-    target-branch: "dev"
-    commit-message:
-      prefix: "chore(platform/deps)"
-      prefix-development: "chore(platform/deps-dev)"
-    groups:
-      production-dependencies:
-        dependency-type: "production"
-        update-types:
-          - "minor"
-          - "patch"
-      development-dependencies:
-        dependency-type: "development"
-        update-types:
-          - "minor"
-          - "patch"

   # Docs
   - package-ecosystem: 'pip'
     directory: "docs/"
2 .github/workflows/platform-frontend-ci.yml vendored

@@ -82,7 +82,7 @@ jobs:

       - name: Copy default supabase .env
        run: |
-          cp ../supabase/docker/.env.example ../.env
+          cp ../.env.example ../.env

      - name: Copy backend .env
        run: |
3 .gitmodules vendored

@@ -1,6 +1,3 @@
 [submodule "classic/forge/tests/vcr_cassettes"]
 	path = classic/forge/tests/vcr_cassettes
 	url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
-[submodule "autogpt_platform/supabase"]
-	path = autogpt_platform/supabase
-	url = https://github.com/supabase/supabase.git
123 autogpt_platform/.env.example Normal file

@@ -0,0 +1,123 @@
+############
+# Secrets
+# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
+############
+
+POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
+JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
+ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
+SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
+DASHBOARD_USERNAME=supabase
+DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
+SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
+VAULT_ENC_KEY=your-encryption-key-32-chars-min
+
+
+############
+# Database - You can change these to any PostgreSQL database that has logical replication enabled.
+############
+
+POSTGRES_HOST=db
+POSTGRES_DB=postgres
+POSTGRES_PORT=5432
+# default user is postgres
+
+
+############
+# Supavisor -- Database pooler
+############
+POOLER_PROXY_PORT_TRANSACTION=6543
+POOLER_DEFAULT_POOL_SIZE=20
+POOLER_MAX_CLIENT_CONN=100
+POOLER_TENANT_ID=your-tenant-id
+
+
+############
+# API Proxy - Configuration for the Kong Reverse proxy.
+############
+
+KONG_HTTP_PORT=8000
+KONG_HTTPS_PORT=8443
+
+
+############
+# API - Configuration for PostgREST.
+############
+
+PGRST_DB_SCHEMAS=public,storage,graphql_public
+
+
+############
+# Auth - Configuration for the GoTrue authentication server.
+############
+
+## General
+SITE_URL=http://localhost:3000
+ADDITIONAL_REDIRECT_URLS=
+JWT_EXPIRY=3600
+DISABLE_SIGNUP=false
+API_EXTERNAL_URL=http://localhost:8000
+
+## Mailer Config
+MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
+MAILER_URLPATHS_INVITE="/auth/v1/verify"
+MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
+MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
+
+## Email auth
+ENABLE_EMAIL_SIGNUP=true
+ENABLE_EMAIL_AUTOCONFIRM=false
+SMTP_ADMIN_EMAIL=admin@example.com
+SMTP_HOST=supabase-mail
+SMTP_PORT=2500
+SMTP_USER=fake_mail_user
+SMTP_PASS=fake_mail_password
+SMTP_SENDER_NAME=fake_sender
+ENABLE_ANONYMOUS_USERS=false
+
+## Phone auth
+ENABLE_PHONE_SIGNUP=true
+ENABLE_PHONE_AUTOCONFIRM=true
+
+
+############
+# Studio - Configuration for the Dashboard
+############
+
+STUDIO_DEFAULT_ORGANIZATION=Default Organization
+STUDIO_DEFAULT_PROJECT=Default Project
+
+STUDIO_PORT=3000
+# replace if you intend to use Studio outside of localhost
+SUPABASE_PUBLIC_URL=http://localhost:8000
+
+# Enable webp support
+IMGPROXY_ENABLE_WEBP_DETECTION=true
+
+# Add your OpenAI API key to enable SQL Editor Assistant
+OPENAI_API_KEY=
+
+
+############
+# Functions - Configuration for Functions
+############
+# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
+FUNCTIONS_VERIFY_JWT=false
+
+
+############
+# Logs - Configuration for Logflare
+# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
+############
+
+LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key
+
+# Change vector.toml sinks to reflect this change
+LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key
+
+# Docker socket location - this value will differ depending on your OS
+DOCKER_SOCKET_LOCATION=/var/run/docker.sock
+
+# Google Cloud Project details
+GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
+GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
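Note: everything in this new `.env.example` is consumed as plain environment variables by docker compose. A minimal sketch of reading a few of them from Python; the `python-dotenv` dependency and the file path here are illustrative assumptions, not part of this commit:

```
# Hypothetical helper script, not from this commit.
import os

from dotenv import load_dotenv  # third-party: pip install python-dotenv

load_dotenv("autogpt_platform/.env")  # assumed location of the copied file

# Required secrets; the header above says these MUST be changed for production.
postgres_password = os.environ["POSTGRES_PASSWORD"]
jwt_secret = os.environ["JWT_SECRET"]

# Optional settings falling back to the defaults from the example file.
kong_http_port = int(os.getenv("KONG_HTTP_PORT", "8000"))
site_url = os.getenv("SITE_URL", "http://localhost:3000")
```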
@@ -22,35 +22,29 @@ To run the AutoGPT Platform, follow these steps:

 2. Run the following command:
    ```
-   git submodule update --init --recursive --progress
+   cp .env.example .env
    ```
-   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
+   This command will copy the `.env.example` file to `.env`. You can modify the `.env` file to add your own environment variables.

-3. Run the following command:
-   ```
-   cp supabase/docker/.env.example .env
-   ```
-   This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.

-4. Run the following command:
+3. Run the following command:
    ```
    docker compose up -d
    ```
    This command will start all the necessary backend services defined in the `docker-compose.yml` file in detached mode.

-5. Navigate to `frontend` within the `autogpt_platform` directory:
+4. Navigate to `frontend` within the `autogpt_platform` directory:
    ```
    cd frontend
    ```
    You will need to run your frontend application separately on your local machine.

-6. Run the following command:
+5. Run the following command:
    ```
    cp .env.example .env.local
    ```
    This command will copy the `.env.example` file to `.env.local` in the `frontend` directory. You can modify the `.env.local` within this folder to add your own environment variables for the frontend application.

-7. Run the following command:
+6. Run the following command:
    ```
    npm install
    npm run dev

@@ -61,7 +55,7 @@ To run the AutoGPT Platform, follow these steps:
    yarn install && yarn dev
    ```

-8. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.
+7. Open your browser and navigate to `http://localhost:3000` to access the AutoGPT Platform frontend.

 ### Docker Compose Commands
40 autogpt_platform/autogpt_libs/poetry.lock generated

@@ -1476,30 +1476,30 @@ pyasn1 = ">=0.1.3"

 [[package]]
 name = "ruff"
-version = "0.9.6"
+version = "0.9.10"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 groups = ["dev"]
 files = [
-    {file = "ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba"},
-    {file = "ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504"},
-    {file = "ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5"},
-    {file = "ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217"},
-    {file = "ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6"},
-    {file = "ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897"},
-    {file = "ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08"},
-    {file = "ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656"},
-    {file = "ruff-0.9.6-py3-none-win32.whl", hash = "sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d"},
-    {file = "ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa"},
-    {file = "ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a"},
-    {file = "ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9"},
+    {file = "ruff-0.9.10-py3-none-linux_armv6l.whl", hash = "sha256:eb4d25532cfd9fe461acc83498361ec2e2252795b4f40b17e80692814329e42d"},
+    {file = "ruff-0.9.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:188a6638dab1aa9bb6228a7302387b2c9954e455fb25d6b4470cb0641d16759d"},
+    {file = "ruff-0.9.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5284dcac6b9dbc2fcb71fdfc26a217b2ca4ede6ccd57476f52a587451ebe450d"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47678f39fa2a3da62724851107f438c8229a3470f533894b5568a39b40029c0c"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99713a6e2766b7a17147b309e8c915b32b07a25c9efd12ada79f217c9c778b3e"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524ee184d92f7c7304aa568e2db20f50c32d1d0caa235d8ddf10497566ea1a12"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:df92aeac30af821f9acf819fc01b4afc3dfb829d2782884f8739fb52a8119a16"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de42e4edc296f520bb84954eb992a07a0ec5a02fecb834498415908469854a52"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d257f95b65806104b6b1ffca0ea53f4ef98454036df65b1eda3693534813ecd1"},
+    {file = "ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c"},
+    {file = "ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43"},
+    {file = "ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c"},
+    {file = "ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5"},
+    {file = "ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8"},
+    {file = "ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029"},
+    {file = "ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1"},
+    {file = "ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69"},
+    {file = "ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7"},
 ]

 [[package]]

@@ -1929,4 +1929,4 @@ type = ["pytest-mypy"]

 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<4.0"
-content-hash = "f5cd0d1dafeb2b5c97d0ef27bef8a2235d4a1f54e3c60583d05ef582ac49c0e6"
+content-hash = "931772287f71c539575d601e6398423bf68e09ca87ae1a144057c7f5707cf978"
@@ -21,7 +21,7 @@ supabase = "^2.13.0"

 [tool.poetry.group.dev.dependencies]
 redis = "^5.2.1"
-ruff = "^0.9.6"
+ruff = "^0.9.10"

 [build-system]
 requires = ["poetry-core"]
@@ -39,6 +39,7 @@ from backend.util.settings import Settings

 settings = Settings()
 stripe.api_key = settings.secrets.stripe_api_key
 logger = logging.getLogger(__name__)
+base_url = settings.config.frontend_base_url or settings.config.platform_base_url


 class UserCreditBase(ABC):

@@ -185,6 +186,14 @@ class UserCreditBase(ABC):
         """
         pass

+    @staticmethod
+    async def create_billing_portal_session(user_id: str) -> str:
+        session = stripe.billing_portal.Session.create(
+            customer=await get_stripe_customer_id(user_id),
+            return_url=base_url + "/profile/credits",
+        )
+        return session.url
+
     @staticmethod
     def time_now() -> datetime:
         return datetime.now(timezone.utc)

@@ -765,10 +774,8 @@ class UserCredit(UserCreditBase):
             ui_mode="hosted",
             payment_intent_data={"setup_future_usage": "off_session"},
             saved_payment_method_options={"payment_method_save": "enabled"},
-            success_url=settings.config.frontend_base_url
-            + "/profile/credits?topup=success",
-            cancel_url=settings.config.frontend_base_url
-            + "/profile/credits?topup=cancel",
+            success_url=base_url + "/profile/credits?topup=success",
+            cancel_url=base_url + "/profile/credits?topup=cancel",
             allow_promotion_codes=True,
         )
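Note: the behavioural core of this change is the new module-level `base_url` fallback, so every Stripe redirect URL is derived from one place. A tiny sketch of the fallback semantics, with stand-in values instead of the real `Settings().config`:

```
# Stand-in values for settings.config.*; not real configuration.
frontend_base_url = ""  # e.g. unset in a single-host deployment
platform_base_url = "http://localhost:8000"

# Same expression as the module-level line added above.
base_url = frontend_base_url or platform_base_url

assert base_url + "/profile/credits" == "http://localhost:8000/profile/credits"
```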
@@ -1,4 +1,3 @@
-import asyncio
 import logging
 import uuid
 from collections import defaultdict

@@ -19,9 +18,9 @@ from pydantic.fields import Field, computed_field

 from backend.blocks.agent import AgentExecutorBlock
 from backend.blocks.basic import AgentInputBlock, AgentOutputBlock
-from backend.util import type
+from backend.util import type as type_utils

-from .block import BlockInput, BlockType, get_block, get_blocks
+from .block import Block, BlockInput, BlockSchema, BlockType, get_block, get_blocks
 from .db import BaseDbModel, transaction
 from .execution import ExecutionResult, ExecutionStatus
 from .includes import AGENT_GRAPH_INCLUDE, AGENT_NODE_INCLUDE

@@ -71,13 +70,20 @@ class NodeModel(Node):

     webhook: Optional[Webhook] = None

+    @property
+    def block(self) -> Block[BlockSchema, BlockSchema]:
+        block = get_block(self.block_id)
+        if not block:
+            raise ValueError(f"Block #{self.block_id} does not exist")
+        return block
+
     @staticmethod
-    def from_db(node: AgentNode) -> "NodeModel":
+    def from_db(node: AgentNode, for_export: bool = False) -> "NodeModel":
         obj = NodeModel(
             id=node.id,
             block_id=node.agentBlockId,
-            input_default=type.convert(node.constantInput, dict[str, Any]),
-            metadata=type.convert(node.metadata, dict[str, Any]),
+            input_default=type_utils.convert(node.constantInput, dict[str, Any]),
+            metadata=type_utils.convert(node.metadata, dict[str, Any]),
             graph_id=node.agentGraphId,
             graph_version=node.agentGraphVersion,
             webhook_id=node.webhookId,

@@ -85,6 +91,8 @@ class NodeModel(Node):
         )
         obj.input_links = [Link.from_db(link) for link in node.Input or []]
         obj.output_links = [Link.from_db(link) for link in node.Output or []]
+        if for_export:
+            return obj.stripped_for_export()
         return obj

     def is_triggered_by_event_type(self, event_type: str) -> bool:
@@ -103,6 +111,51 @@ class NodeModel(Node):
             if event_filter[k] is True
         ]

+    def stripped_for_export(self) -> "NodeModel":
+        """
+        Returns a copy of the node model, stripped of any non-transferable properties
+        """
+        stripped_node = self.model_copy(deep=True)
+
+        # Remove credentials from node input
+        if stripped_node.input_default:
+            stripped_node.input_default = NodeModel._filter_secrets_from_node_input(
+                stripped_node.input_default, self.block.input_schema.jsonschema()
+            )
+
+        if (
+            stripped_node.block.block_type == BlockType.INPUT
+            and "value" in stripped_node.input_default
+        ):
+            stripped_node.input_default["value"] = ""
+
+        # Remove webhook info
+        stripped_node.webhook_id = None
+        stripped_node.webhook = None
+
+        return stripped_node
+
+    @staticmethod
+    def _filter_secrets_from_node_input(
+        input_data: dict[str, Any], schema: dict[str, Any] | None
+    ) -> dict[str, Any]:
+        sensitive_keys = ["credentials", "api_key", "password", "token", "secret"]
+        field_schemas = schema.get("properties", {}) if schema else {}
+        result = {}
+        for key, value in input_data.items():
+            field_schema: dict | None = field_schemas.get(key)
+            if (field_schema and field_schema.get("secret", False)) or any(
+                sensitive_key in key.lower() for sensitive_key in sensitive_keys
+            ):
+                # This is a secret value -> filter this key-value pair out
+                continue
+            elif isinstance(value, dict):
+                result[key] = NodeModel._filter_secrets_from_node_input(
+                    value, field_schema
+                )
+            else:
+                result[key] = value
+        return result
+

 # Fix 2-way reference Node <-> Webhook
 Webhook.model_rebuild()
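Note: the new `_filter_secrets_from_node_input` drops a field when its JSON schema marks it `secret`, or when its key contains a sensitive substring, and recurses into nested dicts. A self-contained sketch of the same rule on invented sample data:

```
from typing import Any

SENSITIVE = ["credentials", "api_key", "password", "token", "secret"]


def filter_secrets(data: dict[str, Any], schema: dict[str, Any] | None) -> dict[str, Any]:
    fields = schema.get("properties", {}) if schema else {}
    out: dict[str, Any] = {}
    for key, value in data.items():
        field_schema = fields.get(key)
        if (field_schema and field_schema.get("secret", False)) or any(
            s in key.lower() for s in SENSITIVE
        ):
            continue  # secret -> omitted from the exported copy
        out[key] = filter_secrets(value, field_schema) if isinstance(value, dict) else value
    return out


node_input = {"prompt": "hi", "api_key": "sk-123", "opts": {"auth_token": "t", "retries": 3}}
schema = {"properties": {"opts": {"properties": {"auth_token": {"secret": True}}}}}
assert filter_secrets(node_input, schema) == {"prompt": "hi", "opts": {"retries": 3}}
```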
@@ -129,7 +182,7 @@ class GraphExecutionMeta(BaseDbModel):
             total_run_time = duration

         try:
-            stats = type.convert(_graph_exec.stats or {}, dict[str, Any])
+            stats = type_utils.convert(_graph_exec.stats or {}, dict[str, Any])
         except ValueError:
             stats = {}

@@ -201,10 +254,9 @@ class GraphExecution(GraphExecutionMeta):
     )


-class Graph(BaseDbModel):
+class BaseGraph(BaseDbModel):
     version: int = 1
     is_active: bool = True
-    is_template: bool = False
     name: str
     description: str
     nodes: list[Node] = []

@@ -267,6 +319,10 @@ class Graph(BaseDbModel):
     }


+class Graph(BaseGraph):
+    sub_graphs: list[BaseGraph] = []  # Flattened sub-graphs, only used in export
+
+
 class GraphModel(Graph):
     user_id: str
     nodes: list[NodeModel] = []  # type: ignore
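Note: the `Graph`/`BaseGraph` split keeps the flattened export bundle non-recursive, since only the top-level `Graph` carries `sub_graphs`. A minimal pydantic sketch with the field set reduced for brevity:

```
from pydantic import BaseModel


class BaseGraph(BaseModel):
    version: int = 1
    is_active: bool = True
    name: str = ""


class Graph(BaseGraph):
    sub_graphs: list[BaseGraph] = []  # only populated for export


bundle = Graph(name="parent", sub_graphs=[BaseGraph(name="child")])
# A BaseGraph has no sub_graphs field, so the bundle cannot nest further.
assert not hasattr(bundle.sub_graphs[0], "sub_graphs")
```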
@@ -290,31 +346,55 @@ class GraphModel(Graph):
         Reassigns all IDs in the graph to new UUIDs.
         This method can be used before storing a new graph to the database.
         """
+        if reassign_graph_id:
+            graph_id_map = {
+                self.id: str(uuid.uuid4()),
+                **{sub_graph.id: str(uuid.uuid4()) for sub_graph in self.sub_graphs},
+            }
+        else:
+            graph_id_map = {}
+
+        self._reassign_ids(self, user_id, graph_id_map)
+        for sub_graph in self.sub_graphs:
+            self._reassign_ids(sub_graph, user_id, graph_id_map)
+
+    @staticmethod
+    def _reassign_ids(
+        graph: BaseGraph,
+        user_id: str,
+        graph_id_map: dict[str, str],
+    ):
         # Reassign Graph ID
-        id_map = {node.id: str(uuid.uuid4()) for node in self.nodes}
-        if reassign_graph_id:
-            self.id = str(uuid.uuid4())
+        if graph.id in graph_id_map:
+            graph.id = graph_id_map[graph.id]

         # Reassign Node IDs
-        for node in self.nodes:
+        id_map = {node.id: str(uuid.uuid4()) for node in graph.nodes}
+        for node in graph.nodes:
             node.id = id_map[node.id]

         # Reassign Link IDs
-        for link in self.links:
+        for link in graph.links:
             link.source_id = id_map[link.source_id]
             link.sink_id = id_map[link.sink_id]

         # Reassign User IDs for agent blocks
-        for node in self.nodes:
+        for node in graph.nodes:
             if node.block_id != AgentExecutorBlock().id:
                 continue
             node.input_default["user_id"] = user_id
             node.input_default.setdefault("data", {})
+            if (graph_id := node.input_default.get("graph_id")) in graph_id_map:
+                node.input_default["graph_id"] = graph_id_map[graph_id]

-        self.validate_graph()
-
     def validate_graph(self, for_run: bool = False):
+        self._validate_graph(self, for_run)
+        for sub_graph in self.sub_graphs:
+            self._validate_graph(sub_graph, for_run)
+
+    @staticmethod
+    def _validate_graph(graph: BaseGraph, for_run: bool = False):
         def sanitize(name):
             sanitized_name = name.split("_#_")[0].split("_@_")[0].split("_$_")[0]
             if sanitized_name.startswith("tools_^_"):

@@ -326,11 +406,11 @@ class GraphModel(Graph):
         agent_nodes = set()
         nodes_block = {
             node.id: block
-            for node in self.nodes
+            for node in graph.nodes
             if (block := get_block(node.block_id)) is not None
         }

-        for node in self.nodes:
+        for node in graph.nodes:
             if (block := nodes_block.get(node.id)) is None:
                 raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

@@ -343,11 +423,11 @@ class GraphModel(Graph):

         input_links = defaultdict(list)

-        for link in self.links:
+        for link in graph.links:
             input_links[link.sink_id].append(link)

         # Nodes: required fields are filled or connected and dependencies are satisfied
-        for node in self.nodes:
+        for node in graph.nodes:
             if (block := nodes_block.get(node.id)) is None:
                 raise ValueError(f"Invalid block {node.block_id} for node #{node.id}")

@@ -408,7 +488,7 @@ class GraphModel(Graph):
                     f"Node {block.name} #{node.id}: Field `{field_name}` requires [{', '.join(missing_deps)}] to be set"
                 )

-        node_map = {v.id: v for v in self.nodes}
+        node_map = {v.id: v for v in graph.nodes}

         def is_static_output_block(nid: str) -> bool:
             bid = node_map[nid].block_id

@@ -416,7 +496,7 @@ class GraphModel(Graph):
             return b.static_output if b else False

         # Links: links are connected and the connected pin data type are compatible.
-        for link in self.links:
+        for link in graph.links:
             source = (link.source_id, link.source_name)
             sink = (link.sink_id, link.sink_name)
             prefix = f"Link {source} <-> {sink}"
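Note: `_reassign_ids` follows the usual two-phase remap: build the complete old-to-new ID map first, then rewrite the nodes and every link that references them. A self-contained sketch of the pattern on invented data:

```
import uuid

nodes = [{"id": "n1"}, {"id": "n2"}]
links = [{"source_id": "n1", "sink_id": "n2"}]

# Phase 1: decide every new ID up front.
id_map = {node["id"]: str(uuid.uuid4()) for node in nodes}

# Phase 2: rewrite nodes and the links that point at them.
for node in nodes:
    node["id"] = id_map[node["id"]]
for link in links:
    link["source_id"] = id_map[link["source_id"]]
    link["sink_id"] = id_map[link["sink_id"]]

assert links[0]["source_id"] == nodes[0]["id"]  # endpoints still consistent
```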
@@ -457,18 +537,20 @@ class GraphModel(Graph):
                 link.is_static = True  # Each value block output should be static.

     @staticmethod
-    def from_db(graph: AgentGraph, for_export: bool = False):
+    def from_db(
+        graph: AgentGraph,
+        for_export: bool = False,
+        sub_graphs: list[AgentGraph] | None = None,
+    ):
         return GraphModel(
             id=graph.id,
-            user_id=graph.userId,
+            user_id=graph.userId if not for_export else "",
             version=graph.version,
             is_active=graph.isActive,
-            is_template=graph.isTemplate,
             name=graph.name or "",
             description=graph.description or "",
             nodes=[
-                NodeModel.from_db(GraphModel._process_node(node, for_export))
-                for node in graph.AgentNodes or []
+                NodeModel.from_db(node, for_export) for node in graph.AgentNodes or []
             ],
             links=list(
                 {

@@ -477,59 +559,12 @@ class GraphModel(Graph):
                     for link in (node.Input or []) + (node.Output or [])
                 }
             ),
+            sub_graphs=[
+                GraphModel.from_db(sub_graph, for_export)
+                for sub_graph in sub_graphs or []
+            ],
         )

-    @staticmethod
-    def _process_node(node: AgentNode, for_export: bool) -> AgentNode:
-        if for_export:
-            # Remove credentials from node input
-            if node.constantInput:
-                constant_input = type.convert(node.constantInput, dict[str, Any])
-                constant_input = GraphModel._hide_node_input_credentials(constant_input)
-                node.constantInput = Json(constant_input)
-
-            # Remove webhook info
-            node.webhookId = None
-            node.Webhook = None
-
-        return node
-
-    @staticmethod
-    def _hide_node_input_credentials(input_data: dict[str, Any]) -> dict[str, Any]:
-        sensitive_keys = ["credentials", "api_key", "password", "token", "secret"]
-        result = {}
-        for key, value in input_data.items():
-            if isinstance(value, dict):
-                result[key] = GraphModel._hide_node_input_credentials(value)
-            elif isinstance(value, str) and any(
-                sensitive_key in key.lower() for sensitive_key in sensitive_keys
-            ):
-                # Skip this key-value pair in the result
-                continue
-            else:
-                result[key] = value
-        return result
-
-    def clean_graph(self):
-        blocks = [block() for block in get_blocks().values()]
-
-        input_blocks = [
-            node
-            for node in self.nodes
-            if next(
-                (
-                    b
-                    for b in blocks
-                    if b.id == node.block_id and b.block_type == BlockType.INPUT
-                ),
-                None,
-            )
-        ]
-
-        for node in self.nodes:
-            if any(input_block.id == node.id for input_block in input_blocks):
-                node.input_default["value"] = ""


 # --------------------- CRUD functions --------------------- #
@@ -559,14 +594,14 @@ async def set_node_webhook(node_id: str, webhook_id: str | None) -> NodeModel:

 async def get_graphs(
     user_id: str,
-    filter_by: Literal["active", "template"] | None = "active",
+    filter_by: Literal["active"] | None = "active",
 ) -> list[GraphModel]:
     """
     Retrieves graph metadata objects.
     Default behaviour is to get all currently active graphs.

     Args:
-        filter_by: An optional filter to either select templates or active graphs.
+        filter_by: An optional filter to either select graphs.
         user_id: The ID of the user that owns the graph.

     Returns:

@@ -576,8 +611,6 @@ async def get_graphs(

     if filter_by == "active":
         where_clause["isActive"] = True
-    elif filter_by == "template":
-        where_clause["isTemplate"] = True

     graphs = await AgentGraph.prisma().find_many(
         where=where_clause,

@@ -666,21 +699,18 @@ async def get_graph_metadata(graph_id: str, version: int | None = None) -> Graph
         description=graph.description or "",
         version=graph.version,
         is_active=graph.isActive,
-        is_template=graph.isTemplate,
     )


 async def get_graph(
     graph_id: str,
     version: int | None = None,
     template: bool = False,  # note: currently not in use; TODO: remove from DB entirely
     user_id: str | None = None,
+    for_export: bool = False,
 ) -> GraphModel | None:
     """
     Retrieves a graph from the DB.
-    Defaults to the version with `is_active` if `version` is not passed,
-    or the latest version with `is_template` if `template=True`.
+    Defaults to the version with `is_active` if `version` is not passed.

     Returns `None` if the record is not found.
     """

@@ -690,8 +720,6 @@ async def get_graph(

     if version is not None:
         where_clause["version"] = version
-    elif not template:
-        where_clause["isActive"] = True

     graph = await AgentGraph.prisma().find_first(
         where=where_clause,
@@ -715,9 +743,62 @@ async def get_graph(
     ):
         return None

+    if for_export:
+        sub_graphs = await _get_sub_graphs(graph)
+        return GraphModel.from_db(
+            graph=graph,
+            sub_graphs=sub_graphs,
+            for_export=for_export,
+        )
+
     return GraphModel.from_db(graph, for_export)
+
+
+async def _get_sub_graphs(graph: AgentGraph) -> list[AgentGraph]:
+    """
+    Iteratively fetches all sub-graphs of a given graph, and flattens them into a list.
+    This call involves a DB fetch in batch, breadth-first, per-level of graph depth.
+    On each DB fetch we will only fetch the sub-graphs that are not already in the list.
+    """
+    sub_graphs = {graph.id: graph}
+    search_graphs = [graph]
+    agent_block_id = AgentExecutorBlock().id
+
+    while search_graphs:
+        sub_graph_ids = [
+            (graph_id, graph_version)
+            for graph in search_graphs
+            for node in graph.AgentNodes or []
+            if (
+                node.AgentBlock
+                and node.AgentBlock.id == agent_block_id
+                and (graph_id := dict(node.constantInput).get("graph_id"))
+                and (graph_version := dict(node.constantInput).get("graph_version"))
+            )
+        ]
+        if not sub_graph_ids:
+            break
+
+        graphs = await AgentGraph.prisma().find_many(
+            where={
+                "OR": [
+                    {
+                        "id": graph_id,
+                        "version": graph_version,
+                        "userId": graph.userId,  # Ensure the sub-graph is owned by the same user
+                    }
+                    for graph_id, graph_version in sub_graph_ids
+                ]  # type: ignore
+            },
+            include=AGENT_GRAPH_INCLUDE,
+        )
+
+        search_graphs = [graph for graph in graphs if graph.id not in sub_graphs]
+        sub_graphs.update({graph.id: graph for graph in search_graphs})
+
+    return [g for g in sub_graphs.values() if g.id != graph.id]


 async def get_connected_output_nodes(node_id: str) -> list[tuple[Link, Node]]:
     links = await AgentNodeLink.prisma().find_many(
         where={"agentNodeSourceId": node_id},
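Note: `_get_sub_graphs` is a breadth-first traversal with a seen-set, issuing one batched DB query per depth level and skipping graphs already collected, so cycles and shared sub-graphs terminate. An in-memory sketch with an invented `children` table standing in for the query:

```
# `children` stands in for the batched AgentGraph.prisma().find_many() call.
children = {"root": ["a", "b"], "a": ["b", "c"], "b": [], "c": ["a"]}  # cycle c->a is safe


def collect_sub_graphs(root: str) -> list[str]:
    seen = {root}
    frontier = [root]
    while frontier:
        # One "DB fetch" per depth level, skipping graphs already collected.
        batch = {c for g in frontier for c in children[g] if c not in seen}
        seen.update(batch)
        frontier = list(batch)
    return [g for g in seen if g != root]


assert sorted(collect_sub_graphs("root")) == ["a", "b", "c"]
```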
@@ -781,50 +862,56 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel:
     async with transaction() as tx:
         await __create_graph(tx, graph, user_id)

-    if created_graph := await get_graph(
-        graph.id, graph.version, template=graph.is_template, user_id=user_id
-    ):
+    if created_graph := await get_graph(graph.id, graph.version, user_id=user_id):
         return created_graph

     raise ValueError(f"Created graph {graph.id} v{graph.version} is not in DB")


 async def __create_graph(tx, graph: Graph, user_id: str):
-    await AgentGraph.prisma(tx).create(
-        data={
-            "id": graph.id,
-            "version": graph.version,
-            "name": graph.name,
-            "description": graph.description,
-            "isTemplate": graph.is_template,
-            "isActive": graph.is_active,
-            "userId": user_id,
-            "AgentNodes": {
-                "create": [
-                    {
-                        "id": node.id,
-                        "agentBlockId": node.block_id,
-                        "constantInput": Json(node.input_default),
-                        "metadata": Json(node.metadata),
-                    }
-                    for node in graph.nodes
-                ]
-            },
-        }
+    graphs = [graph] + graph.sub_graphs
+
+    await AgentGraph.prisma(tx).create_many(
+        data=[
+            {
+                "id": graph.id,
+                "version": graph.version,
+                "name": graph.name,
+                "description": graph.description,
+                "isActive": graph.is_active,
+                "userId": user_id,
+            }
+            for graph in graphs
+        ]
     )

-    await asyncio.gather(
-        *[
-            AgentNodeLink.prisma(tx).create(
-                {
-                    "id": str(uuid.uuid4()),
-                    "sourceName": link.source_name,
-                    "sinkName": link.sink_name,
-                    "agentNodeSourceId": link.source_id,
-                    "agentNodeSinkId": link.sink_id,
-                    "isStatic": link.is_static,
-                }
-            )
+    await AgentNode.prisma(tx).create_many(
+        data=[
+            {
+                "id": node.id,
+                "agentGraphId": graph.id,
+                "agentGraphVersion": graph.version,
+                "agentBlockId": node.block_id,
+                "constantInput": Json(node.input_default),
+                "metadata": Json(node.metadata),
+                "webhookId": node.webhook_id,
+            }
+            for graph in graphs
+            for node in graph.nodes
+        ]
+    )
+
+    await AgentNodeLink.prisma(tx).create_many(
+        data=[
+            {
+                "id": str(uuid.uuid4()),
+                "sourceName": link.source_name,
+                "sinkName": link.sink_name,
+                "agentNodeSourceId": link.source_id,
+                "agentNodeSinkId": link.sink_id,
+                "isStatic": link.is_static,
+            }
+            for graph in graphs
+            for link in graph.links
+        ]
+    )
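Note: `__create_graph` now issues three bulk `create_many` calls (graphs, then nodes, then links) instead of per-row creates fanned out through `asyncio.gather`, flattening the parent graph plus its sub-graphs with nested comprehensions. A pure-data sketch of the node flattening:

```
graphs = [
    {"id": "g1", "nodes": [{"id": "n1"}, {"id": "n2"}]},
    {"id": "g2", "nodes": [{"id": "n3"}]},
]

# One row list for a single create_many call, parent and sub-graphs together.
node_rows = [
    {"id": node["id"], "agentGraphId": graph["id"]}
    for graph in graphs
    for node in graph["nodes"]
]
assert [row["agentGraphId"] for row in node_rows] == ["g1", "g1", "g2"]
```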
@@ -35,7 +35,8 @@ class QueueType(Enum):


 class BaseNotificationData(BaseModel):
-    pass
+    class Config:
+        extra = "allow"


 class AgentRunData(BaseNotificationData):
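Note: replacing `pass` with `extra = "allow"` makes every notification payload tolerant of undeclared fields. A minimal pydantic sketch; the motivation suggested here (previously queued events staying parseable across schema changes) is an inference, not stated in the commit:

```
from pydantic import BaseModel


class BaseNotificationData(BaseModel):
    class Config:
        extra = "allow"  # keep undeclared fields instead of rejecting them


class AgentRunData(BaseNotificationData):
    agent_name: str


event = AgentRunData.model_validate({"agent_name": "demo", "legacy_field": 1})
assert event.agent_name == "demo"
assert event.model_extra == {"legacy_field": 1}  # extra field preserved
```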
@@ -341,6 +342,43 @@ class NotificationPreference(BaseModel):
     )


+class UserNotificationEventDTO(BaseModel):
+    type: NotificationType
+    data: dict
+    created_at: datetime
+    updated_at: datetime
+
+    @staticmethod
+    def from_db(model: NotificationEvent) -> "UserNotificationEventDTO":
+        return UserNotificationEventDTO(
+            type=model.type,
+            data=dict(model.data),
+            created_at=model.createdAt,
+            updated_at=model.updatedAt,
+        )
+
+
+class UserNotificationBatchDTO(BaseModel):
+    user_id: str
+    type: NotificationType
+    notifications: list[UserNotificationEventDTO]
+    created_at: datetime
+    updated_at: datetime
+
+    @staticmethod
+    def from_db(model: UserNotificationBatch) -> "UserNotificationBatchDTO":
+        return UserNotificationBatchDTO(
+            user_id=model.userId,
+            type=model.type,
+            notifications=[
+                UserNotificationEventDTO.from_db(notification)
+                for notification in model.notifications or []
+            ],
+            created_at=model.createdAt,
+            updated_at=model.updatedAt,
+        )
+
+
 def get_batch_delay(notification_type: NotificationType) -> timedelta:
     return {
         NotificationType.AGENT_RUN: timedelta(minutes=1),

@@ -355,7 +393,7 @@ async def create_or_add_to_user_notification_batch(
     user_id: str,
     notification_type: NotificationType,
     notification_data: NotificationEventModel,
-) -> UserNotificationBatch:
+) -> UserNotificationBatchDTO:
     try:
         logger.info(
             f"Creating or adding to notification batch for {user_id} with type {notification_type} and data {notification_data}"

@@ -393,7 +431,7 @@ async def create_or_add_to_user_notification_batch(
                 },
                 include={"notifications": True},
             )
-            return resp
+            return UserNotificationBatchDTO.from_db(resp)
         else:
             async with transaction() as tx:
                 notification_event = await tx.notificationevent.create(

@@ -415,7 +453,7 @@ async def create_or_add_to_user_notification_batch(
                     raise DatabaseError(
                         f"Failed to add notification event {notification_event.id} to existing batch {existing_batch.id}"
                     )
-                return resp
+                return UserNotificationBatchDTO.from_db(resp)
     except Exception as e:
         raise DatabaseError(
             f"Failed to create or add to notification batch for user {user_id} and type {notification_type}: {e}"

@@ -425,7 +463,7 @@ async def create_or_add_to_user_notification_batch(
 async def get_user_notification_oldest_message_in_batch(
     user_id: str,
     notification_type: NotificationType,
-) -> NotificationEvent | None:
+) -> UserNotificationEventDTO | None:
     try:
         batch = await UserNotificationBatch.prisma().find_first(
             where={"userId": user_id, "type": notification_type},

@@ -436,7 +474,12 @@ async def get_user_notification_oldest_message_in_batch(
         if not batch.notifications:
             return None
         sorted_notifications = sorted(batch.notifications, key=lambda x: x.createdAt)
-        return sorted_notifications[0]
+
+        return (
+            UserNotificationEventDTO.from_db(sorted_notifications[0])
+            if sorted_notifications
+            else None
+        )
     except Exception as e:
         raise DatabaseError(
             f"Failed to get user notification last message in batch for user {user_id} and type {notification_type}: {e}"

@@ -471,12 +514,13 @@ async def empty_user_notification_batch(
 async def get_user_notification_batch(
     user_id: str,
     notification_type: NotificationType,
-) -> UserNotificationBatch | None:
+) -> UserNotificationBatchDTO | None:
     try:
-        return await UserNotificationBatch.prisma().find_first(
+        batch = await UserNotificationBatch.prisma().find_first(
             where={"userId": user_id, "type": notification_type},
             include={"notifications": True},
         )
+        return UserNotificationBatchDTO.from_db(batch) if batch else None
     except Exception as e:
         raise DatabaseError(
             f"Failed to get user notification batch for user {user_id} and type {notification_type}: {e}"

@@ -485,9 +529,9 @@ async def get_user_notification_batch(

 async def get_all_batches_by_type(
     notification_type: NotificationType,
-) -> list[UserNotificationBatch]:
+) -> list[UserNotificationBatchDTO]:
     try:
-        return await UserNotificationBatch.prisma().find_many(
+        batches = await UserNotificationBatch.prisma().find_many(
             where={
                 "type": notification_type,
                 "notifications": {

@@ -496,6 +540,7 @@ async def get_all_batches_by_type(
             },
             include={"notifications": True},
         )
+        return [UserNotificationBatchDTO.from_db(batch) for batch in batches]
     except Exception as e:
         raise DatabaseError(
             f"Failed to get all batches by type {notification_type}: {e}"
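Note: the DTO layer above detaches return values from Prisma ORM rows, presumably so service boundaries serialize plain pydantic objects instead of live database records. A reduced sketch of the `from_db` pattern with a fake row (`type` narrowed to `str` for brevity):

```
from dataclasses import dataclass
from datetime import datetime, timezone

from pydantic import BaseModel


@dataclass
class FakeRow:  # stands in for a prisma.models.NotificationEvent record
    type: str
    data: dict
    createdAt: datetime


class UserNotificationEventDTO(BaseModel):
    type: str  # the real model uses NotificationType
    data: dict
    created_at: datetime

    @staticmethod
    def from_db(row: FakeRow) -> "UserNotificationEventDTO":
        # Copy ORM fields into a plain, serializable pydantic object.
        return UserNotificationEventDTO(
            type=row.type, data=dict(row.data), created_at=row.createdAt
        )


dto = UserNotificationEventDTO.from_db(
    FakeRow(type="AGENT_RUN", data={"agent": "x"}, createdAt=datetime.now(timezone.utc))
)
assert dto.data == {"agent": "x"}
```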
@@ -193,68 +193,68 @@ class NotificationManager(AppService):
                     # Check if batch has aged out
                     oldest_message = (
                         get_db().get_user_notification_oldest_message_in_batch(
-                            batch.userId, notification_type
+                            batch.user_id, notification_type
                         )
                     )

                     if not oldest_message:
                         # this should never happen
                         logger.error(
-                            f"Batch for user {batch.userId} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!"
+                            f"Batch for user {batch.user_id} and type {notification_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!"
                         )
                         continue

                     max_delay = get_batch_delay(notification_type)

                     # If batch has aged out, process it
-                    if oldest_message.createdAt + max_delay < current_time:
-                        recipient_email = get_db().get_user_email_by_id(batch.userId)
+                    if oldest_message.created_at + max_delay < current_time:
+                        recipient_email = get_db().get_user_email_by_id(batch.user_id)

                         if not recipient_email:
                             logger.error(
-                                f"User email not found for user {batch.userId}"
+                                f"User email not found for user {batch.user_id}"
                             )
                             continue

                         should_send = self._should_email_user_based_on_preference(
-                            batch.userId, notification_type
+                            batch.user_id, notification_type
                         )

                         if not should_send:
                             logger.debug(
-                                f"User {batch.userId} does not want to receive {notification_type} notifications"
+                                f"User {batch.user_id} does not want to receive {notification_type} notifications"
                             )
                             # Clear the batch
                             get_db().empty_user_notification_batch(
-                                batch.userId, notification_type
+                                batch.user_id, notification_type
                             )
                             continue

                         batch_data = get_db().get_user_notification_batch(
-                            batch.userId, notification_type
+                            batch.user_id, notification_type
                         )

                         if not batch_data or not batch_data.notifications:
                             logger.error(
-                                f"Batch data not found for user {batch.userId}"
+                                f"Batch data not found for user {batch.user_id}"
                             )
                             # Clear the batch
                             get_db().empty_user_notification_batch(
-                                batch.userId, notification_type
+                                batch.user_id, notification_type
                             )
                             continue

-                        unsub_link = generate_unsubscribe_link(batch.userId)
+                        unsub_link = generate_unsubscribe_link(batch.user_id)

                         events = [
                             NotificationEventModel[
                                 get_notif_data_type(db_event.type)
                             ].model_validate(
                                 {
-                                    "user_id": batch.userId,
+                                    "user_id": batch.user_id,
                                     "type": db_event.type,
                                     "data": db_event.data,
-                                    "created_at": db_event.createdAt,
+                                    "created_at": db_event.created_at,
                                 }
                             )
                             for db_event in batch_data.notifications

@@ -270,7 +270,7 @@ class NotificationManager(AppService):

                         # Clear the batch
                         get_db().empty_user_notification_batch(
-                            batch.userId, notification_type
+                            batch.user_id, notification_type
                         )

                         processed_count += 1

@@ -465,7 +465,7 @@ class NotificationManager(AppService):
                 f"Batch for user {user_id} and type {event_type} has no oldest message whichshould never happen!!!!!!!!!!!!!!!!"
             )
             return False
-        oldest_age = oldest_message.createdAt
+        oldest_age = oldest_message.created_at

         max_delay = get_batch_delay(event_type)

@@ -584,7 +584,7 @@ class NotificationManager(AppService):
                     "user_id": event.user_id,
                     "type": db_event.type,
                     "data": db_event.data,
-                    "created_at": db_event.createdAt,
+                    "created_at": db_event.created_at,
                 }
             )
             for db_event in batch.notifications
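Note: after the rename, batch ageing is plain datetime arithmetic on the DTO's snake_case field. A sketch with invented values:

```
from datetime import datetime, timedelta, timezone

max_delay = timedelta(minutes=1)  # e.g. the AGENT_RUN entry in get_batch_delay
oldest_created_at = datetime.now(timezone.utc) - timedelta(minutes=5)

# Same comparison as `oldest_message.created_at + max_delay < current_time`.
if oldest_created_at + max_delay < datetime.now(timezone.utc):
    print("batch aged out -> send digest email, then empty the batch")
```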
@@ -154,9 +154,10 @@ class AgentServer(backend.util.service.AppProcess):
         graph_id: str,
         graph_version: int,
         user_id: str,
+        for_export: bool = False,
     ):
         return await backend.server.routers.v1.get_graph(
-            graph_id, user_id, graph_version
+            graph_id, user_id, graph_version, for_export
         )

     @staticmethod
@@ -38,7 +38,6 @@ from backend.data.credit import (
     TransactionHistory,
     get_auto_top_up,
     get_block_costs,
-    get_stripe_customer_id,
     get_user_credit_model,
     set_auto_top_up,
 )

@@ -341,15 +340,7 @@ async def stripe_webhook(request: Request):
 async def manage_payment_method(
     user_id: Annotated[str, Depends(get_user_id)],
 ) -> dict[str, str]:
-    session = stripe.billing_portal.Session.create(
-        customer=await get_stripe_customer_id(user_id),
-        return_url=settings.config.frontend_base_url + "/profile/credits",
-    )
-    if not session:
-        raise HTTPException(
-            status_code=400, detail="Failed to create billing portal session"
-        )
-    return {"url": session.url}
+    return {"url": await _user_credit_model.create_billing_portal_session(user_id)}


 @v1_router.get(path="/credits/transactions", dependencies=[Depends(auth_middleware)])

@@ -405,10 +396,10 @@ async def get_graph(
     graph_id: str,
     user_id: Annotated[str, Depends(get_user_id)],
     version: int | None = None,
-    hide_credentials: bool = False,
+    for_export: bool = False,
 ) -> graph_db.GraphModel:
     graph = await graph_db.get_graph(
-        graph_id, version, user_id=user_id, for_export=hide_credentials
+        graph_id, version, user_id=user_id, for_export=for_export
     )
     if not graph:
         raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")

@@ -438,6 +429,7 @@ async def create_new_graph(
 ) -> graph_db.GraphModel:
     graph = graph_db.make_graph_model(create_graph.graph, user_id)
     graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
+    graph.validate_graph(for_run=False)

     graph = await graph_db.create_graph(graph, user_id=user_id)

@@ -489,17 +481,10 @@ async def update_graph(
     latest_version_number = max(g.version for g in existing_versions)
     graph.version = latest_version_number + 1

-    latest_version_graph = next(
-        v for v in existing_versions if v.version == latest_version_number
-    )
     current_active_version = next((v for v in existing_versions if v.is_active), None)
-    if latest_version_graph.is_template != graph.is_template:
-        raise HTTPException(
-            400, detail="Changing is_template on an existing graph is forbidden"
-        )
-    graph.is_active = not graph.is_template
     graph = graph_db.make_graph_model(graph, user_id)
-    graph.reassign_ids(user_id=user_id)
+    graph.reassign_ids(user_id=user_id, reassign_graph_id=False)
+    graph.validate_graph(for_run=False)

     new_graph_version = await graph_db.create_graph(graph, user_id=user_id)
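Note: renaming the keyword argument renames the public query flag, since FastAPI derives `?for_export=` from the parameter name. A minimal illustrative endpoint (route shape and app invented, not this codebase's router):

```
import fastapi

app = fastapi.FastAPI()


@app.get("/api/graphs/{graph_id}")
async def get_graph(graph_id: str, version: int | None = None, for_export: bool = False):
    # for_export=True makes the backend strip credentials and webhook info
    # before returning the graph (see the graph.py changes above).
    return {"graph_id": graph_id, "version": version, "for_export": for_export}
```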
@@ -3,20 +3,11 @@ from datetime import datetime

 import prisma.errors
 import prisma.models
 import pytest
-from prisma import Prisma

 import backend.server.v2.library.db as db
 import backend.server.v2.store.exceptions
-
-
-@pytest.fixture(autouse=True)
-async def setup_prisma():
-    # Don't register client if already registered
-    try:
-        Prisma()
-    except prisma.errors.ClientAlreadyRegisteredError:
-        pass
-    yield
+from backend.data.db import connect
+from backend.data.includes import library_agent_include


 @pytest.mark.asyncio

@@ -31,7 +22,6 @@ async def test_get_library_agents(mocker):
             userId="test-user",
             isActive=True,
             createdAt=datetime.now(),
-            isTemplate=False,
         )
     ]

@@ -56,7 +46,6 @@ async def test_get_library_agents(mocker):
                 userId="other-user",
                 isActive=True,
                 createdAt=datetime.now(),
-                isTemplate=False,
             ),
         )
     ]

@@ -91,10 +80,11 @@ async def test_get_library_agents(mocker):
     assert result.pagination.page_size == 50


-@pytest.mark.asyncio
+@pytest.mark.asyncio(scope="session")
 async def test_add_agent_to_library(mocker):
+    await connect()
     # Mock data
-    mock_store_listing = prisma.models.StoreListingVersion(
+    mock_store_listing_data = prisma.models.StoreListingVersion(
         id="version123",
         version=1,
         createdAt=datetime.now(),

@@ -119,21 +109,37 @@ async def test_add_agent_to_library(mocker):
             userId="creator",
             isActive=True,
             createdAt=datetime.now(),
-            isTemplate=False,
         ),
     )

+    mock_library_agent_data = prisma.models.LibraryAgent(
+        id="ua1",
+        userId="test-user",
+        agentId=mock_store_listing_data.agentId,
+        agentVersion=1,
+        isCreatedByUser=False,
+        isDeleted=False,
+        isArchived=False,
+        createdAt=datetime.now(),
+        updatedAt=datetime.now(),
+        isFavorite=False,
+        useGraphIsActiveVersion=True,
+        Agent=mock_store_listing_data.Agent,
+    )
+
     # Mock prisma calls
     mock_store_listing_version = mocker.patch(
         "prisma.models.StoreListingVersion.prisma"
     )
     mock_store_listing_version.return_value.find_unique = mocker.AsyncMock(
-        return_value=mock_store_listing
+        return_value=mock_store_listing_data
     )

     mock_library_agent = mocker.patch("prisma.models.LibraryAgent.prisma")
     mock_library_agent.return_value.find_first = mocker.AsyncMock(return_value=None)
-    mock_library_agent.return_value.create = mocker.AsyncMock()
+    mock_library_agent.return_value.create = mocker.AsyncMock(
+        return_value=mock_library_agent_data
+    )

     # Call function
     await db.add_store_agent_to_library("version123", "test-user")

@@ -147,17 +153,20 @@ async def test_add_agent_to_library(mocker):
             "userId": "test-user",
             "agentId": "agent1",
             "agentVersion": 1,
-        }
+        },
+        include=library_agent_include("test-user"),
     )
     mock_library_agent.return_value.create.assert_called_once_with(
         data=prisma.types.LibraryAgentCreateInput(
             userId="test-user", agentId="agent1", agentVersion=1, isCreatedByUser=False
-        )
+        ),
+        include=library_agent_include("test-user"),
     )


-@pytest.mark.asyncio
+@pytest.mark.asyncio(scope="session")
 async def test_add_agent_to_library_not_found(mocker):
+    await connect()
     # Mock prisma calls
     mock_store_listing_version = mocker.patch(
         "prisma.models.StoreListingVersion.prisma"
@@ -2,11 +2,14 @@ import datetime

 import prisma.fields
 import prisma.models
+import pytest

 import backend.server.v2.library.model as library_model
+from backend.util import json


-def test_agent_preset_from_db():
+@pytest.mark.asyncio
+async def test_agent_preset_from_db():
     # Create mock DB agent
     db_agent = prisma.models.AgentPreset(
         id="test-agent-123",

@@ -24,7 +27,7 @@ def test_agent_preset_from_db():
                 id="input-123",
                 time=datetime.datetime.now(),
                 name="input1",
-                data=prisma.fields.Json({"type": "string", "value": "test value"}),
+                data=json.dumps({"type": "string", "value": "test value"}),  # type: ignore
             )
         ],
     )
@@ -1,7 +1,6 @@
 import datetime

 import autogpt_libs.auth as autogpt_auth_lib
 import fastapi
 import fastapi.testclient
 import pytest
 import pytest_mock

@@ -30,49 +29,48 @@ app.dependency_overrides[autogpt_auth_lib.auth_middleware] = override_auth_middl
 app.dependency_overrides[autogpt_auth_lib.depends.get_user_id] = override_get_user_id


-def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
-    mocked_value = [
-        library_model.LibraryAgentResponse(
-            agents=[
-                library_model.LibraryAgent(
-                    id="test-agent-1",
-                    agent_id="test-agent-1",
-                    agent_version=1,
-                    name="Test Agent 1",
-                    description="Test Description 1",
-                    image_url=None,
-                    creator_name="Test Creator",
-                    creator_image_url="",
-                    input_schema={"type": "object", "properties": {}},
-                    status=library_model.LibraryAgentStatus.COMPLETED,
-                    new_output=False,
-                    can_access_graph=True,
-                    is_latest_version=True,
-                    updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
-                ),
-                library_model.LibraryAgent(
-                    id="test-agent-2",
-                    agent_id="test-agent-2",
-                    agent_version=1,
-                    name="Test Agent 2",
-                    description="Test Description 2",
-                    image_url=None,
-                    creator_name="Test Creator",
-                    creator_image_url="",
-                    input_schema={"type": "object", "properties": {}},
-                    status=library_model.LibraryAgentStatus.COMPLETED,
-                    new_output=False,
-                    can_access_graph=False,
-                    is_latest_version=True,
-                    updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
-                ),
-            ],
-            pagination=server_model.Pagination(
-                total_items=2, total_pages=1, current_page=1, page_size=50
-            ),
-        )
-    ]
-    mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
+@pytest.mark.asyncio
+async def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
+    mocked_value = library_model.LibraryAgentResponse(
+        agents=[
+            library_model.LibraryAgent(
+                id="test-agent-1",
+                agent_id="test-agent-1",
+                agent_version=1,
+                name="Test Agent 1",
+                description="Test Description 1",
+                image_url=None,
+                creator_name="Test Creator",
+                creator_image_url="",
+                input_schema={"type": "object", "properties": {}},
+                status=library_model.LibraryAgentStatus.COMPLETED,
+                new_output=False,
+                can_access_graph=True,
+                is_latest_version=True,
+                updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
+            ),
+            library_model.LibraryAgent(
+                id="test-agent-2",
+                agent_id="test-agent-2",
+                agent_version=1,
+                name="Test Agent 2",
+                description="Test Description 2",
+                image_url=None,
+                creator_name="Test Creator",
+                creator_image_url="",
+                input_schema={"type": "object", "properties": {}},
+                status=library_model.LibraryAgentStatus.COMPLETED,
+                new_output=False,
+                can_access_graph=False,
+                is_latest_version=True,
+                updated_at=datetime.datetime(2023, 1, 1, 0, 0, 0),
+            ),
+        ],
+        pagination=server_model.Pagination(
+            total_items=2, total_pages=1, current_page=1, page_size=50
+        ),
+    )
+    mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
     mock_db_call.return_value = mocked_value

     response = client.get("/agents?search_term=test")

@@ -94,7 +92,7 @@ def test_get_library_agents_success(mocker: pytest_mock.MockFixture):


 def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
-    mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
+    mock_db_call = mocker.patch("backend.server.v2.library.db.list_library_agents")
     mock_db_call.side_effect = Exception("Test error")

     response = client.get("/agents?search_term=test")
@@ -1,6 +1,5 @@
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
import fastapi
|
||||
import prisma.enums
|
||||
@@ -754,47 +753,31 @@ async def get_my_agents(


async def get_agent(
-    store_listing_version_id: str, version_id: Optional[int]
+    user_id: str,
+    store_listing_version_id: str,
) -> GraphModel:
    """Get agent using the version ID and store listing version ID."""
-    try:
-        store_listing_version = (
-            await prisma.models.StoreListingVersion.prisma().find_unique(
-                where={"id": store_listing_version_id}, include={"Agent": True}
-            )
-        )
+    store_listing_version = (
+        await prisma.models.StoreListingVersion.prisma().find_unique(
+            where={"id": store_listing_version_id}
+        )
+    )
+
+    if not store_listing_version:
+        raise ValueError(f"Store listing version {store_listing_version_id} not found")
+
+    graph = await backend.data.graph.get_graph(
+        user_id=user_id,
+        graph_id=store_listing_version.agentId,
+        version=store_listing_version.agentVersion,
+        for_export=True,
+    )
+    if not graph:
+        raise ValueError(
+            f"Agent {store_listing_version.agentId} v{store_listing_version.agentVersion} not found"
+        )
-        if not store_listing_version or not store_listing_version.Agent:
-            raise fastapi.HTTPException(
-                status_code=404,
-                detail=f"Store listing version {store_listing_version_id} not found",
-            )
-
-        graph_id = store_listing_version.agentId
-        graph_version = store_listing_version.agentVersion
-        graph = await backend.data.graph.get_graph(graph_id, graph_version)
-
-        if not graph:
-            raise fastapi.HTTPException(
-                status_code=404,
-                detail=(
-                    f"Agent #{graph_id} not found "
-                    f"for store listing version #{store_listing_version_id}"
-                ),
-            )
-
-        graph.version = 1
-        graph.is_template = False
-        graph.is_active = True
-        delattr(graph, "user_id")
-
-        return graph
-
-    except Exception as e:
-        logger.error(f"Error getting agent: {e}")
-        raise backend.server.v2.store.exceptions.DatabaseError(
-            "Failed to fetch agent"
-        ) from e
+
+    return graph


async def review_store_submission(

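For orientation, a minimal sketch of how a caller is expected to use the reworked `get_agent` above (the download route in a later hunk does exactly this; `user_id` and `store_listing_version_id` here are placeholder variables, not part of the diff):

    # Sketch only, assuming the new signature from the hunk above.
    # Missing records now surface as ValueError rather than HTTP 404.
    graph = await backend.server.v2.store.db.get_agent(
        user_id=user_id,
        store_listing_version_id=store_listing_version_id,
    )
    # The graph is fetched with for_export=True, so it arrives already
    # prepared for download.
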
@@ -146,7 +146,6 @@ async def test_create_store_submission(mocker):
        userId="user-id",
        createdAt=datetime.now(),
        isActive=True,
-        isTemplate=False,
    )

    mock_listing = prisma.models.StoreListing(

@@ -1,4 +1,3 @@
-import json
import logging
import tempfile
import typing
@@ -8,7 +7,6 @@ import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.responses
-from fastapi.encoders import jsonable_encoder

import backend.data.block
import backend.data.graph
@@ -16,6 +14,7 @@ import backend.server.v2.store.db
import backend.server.v2.store.image_gen
import backend.server.v2.store.media
import backend.server.v2.store.model
+import backend.util.json

logger = logging.getLogger(__name__)

@@ -591,19 +590,18 @@ async def generate_image(
    tags=["store", "public"],
)
async def download_agent_file(
+    user_id: typing.Annotated[
+        str, fastapi.Depends(autogpt_libs.auth.depends.get_user_id)
+    ],
    store_listing_version_id: str = fastapi.Path(
        ..., description="The ID of the agent to download"
    ),
-    version: typing.Optional[int] = fastapi.Query(
-        None, description="Specific version of the agent"
-    ),
) -> fastapi.responses.FileResponse:
    """
    Download the agent file by streaming its content.

    Args:
-        agent_id (str): The ID of the agent to download.
-        version (Optional[int]): Specific version of the agent to download.
+        store_listing_version_id (str): The ID of the agent to download

    Returns:
        StreamingResponse: A streaming response containing the agent's graph data.
@@ -613,35 +611,16 @@ async def download_agent_file(
    """

    graph_data = await backend.server.v2.store.db.get_agent(
-        store_listing_version_id=store_listing_version_id, version_id=version
+        user_id=user_id,
+        store_listing_version_id=store_listing_version_id,
    )

-    graph_data.clean_graph()
-    graph_date_dict = jsonable_encoder(graph_data)
-
-    def remove_credentials(obj):
-        if obj and isinstance(obj, dict):
-            if "credentials" in obj:
-                del obj["credentials"]
-            if "creds" in obj:
-                del obj["creds"]
-
-            for value in obj.values():
-                remove_credentials(value)
-        elif isinstance(obj, list):
-            for item in obj:
-                remove_credentials(item)
-        return obj
-
-    graph_date_dict = remove_credentials(graph_date_dict)
-
-    file_name = f"agent_{store_listing_version_id}_v{version or 'latest'}.json"
+    file_name = f"agent_{graph_data.id}_v{graph_data.version or 'latest'}.json"

    # Sending graph as a stream (similar to marketplace v1)
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".json", delete=False
    ) as tmp_file:
-        tmp_file.write(json.dumps(graph_date_dict))
+        tmp_file.write(backend.util.json.dumps(graph_data))
        tmp_file.flush()

        return fastapi.responses.FileResponse(

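With the inline remove_credentials() helper deleted, credential stripping is expected to happen inside get_graph(..., for_export=True). A hedged sketch of the old invariant restated as a reusable check (the function name is hypothetical, not part of the codebase):

    # Hypothetical sanity check (not part of the diff): exported graphs are
    # expected to arrive already scrubbed. This re-states the old helper's
    # behavior as an assertion over the exported structure.
    def assert_no_credentials(obj) -> None:
        # Recursively verify that no "credentials"/"creds" keys survive export.
        if isinstance(obj, dict):
            assert "credentials" not in obj and "creds" not in obj
            for value in obj.values():
                assert_no_credentials(value)
        elif isinstance(obj, list):
            for item in obj:
                assert_no_credentials(item)
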
@@ -455,7 +455,7 @@ def fastapi_get_service_client(
        return response.json()
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP error in {method_name}: {e.response.text}")
-        error = RemoteCallError.model_validate(e.response.json(), strict=False)
+        error = RemoteCallError.model_validate(e.response.json())
        # DEBUG HELP: if you made a custom exception, make sure you override self.args to be how to make your exception
        raise EXCEPTION_MAPPING.get(error.type, Exception)(
            *(error.args or [str(e)])

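The DEBUG HELP comment above is terse; the contract it describes is that a mapped exception must be rebuildable from error.args alone. A sketch under that assumption (GraphNotFoundError is illustrative, not an existing class):

    # Sketch: a custom exception whose args round-trip through the remote-call
    # error payload, so EXCEPTION_MAPPING.get(error.type, Exception)(*error.args)
    # can rebuild an equivalent exception on the calling side.
    class GraphNotFoundError(Exception):
        def __init__(self, graph_id: str, version: int):
            self.graph_id = graph_id
            self.version = version
            # super().__init__ sets self.args to exactly the constructor
            # arguments, in order, which is what gets serialized and replayed.
            super().__init__(graph_id, version)
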
@@ -0,0 +1,8 @@
/*
  Warnings:

  - You are about to drop the column `isTemplate` on the `AgentGraph` table. All the data in the column will be lost.

*/
-- AlterTable
ALTER TABLE "AgentGraph" DROP COLUMN "isTemplate";
@@ -87,7 +87,6 @@ model AgentGraph {
  description String?

  isActive   Boolean @default(true)
-  isTemplate Boolean @default(false)

  // Link to User model
  userId String

@@ -199,7 +199,9 @@ async def test_clean_graph(server: SpinTestServer):
    )

    # Clean the graph
-    created_graph.clean_graph()
+    created_graph = await server.agent_server.test_get_graph(
+        created_graph.id, created_graph.version, DEFAULT_USER_ID, for_export=True
+    )

    # Verify input block value is cleared
    input_node = next(

@@ -91,7 +91,6 @@ async def main():
            "description": faker.text(max_nb_chars=200),
            "userId": user.id,
            "isActive": True,
-            "isTemplate": False,
        }
    )
    agent_graphs.append(graph)

123
autogpt_platform/db/docker/.env.example
Normal file
@@ -0,0 +1,123 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############

POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
VAULT_ENC_KEY=your-encryption-key-32-chars-min


############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############

POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres


############
# Supavisor -- Database pooler
############
POOLER_PROXY_PORT_TRANSACTION=6543
POOLER_DEFAULT_POOL_SIZE=20
POOLER_MAX_CLIENT_CONN=100
POOLER_TENANT_ID=your-tenant-id


############
# API Proxy - Configuration for the Kong Reverse proxy.
############

KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443


############
# API - Configuration for PostgREST.
############

PGRST_DB_SCHEMAS=public,storage,graphql_public


############
# Auth - Configuration for the GoTrue authentication server.
############

## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000

## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"

## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false

## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true


############
# Studio - Configuration for the Dashboard
############

STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project

STUDIO_PORT=3000
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000

# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true

# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=


############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false


############
# Logs - Configuration for Logflare
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############

LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key

# Change vector.toml sinks to reflect this change
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key

# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock

# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
5
autogpt_platform/db/docker/.gitignore
vendored
Normal file
@@ -0,0 +1,5 @@
volumes/db/data
volumes/storage
.env
test.http
docker-compose.override.yml
3
autogpt_platform/db/docker/README.md
Normal file
@@ -0,0 +1,3 @@
# Supabase Docker

This is a minimal Docker Compose setup for self-hosting Supabase. Follow the steps [here](https://supabase.com/docs/guides/hosting/docker) to get started.
48
autogpt_platform/db/docker/dev/data.sql
Normal file
@@ -0,0 +1,48 @@
create table profiles (
  id uuid references auth.users not null,
  updated_at timestamp with time zone,
  username text unique,
  avatar_url text,
  website text,

  primary key (id),
  unique(username),
  constraint username_length check (char_length(username) >= 3)
);

alter table profiles enable row level security;

create policy "Public profiles are viewable by the owner."
  on profiles for select
  using ( auth.uid() = id );

create policy "Users can insert their own profile."
  on profiles for insert
  with check ( auth.uid() = id );

create policy "Users can update own profile."
  on profiles for update
  using ( auth.uid() = id );

-- Set up Realtime
begin;
  drop publication if exists supabase_realtime;
  create publication supabase_realtime;
commit;
alter publication supabase_realtime add table profiles;

-- Set up Storage
insert into storage.buckets (id, name)
values ('avatars', 'avatars');

create policy "Avatar images are publicly accessible."
  on storage.objects for select
  using ( bucket_id = 'avatars' );

create policy "Anyone can upload an avatar."
  on storage.objects for insert
  with check ( bucket_id = 'avatars' );

create policy "Anyone can update an avatar."
  on storage.objects for update
  with check ( bucket_id = 'avatars' );
34
autogpt_platform/db/docker/dev/docker-compose.dev.yml
Normal file
@@ -0,0 +1,34 @@
version: "3.8"

services:
  studio:
    build:
      context: ..
      dockerfile: studio/Dockerfile
      target: dev
    ports:
      - 8082:8082
  mail:
    container_name: supabase-mail
    image: inbucket/inbucket:3.0.3
    ports:
      - '2500:2500' # SMTP
      - '9000:9000' # web interface
      - '1100:1100' # POP3
  auth:
    environment:
      - GOTRUE_SMTP_USER=
      - GOTRUE_SMTP_PASS=
  meta:
    ports:
      - 5555:8080
  db:
    restart: 'no'
    volumes:
      # Always use a fresh database when developing
      - /var/lib/postgresql/data
      # Seed data should be inserted last (alphabetical order)
      - ./dev/data.sql:/docker-entrypoint-initdb.d/seed.sql
  storage:
    volumes:
      - /var/lib/storage
94
autogpt_platform/db/docker/docker-compose.s3.yml
Normal file
@@ -0,0 +1,94 @@
services:

  minio:
    image: minio/minio
    ports:
      - '9000:9000'
      - '9001:9001'
    environment:
      MINIO_ROOT_USER: supa-storage
      MINIO_ROOT_PASSWORD: secret1234
    command: server --console-address ":9001" /data
    healthcheck:
      test: [ "CMD", "curl", "-f", "http://minio:9000/minio/health/live" ]
      interval: 2s
      timeout: 10s
      retries: 5
    volumes:
      - ./volumes/storage:/data:z

  minio-createbucket:
    image: minio/mc
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234;
      /usr/bin/mc mb supa-minio/stub;
      exit 0;
      "

  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.11.13
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
      minio:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: s3
      GLOBAL_S3_BUCKET: stub
      GLOBAL_S3_ENDPOINT: http://minio:9000
      GLOBAL_S3_PROTOCOL: http
      GLOBAL_S3_FORCE_PATH_STYLE: true
      AWS_ACCESS_KEY_ID: supa-storage
      AWS_SECRET_ACCESS_KEY: secret1234
      AWS_DEFAULT_REGION: stub
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    healthcheck:
      test: [ "CMD", "imgproxy", "health" ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
526
autogpt_platform/db/docker/docker-compose.yml
Normal file
@@ -0,0 +1,526 @@
# Usage
#   Start:            docker compose up
#   With helpers:     docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
#   Stop:             docker compose down
#   Destroy:          docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
#   Reset everything: ./reset.sh

name: supabase

services:

  studio:
    container_name: supabase-studio
    image: supabase/studio:20250224-d10db0f
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
        ]
      timeout: 10s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}

      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      AUTH_JWT_SECRET: ${JWT_SECRET}

      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.170.0
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}

      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}

      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}

      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}

      # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
      # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true

      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}

      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
      # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook

      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"

      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"

      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"

      # GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
      # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

      # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
      # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
      # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v12.2.8
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        "postgrest"
      ]

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.34.40
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "-H",
          "Authorization: Bearer ${ANON_KEY}",
          "http://localhost:4000/api/tenants/realtime-dev/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      ERL_AFLAGS: -proto_dist inet_tcp
      DNS_NODES: "''"
      RLIMIT_NOFILE: "10000"
      APP_NAME: realtime
      SEED_SELF_HOST: true
      RUN_JANITOR: true

  # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.19.3
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://storage:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          "CMD",
          "imgproxy",
          "health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.86.1
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.67.2
    restart: unless-stopped
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    command:
      [
        "start",
        "--main-service",
        "/home/deno/functions/main"
      ]

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.12.5
    restart: unless-stopped
    ports:
      - 4000:4000
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "http://localhost:4000/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 10
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: _supabase
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1

      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.8.1.049
    restart: unless-stopped
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for internal supabase data such as _analytics
      - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      # Changes required for Pooler support
      - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
      # Use named volume to persist pgsodium decryption key between restarts
      - supabase-config:/etc/postgresql-custom
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-U",
          "postgres",
          "-h",
          "localhost"
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        "postgres",
        "-c",
        "config_file=/etc/postgresql/postgresql.conf",
        "-c",
        "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
      ]

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine
    restart: unless-stopped
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
    command:
      [
        "--config",
        "/etc/vector/vector.yml"
      ]

  # Update the DATABASE_URL if you are using an external Postgres database
  supavisor:
    container_name: supabase-pooler
    image: supabase/supavisor:2.4.12
    restart: unless-stopped
    ports:
      - ${POSTGRES_PORT}:5432
      - ${POOLER_PROXY_PORT_TRANSACTION}:6543
    volumes:
      - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "http://127.0.0.1:4000/api/health"
        ]
      interval: 10s
      timeout: 5s
      retries: 5
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PORT: 4000
      POSTGRES_PORT: ${POSTGRES_PORT}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
      CLUSTER_POSTGRES: true
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      VAULT_ENC_KEY: ${VAULT_ENC_KEY}
      API_JWT_SECRET: ${JWT_SECRET}
      METRICS_JWT_SECRET: ${JWT_SECRET}
      REGION: local
      ERL_AFLAGS: -proto_dist inet_tcp
      POOLER_TENANT_ID: ${POOLER_TENANT_ID}
      POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
      POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
      POOLER_POOL_MODE: transaction
    command:
      [
        "/bin/sh",
        "-c",
        "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
      ]

volumes:
  supabase-config:
44
autogpt_platform/db/docker/reset.sh
Executable file
@@ -0,0 +1,44 @@
#!/bin/bash

echo "WARNING: This will remove all containers and container data, and will reset the .env file. This action cannot be undone!"
read -p "Are you sure you want to proceed? (y/N) " -n 1 -r
echo # Move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
    echo "Operation cancelled."
    exit 1
fi

echo "Stopping and removing all containers..."
docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans

echo "Cleaning up bind-mounted directories..."
BIND_MOUNTS=(
    "./volumes/db/data"
)

for DIR in "${BIND_MOUNTS[@]}"; do
    if [ -d "$DIR" ]; then
        echo "Deleting $DIR..."
        rm -rf "$DIR"
    else
        echo "Directory $DIR does not exist. Skipping bind mount deletion step..."
    fi
done

echo "Resetting .env file..."
if [ -f ".env" ]; then
    echo "Removing existing .env file..."
    rm -f .env
else
    echo "No .env file found. Skipping .env removal step..."
fi

if [ -f ".env.example" ]; then
    echo "Copying .env.example to .env..."
    cp .env.example .env
else
    echo ".env.example file not found. Skipping .env reset step..."
fi

echo "Cleanup complete!"
241
autogpt_platform/db/docker/volumes/api/kong.yml
Normal file
@@ -0,0 +1,241 @@
_format_version: '2.1'
_transform: true

###
### Consumers / Users
###
consumers:
  - username: DASHBOARD
  - username: anon
    keyauth_credentials:
      - key: $SUPABASE_ANON_KEY
  - username: service_role
    keyauth_credentials:
      - key: $SUPABASE_SERVICE_KEY

###
### Access Control List
###
acls:
  - consumer: anon
    group: anon
  - consumer: service_role
    group: admin

###
### Dashboard credentials
###
basicauth_credentials:
  - consumer: DASHBOARD
    username: $DASHBOARD_USERNAME
    password: $DASHBOARD_PASSWORD

###
### API Routes
###
services:
  ## Open Auth routes
  - name: auth-v1-open
    url: http://auth:9999/verify
    routes:
      - name: auth-v1-open
        strip_path: true
        paths:
          - /auth/v1/verify
    plugins:
      - name: cors
  - name: auth-v1-open-callback
    url: http://auth:9999/callback
    routes:
      - name: auth-v1-open-callback
        strip_path: true
        paths:
          - /auth/v1/callback
    plugins:
      - name: cors
  - name: auth-v1-open-authorize
    url: http://auth:9999/authorize
    routes:
      - name: auth-v1-open-authorize
        strip_path: true
        paths:
          - /auth/v1/authorize
    plugins:
      - name: cors

  ## Secure Auth routes
  - name: auth-v1
    _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
    url: http://auth:9999/
    routes:
      - name: auth-v1-all
        strip_path: true
        paths:
          - /auth/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Secure REST routes
  - name: rest-v1
    _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
    url: http://rest:3000/
    routes:
      - name: rest-v1-all
        strip_path: true
        paths:
          - /rest/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: true
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Secure GraphQL routes
  - name: graphql-v1
    _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
    url: http://rest:3000/rpc/graphql
    routes:
      - name: graphql-v1-all
        strip_path: true
        paths:
          - /graphql/v1
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: true
      - name: request-transformer
        config:
          add:
            headers:
              - Content-Profile:graphql_public
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon

  ## Secure Realtime routes
  - name: realtime-v1-ws
    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
    url: http://realtime-dev.supabase-realtime:4000/socket
    protocol: ws
    routes:
      - name: realtime-v1-ws
        strip_path: true
        paths:
          - /realtime/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  - name: realtime-v1-rest
    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
    url: http://realtime-dev.supabase-realtime:4000/api
    protocol: http
    routes:
      - name: realtime-v1-rest
        strip_path: true
        paths:
          - /realtime/v1/api
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  ## Storage routes: the storage server manages its own auth
  - name: storage-v1
    _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
    url: http://storage:5000/
    routes:
      - name: storage-v1-all
        strip_path: true
        paths:
          - /storage/v1/
    plugins:
      - name: cors

  ## Edge Functions routes
  - name: functions-v1
    _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
    url: http://functions:9000/
    routes:
      - name: functions-v1-all
        strip_path: true
        paths:
          - /functions/v1/
    plugins:
      - name: cors

  ## Analytics routes
  - name: analytics-v1
    _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
    url: http://analytics:4000/
    routes:
      - name: analytics-v1-all
        strip_path: true
        paths:
          - /analytics/v1/

  ## Secure Database routes
  - name: meta
    _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
    url: http://meta:8080/
    routes:
      - name: meta-all
        strip_path: true
        paths:
          - /pg/
    plugins:
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin

  ## Protected Dashboard - catch all remaining routes
  - name: dashboard
    _comment: 'Studio: /* -> http://studio:3000/*'
    url: http://studio:3000/
    routes:
      - name: dashboard-all
        strip_path: true
        paths:
          - /
    plugins:
      - name: cors
      - name: basic-auth
        config:
          hide_credentials: true
3
autogpt_platform/db/docker/volumes/db/_supabase.sql
Normal file
@@ -0,0 +1,3 @@
\set pguser `echo "$POSTGRES_USER"`

CREATE DATABASE _supabase WITH OWNER :pguser;
0
autogpt_platform/db/docker/volumes/db/init/data.sql
Executable file
5
autogpt_platform/db/docker/volumes/db/jwt.sql
Normal file
@@ -0,0 +1,5 @@
\set jwt_secret `echo "$JWT_SECRET"`
\set jwt_exp `echo "$JWT_EXP"`

ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
6
autogpt_platform/db/docker/volumes/db/logs.sql
Normal file
@@ -0,0 +1,6 @@
\set pguser `echo "$POSTGRES_USER"`

\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
\c postgres
6
autogpt_platform/db/docker/volumes/db/pooler.sql
Normal file
@@ -0,0 +1,6 @@
\set pguser `echo "$POSTGRES_USER"`

\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;
\c postgres
4
autogpt_platform/db/docker/volumes/db/realtime.sql
Normal file
@@ -0,0 +1,4 @@
\set pguser `echo "$POSTGRES_USER"`

create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
8
autogpt_platform/db/docker/volumes/db/roles.sql
Normal file
@@ -0,0 +1,8 @@
-- NOTE: change to your own passwords for production environments
\set pgpass `echo "$POSTGRES_PASSWORD"`

ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
208
autogpt_platform/db/docker/volumes/db/webhooks.sql
Normal file
@@ -0,0 +1,208 @@
BEGIN;
  -- Create pg_net extension
  CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
  -- Create supabase_functions schema
  CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
  GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
  ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
  ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
  ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
  -- supabase_functions.migrations definition
  CREATE TABLE supabase_functions.migrations (
    version text PRIMARY KEY,
    inserted_at timestamptz NOT NULL DEFAULT NOW()
  );
  -- Initial supabase_functions migration
  INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
  -- supabase_functions.hooks definition
  CREATE TABLE supabase_functions.hooks (
    id bigserial PRIMARY KEY,
    hook_table_id integer NOT NULL,
    hook_name text NOT NULL,
    created_at timestamptz NOT NULL DEFAULT NOW(),
    request_id bigint
  );
  CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
  CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
  COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
  CREATE FUNCTION supabase_functions.http_request()
    RETURNS trigger
    LANGUAGE plpgsql
  AS $function$
  DECLARE
    request_id bigint;
    payload jsonb;
    url text := TG_ARGV[0]::text;
    method text := TG_ARGV[1]::text;
    headers jsonb DEFAULT '{}'::jsonb;
    params jsonb DEFAULT '{}'::jsonb;
    timeout_ms integer DEFAULT 1000;
  BEGIN
    IF url IS NULL OR url = 'null' THEN
      RAISE EXCEPTION 'url argument is missing';
    END IF;

    IF method IS NULL OR method = 'null' THEN
      RAISE EXCEPTION 'method argument is missing';
    END IF;

    IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
      headers = '{"Content-Type": "application/json"}'::jsonb;
    ELSE
      headers = TG_ARGV[2]::jsonb;
    END IF;

    IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
      params = '{}'::jsonb;
    ELSE
      params = TG_ARGV[3]::jsonb;
    END IF;

    IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
      timeout_ms = 1000;
    ELSE
      timeout_ms = TG_ARGV[4]::integer;
    END IF;

    CASE
      WHEN method = 'GET' THEN
        SELECT http_get INTO request_id FROM net.http_get(
          url,
          params,
          headers,
          timeout_ms
        );
      WHEN method = 'POST' THEN
        payload = jsonb_build_object(
          'old_record', OLD,
          'record', NEW,
          'type', TG_OP,
          'table', TG_TABLE_NAME,
          'schema', TG_TABLE_SCHEMA
        );

        SELECT http_post INTO request_id FROM net.http_post(
          url,
          payload,
          params,
          headers,
          timeout_ms
        );
      ELSE
        RAISE EXCEPTION 'method argument % is invalid', method;
    END CASE;

    INSERT INTO supabase_functions.hooks
      (hook_table_id, hook_name, request_id)
    VALUES
      (TG_RELID, TG_NAME, request_id);

    RETURN NEW;
  END
  $function$;
  -- Supabase super admin
  DO
  $$
  BEGIN
    IF NOT EXISTS (
      SELECT 1
      FROM pg_roles
      WHERE rolname = 'supabase_functions_admin'
    )
    THEN
      CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
    END IF;
  END
  $$;
  GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
  GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
  GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
  ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
  ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
  ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
  ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
  GRANT supabase_functions_admin TO postgres;
  -- Remove unused supabase_pg_net_admin role
  DO
  $$
  BEGIN
    IF EXISTS (
      SELECT 1
      FROM pg_roles
      WHERE rolname = 'supabase_pg_net_admin'
    )
    THEN
      REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
      DROP OWNED BY supabase_pg_net_admin;
      DROP ROLE supabase_pg_net_admin;
    END IF;
  END
  $$;
  -- pg_net grants when extension is already enabled
  DO
  $$
  BEGIN
    IF EXISTS (
      SELECT 1
      FROM pg_extension
      WHERE extname = 'pg_net'
    )
    THEN
      GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
      ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
      ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
      ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
      ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
      REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
      REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
      GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
      GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    END IF;
  END
  $$;
  -- Event trigger for pg_net
  CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
  RETURNS event_trigger
  LANGUAGE plpgsql
  AS $$
  BEGIN
    IF EXISTS (
      SELECT 1
      FROM pg_event_trigger_ddl_commands() AS ev
      JOIN pg_extension AS ext
      ON ev.objid = ext.oid
      WHERE ext.extname = 'pg_net'
    )
    THEN
      GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
      ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
      ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
      ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
      ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
      REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
      REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
      GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
      GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
    END IF;
  END;
  $$;
  COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
  DO
  $$
  BEGIN
    IF NOT EXISTS (
      SELECT 1
      FROM pg_event_trigger
      WHERE evtname = 'issue_pg_net_access'
    ) THEN
      CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
      EXECUTE PROCEDURE extensions.grant_pg_net_access();
    END IF;
  END
  $$;
  INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
  ALTER function supabase_functions.http_request() SECURITY DEFINER;
  ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
  REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
  GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
COMMIT;
16
autogpt_platform/db/docker/volumes/functions/hello/index.ts
Normal file
@@ -0,0 +1,16 @@
// Follow this setup guide to integrate the Deno language server with your editor:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.

import { serve } from "https://deno.land/std@0.177.1/http/server.ts"

serve(async () => {
  return new Response(
    `"Hello from Edge Functions!"`,
    { headers: { "Content-Type": "application/json" } },
  )
})

// To invoke:
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
//   --header 'Authorization: Bearer <anon/service_role API key>'
94
autogpt_platform/db/docker/volumes/functions/main/index.ts
Normal file
@@ -0,0 +1,94 @@
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'

console.log('main function started')

const JWT_SECRET = Deno.env.get('JWT_SECRET')
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'

function getAuthToken(req: Request) {
  const authHeader = req.headers.get('authorization')
  if (!authHeader) {
    throw new Error('Missing authorization header')
  }
  const [bearer, token] = authHeader.split(' ')
  if (bearer !== 'Bearer') {
    throw new Error(`Auth header is not 'Bearer {token}'`)
  }
  return token
}

async function verifyJWT(jwt: string): Promise<boolean> {
  const encoder = new TextEncoder()
  const secretKey = encoder.encode(JWT_SECRET)
  try {
    await jose.jwtVerify(jwt, secretKey)
  } catch (err) {
    console.error(err)
    return false
  }
  return true
}

serve(async (req: Request) => {
  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
    try {
      const token = getAuthToken(req)
      const isValidJWT = await verifyJWT(token)

      if (!isValidJWT) {
        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
          status: 401,
          headers: { 'Content-Type': 'application/json' },
        })
      }
    } catch (e) {
      console.error(e)
      return new Response(JSON.stringify({ msg: e.toString() }), {
        status: 401,
        headers: { 'Content-Type': 'application/json' },
      })
    }
  }

  const url = new URL(req.url)
  const { pathname } = url
  const path_parts = pathname.split('/')
  const service_name = path_parts[1]

  if (!service_name || service_name === '') {
    const error = { msg: 'missing function name in request' }
    return new Response(JSON.stringify(error), {
      status: 400,
      headers: { 'Content-Type': 'application/json' },
    })
  }

  const servicePath = `/home/deno/functions/${service_name}`
  console.error(`serving the request with ${servicePath}`)

  const memoryLimitMb = 150
  const workerTimeoutMs = 1 * 60 * 1000
  const noModuleCache = false
  const importMapPath = null
  const envVarsObj = Deno.env.toObject()
  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])

  try {
    const worker = await EdgeRuntime.userWorkers.create({
      servicePath,
      memoryLimitMb,
      workerTimeoutMs,
      noModuleCache,
      importMapPath,
      envVars,
    })
    return await worker.fetch(req)
  } catch (e) {
    const error = { msg: e.toString() }
    return new Response(JSON.stringify(error), {
      status: 500,
      headers: { 'Content-Type': 'application/json' },
    })
  }
})
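One note on the relay above: `verifyJWT` accepts any HS256 token signed with the shared `JWT_SECRET`. As a hedged sketch (not part of the diff), a compatible test token could be minted with the same jose library, assuming `JWT_SECRET` in your environment matches the relay's secret:

```
import * as jose from "https://deno.land/x/jose@v4.14.4/index.ts";

// Sketch: mint a token that the relay's verifyJWT() would accept.
// Assumes JWT_SECRET in the environment matches the relay's secret.
const secretKey = new TextEncoder().encode(Deno.env.get("JWT_SECRET"));

const token = await new jose.SignJWT({ role: "anon" })
  .setProtectedHeader({ alg: "HS256" })
  .setIssuedAt()
  .setExpirationTime("1h")
  .sign(secretKey);

console.log(`Authorization: Bearer ${token}`);
```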
232
autogpt_platform/db/docker/volumes/logs/vector.yml
Normal file
@@ -0,0 +1,232 @@
api:
  enabled: true
  address: 0.0.0.0:9001

sources:
  docker_host:
    type: docker_logs
    exclude_containers:
      - supabase-vector

transforms:
  project_logs:
    type: remap
    inputs:
      - docker_host
    source: |-
      .project = "default"
      .event_message = del(.message)
      .appname = del(.container_name)
      del(.container_created_at)
      del(.container_id)
      del(.source_type)
      del(.stream)
      del(.label)
      del(.image)
      del(.host)
      del(.stream)
  router:
    type: route
    inputs:
      - project_logs
    route:
      kong: '.appname == "supabase-kong"'
      auth: '.appname == "supabase-auth"'
      rest: '.appname == "supabase-rest"'
      realtime: '.appname == "supabase-realtime"'
      storage: '.appname == "supabase-storage"'
      functions: '.appname == "supabase-functions"'
      db: '.appname == "supabase-db"'
  # Ignores non nginx errors since they are related with kong booting up
  kong_logs:
    type: remap
    inputs:
      - router.kong
    source: |-
      req, err = parse_nginx_log(.event_message, "combined")
      if err == null {
          .timestamp = req.timestamp
          .metadata.request.headers.referer = req.referer
          .metadata.request.headers.user_agent = req.agent
          .metadata.request.headers.cf_connecting_ip = req.client
          .metadata.request.method = req.method
          .metadata.request.path = req.path
          .metadata.request.protocol = req.protocol
          .metadata.response.status_code = req.status
      }
      if err != null {
        abort
      }
  # Ignores non nginx errors since they are related with kong booting up
  kong_err:
    type: remap
    inputs:
      - router.kong
    source: |-
      .metadata.request.method = "GET"
      .metadata.response.status_code = 200
      parsed, err = parse_nginx_log(.event_message, "error")
      if err == null {
          .timestamp = parsed.timestamp
          .severity = parsed.severity
          .metadata.request.host = parsed.host
          .metadata.request.headers.cf_connecting_ip = parsed.client
          url, err = split(parsed.request, " ")
          if err == null {
              .metadata.request.method = url[0]
              .metadata.request.path = url[1]
              .metadata.request.protocol = url[2]
          }
      }
      if err != null {
        abort
      }
  # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
  auth_logs:
    type: remap
    inputs:
      - router.auth
    source: |-
      parsed, err = parse_json(.event_message)
      if err == null {
          .metadata.timestamp = parsed.time
          .metadata = merge!(.metadata, parsed)
      }
  # PostgREST logs are structured so we separate timestamp from message using regex
  rest_logs:
    type: remap
    inputs:
      - router.rest
    source: |-
      parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
      if err == null {
          .event_message = parsed.msg
          .timestamp = to_timestamp!(parsed.time)
          .metadata.host = .project
      }
  # Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
  realtime_logs:
    type: remap
    inputs:
      - router.realtime
    source: |-
      .metadata.project = del(.project)
      .metadata.external_id = .metadata.project
      parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
      if err == null {
          .event_message = parsed.msg
          .metadata.level = parsed.level
      }
  # Storage logs may contain json objects so we parse them for completeness
  storage_logs:
    type: remap
    inputs:
      - router.storage
    source: |-
      .metadata.project = del(.project)
      .metadata.tenantId = .metadata.project
      parsed, err = parse_json(.event_message)
      if err == null {
          .event_message = parsed.msg
          .metadata.level = parsed.level
          .metadata.timestamp = parsed.time
          .metadata.context[0].host = parsed.hostname
          .metadata.context[0].pid = parsed.pid
      }
  # Postgres logs some messages to stderr which we map to warning severity level
  db_logs:
    type: remap
    inputs:
      - router.db
    source: |-
      .metadata.host = "db-default"
      .metadata.parsed.timestamp = .timestamp

      parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)

      if err != null || parsed == null {
        .metadata.parsed.error_severity = "info"
      }
      if parsed != null {
        .metadata.parsed.error_severity = parsed.level
      }
      if .metadata.parsed.error_severity == "info" {
        .metadata.parsed.error_severity = "log"
      }
      .metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)

sinks:
  logflare_auth:
    type: 'http'
    inputs:
      - auth_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_realtime:
    type: 'http'
    inputs:
      - realtime_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_rest:
    type: 'http'
    inputs:
      - rest_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_db:
    type: 'http'
    inputs:
      - db_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    # We must route the sink through kong because ingesting logs before logflare is fully initialised will
    # lead to broken queries from studio. This works by the assumption that containers are started in the
    # following order: vector > db > logflare > kong
    uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_functions:
    type: 'http'
    inputs:
      - router.functions
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_storage:
    type: 'http'
    inputs:
      - storage_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
  logflare_kong:
    type: 'http'
    inputs:
      - kong_logs
      - kong_err
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
30
autogpt_platform/db/docker/volumes/pooler/pooler.exs
Normal file
@@ -0,0 +1,30 @@
{:ok, _} = Application.ensure_all_started(:supavisor)

{:ok, version} =
  case Supavisor.Repo.query!("select version()") do
    %{rows: [[ver]]} -> Supavisor.Helpers.parse_pg_version(ver)
    _ -> nil
  end

params = %{
  "external_id" => System.get_env("POOLER_TENANT_ID"),
  "db_host" => "db",
  "db_port" => System.get_env("POSTGRES_PORT"),
  "db_database" => System.get_env("POSTGRES_DB"),
  "require_user" => false,
  "auth_query" => "SELECT * FROM pgbouncer.get_auth($1)",
  "default_max_clients" => System.get_env("POOLER_MAX_CLIENT_CONN"),
  "default_pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
  "default_parameter_status" => %{"server_version" => version},
  "users" => [%{
    "db_user" => "pgbouncer",
    "db_password" => System.get_env("POSTGRES_PASSWORD"),
    "mode_type" => System.get_env("POOLER_POOL_MODE"),
    "pool_size" => System.get_env("POOLER_DEFAULT_POOL_SIZE"),
    "is_manager" => true
  }]
}

if !Supavisor.Tenants.get_tenant_by_external_id(params["external_id"]) do
  {:ok, _} = Supavisor.Tenants.create_tenant(params)
end
@@ -5,7 +5,7 @@ networks:
     name: shared-network
 
 volumes:
-  db-config:
+  supabase-config:
 
 x-agpt-services:
   &agpt-services
@@ -67,19 +67,19 @@ services:
   studio:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: studio
 
   kong:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: kong
 
   auth:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: auth
     environment:
       GOTRUE_MAILER_AUTOCONFIRM: true
@@ -87,54 +87,57 @@ services:
   rest:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: rest
 
   realtime:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: realtime
 
   storage:
     <<: *supabase-services
    extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: storage
 
   imgproxy:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: imgproxy
 
   meta:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: meta
 
   functions:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: functions
 
   analytics:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: analytics
 
   db:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: db
+    ports:
+      - ${POSTGRES_PORT}:5432 # We don't use Supavisor locally, so we expose the db directly.
 
   vector:
     <<: *supabase-services
     extends:
-      file: ./supabase/docker/docker-compose.yml
+      file: ./db/docker/docker-compose.yml
       service: vector
 
   deps:
@@ -1,15 +1,29 @@
 "use client";
-import { ShoppingBag } from "lucide-react";
+import { Sidebar } from "@/components/agptui/Sidebar";
+import { Users, DollarSign, LogOut } from "lucide-react";
 
-import { useState } from "react";
-import Link from "next/link";
-import { BinaryIcon, XIcon } from "lucide-react";
-import { usePathname } from "next/navigation"; // Add this import
+import { IconSliders } from "@/components/ui/icons";
 
-const tabs = [
-  { name: "Dashboard", href: "/admin/dashboard" },
-  { name: "Marketplace", href: "/admin/marketplace" },
-  { name: "Users", href: "/admin/users" },
-  { name: "Settings", href: "/admin/settings" },
+const sidebarLinkGroups = [
+  {
+    links: [
+      {
+        text: "Agent Management",
+        href: "/admin/agents",
+        icon: <Users className="h-6 w-6" />,
+      },
+      {
+        text: "User Spending",
+        href: "/admin/spending",
+        icon: <DollarSign className="h-6 w-6" />,
+      },
+      {
+        text: "Admin User Management",
+        href: "/admin/settings",
+        icon: <IconSliders className="h-6 w-6" />,
+      },
+    ],
+  },
 ];
 
 export default function AdminLayout({
@@ -17,84 +31,10 @@ export default function AdminLayout({
 }: {
   children: React.ReactNode;
 }) {
-  const pathname = usePathname(); // Get the current pathname
-  const [activeTab, setActiveTab] = useState(() => {
-    // Set active tab based on the current route
-    return tabs.find((tab) => tab.href === pathname)?.name || tabs[0].name;
-  });
-  const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
-
   return (
-    <div className="min-h-screen bg-gray-100">
-      <nav className="bg-white shadow-sm">
-        <div className="max-w-10xl mx-auto px-4 sm:px-6 lg:px-8">
-          <div className="flex h-16 items-center justify-between">
-            <div className="flex items-center">
-              <div className="flex-shrink-0">
-                <h1 className="text-xl font-bold">Admin Panel</h1>
-              </div>
-              <div className="hidden sm:ml-6 sm:flex sm:space-x-8">
-                {tabs.map((tab) => (
-                  <Link
-                    key={tab.name}
-                    href={tab.href}
-                    className={`${
-                      activeTab === tab.name
-                        ? "border-indigo-500 text-indigo-600"
-                        : "border-transparent text-gray-500 hover:border-gray-300 hover:text-gray-700"
-                    } inline-flex items-center border-b-2 px-1 pt-1 text-sm font-medium`}
-                    onClick={() => setActiveTab(tab.name)}
-                  >
-                    {tab.name}
-                  </Link>
-                ))}
-              </div>
-            </div>
-            <div className="sm:hidden">
-              <button
-                type="button"
-                className="inline-flex items-center justify-center rounded-md p-2 text-gray-400 hover:bg-gray-100 hover:text-gray-500 focus:outline-none focus:ring-2 focus:ring-inset focus:ring-indigo-500"
-                onClick={() => setMobileMenuOpen(!mobileMenuOpen)}
-              >
-                <span className="sr-only">Open main menu</span>
-                {mobileMenuOpen ? (
-                  <XIcon className="block h-6 w-6" aria-hidden="true" />
-                ) : (
-                  <BinaryIcon className="block h-6 w-6" aria-hidden="true" />
-                )}
-              </button>
-            </div>
-          </div>
-        </div>
-
-        {mobileMenuOpen && (
-          <div className="sm:hidden">
-            <div className="space-y-1 pb-3 pt-2">
-              {tabs.map((tab) => (
-                <Link
-                  key={tab.name}
-                  href={tab.href}
-                  className={`${
-                    activeTab === tab.name
-                      ? "border-indigo-500 bg-indigo-50 text-indigo-700"
-                      : "border-transparent text-gray-600 hover:border-gray-300 hover:bg-gray-50 hover:text-gray-800"
-                  } block border-l-4 py-2 pl-3 pr-4 text-base font-medium`}
-                  onClick={() => {
-                    setActiveTab(tab.name);
-                    setMobileMenuOpen(false);
-                  }}
-                >
-                  {tab.name}
-                </Link>
-              ))}
-            </div>
-          </div>
-        )}
-      </nav>
-
-      <main className="py-10">
-        <div className="mx-auto max-w-7xl px-4 sm:px-6 lg:px-8">{children}</div>
-      </main>
+    <div className="flex min-h-screen w-screen max-w-[1360px] flex-col lg:flex-row">
+      <Sidebar linkGroups={sidebarLinkGroups} />
+      <div className="flex-1 pl-4">{children}</div>
     </div>
   );
 }
@@ -2,6 +2,7 @@
 import React, { useCallback, useEffect, useMemo, useState } from "react";
 import { useParams, useRouter } from "next/navigation";
 
+import { exportAsJSONFile } from "@/lib/utils";
 import { useBackendAPI } from "@/lib/autogpt-server-api/context";
 import {
   GraphExecution,
@@ -191,19 +192,40 @@ export default function AgentRunsPage(): React.ReactElement {
     [schedules, api],
   );
 
+  const downloadGraph = useCallback(
+    async () =>
+      agent &&
+      // Export sanitized graph from backend
+      api
+        .getGraph(agent.agent_id, agent.agent_version, true)
+        .then((graph) =>
+          exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`),
+        ),
+    [api, agent],
+  );
+
   const agentActions: ButtonAction[] = useMemo(
     () => [
-      {
-        label: "Open in builder",
-        callback: () => agent && router.push(`/build?flowID=${agent.agent_id}`),
-      },
+      ...(agent?.can_access_graph
+        ? [
+            {
+              label: "Open in builder",
+              callback: () =>
+                agent &&
+                router.push(
+                  `/build?flowID=${agent.agent_id}&flowVersion=${agent.agent_version}`,
+                ),
+            },
+            { label: "Export agent to file", callback: downloadGraph },
+          ]
+        : []),
       {
        label: "Delete agent",
        variant: "destructive",
        callback: () => setAgentDeleteDialogOpen(true),
      },
     ],
-    [agent, router],
+    [agent, router, downloadGraph],
   );
 
   if (!agent || !graph) {
@@ -29,7 +29,6 @@ export default function CreditsPage() {
     formatCredits,
     refundTopUp,
     refundRequests,
     fetchRefundRequests,
   } = useCredits({
     fetchInitialAutoTopUpConfig: true,
     fetchInitialRefundRequests: true,
@@ -1,17 +1,48 @@
 import * as React from "react";
 import { Sidebar } from "@/components/agptui/Sidebar";
+import {
+  IconDashboardLayout,
+  IconIntegrations,
+  IconProfile,
+  IconSliders,
+  IconCoin,
+} from "@/components/ui/icons";
+import { KeyIcon } from "lucide-react";
 
 export default function Layout({ children }: { children: React.ReactNode }) {
   const sidebarLinkGroups = [
     {
       links: [
-        { text: "Creator Dashboard", href: "/profile/dashboard" },
-        { text: "Agent dashboard", href: "/profile/agent-dashboard" },
-        { text: "Billing", href: "/profile/credits" },
-        { text: "Integrations", href: "/profile/integrations" },
-        { text: "API Keys", href: "/profile/api_keys" },
-        { text: "Profile", href: "/profile" },
-        { text: "Settings", href: "/profile/settings" },
+        {
+          text: "Creator Dashboard",
+          href: "/profile/dashboard",
+          icon: <IconDashboardLayout className="h-6 w-6" />,
+        },
+        {
+          text: "Billing",
+          href: "/profile/credits",
+          icon: <IconCoin className="h-6 w-6" />,
+        },
+        {
+          text: "Integrations",
+          href: "/profile/integrations",
+          icon: <IconIntegrations className="h-6 w-6" />,
+        },
+        {
+          text: "API Keys",
+          href: "/profile/api_keys",
+          icon: <KeyIcon className="h-6 w-6" />,
+        },
+        {
+          text: "Profile",
+          href: "/profile",
+          icon: <IconProfile className="h-6 w-6" />,
+        },
+        {
+          text: "Settings",
+          href: "/profile/settings",
+          icon: <IconSliders className="h-6 w-6" />,
+        },
       ],
     },
   ];
@@ -11,6 +11,7 @@ import {
 
 import type { ButtonAction } from "@/components/agptui/types";
 import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
+import { IconRefresh, IconSquare } from "@/components/ui/icons";
 import { Button } from "@/components/agptui/Button";
 import { Input } from "@/components/ui/input";
 
@@ -121,14 +122,29 @@ export default function AgentRunDetailsView({
     ...(["running", "queued"].includes(runStatus)
       ? ([
           {
-            label: "Stop run",
+            label: (
+              <>
+                <IconSquare className="mr-2 size-4" />
+                Stop run
+              </>
+            ),
             variant: "secondary",
             callback: stopRun,
           },
         ] satisfies ButtonAction[])
       : []),
     ...(["success", "failed", "stopped"].includes(runStatus)
-      ? [{ label: "Run again", callback: runAgain }]
+      ? [
+          {
+            label: (
+              <>
+                <IconRefresh className="mr-2 size-4" />
+                Run again
+              </>
+            ),
+            callback: runAgain,
+          },
+        ]
       : []),
     { label: "Delete run", variant: "secondary", callback: deleteRun },
   ],
@@ -7,6 +7,7 @@ import { GraphExecutionID, GraphMeta } from "@/lib/autogpt-server-api";
 import type { ButtonAction } from "@/components/agptui/types";
 import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
 import { Button, ButtonProps } from "@/components/agptui/Button";
+import { IconPlay } from "@/components/ui/icons";
 import { Input } from "@/components/ui/input";
 
 export default function AgentRunDraftView({
@@ -31,12 +32,19 @@ export default function AgentRunDraftView({
     [api, graph, inputValues, onRun],
   );
 
-  const runActions: {
-    label: string;
-    variant?: ButtonProps["variant"];
-    callback: () => void;
-  }[] = useMemo(
-    () => [{ label: "Run", variant: "accent", callback: () => doRun() }],
+  const runActions: ButtonAction[] = useMemo(
+    () => [
+      {
+        label: (
+          <>
+            <IconPlay className="mr-2 size-5" />
+            Run
+          </>
+        ),
+        variant: "accent",
+        callback: doRun,
+      },
+    ],
     [doRun],
   );
 
@@ -2,27 +2,49 @@ import * as React from "react";
 import Link from "next/link";
 import { Button } from "./Button";
 import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet";
-import { KeyIcon, Menu } from "lucide-react";
-import {
-  IconDashboardLayout,
-  IconIntegrations,
-  IconProfile,
-  IconSliders,
-  IconCoin,
-} from "../ui/icons";
+import { Menu } from "lucide-react";
+import { IconDashboardLayout } from "../ui/icons";
 
-interface SidebarLinkGroup {
-  links: {
-    text: string;
-    href: string;
-  }[];
+export interface SidebarLink {
+  text: string;
+  href: string;
+  icon?: React.ReactNode;
 }
 
-interface SidebarProps {
+export interface SidebarLinkGroup {
+  links: SidebarLink[];
+}
+
+export interface SidebarProps {
   linkGroups: SidebarLinkGroup[];
 }
 
+// Helper function to get the default icon component based on link text
+const getDefaultIconForLink = () => {
+  // Default icon
+  return <IconDashboardLayout className="h-6 w-6" />;
+};
+
 export const Sidebar: React.FC<SidebarProps> = ({ linkGroups }) => {
+  // Extract all links from linkGroups
+  const allLinks = linkGroups.flatMap((group) => group.links);
+
+  // Function to render link items
+  const renderLinks = () => {
+    return allLinks.map((link, index) => (
+      <Link
+        key={`${link.href}-${index}`}
+        href={link.href}
+        className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
+      >
+        {link.icon || getDefaultIconForLink()}
+        <div className="p-ui-medium text-base font-medium leading-normal">
+          {link.text}
+        </div>
+      </Link>
+    ));
+  };
+
   return (
     <>
       <Sheet>
@@ -41,60 +63,7 @@ export const Sidebar: React.FC<SidebarProps> = ({ linkGroups }) => {
         >
           <div className="h-full w-full rounded-2xl bg-zinc-200 dark:bg-zinc-800">
             <div className="inline-flex h-[264px] flex-col items-start justify-start gap-6 p-3">
-              <Link
-                href="/profile/dashboard"
-                className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-              >
-                <IconDashboardLayout className="h-6 w-6" />
-                <div className="p-ui-medium text-base font-medium leading-normal">
-                  Creator dashboard
-                </div>
-              </Link>
-              <Link
-                href="/profile/credits"
-                className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-              >
-                <IconCoin className="h-6 w-6" />
-                <div className="p-ui-medium text-base font-medium leading-normal">
-                  Billing
-                </div>
-              </Link>
-              <Link
-                href="/profile/integrations"
-                className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-              >
-                <IconIntegrations className="h-6 w-6" />
-                <div className="p-ui-medium text-base font-medium leading-normal">
-                  Integrations
-                </div>
-              </Link>
-              <Link
-                href="/profile/api_keys"
-                className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-              >
-                <KeyIcon className="h-6 w-6" />
-                <div className="p-ui-medium text-base font-medium leading-normal">
-                  API Keys
-                </div>
-              </Link>
-              <Link
-                href="/profile"
-                className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-              >
-                <IconProfile className="h-6 w-6" />
-                <div className="p-ui-medium text-base font-medium leading-normal">
-                  Profile
-                </div>
-              </Link>
-              <Link
-                href="/profile/settings"
-                className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-              >
-                <IconSliders className="h-6 w-6" />
-                <div className="p-ui-medium text-base font-medium leading-normal">
-                  Settings
-                </div>
-              </Link>
+              {renderLinks()}
             </div>
           </div>
         </SheetContent>
@@ -103,60 +72,7 @@ export const Sidebar: React.FC<SidebarProps> = ({ linkGroups }) => {
       <div className="relative hidden h-[912px] w-[234px] border-none lg:block">
         <div className="h-full w-full rounded-2xl bg-zinc-200 dark:bg-zinc-800">
           <div className="inline-flex h-[264px] flex-col items-start justify-start gap-6 p-3">
-            <Link
-              href="/profile/dashboard"
-              className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-            >
-              <IconDashboardLayout className="h-6 w-6" />
-              <div className="p-ui-medium text-base font-medium leading-normal">
-                Agent dashboard
-              </div>
-            </Link>
-            <Link
-              href="/profile/credits"
-              className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-            >
-              <IconCoin className="h-6 w-6" />
-              <div className="p-ui-medium text-base font-medium leading-normal">
-                Billing
-              </div>
-            </Link>
-            <Link
-              href="/profile/integrations"
-              className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-            >
-              <IconIntegrations className="h-6 w-6" />
-              <div className="p-ui-medium text-base font-medium leading-normal">
-                Integrations
-              </div>
-            </Link>
-            <Link
-              href="/profile/api_keys"
-              className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-            >
-              <KeyIcon className="h-6 w-6" strokeWidth={1} />
-              <div className="p-ui-medium text-base font-medium leading-normal">
-                API Keys
-              </div>
-            </Link>
-            <Link
-              href="/profile"
-              className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-            >
-              <IconProfile className="h-6 w-6" />
-              <div className="p-ui-medium text-base font-medium leading-normal">
-                Profile
-              </div>
-            </Link>
-            <Link
-              href="/profile/settings"
-              className="inline-flex w-full items-center gap-2.5 rounded-xl px-3 py-3 text-neutral-800 hover:bg-neutral-800 hover:text-white dark:text-neutral-200 dark:hover:bg-neutral-700 dark:hover:text-white"
-            >
-              <IconSliders className="h-6 w-6" />
-              <div className="p-ui-medium text-base font-medium leading-normal">
-                Settings
-              </div>
-            </Link>
+            {renderLinks()}
           </div>
         </div>
       </div>
@@ -1,7 +1,8 @@
 import type { ButtonProps } from "@/components/agptui/Button";
+import React from "react";
 
 export type ButtonAction = {
-  label: string;
+  label: React.ReactNode;
   variant?: ButtonProps["variant"];
   callback: () => void;
 };
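Widening `label` to `React.ReactNode` is what lets the run views above put icons inside action labels. A minimal sketch of a conforming value (the callback body is illustrative only, not from the diff):

```
import { IconPlay } from "@/components/ui/icons";
import type { ButtonAction } from "@/components/agptui/types";

// Sketch: a ButtonAction whose label is JSX rather than a plain string.
const runAction: ButtonAction = {
  label: (
    <>
      <IconPlay className="mr-2 size-5" />
      Run
    </>
  ),
  variant: "accent",
  callback: () => console.log("run clicked"), // placeholder callback
};
```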
@@ -197,14 +197,14 @@ export default class BackendAPI {
   getGraph(
     id: GraphID,
     version?: number,
-    hide_credentials?: boolean,
+    for_export?: boolean,
   ): Promise<Graph> {
     let query: Record<string, any> = {};
     if (version !== undefined) {
       query["version"] = version;
     }
-    if (hide_credentials !== undefined) {
-      query["hide_credentials"] = hide_credentials;
+    if (for_export !== undefined) {
+      query["for_export"] = for_export;
     }
     return this._get(`/graphs/${id}`, query);
   }
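This renamed `for_export` flag is what the `downloadGraph` callback above passes as the third argument. A usage sketch, with identifiers assumed from the surrounding diff:

```
// Sketch: fetch a sanitized, export-ready copy of a graph and save it as JSON.
// `api`, `agent`, and `exportAsJSONFile` are assumed from the page component above.
const graph = await api.getGraph(agent.agent_id, agent.agent_version, true);
exportAsJSONFile(graph, `${graph.name}_v${graph.version}.json`);
```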
@@ -5,10 +5,10 @@ import { NextResponse, type NextRequest } from "next/server";
 const PROTECTED_PAGES = [
   "/monitor",
   "/build",
-  "/marketplace/profile",
-  "/marketplace/settings",
-  "/marketplace/dashboard",
+  "/onboarding",
+  "/profile",
+  "/library",
   "/monitoring",
 ];
 const ADMIN_PAGES = ["/admin"];
@@ -62,34 +62,38 @@ export async function updateSession(request: NextRequest) {
   // Get the user role
   const userRole = user?.role;
   const url = request.nextUrl.clone();
+  const pathname = request.nextUrl.pathname;
   // AUTH REDIRECTS
-  // If not logged in and trying to access a protected page, redirect to login
-  if (
-    (!user &&
-      PROTECTED_PAGES.some((page) => {
-        const combinedPath = `${page}`;
-        // console.log("Checking pathname:", request.nextUrl.pathname, "against:", combinedPath);
-        return request.nextUrl.pathname.startsWith(combinedPath);
-      })) ||
-    ADMIN_PAGES.some((page) => {
-      const combinedPath = `${page}`;
-      // console.log("Checking pathname:", request.nextUrl.pathname, "against:", combinedPath);
-      return request.nextUrl.pathname.startsWith(combinedPath);
-    })
-  ) {
-    // no user, potentially respond by redirecting the user to the login page
-    url.pathname = `/login`;
-    return NextResponse.redirect(url);
+  // 1. Check if user is not authenticated but trying to access protected content
+  if (!user) {
+    // Check if the user is trying to access either a protected page or an admin page
+    const isAttemptingProtectedPage = PROTECTED_PAGES.some((page) =>
+      request.nextUrl.pathname.startsWith(page),
+    );
+
+    const isAttemptingAdminPage = ADMIN_PAGES.some((page) =>
+      request.nextUrl.pathname.startsWith(page),
+    );
+
+    // If trying to access any protected content without being logged in,
+    // redirect to login page
+    if (isAttemptingProtectedPage || isAttemptingAdminPage) {
+      url.pathname = `/login`;
+      return NextResponse.redirect(url);
+    }
   }
-  if (
-    user &&
-    userRole != "admin" &&
-    ADMIN_PAGES.some((page) => request.nextUrl.pathname.startsWith(`${page}`))
-  ) {
-    // no user, potentially respond by redirecting the user to the login page
-    url.pathname = `/marketplace`;
-    return NextResponse.redirect(url);
+
+  // 2. Check if user is authenticated but lacks admin role when accessing admin pages
+  if (user && userRole !== "admin") {
+    const isAttemptingAdminPage = ADMIN_PAGES.some((page) =>
+      request.nextUrl.pathname.startsWith(page),
+    );
+
+    // If a non-admin user is trying to access admin pages,
+    // redirect to marketplace
+    if (isAttemptingAdminPage) {
+      url.pathname = `/marketplace`;
+      return NextResponse.redirect(url);
+    }
   }
 
   // IMPORTANT: You *must* return the supabaseResponse object as it is. If you're
@@ -1,14 +1,20 @@
 import React from "react";
 import * as Sentry from "@sentry/nextjs";
 import { redirect } from "next/navigation";
 import getServerUser from "./supabase/getServerUser";
 
 export async function withRoleAccess(allowedRoles: string[]) {
-  console.log("withRoleAccess called:", allowedRoles);
-  ("use server");
+  "use server";
   return await Sentry.withServerActionInstrumentation(
     "withRoleAccess",
     {},
     async () => {
       return async function <T extends React.ComponentType<any>>(Component: T) {
         const { user, role, error } = await getServerUser();
 
         if (error || !user || !role || !allowedRoles.includes(role)) {
           redirect("/unauthorized");
         }
         return Component;
       };
     },
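Given the shape above — the instrumented call resolves to an async function that either passes the component through or redirects — usage would look roughly like this sketch; `AdminDashboardPage` is a hypothetical component, not from the diff:

```
// Sketch: gate a page behind the admin role.
const withAdminAccess = await withRoleAccess(["admin"]);
const GuardedAdminDashboard = await withAdminAccess(AdminDashboardPage);
```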
Submodule autogpt_platform/supabase deleted from 4647d50380
@@ -84,11 +84,11 @@ To run the backend services, follow these steps:
   ```
   This command will initialize and update the submodules in the repository. The `supabase` folder will be cloned to the root directory.
 
-* Copy the `.env.example` file available in the `supabase/docker` directory to `.env` in `autogpt_platform`:
+* Copy the `.env.example` file to `.env` in `autogpt_platform`:
   ```
-  cp supabase/docker/.env.example .env
+  cp .env.example .env
   ```
-  This command will copy the `.env.example` file to `.env` in the `supabase/docker` directory. You can modify the `.env` file to add your own environment variables.
+  This command will copy the `.env.example` file to `.env` in the `supabase` directory. You can modify the `.env` file to add your own environment variables.
 
 * Run the backend services:
   ```