Mirror of https://github.com/Significant-Gravitas/AutoGPT.git (synced 2026-01-06 22:03:59 -05:00)
We have been submoduling Supabase to provision local Supabase instances with docker-compose. Aside from pulling in a huge amount of unrelated code, this carries the risk of pulling unintentional breaking changes from upstream into the platform. The latest Supabase changes remove the 5432 port mapping from the supabase-db container and move it to supavisor, an instance we are not currently using. This breaks the existing setup.

## BREAKING CHANGES

This change introduces different volume locations for the database content, so pulling it will start the database content fresh. To keep your old data after this change, execute this command:

```
cp -r supabase/docker/volumes/db/data db/docker/volumes/db/data
```

### Changes 🏗️

The scope of this PR is snapshotting the current docker-compose code from the Supabase repository and embedding it in our repository. This eliminates the need for submodule / recursive cloning, which pulls the entire Supabase repository into the platform.

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  <!-- Put your test plan here: -->
  - [x] Existing CI
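For reference, the port change described above is visible in the compose snapshot below. Here is a minimal excerpt (every service name, image, and variable is taken from that file; nothing new is introduced): the `db` service no longer publishes a host port, while the `supavisor` pooler now owns the `5432` mapping.

```yaml
services:
  db:
    container_name: supabase-db
    image: supabase/postgres:15.8.1.049
    # note: no "ports:" entry -- 5432 is no longer published on the host by supabase-db

  supavisor:
    container_name: supabase-pooler
    image: supabase/supavisor:2.4.12
    ports:
      - ${POSTGRES_PORT}:5432                  # host access to Postgres now goes through the pooler
      - ${POOLER_PROXY_PORT_TRANSACTION}:6543  # transaction-mode pooling port
```

Anything that previously targeted the published 5432 on the supabase-db container should now go through the pooler's `${POSTGRES_PORT}` mapping instead.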
527 lines · 17 KiB · YAML
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
# Reset everything: ./reset.sh

name: supabase

services:

  studio:
    container_name: supabase-studio
    image: supabase/studio:20250224-d10db0f
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
        ]
      timeout: 10s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}

      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      AUTH_JWT_SECRET: ${JWT_SECRET}

      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.170.0
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}

      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}

      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}

      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}

      # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
      # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true

      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}

      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
      # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook

      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"

      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"

      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"

      # GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
      # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

      # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
      # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
      # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v12.2.8
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        "postgrest"
      ]

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.34.40
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "-H",
          "Authorization: Bearer ${ANON_KEY}",
          "http://localhost:4000/api/tenants/realtime-dev/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      ERL_AFLAGS: -proto_dist inet_tcp
      DNS_NODES: "''"
      RLIMIT_NOFILE: "10000"
      APP_NAME: realtime
      SEED_SELF_HOST: true
      RUN_JANITOR: true

  # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.19.3
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://storage:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          "CMD",
          "imgproxy",
          "health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.86.1
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.67.2
    restart: unless-stopped
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    command:
      [
        "start",
        "--main-service",
        "/home/deno/functions/main"
      ]

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.12.5
    restart: unless-stopped
    ports:
      - 4000:4000
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "http://localhost:4000/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 10
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: _supabase
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1

      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.8.1.049
    restart: unless-stopped
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for internal supabase data such as _analytics
      - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      # Changes required for Pooler support
      - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
      # Use named volume to persist pgsodium decryption key between restarts
      - db-config:/etc/postgresql-custom
    healthcheck:
      test:
        [
          "CMD",
          "pg_isready",
          "-U",
          "postgres",
          "-h",
          "localhost"
        ]
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        "postgres",
        "-c",
        "config_file=/etc/postgresql/postgresql.conf",
        "-c",
        "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
      ]

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine
    restart: unless-stopped
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
    command:
      [
        "--config",
        "/etc/vector/vector.yml"
      ]

  # Update the DATABASE_URL if you are using an external Postgres database
  supavisor:
    container_name: supabase-pooler
    image: supabase/supavisor:2.4.12
    restart: unless-stopped
    ports:
      - ${POSTGRES_PORT}:5432
      - ${POOLER_PROXY_PORT_TRANSACTION}:6543
    volumes:
      - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "http://127.0.0.1:4000/api/health"
        ]
      interval: 10s
      timeout: 5s
      retries: 5
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PORT: 4000
      POSTGRES_PORT: ${POSTGRES_PORT}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase
      CLUSTER_POSTGRES: true
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      VAULT_ENC_KEY: ${VAULT_ENC_KEY}
      API_JWT_SECRET: ${JWT_SECRET}
      METRICS_JWT_SECRET: ${JWT_SECRET}
      REGION: local
      ERL_AFLAGS: -proto_dist inet_tcp
      POOLER_TENANT_ID: ${POOLER_TENANT_ID}
      POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
      POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
      POOLER_POOL_MODE: transaction
    command:
      [
        "/bin/sh",
        "-c",
        "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
      ]

volumes:
  db-config: