Mirror of https://github.com/Significant-Gravitas/AutoGPT.git
We have been including Supabase as a git submodule to provision local Supabase instances with docker-compose. Aside from the huge amount of unrelated code this pulls in, it also risks pulling unintentional breaking changes from upstream into the platform. The latest Supabase changes hide port 5432 on the supabase-db container and shift it to Supavisor, a service we are not currently using, which causes an error in the existing setup (see the sketch after the checklist).

## BREAKING CHANGES

This change introduces different volume locations for the database content, so pulling it will start the database content fresh. To keep your old data across this change, execute this command:

```
cp -r supabase/docker/volumes/db/data db/docker/volumes/db/data
```

### Changes 🏗️

The scope of this PR is snapshotting the current docker-compose code obtained from the Supabase repository and embedding it into our repository. This eliminates the need for submodule / recursive cloning and for bringing the entire Supabase repository into the platform.

### Checklist 📋

#### For code changes:

- [x] I have clearly listed my changes in the PR description
- [x] I have made a test plan
- [x] I have tested my changes according to the test plan:
  <!-- Put your test plan here: -->
  - [x] Existing CI
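For context on the breakage, here is a minimal sketch of the upstream change, assuming heavily simplified service definitions (service layout and port mappings are illustrative, not the actual compose files):

```yaml
# Previously: the supabase-db container published Postgres directly,
# so host clients could reach the database on port 5432.
services:
  db:
    container_name: supabase-db
    ports:
      - "5432:5432"
---
# After the latest upstream change: Supavisor (a connection pooler this
# platform does not use) publishes the port instead, and supabase-db no
# longer exposes 5432 to the host, hence the error in the existing setup.
services:
  supavisor:
    ports:
      - "5432:5432"
```

Snapshotting the compose files into this repository pins us to the known-good layout.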
The snapshotted Vector log-pipeline configuration (233 lines · 7.1 KiB · YAML):
```yaml
api:
  # Vector's internal API (exposes a health endpoint on :9001).
  enabled: true
  address: 0.0.0.0:9001

sources:
  # Tail logs from every container on the host except Vector itself.
  docker_host:
    type: docker_logs
    exclude_containers:
      - supabase-vector
transforms:
  # Normalize raw docker_logs events: keep the message and container name,
  # drop container metadata the downstream sinks do not need.
  project_logs:
    type: remap
    inputs:
      - docker_host
    source: |-
      .project = "default"
      .event_message = del(.message)
      .appname = del(.container_name)
      del(.container_created_at)
      del(.container_id)
      del(.source_type)
      del(.stream)
      del(.label)
      del(.image)
      del(.host)
      del(.stream)
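  # Illustrative example: a raw docker_logs event like
  #   {"container_name": "supabase-kong", "message": "...", "host": "...", "stream": "stdout", ...}
  # leaves this transform as
  #   {"appname": "supabase-kong", "event_message": "...", "project": "default", "timestamp": ...}
  # so everything downstream only needs to look at .appname and .event_message.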
  router:
    type: route
    inputs:
      - project_logs
    route:
      kong: '.appname == "supabase-kong"'
      auth: '.appname == "supabase-auth"'
      rest: '.appname == "supabase-rest"'
      realtime: '.appname == "supabase-realtime"'
      storage: '.appname == "supabase-storage"'
      functions: '.appname == "supabase-functions"'
      db: '.appname == "supabase-db"'
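  # Each route above is exposed as a named output (router.kong, router.auth,
  # ...) for the transforms and sinks below; events that match no route are
  # not consumed anywhere in this config.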
  # Ignore non-nginx errors since they are related to Kong booting up
  kong_logs:
    type: remap
    inputs:
      - router.kong
    source: |-
      req, err = parse_nginx_log(.event_message, "combined")
      if err == null {
          .timestamp = req.timestamp
          .metadata.request.headers.referer = req.referer
          .metadata.request.headers.user_agent = req.agent
          .metadata.request.headers.cf_connecting_ip = req.client
          .metadata.request.method = req.method
          .metadata.request.path = req.path
          .metadata.request.protocol = req.protocol
          .metadata.response.status_code = req.status
      }
      if err != null {
        abort
      }
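  # Illustrative input for the parser above, in nginx "combined" format:
  #   172.19.0.1 - - [06/Jan/2026:12:00:00 +0000] "GET /rest/v1/users HTTP/1.1" 200 512 "-" "curl/8.0"
  # Lines that fail to parse (e.g. Kong startup noise) hit the abort and are
  # dropped from this transform's output.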
  # Ignore non-nginx errors since they are related to Kong booting up
  kong_err:
    type: remap
    inputs:
      - router.kong
    source: |-
      .metadata.request.method = "GET"
      .metadata.response.status_code = 200
      parsed, err = parse_nginx_log(.event_message, "error")
      if err == null {
          .timestamp = parsed.timestamp
          .severity = parsed.severity
          .metadata.request.host = parsed.host
          .metadata.request.headers.cf_connecting_ip = parsed.client
          url, err = split(parsed.request, " ")
          if err == null {
              .metadata.request.method = url[0]
              .metadata.request.path = url[1]
              .metadata.request.protocol = url[2]
          }
      }
      if err != null {
        abort
      }
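  # The GET/200 assignments above act as fallbacks: nginx error-log entries do
  # not always include a request line, so these fields stay populated even
  # when parsed.request is absent.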
  # GoTrue logs are structured JSON strings which the frontend parses directly,
  # but we keep metadata for consistency.
  auth_logs:
    type: remap
    inputs:
      - router.auth
    source: |-
      parsed, err = parse_json(.event_message)
      if err == null {
          .metadata.timestamp = parsed.time
          .metadata = merge!(.metadata, parsed)
      }
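  # Illustrative GoTrue line (values illustrative):
  #   {"level":"info","msg":"request completed","time":"2026-01-06T12:00:00Z"}
  # merge! folds every parsed key into .metadata, so the original structure
  # survives alongside the raw event_message.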
  # PostgREST logs are structured so we separate timestamp from message using regex
  rest_logs:
    type: remap
    inputs:
      - router.rest
    source: |-
      parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
      if err == null {
          .event_message = parsed.msg
          .timestamp = to_timestamp!(parsed.time)
          .metadata.host = .project
      }
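  # Illustrative PostgREST line (format per the regex above, values illustrative):
  #   06/Jan/2026:12:00:00 +0000: Connection successful
  # The greedy (?P<time>.*) splits at the last ': ', so colons inside the
  # timestamp stay with the time part.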
  # Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
  realtime_logs:
    type: remap
    inputs:
      - router.realtime
    source: |-
      .metadata.project = del(.project)
      .metadata.external_id = .metadata.project
      parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
      if err == null {
          .event_message = parsed.msg
          .metadata.level = parsed.level
      }
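  # Illustrative Realtime line (values illustrative):
  #   12:00:00.123 [info] Connected to the database
  # The level and message are kept; the date-less time is deliberately ignored.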
  # Storage logs may contain JSON objects so we parse them for completeness
  storage_logs:
    type: remap
    inputs:
      - router.storage
    source: |-
      .metadata.project = del(.project)
      .metadata.tenantId = .metadata.project
      parsed, err = parse_json(.event_message)
      if err == null {
          .event_message = parsed.msg
          .metadata.level = parsed.level
          .metadata.timestamp = parsed.time
          .metadata.context[0].host = parsed.hostname
          .metadata.context[0].pid = parsed.pid
      }
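  # Illustrative Storage line (pino-style JSON, values illustrative):
  #   {"level":30,"time":"2026-01-06T12:00:00Z","pid":1,"hostname":"storage","msg":"request completed"}
  # Non-JSON lines fail parse_json and pass through unchanged (no abort here).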
  # Postgres logs some messages to stderr which we map to warning severity level
  db_logs:
    type: remap
    inputs:
      - router.db
    source: |-
      .metadata.host = "db-default"
      .metadata.parsed.timestamp = .timestamp

      parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)

      if err != null || parsed == null {
        .metadata.parsed.error_severity = "info"
      }
      if parsed != null {
        .metadata.parsed.error_severity = parsed.level
      }
      if .metadata.parsed.error_severity == "info" {
        .metadata.parsed.error_severity = "log"
      }

      .metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
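  # Illustrative Postgres stderr line (values illustrative):
  #   2026-01-06 12:00:00.000 UTC [70] LOG:  database system is ready to accept connections
  # Matched lines keep their own severity; unmatched lines fall back to "info",
  # which is then normalized to "log" and finally upcased to "LOG".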
sinks:
  logflare_auth:
    type: 'http'
    inputs:
      - auth_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

  logflare_realtime:
    type: 'http'
    inputs:
      - realtime_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

  logflare_rest:
    type: 'http'
    inputs:
      - rest_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

  logflare_db:
    type: 'http'
    inputs:
      - db_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    # We must route this sink through Kong because ingesting logs before
    # Logflare is fully initialised will lead to broken queries from Studio.
    # This relies on the containers starting in the following order:
    # vector > db > logflare > kong
    uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

  logflare_functions:
    type: 'http'
    inputs:
      - router.functions
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

  logflare_storage:
    type: 'http'
    inputs:
      - storage_logs
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'

  logflare_kong:
    type: 'http'
    inputs:
      - kong_logs
      - kong_err
    encoding:
      codec: 'json'
    method: 'post'
    request:
      retry_max_duration_secs: 10
    uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
```
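If you edit the snapshotted config, Vector can check it before the stack boots: running something like `vector validate <path-to-this-file>` (subcommand per Vector's CLI; path illustrative) catches both YAML mistakes and VRL compile errors in the `source` blocks without starting the pipeline.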