hub-monorepo/packages/hub-nodejs/examples/replicate-data-postgres/docker-compose.yml
chore: Optimize replication example (#955)
Shane da Silva · 5fee6b4f25 · 2023-05-09 22:07:25 -07:00

Fetch data in batches and execute jobs concurrently. This reduces the
estimated run time from ~4 hours to under 1 hour on my machine.

We could probably make this even faster by implementing a
`getAllMessages` endpoint on the hubs.
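
A minimal sketch of the "fetch in batches, execute jobs concurrently" pattern the commit describes. `fetchPage`, `storeMessage`, and the `PAGE_SIZE`/`CONCURRENCY` values are hypothetical stand-ins, not real hub-nodejs APIs; the actual example paginates the hub's endpoints and writes rows to Postgres.

```typescript
type Message = { data: unknown };
type Page = { messages: Message[]; nextPageToken?: string };

const PAGE_SIZE = 1_000; // messages fetched per request (assumed)
const CONCURRENCY = 10;  // jobs processed in parallel (assumed)

async function fetchPage(pageSize: number, pageToken?: string): Promise<Page> {
  // ...call a paginated hub endpoint here (placeholder)...
  return { messages: [], nextPageToken: undefined };
}

async function storeMessage(message: Message): Promise<void> {
  // ...insert the message into Postgres here (placeholder)...
}

async function backfill(): Promise<void> {
  let pageToken: string | undefined;
  do {
    const { messages, nextPageToken } = await fetchPage(PAGE_SIZE, pageToken);
    // Bounded concurrency: process each batch in CONCURRENCY-sized chunks
    // so no more than CONCURRENCY jobs are ever in flight at once.
    for (let i = 0; i < messages.length; i += CONCURRENCY) {
      await Promise.all(messages.slice(i, i + CONCURRENCY).map(storeMessage));
    }
    pageToken = nextPageToken;
  } while (pageToken !== undefined);
}

backfill().catch(console.error);
```

Fetching a large page while the previous batch is still being written is what collapses the wall-clock time: the run is no longer bottlenecked on one round trip per message.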


version: '3.9'
services:
  app:
    image: 'node:20-alpine'
    restart: unless-stopped
    command: ["sh", "-c", "yarn install && exec yarn start"]
    init: true
    environment:
      - NODE_OPTIONS=--max-old-space-size=512 # Limit memory usage
      - POSTGRES_URL=postgres://app:password@postgres:5432/hub
    volumes:
      - .:/home/node/app
      - app_node_modules:/home/node/app/node_modules
    working_dir: /home/node/app
    depends_on:
      - postgres
    networks:
      - my_network
  postgres:
    image: 'postgres:15-alpine'
    restart: unless-stopped
    ports:
      - '6541:5432' # Use a port unlikely to be in use so the example "Just Works"
    environment:
      - POSTGRES_DB=hub
      - POSTGRES_USER=app
      - POSTGRES_PASSWORD=password
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready']
      interval: 10s
      timeout: 10s
      retries: 3
    networks:
      - my_network

volumes:
  pgdata:
  app_node_modules:

networks:
  my_network:
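
A minimal sketch, assuming the `pg` package, of how the app service can reach Postgres via the `POSTGRES_URL` the compose file injects; the actual example app may wire its connection up differently.

```typescript
import { Pool } from 'pg';

// POSTGRES_URL is set in the app service's environment above.
const pool = new Pool({ connectionString: process.env.POSTGRES_URL });

async function main(): Promise<void> {
  const { rows } = await pool.query('SELECT 1 AS ok');
  console.log('database reachable:', rows[0].ok === 1);
  await pool.end();
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```

Note that inside the compose network the app connects to `postgres:5432`, as `POSTGRES_URL` reflects; the published `6541` host port only matters when connecting from the host machine, e.g. with `psql`.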