Files
sim/docker-compose.ollama.yml
Waleed Latif 717e17d02a feat(bun): upgrade to bun, reduce docker image size by 95%, upgrade docs & ci (#371)
* migrate to bun

* added envvars to drizzle

* upgrade bun devcontainer feature to a valid one

* added bun, docker not working

* updated envvars, updated to bundler and esnext modules

* fixed build, reinstated otel

* feat: optimized multi-stage docker images

* add coerce for boolean envvar

* feat: add docker-compose configuration for local LLM services and remove legacy Dockerfile and entrypoint script

* feat: add docker-compose files for local and production environments, and implement GitHub Actions for Docker image build and publish

* refactor: remove unused generateStaticParams function from various API routes and maintain dynamic rendering

* cleanup

* upgraded bun

* updated ci

* fixed build

---------

Co-authored-by: Aditya Tripathi <aditya@climactic.co>
2025-05-18 01:01:32 -07:00

49 lines
1.2 KiB
YAML

# Local Ollama LLM services for the simulator.
# Exactly one of the two services is selected via Compose profiles:
#   docker compose --profile local-gpu up   # NVIDIA GPU-accelerated
#   docker compose --profile local-cpu up   # CPU-only fallback
# Both publish the same host port (11434), so the profiles are mutually
# exclusive by design — do not enable both at once.
services:
  local-llm-gpu:
    profiles:
      - local-gpu # GPU-accelerated variant; enable with --profile local-gpu
    image: ollama/ollama:latest
    pull_policy: always
    volumes:
      # Share the host's model cache so models survive container restarts.
      - ${HOME}/.ollama:/root/.ollama
    ports:
      - '11434:11434'
    environment:
      - NVIDIA_DRIVER_CAPABILITIES=all
      - OLLAMA_LOAD_TIMEOUT=-1 # -1 disables the model-load timeout
      - OLLAMA_KEEP_ALIVE=-1 # -1 keeps loaded models resident indefinitely
      - OLLAMA_DEBUG=1
    command: 'serve'
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all available NVIDIA GPUs for this container.
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama/ollama
      # image — verify, or switch to a tool known to ship in the image.
      test: ['CMD', 'curl', '-f', 'http://localhost:11434/']
      interval: 10s
      timeout: 5s
      retries: 5

  local-llm-cpu:
    profiles:
      - local-cpu # CPU-only variant; enable with --profile local-cpu
    image: ollama/ollama:latest
    pull_policy: always
    volumes:
      # Same host model cache as the GPU service — models are shared.
      - ${HOME}/.ollama:/root/.ollama
    ports:
      - '11434:11434'
    environment:
      - OLLAMA_LOAD_TIMEOUT=-1 # -1 disables the model-load timeout
      - OLLAMA_KEEP_ALIVE=-1 # -1 keeps loaded models resident indefinitely
      - OLLAMA_DEBUG=1
    command: 'serve'
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama/ollama
      # image — verify, or switch to a tool known to ship in the image.
      test: ['CMD', 'curl', '-f', 'http://localhost:11434/']
      interval: 10s
      timeout: 5s
      retries: 5