Files
sim/docker-compose.yml
Arunabh Sharma fe2c7d8d98 feat(ollama): Adding ollama for enabling local model agents (#153)
* feat(ollama): add ollama package dependency, add two separate deployment docker compose files and add a shell script to toggle between the deployment docker compose files

add base ollama.ts implementation

add latest attempt to fetch Ollama models dynamically

fix ollama dynamic model fetching, models now being rendered on GUI

fix package and package-lock.json to remove ollama dependency and add types.ts for ollama

switch MODEL_PROVIDERS to getModelProviders

make dynamic ollama model dropdown change using zustand store

make dynamic ollama model changes to router and evaluator ts too

* feat(ollama): fix evaluated options by de-duplicating it

* feat(ollama): make README.md change to reflect local model workflow

* feat(ollama): add base non-ollama docker compose file, add --local flag to start_simstudio_docker.sh with ollama service

* feat(ollama): fix README.md local model instructions

* feat(ollama): remove de-duplication logic and separate getModelProviders into two

* fix non-local init and translate.ts

* create combined docker-compose file and fix start_simstudio_docker script too

* update package-lock.json

* feat(ollama): fix README.md instructions and docker compose

---------

Co-authored-by: Arunabh Sharma <arunabh.sharma@supernal.aero>
2025-03-29 13:34:44 -07:00

100 lines
2.4 KiB
YAML

# Docker Compose stack for Sim Studio: the Next.js app, a Postgres database,
# and two optional Ollama services (GPU / CPU) gated behind Compose profiles.
# NOTE(review): the `version` key is obsolete in the Compose Specification and
# is ignored by modern `docker compose`; kept for older docker-compose v1 tooling.
version: '3.8'

services:
  # Main application container, built from the local Dockerfile.
  simstudio:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "3000:3000"
    volumes:
      # Bind-mount the source for live-reload in development; the anonymous
      # volumes below keep container-built node_modules/.next from being
      # shadowed by the host mount.
      - ./sim:/app
      - /app/node_modules
      - /app/.next
    environment:
      - NODE_ENV=development
      - DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio
      - POSTGRES_URL=postgresql://postgres:postgres@db:5432/simstudio
      - BETTER_AUTH_URL=http://localhost:3000
      - NEXT_PUBLIC_APP_URL=http://localhost:3000
      # NOTE(review): development-only placeholder secrets — override these
      # (e.g. via an .env file) before any non-local deployment.
      - BETTER_AUTH_SECRET=your_auth_secret_here
      - ENCRYPTION_KEY=your_encryption_key_here
      - GOOGLE_CLIENT_ID=placeholder
      - GOOGLE_CLIENT_SECRET=placeholder
      - GITHUB_CLIENT_ID=placeholder
      - GITHUB_CLIENT_SECRET=placeholder
      - RESEND_API_KEY=placeholder
      - WEBCONTAINER_CLIENT_ID=placeholder
    depends_on:
      # Wait for Postgres to pass its healthcheck before starting the app.
      db:
        condition: service_healthy

  # Postgres 16 with a named volume for persistent data.
  db:
    image: postgres:16
    restart: always
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DB=simstudio
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5

  # Ollama with NVIDIA GPU acceleration.
  # Enabled only when started with: docker compose --profile local-gpu up
  local-llm-gpu:
    profiles:
      - local-gpu
    image: ollama/ollama:latest
    pull_policy: always
    volumes:
      # Share the host's model cache so pulls survive container recreation.
      - ${HOME}/.ollama:/root/.ollama
    ports:
      - "11434:11434"
    environment:
      - NVIDIA_DRIVER_CAPABILITIES=all
      # -1 disables the load timeout and keeps models resident indefinitely.
      - OLLAMA_LOAD_TIMEOUT=-1
      - OLLAMA_KEEP_ALIVE=-1
      - OLLAMA_DEBUG=1
    command: "serve"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      # NOTE(review): the ollama/ollama image may not ship `curl`; if the
      # healthcheck always fails, verify the binary exists in the image or
      # switch to a tool that is present.
      test: ["CMD", "curl", "-f", "http://localhost:11434/"]
      interval: 10s
      timeout: 5s
      retries: 5

  # CPU-only Ollama fallback.
  # Enabled only when started with: docker compose --profile local-cpu up
  # (mutually exclusive with local-llm-gpu — both publish host port 11434).
  local-llm-cpu:
    profiles:
      - local-cpu
    image: ollama/ollama:latest
    pull_policy: always
    volumes:
      - ${HOME}/.ollama:/root/.ollama
    ports:
      - "11434:11434"
    environment:
      - OLLAMA_LOAD_TIMEOUT=-1
      - OLLAMA_KEEP_ALIVE=-1
      - OLLAMA_DEBUG=1
    command: "serve"
    healthcheck:
      # NOTE(review): same `curl`-availability caveat as local-llm-gpu.
      test: ["CMD", "curl", "-f", "http://localhost:11434/"]
      interval: 10s
      timeout: 5s
      retries: 5

volumes:
  # Named volume backing the Postgres data directory.
  postgres_data: