services:
  ebook2audiobook:
    image: ebook2audiobook:${DEVICE_TAG:-cpu}  # e.g. DEVICE_TAG=cu128 → ebook2audiobook:cu128
    build:
      context: .
      dockerfile: Dockerfile
      args:
        APP_VERSION: ${APP_VERSION:-25.12.32}
        DEVICE_TAG: ${DEVICE_TAG:-cpu}  # e.g. cu128, cu118, rocm, xpu, cpu
    container_name: ebook2audiobook
    working_dir: /app
    entrypoint: ["python", "app.py", "--script_mode", "full_docker"]
    tty: true
    stdin_open: true
    ports:
      - "7860:7860"
    privileged: true  # Kept because some Unraid users require it
    # NVIDIA GPU passthrough – works on Docker hosts with the NVIDIA Container Toolkit
    # Safely ignored if the toolkit is not installed or no NVIDIA GPU is present
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - .:/app:rw  # Bind mount the current directory – works everywhere
    restart: unless-stopped
    # Legacy NVIDIA runtime fallback – harmless on non-NVIDIA hosts
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
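
# Usage sketch (illustrative, not part of the service definition above): assumes
# Docker Compose v2 on the host; GPU tags additionally assume the NVIDIA Container
# Toolkit is installed. Adjust DEVICE_TAG to your hardware.
#
#   # CPU-only build and run
#   docker compose up --build -d
#
#   # NVIDIA CUDA 12.8 build and run
#   DEVICE_TAG=cu128 docker compose up --build -d
#
#   # Follow logs / stop the stack
#   docker compose logs -f ebook2audiobook
#   docker compose down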