Build and publish cuda enabled docker image (#291)

This commit is contained in:
Han
2026-02-12 20:53:34 +09:00
committed by GitHub
parent 66c516a24d
commit 6b25e94cb0
25 changed files with 654 additions and 329 deletions

View File

@@ -11,27 +11,27 @@ BUILD_COMPILER=false
BUILD_SERVER=false
BUILD_CLUSTER=false
CUDA=false
CUDA_ARCH=""
CUDA_ARCHS=""
RUSTFLAGS=""
usage() {
echo "Usage: $0 --zkvm <zkvm> --tag <tag> [--base] [--compiler] [--server] [--cluster] [--registry <registry>] [--cuda] [--cuda-arch <arch>] [--rustflags <flags>]"
echo "Usage: $0 --zkvm <zkvm> --tag <tag> [--base] [--compiler] [--server] [--cluster] [--registry <registry>] [--cuda] [--cuda-archs <archs>] [--rustflags <flags>]"
echo ""
echo "Required:"
echo " --zkvm <zkvm> zkVM to build for (e.g., zisk, sp1, risc0)"
echo " --tag <tag> Image tag (e.g., 0.1.3, a8d7bc0, local)"
echo " --zkvm <zkvm> zkVM to build for (e.g., zisk, sp1, risc0)"
echo " --tag <tag> Image tag (e.g., 0.1.3, a8d7bc0, local, local-cuda)"
echo ""
echo "Image types (at least one required):"
echo " --base Build the base images"
echo " --compiler Build the compiler image"
echo " --server Build the server image"
echo " --cluster Build the cluster image"
echo " --base Build the base images"
echo " --compiler Build the compiler image"
echo " --server Build the server image"
echo " --cluster Build the cluster image"
echo ""
echo "Optional:"
echo " --registry <reg> Registry prefix (e.g., ghcr.io/eth-act/ere)"
echo " --cuda Enable CUDA support (appends -cuda to tag)"
echo " --cuda-arch <arch> Set CUDA architecture (e.g., sm_120)"
echo " --rustflags <flags> Pass RUSTFLAGS to build"
echo " --registry <registry> Registry prefix (e.g., ghcr.io/eth-act/ere)"
echo " --cuda Enable CUDA support"
echo " --cuda-archs <archs> Set CUDA architectures (comma-separated, e.g., 89,120). Implies --cuda."
echo " --rustflags <flags> Pass RUSTFLAGS to build"
exit 1
}
@@ -70,8 +70,9 @@ while [[ $# -gt 0 ]]; do
CUDA=true
shift
;;
--cuda-arch)
CUDA_ARCH="$2"
--cuda-archs)
CUDA_ARCHS="$2"
CUDA=true
shift 2
;;
--rustflags)
@@ -104,11 +105,6 @@ if [ "$BUILD_BASE" = false ] && [ "$BUILD_COMPILER" = false ] && [ "$BUILD_SERVE
usage
fi
# Format tag with optional -cuda suffix
if [ "$CUDA" = true ]; then
IMAGE_TAG="${IMAGE_TAG}-cuda"
fi
# Format image prefix
if [ -n "$IMAGE_REGISTRY" ]; then
# Remove trailing slash if present
@@ -141,10 +137,49 @@ if [ "$CUDA" = true ]; then
CLUSTER_ZKVM_BUILD_ARGS+=(--build-arg "CUDA=1")
fi
if [ -n "$CUDA_ARCH" ]; then
BASE_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=$CUDA_ARCH")
SERVER_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=$CUDA_ARCH")
CLUSTER_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=$CUDA_ARCH")
# Default CUDA_ARCHS when --cuda is set but --cuda-archs not specified
if [ "$CUDA" = true ] && [ -z "$CUDA_ARCHS" ]; then
case "$ZKVM" in
zisk) CUDA_ARCHS="120" ;; # Default to RTX 50 series (ZisK only support setting single CUDA arch)
*) CUDA_ARCHS="89,120" ;; # Default to RTX 40 and 50 series
esac
fi
# Per-zkVM CUDA architecture translation
if [ "$CUDA" = true ] && [ -n "$CUDA_ARCHS" ]; then
case "$ZKVM" in
airbender)
CUDAARCHS=$(echo "$CUDA_ARCHS" | tr ',' ';')
BASE_ZKVM_BUILD_ARGS+=(--build-arg "CUDAARCHS=$CUDAARCHS")
SERVER_ZKVM_BUILD_ARGS+=(--build-arg "CUDAARCHS=$CUDAARCHS")
;;
openvm)
BASE_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=$CUDA_ARCHS")
SERVER_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=$CUDA_ARCHS")
;;
risc0)
NVCC_APPEND_FLAGS=""
IFS=',' read -ra ARCH_ARRAY <<< "$CUDA_ARCHS"
for arch in "${ARCH_ARRAY[@]}"; do
NVCC_APPEND_FLAGS+=" --generate-code arch=compute_${arch},code=sm_${arch}"
done
NVCC_APPEND_FLAGS="${NVCC_APPEND_FLAGS# }"
BASE_ZKVM_BUILD_ARGS+=(--build-arg "NVCC_APPEND_FLAGS=$NVCC_APPEND_FLAGS")
SERVER_ZKVM_BUILD_ARGS+=(--build-arg "NVCC_APPEND_FLAGS=$NVCC_APPEND_FLAGS")
;;
zisk)
IFS=',' read -ra ARCH_ARRAY <<< "$CUDA_ARCHS"
if [ "${#ARCH_ARRAY[@]}" -ne 1 ]; then
echo "Error: Multiple CUDA architectures are not supported for zisk: $CUDA_ARCHS"
exit 1
fi
BASE_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=sm_${ARCH_ARRAY[0]}")
SERVER_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=sm_${ARCH_ARRAY[0]}")
CLUSTER_ZKVM_BUILD_ARGS+=(--build-arg "CUDA_ARCH=sm_${ARCH_ARRAY[0]}")
;;
*)
;;
esac
fi
if [ -n "$RUSTFLAGS" ]; then

View File

@@ -9,17 +9,19 @@ ZKVM=""
IMAGE_REGISTRY=""
IMAGE_TAG=""
CACHED_IMAGE_TAG=""
CUDA_ARCHS=""
usage() {
echo "Usage: $0 --zkvm <zkvm> --registry <registry> --tag <tag> [--cached-tag <cached-tag>]"
echo "Usage: $0 --zkvm <zkvm> --tag <tag> [--registry <registry>] [--cached-tag <cached-tag>] [--cuda-archs <archs>]"
echo ""
echo "Required:"
echo " --zkvm <zkvm> zkVM to build for (e.g., zisk, sp1, risc0)"
echo " --registry <registry> Registry prefix (e.g., ghcr.io/eth-act/ere)"
echo " --tag <tag> Image tag (e.g., 0.1.3, a8d7bc0)"
echo " --tag <tag> Image tag (e.g., 0.1.3, a8d7bc0, local, local-cuda)"
echo ""
echo "Optional:"
echo " --registry <registry> Registry prefix (e.g., ghcr.io/eth-act/ere)"
echo " --cached-tag <cached-tag> Cached image tag to try pulling from (skips pull if empty)"
echo " --cuda-archs <archs> Set CUDA architectures (comma-separated, e.g., 89,120)"
exit 1
}
@@ -42,6 +44,10 @@ while [[ $# -gt 0 ]]; do
CACHED_IMAGE_TAG="$2"
shift 2
;;
--cuda-archs)
CUDA_ARCHS="$2"
shift 2
;;
--help|-h)
usage
;;
@@ -58,35 +64,36 @@ if [ -z "$ZKVM" ]; then
usage
fi
if [ -z "$IMAGE_REGISTRY" ]; then
echo "Error: --registry is required"
usage
fi
if [ -z "$IMAGE_TAG" ]; then
echo "Error: --tag is required"
usage
fi
BASE_IMAGE="$IMAGE_REGISTRY/ere-base:$IMAGE_TAG"
BASE_ZKVM_IMAGE="$IMAGE_REGISTRY/ere-base-$ZKVM:$IMAGE_TAG"
CACHED_BASE_IMAGE="$IMAGE_REGISTRY/ere-base:$CACHED_IMAGE_TAG"
CACHED_BASE_ZKVM_IMAGE="$IMAGE_REGISTRY/ere-base-$ZKVM:$CACHED_IMAGE_TAG"
# Format image prefix
if [ -n "$IMAGE_REGISTRY" ]; then
# Remove trailing slash if present
IMAGE_REGISTRY="${IMAGE_REGISTRY%/}"
IMAGE_PREFIX="${IMAGE_REGISTRY}/"
else
IMAGE_PREFIX=""
fi
# Format image
BASE_ZKVM_IMAGE="${IMAGE_PREFIX}ere-base-${ZKVM}:${IMAGE_TAG}"
CACHED_BASE_ZKVM_IMAGE="${IMAGE_PREFIX}ere-base-${ZKVM}:${CACHED_IMAGE_TAG}"
# Pull or build ere-base and ere-base-$ZKVM locally
if [ -n "$CACHED_IMAGE_TAG" ] \
&& docker image pull "$CACHED_BASE_IMAGE" \
&& docker image pull "$CACHED_BASE_ZKVM_IMAGE";
then
echo "Tagging ere-base from cache"
docker tag "$CACHED_BASE_IMAGE" "$BASE_IMAGE"
echo "Tagging ere-base-$ZKVM from cache"
docker tag "$CACHED_BASE_ZKVM_IMAGE" "$BASE_ZKVM_IMAGE"
else
echo "Building base images using build-image.sh"
"$SCRIPT_DIR/build-image.sh" \
--zkvm "$ZKVM" \
--registry "$IMAGE_REGISTRY" \
--tag "$IMAGE_TAG" \
--base
BUILD_ARGS=(--zkvm "$ZKVM" --registry "$IMAGE_REGISTRY" --tag "$IMAGE_TAG" --base)
if [ -n "$CUDA_ARCHS" ]; then
BUILD_ARGS+=(--cuda-archs "$CUDA_ARCHS")
fi
"$SCRIPT_DIR/build-image.sh" "${BUILD_ARGS[@]}"
fi

View File

@@ -0,0 +1,186 @@
name: Build and push images
on:
push:
branches:
- master
env:
CUDA_ARCHS: '89,120'
jobs:
image_meta:
name: Get image metadata
runs-on: ubuntu-latest
outputs:
sha_tag: ${{ steps.meta.outputs.sha_tag }}
registry: ${{ steps.meta.outputs.registry }}
steps:
- name: Get image metadata
id: meta
run: |
GIT_SHA="${{ github.sha }}"
echo "sha_tag=${GIT_SHA:0:7}" >> $GITHUB_OUTPUT
echo "registry=ghcr.io/${{ github.repository }}" >> $GITHUB_OUTPUT
build_and_push:
name: Build and push docker image (${{ matrix.zkvm }})
needs: image_meta
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
zkvm:
- airbender
- jolt
- miden
- nexus
- openvm
- pico
- risc0
- sp1
- ziren
- zisk
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build ere-base and ere-base-${{ matrix.zkvm }} images
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ matrix.zkvm }} \
--registry ${{ needs.image_meta.outputs.registry }} \
--tag ${{ needs.image_meta.outputs.sha_tag }} \
--base
- name: Push ere-base and ere-base-${{ matrix.zkvm }} images
run: |
docker push ${{ needs.image_meta.outputs.registry }}/ere-base-${{ matrix.zkvm }}:${{ needs.image_meta.outputs.sha_tag }}
- name: Build ere-compiler-${{ matrix.zkvm }} and ere-server-${{ matrix.zkvm }} images
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ matrix.zkvm }} \
--registry ${{ needs.image_meta.outputs.registry }} \
--tag ${{ needs.image_meta.outputs.sha_tag }} \
--compiler \
--server
- name: Push ere-compiler-${{ matrix.zkvm }} and ere-server-${{ matrix.zkvm }} images
run: |
docker push ${{ needs.image_meta.outputs.registry }}/ere-compiler-${{ matrix.zkvm }}:${{ needs.image_meta.outputs.sha_tag }}
docker push ${{ needs.image_meta.outputs.registry }}/ere-server-${{ matrix.zkvm }}:${{ needs.image_meta.outputs.sha_tag }}
build_and_push_cuda:
name: Build and push CUDA docker image (${{ matrix.zkvm }})
needs: image_meta
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
zkvm:
- airbender
- openvm
- risc0
- sp1
- zisk
include:
- zkvm: zisk
cuda_archs: '120'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build ere-base and ere-base-${{ matrix.zkvm }} images with CUDA enabled
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ matrix.zkvm }} \
--registry ${{ needs.image_meta.outputs.registry }} \
--tag ${{ needs.image_meta.outputs.sha_tag }}-cuda \
--base \
--cuda-archs '${{ matrix.cuda_archs || env.CUDA_ARCHS }}'
- name: Push ere-base and ere-base-${{ matrix.zkvm }} images with CUDA enabled
run: |
docker push ${{ needs.image_meta.outputs.registry }}/ere-base-${{ matrix.zkvm }}:${{ needs.image_meta.outputs.sha_tag }}-cuda
- name: Build ere-server-${{ matrix.zkvm }} image with CUDA enabled
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ matrix.zkvm }} \
--registry ${{ needs.image_meta.outputs.registry }} \
--tag ${{ needs.image_meta.outputs.sha_tag }}-cuda \
--server \
--cuda-archs '${{ matrix.cuda_archs || env.CUDA_ARCHS }}'
- name: Push ere-server-${{ matrix.zkvm }} image with CUDA enabled
run: |
docker push ${{ needs.image_meta.outputs.registry }}/ere-server-${{ matrix.zkvm }}:${{ needs.image_meta.outputs.sha_tag }}-cuda
build_and_push_cluster:
name: Build and push cluster docker image (${{ matrix.zkvm }})
needs: image_meta
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
zkvm:
- zisk
include:
- zkvm: zisk
cuda_archs: '120'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build ere-cluster-${{ matrix.zkvm }} image with CUDA enabled
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ matrix.zkvm }} \
--registry ${{ needs.image_meta.outputs.registry }} \
--tag ${{ needs.image_meta.outputs.sha_tag }}-cuda \
--cluster \
--cuda-archs '${{ matrix.cuda_archs || env.CUDA_ARCHS }}'
- name: Push ere-cluster-${{ matrix.zkvm }} image with CUDA enabled
run: |
docker push ${{ needs.image_meta.outputs.registry }}/ere-cluster-${{ matrix.zkvm }}:${{ needs.image_meta.outputs.sha_tag }}-cuda

View File

@@ -2,7 +2,7 @@ name: Check formatting
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:

90
.github/workflows/push-semver-tag.yml vendored Normal file
View File

@@ -0,0 +1,90 @@
name: Push SemVer tag
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
tags:
- 'v*'
jobs:
image_meta:
name: Get image metadata
runs-on: ubuntu-latest
outputs:
sha_tag: ${{ steps.meta.outputs.sha_tag }}
semver_tag: ${{ steps.meta.outputs.semver_tag }}
registry: ${{ steps.meta.outputs.registry }}
steps:
- name: Get image metadata
id: meta
run: |
GIT_SHA="${{ github.sha }}"
GIT_TAG="${{ github.ref_name }}"
echo "sha_tag=${GIT_SHA:0:7}" >> $GITHUB_OUTPUT
echo "semver_tag=${GIT_TAG#v}" >> $GITHUB_OUTPUT
echo "registry=ghcr.io/${{ github.repository }}" >> $GITHUB_OUTPUT
create_semver_tag:
name: Tag images with SemVer (${{ matrix.zkvm }})
needs: image_meta
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
include:
- { zkvm: airbender, cuda: true , cluster: false }
- { zkvm: jolt, cuda: false, cluster: false }
- { zkvm: miden, cuda: false, cluster: false }
- { zkvm: nexus, cuda: false, cluster: false }
- { zkvm: openvm, cuda: true , cluster: false }
- { zkvm: pico, cuda: false, cluster: false }
- { zkvm: risc0, cuda: true , cluster: false }
- { zkvm: sp1, cuda: true , cluster: false }
- { zkvm: ziren, cuda: false, cluster: false }
- { zkvm: zisk, cuda: true , cluster: true }
steps:
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Add SemVer tag to images
run: |
SHA_TAG="${{ needs.image_meta.outputs.sha_tag }}"
SEMVER_TAG="${{ needs.image_meta.outputs.semver_tag }}"
for IMAGE_KIND in base compiler server; do
IMAGE="${{ needs.image_meta.outputs.registry }}/ere-$IMAGE_KIND-${{ matrix.zkvm }}"
echo "Tagging $IMAGE:$SHA_TAG as $IMAGE:$SEMVER_TAG"
docker buildx imagetools create "$IMAGE:$SHA_TAG" --tag "$IMAGE:$SEMVER_TAG"
done
- name: Add SemVer tag to images with CUDA enabled
if: matrix.cuda
run: |
SHA_TAG="${{ needs.image_meta.outputs.sha_tag }}-cuda"
SEMVER_TAG="${{ needs.image_meta.outputs.semver_tag }}-cuda"
for IMAGE_KIND in base server; do
IMAGE="${{ needs.image_meta.outputs.registry }}/ere-$IMAGE_KIND-${{ matrix.zkvm }}"
echo "Tagging $IMAGE:$SHA_TAG as $IMAGE:$SEMVER_TAG"
docker buildx imagetools create "$IMAGE:$SHA_TAG" --tag "$IMAGE:$SEMVER_TAG"
done
- name: Add SemVer tag to cluster image with CUDA enabled
if: matrix.cluster
run: |
SHA_TAG="${{ needs.image_meta.outputs.sha_tag }}-cuda"
SEMVER_TAG="${{ needs.image_meta.outputs.semver_tag }}-cuda"
IMAGE="${{ needs.image_meta.outputs.registry }}/ere-cluster-${{ matrix.zkvm }}"
echo "Tagging $IMAGE:$SHA_TAG as $IMAGE:$SEMVER_TAG"
docker buildx imagetools create "$IMAGE:$SHA_TAG" --tag "$IMAGE:$SEMVER_TAG"

View File

@@ -2,7 +2,7 @@ name: Test and clippy common crates
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:

View File

@@ -2,22 +2,19 @@ name: Test and clippy Airbender
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: airbender
skip_prove_test: true
cuda: true

View File

@@ -2,21 +2,17 @@ name: Test and clippy Jolt
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: jolt

View File

@@ -2,21 +2,17 @@ name: Test and clippy Miden
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: miden

View File

@@ -2,21 +2,17 @@ name: Test and clippy Nexus
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: nexus

View File

@@ -2,21 +2,18 @@ name: Test and clippy OpenVM
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: openvm
cuda: true

View File

@@ -2,21 +2,17 @@ name: Test and clippy Pico
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: pico

View File

@@ -2,21 +2,18 @@ name: Test and clippy Risc0
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: risc0
cuda: true

View File

@@ -2,21 +2,18 @@ name: Test and clippy SP1
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: sp1
cuda: true

View File

@@ -2,21 +2,17 @@ name: Test and clippy Ziren
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: ziren

View File

@@ -2,22 +2,21 @@ name: Test and clippy ZisK
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
on:
push:
branches:
- master
tags:
- 'v*'
workflow_run:
workflows: ['Build and push images']
types: [completed]
pull_request:
jobs:
test:
if: github.event_name == 'pull_request' || github.event.workflow_run.conclusion == 'success'
uses: ./.github/workflows/test-zkvm.yml
permissions:
contents: read
packages: write
with:
zkvm: zisk
cuda: true
cuda_archs: '120'
cluster: true
skip_prove_test: true

View File

@@ -7,6 +7,21 @@ on:
description: 'zkVM to test'
required: true
type: string
cuda:
description: 'Whether to build CUDA-enabled images'
required: false
type: boolean
default: false
cuda_archs:
description: 'Comma-separated CUDA archs to gencode'
required: false
type: string
default: '89,120'
cluster:
description: 'Whether to build cluster image'
required: false
type: boolean
default: false
# Remove when we use larger runners, currently only needed to skip for zisk
skip_prove_test:
description: 'Whether to skip prove test and ere-dockerized test or not'
@@ -18,73 +33,20 @@ env:
CARGO_TERM_COLOR: always
jobs:
create_semver_image_tag:
name: Tag image with SemVer
if: startsWith(github.ref, 'refs/tags/')
image_meta:
name: Get image metadata
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Add SemVer tag to images
run: |
GIT_SHA="${{ github.sha }}"
GIT_SHA_TAG="${GIT_SHA:0:7}"
GIT_TAG="${{ github.ref_name }}"
SEMVER_TAG="${GIT_TAG#v}"
IMAGE_REGISTRY="ghcr.io/${{ github.repository }}"
for IMAGE in \
"ere-base" \
"ere-base-${{ inputs.zkvm }}" \
"ere-compiler-${{ inputs.zkvm }}" \
"ere-server-${{ inputs.zkvm }}"
do
echo "Tagging $IMAGE_REGISTRY/$IMAGE:$GIT_SHA_TAG as $SEMVER_TAG"
docker buildx imagetools create \
"$IMAGE_REGISTRY/$IMAGE:$GIT_SHA_TAG" \
--tag "$IMAGE_REGISTRY/$IMAGE:$SEMVER_TAG"
done
build_image:
name: Build image
if: github.event_name == 'pull_request' || (github.event_name == 'push' && github.ref == 'refs/heads/master')
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
outputs:
dockerfile_changed: ${{ steps.changed_files.outputs.any_changed }}
image_registry: ${{ steps.image_meta.outputs.image_registry }}
image_tag: ${{ steps.image_meta.outputs.image_tag }}
cached_image_tag: ${{ steps.image_meta.outputs.cached_image_tag }}
base_image: ${{ steps.image_meta.outputs.base_image }}
base_zkvm_image: ${{ steps.image_meta.outputs.base_zkvm_image }}
compiler_zkvm_image: ${{ steps.image_meta.outputs.compiler_zkvm_image }}
server_zkvm_image: ${{ steps.image_meta.outputs.server_zkvm_image }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Free up disk space
if: github.event_name == 'push'
run: bash .github/scripts/free-up-disk-space.sh
- name: Log in to GitHub Container Registry
if: github.event_name == 'push'
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Check Dockerfile changes
id: changed_files
@@ -97,7 +59,11 @@ jobs:
- name: Get image metadata
id: image_meta
run: |
GIT_SHA="${{ github.sha }}"
if [ "${{ github.event_name }}" == "workflow_run" ]; then
GIT_SHA="${{ github.event.workflow_run.head_sha }}"
else
GIT_SHA="${{ github.sha }}"
fi
IMAGE_TAG="${GIT_SHA:0:7}"
CACHED_IMAGE_TAG=""
@@ -107,53 +73,17 @@ jobs:
fi
IMAGE_REGISTRY="ghcr.io/${{ github.repository }}"
BASE_IMAGE="$IMAGE_REGISTRY/ere-base:$IMAGE_TAG"
BASE_ZKVM_IMAGE="$IMAGE_REGISTRY/ere-base-${{ inputs.zkvm }}:$IMAGE_TAG"
COMPILER_ZKVM_IMAGE="$IMAGE_REGISTRY/ere-compiler-${{ inputs.zkvm }}:$IMAGE_TAG"
SERVER_ZKVM_IMAGE="$IMAGE_REGISTRY/ere-server-${{ inputs.zkvm }}:$IMAGE_TAG"
echo "image_registry=$IMAGE_REGISTRY" >> $GITHUB_OUTPUT
echo "image_tag=$IMAGE_TAG" >> $GITHUB_OUTPUT
echo "cached_image_tag=$CACHED_IMAGE_TAG" >> $GITHUB_OUTPUT
echo "base_image=$BASE_IMAGE" >> $GITHUB_OUTPUT
echo "base_zkvm_image=$BASE_ZKVM_IMAGE" >> $GITHUB_OUTPUT
echo "compiler_zkvm_image=$COMPILER_ZKVM_IMAGE" >> $GITHUB_OUTPUT
echo "server_zkvm_image=$SERVER_ZKVM_IMAGE" >> $GITHUB_OUTPUT
- name: Build and push ere-base and ere-base-${{ inputs.zkvm }} images
if: github.event_name == 'push'
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ steps.image_meta.outputs.image_registry }} \
--tag ${{ steps.image_meta.outputs.image_tag }} \
--base
docker push ${{ steps.image_meta.outputs.base_image }}
docker push ${{ steps.image_meta.outputs.base_zkvm_image }}
- name: Build and push ere-compiler-${{ inputs.zkvm }} image
if: github.event_name == 'push'
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ steps.image_meta.outputs.image_registry }} \
--tag ${{ steps.image_meta.outputs.image_tag }} \
--compiler
docker push ${{ steps.image_meta.outputs.compiler_zkvm_image }}
- name: Build and push ere-server-${{ inputs.zkvm }} image
if: github.event_name == 'push'
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ steps.image_meta.outputs.image_registry }} \
--tag ${{ steps.image_meta.outputs.image_tag }} \
--server
docker push ${{ steps.image_meta.outputs.server_zkvm_image }}
clippy_via_docker:
name: Clippy via Docker
needs: build_image
build_image_cuda_check:
name: Build image with CUDA enabled
if: inputs.cuda && github.event_name == 'pull_request'
needs: image_meta
runs-on: ubuntu-latest
steps:
- name: Checkout repository
@@ -162,23 +92,69 @@ jobs:
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
- name: Pull or build ere-base and ere-base-${{ inputs.zkvm }} images with CUDA enabled
run: |
CACHED_TAG="${{ needs.image_meta.outputs.cached_image_tag }}"
if [ -n "$CACHED_TAG" ]; then
CACHED_TAG="${CACHED_TAG}-cuda"
fi
bash .github/scripts/pull-or-build-base-zkvm-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }}-cuda \
--cached-tag "$CACHED_TAG" \
--cuda-archs '${{ inputs.cuda_archs }}'
- name: Build ere-server-${{ inputs.zkvm }} image with CUDA enabled
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }}-cuda \
--server \
--cuda-archs '${{ inputs.cuda_archs }}'
- name: Build ere-cluster-${{ inputs.zkvm }} image with CUDA enabled
if: ${{ inputs.cluster && needs.image_meta.outputs.dockerfile_changed == 'true' }}
run: |
bash .github/scripts/build-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }}-cuda \
--cluster \
--cuda-archs '${{ inputs.cuda_archs }}'
clippy_via_docker:
name: Clippy via Docker
needs: image_meta
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
- name: Cache dependencies
uses: Swatinem/rust-cache@v2
with:
key: rust-${{ inputs.zkvm }}-${{ hashFiles('Cargo.lock') }}
- name: Pull published base zkvm image
if: github.event_name == 'workflow_run'
run: |
docker pull ${{ needs.image_meta.outputs.base_zkvm_image }}
- name: Pull base zkvm image or build locally
if: github.event_name == 'pull_request'
run: |
bash .github/scripts/pull-or-build-base-zkvm-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.build_image.outputs.image_registry }} \
--tag ${{ needs.build_image.outputs.image_tag }} \
--cached-tag "${{ needs.build_image.outputs.cached_image_tag }}"
- name: Pull base zkvm image
if: github.event_name == 'push'
run: docker pull ${{ needs.build_image.outputs.base_zkvm_image }}
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }} \
--cached-tag "${{ needs.image_meta.outputs.cached_image_tag }}"
- name: Run cargo clippy for ere-${{ inputs.zkvm }} via Docker
run: |
@@ -189,7 +165,7 @@ jobs:
--volume $HOME/.cargo/registry:/usr/local/cargo/registry \
--volume $HOME/.cargo/git:/usr/local/cargo/git \
--workdir /ere \
${{ needs.build_image.outputs.base_zkvm_image }} \
${{ needs.image_meta.outputs.base_zkvm_image }} \
/bin/bash"
cat <<EOF | $DOCKER_CMD
@@ -207,11 +183,13 @@ jobs:
test_via_docker:
name: Test via Docker
needs: build_image
needs: image_meta
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
@@ -221,18 +199,19 @@ jobs:
with:
key: rust-${{ inputs.zkvm }}-${{ hashFiles('Cargo.lock') }}
- name: Pull published base zkvm image
if: github.event_name == 'workflow_run'
run: |
docker pull ${{ needs.image_meta.outputs.base_zkvm_image }}
- name: Pull base zkvm image or build locally
if: github.event_name == 'pull_request'
run: |
bash .github/scripts/pull-or-build-base-zkvm-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.build_image.outputs.image_registry }} \
--tag ${{ needs.build_image.outputs.image_tag }} \
--cached-tag "${{ needs.build_image.outputs.cached_image_tag }}"
- name: Pull base zkvm image
if: github.event_name == 'push'
run: docker pull ${{ needs.build_image.outputs.base_zkvm_image }}
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }} \
--cached-tag "${{ needs.image_meta.outputs.cached_image_tag }}"
- name: Run cargo test for ere-${{ inputs.zkvm }} via Docker
run: |
@@ -243,7 +222,7 @@ jobs:
--volume $HOME/.cargo/registry:/usr/local/cargo/registry \
--volume $HOME/.cargo/git:/usr/local/cargo/git \
--workdir /ere \
${{ needs.build_image.outputs.base_zkvm_image }} \
${{ needs.image_meta.outputs.base_zkvm_image }} \
/bin/bash"
cat <<EOF | $DOCKER_CMD
@@ -259,11 +238,13 @@ jobs:
test_ere_dockerized:
name: Test ere-dockerized with the selected zkVM
needs: build_image
needs: image_meta
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Free up disk space
run: bash .github/scripts/free-up-disk-space.sh
@@ -278,32 +259,32 @@ jobs:
with:
key: rust-${{ inputs.zkvm }}-${{ hashFiles('Cargo.lock') }}
- name: Pull published images
if: github.event_name == 'workflow_run'
run: |
docker pull ${{ needs.image_meta.outputs.image_registry }}/ere-compiler-${{ inputs.zkvm }}:${{ needs.image_meta.outputs.image_tag }}
docker pull ${{ needs.image_meta.outputs.image_registry }}/ere-server-${{ inputs.zkvm }}:${{ needs.image_meta.outputs.image_tag }}
- name: Pull images or build locally
if: github.event_name == 'pull_request'
run: |
bash .github/scripts/pull-or-build-base-zkvm-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.build_image.outputs.image_registry }} \
--tag ${{ needs.build_image.outputs.image_tag }} \
--cached-tag "${{ needs.build_image.outputs.cached_image_tag }}"
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }} \
--cached-tag "${{ needs.image_meta.outputs.cached_image_tag }}"
# Build ere-compiler-${{ inputs.zkvm }} and ere-server-${{ inputs.zkvm }}
bash .github/scripts/build-image.sh \
--zkvm ${{ inputs.zkvm }} \
--registry ${{ needs.build_image.outputs.image_registry }} \
--tag ${{ needs.build_image.outputs.image_tag }} \
--registry ${{ needs.image_meta.outputs.image_registry }} \
--tag ${{ needs.image_meta.outputs.image_tag }} \
--compiler \
--server
- name: Pull ere-compiler-${{ inputs.zkvm }} and ere-server-${{ inputs.zkvm }} images
if: github.event_name == 'push'
run: |
docker pull ${{ needs.build_image.outputs.compiler_zkvm_image }}
docker pull ${{ needs.build_image.outputs.server_zkvm_image }}
- name: Run cargo test for ere-${{ inputs.zkvm }} via ere-dockerized
env:
ERE_IMAGE_REGISTRY: ${{ needs.build_image.outputs.image_registry }}
ERE_IMAGE_REGISTRY: ${{ needs.image_meta.outputs.image_registry }}
run: |
cargo test --release --package ere-dockerized \
-- ${{ inputs.zkvm }} ${{ inputs.skip_prove_test && '--skip prove' || '' }} --test-threads=1

View File

@@ -5,9 +5,14 @@ pub fn image_tag(zkvm_kind: zkVMKind, gpu: bool) -> String {
let suffix = match (zkvm_kind, gpu) {
// Only the following zkVMs require CUDA setup in the base image
// when GPU support is required.
(zkVMKind::Airbender | zkVMKind::OpenVM | zkVMKind::Risc0 | zkVMKind::Zisk, true) => {
"-cuda"
}
(
zkVMKind::Airbender
| zkVMKind::OpenVM
| zkVMKind::Risc0
| zkVMKind::SP1
| zkVMKind::Zisk,
true,
) => "-cuda",
_ => "",
};
format!("{DOCKER_IMAGE_TAG}{suffix}")

View File

@@ -1,52 +1,60 @@
use std::{env, process::Command};
use tracing::{info, warn};
/// Returns Cuda GPU compute capability, for example
/// - RTX 50 series - returns `12.0`
/// - RTX 40 series - returns `8.9`
/// Detects CUDA compute capabilities of all visible GPUs.
///
/// If there are multiple GPUs available, the first result will be returned.
pub fn cuda_compute_cap() -> Option<String> {
let output = Command::new("nvidia-smi")
/// Returns a sorted, deduplicated list of numeric compute capabilities
/// (e.g. `[89, 120]` for a mix of RTX 40 and RTX 50 series GPUs).
///
/// Returns an empty vec if `nvidia-smi` is not available or fails.
pub fn detect_compute_caps() -> Vec<u32> {
let Ok(output) = Command::new("nvidia-smi")
.args(["--query-gpu=compute_cap", "--format=csv,noheader"])
.output()
.ok()?;
else {
return vec![];
};
if !output.status.success() {
return None;
return vec![];
}
Some(
String::from_utf8_lossy(&output.stdout)
.lines()
.next()?
.trim()
.to_string(),
)
let mut caps: Vec<u32> = String::from_utf8_lossy(&output.stdout)
.lines()
.filter_map(|line| line.trim().replace('.', "").parse::<u32>().ok())
.collect();
caps.sort_unstable();
caps.dedup();
caps
}
/// Returns the GPU code in format `sm_{numeric_compute_cap}` (e.g. `sm_120`).
/// Returns CUDA architectures as a list of numeric values (e.g. `[89, 120]`).
///
/// It does the following checks and returns the first valid value:
/// 1. Read env variable `CUDA_ARCH` and check if it is in valid format.
/// 2. Detect compute capability of the first visible GPU and format to GPU code.
/// 1. Read env variable `CUDA_ARCHS` and validate format (comma-separated numbers).
/// 2. Detect compute capabilities of all visible GPUs.
///
/// Otherwise it returns `None`.
pub fn cuda_arch() -> Option<String> {
if let Ok(cuda_arch) = env::var("CUDA_ARCH") {
if cuda_arch.starts_with("sm_") && cuda_arch[3..].parse::<usize>().is_ok() {
info!("Using CUDA_ARCH {cuda_arch} from env variable");
Some(cuda_arch)
} else {
warn!(
"Skipping CUDA_ARCH {cuda_arch} from env variable (expected to be in format `sm_XX`)"
);
None
/// Returns an empty vec if neither source provides valid architectures.
pub fn cuda_archs() -> Vec<u32> {
if let Ok(val) = env::var("CUDA_ARCHS") {
let archs: Option<Vec<u32>> = val.split(',').map(|s| s.parse::<u32>().ok()).collect();
match archs {
Some(archs) if !archs.is_empty() => {
info!("Using CUDA_ARCHS {val} from env variable");
return archs;
}
_ => warn!(
"Skipping CUDA_ARCHS {val} from env variable \
(expected comma-separated numbers, e.g. \"89,120\")"
),
}
} else if let Some(cap) = cuda_compute_cap() {
info!("Using CUDA compute capability {} detected", cap);
Some(format!("sm_{}", cap.replace(".", "")))
} else {
None
}
let caps = detect_compute_caps();
if !caps.is_empty() {
info!("Detected CUDA compute capabilities (CUDA_ARCHS={caps:?})");
return caps;
}
vec![]
}

View File

@@ -2,7 +2,7 @@ use crate::{
compiler::SerializedProgram,
image::{base_image, base_zkvm_image, server_zkvm_image},
util::{
cuda::cuda_arch,
cuda::cuda_archs,
docker::{
DockerBuildCmd, DockerRunCmd, docker_container_exists, docker_image_exists,
docker_pull_image, stop_docker_container,
@@ -35,12 +35,68 @@ use std::{
};
use tempfile::TempDir;
use tokio::{sync::RwLock, time::sleep};
use tracing::{error, info};
use tracing::{error, info, warn};
mod error;
pub use error::Error;
/// Applies per-zkVM CUDA architecture build args to a Docker build command.
///
/// Each zkVM expects a different format for specifying CUDA architectures:
/// - Airbender: `CUDAARCHS` (semicolon-separated, e.g. "89;120")
/// - OpenVM: `CUDA_ARCH` (comma-separated, e.g. "89,120")
/// - Risc0: `NVCC_APPEND_FLAGS` (nvcc --generate-code flags)
/// - Zisk: `CUDA_ARCH` (supports only one CUDA architecture, e.g. "sm_120")
fn apply_cuda_build_args(
    cmd: DockerBuildCmd,
    zkvm_kind: zkVMKind,
    cuda_archs: &[u32],
) -> Result<DockerBuildCmd, Error> {
    // Without any archs we keep the command untouched and rely on the
    // Dockerfile's own defaults.
    if cuda_archs.is_empty() {
        warn!("No CUDA_ARCHS set or detected, use default value in Dockerfile");
        return Ok(cmd);
    }

    // Render the arch list with a caller-chosen separator.
    let joined = |sep: &str| -> String {
        cuda_archs
            .iter()
            .map(u32::to_string)
            .collect::<Vec<_>>()
            .join(sep)
    };

    let cmd = match zkvm_kind {
        // Airbender reads a semicolon-separated numeric list.
        zkVMKind::Airbender => cmd.build_arg("CUDAARCHS", joined(";")),
        // OpenVM reads a comma-separated numeric list.
        zkVMKind::OpenVM => cmd.build_arg("CUDA_ARCH", joined(",")),
        // Risc0 takes fully-formed nvcc flags, one `--generate-code` per arch.
        zkVMKind::Risc0 => {
            let flags = cuda_archs
                .iter()
                .map(|arch| format!("--generate-code arch=compute_{arch},code=sm_{arch}"))
                .collect::<Vec<_>>()
                .join(" ");
            cmd.build_arg("NVCC_APPEND_FLAGS", flags)
        }
        // Zisk only accepts a single `sm_XX` target; reject multi-arch input.
        zkVMKind::Zisk => match cuda_archs {
            [arch] => cmd.build_arg("CUDA_ARCH", format!("sm_{arch}")),
            _ => {
                return Err(Error::UnsupportedMultiCudaArchs(
                    zkVMKind::Zisk,
                    cuda_archs.to_vec(),
                ));
            }
        },
        // Other zkVMs take no arch-specific build args.
        _ => cmd,
    };
    Ok(cmd)
}
/// This method builds 3 Docker images in sequence:
/// 1. `ere-base:{version}` - Base image with common dependencies
/// 2. `ere-base-{zkvm}:{version}` - zkVM-specific base image with the zkVM SDK
@@ -77,6 +133,9 @@ fn build_server_image(zkvm_kind: zkVMKind, gpu: bool) -> Result<(), Error> {
let docker_dir = workspace_dir.join("docker");
let docker_zkvm_dir = docker_dir.join(zkvm_kind.as_str());
// Resolve CUDA architectures once for both base-zkvm and server builds.
let cuda_archs = if gpu { cuda_archs() } else { vec![] };
// Build `ere-base`
if force_rebuild || !docker_image_exists(&base_image)? {
info!("Building image {base_image}...");
@@ -104,15 +163,7 @@ fn build_server_image(zkvm_kind: zkVMKind, gpu: bool) -> Result<(), Error> {
if gpu {
cmd = cmd.build_arg("CUDA", "1");
match zkvm_kind {
zkVMKind::Airbender | zkVMKind::OpenVM | zkVMKind::Risc0 | zkVMKind::Zisk => {
if let Some(cuda_arch) = cuda_arch() {
cmd = cmd.build_arg("CUDA_ARCH", cuda_arch)
}
}
_ => {}
}
cmd = apply_cuda_build_args(cmd, zkvm_kind, &cuda_archs)?;
}
cmd.exec(&workspace_dir)?;
@@ -129,6 +180,7 @@ fn build_server_image(zkvm_kind: zkVMKind, gpu: bool) -> Result<(), Error> {
if gpu {
cmd = cmd.build_arg("CUDA", "1");
cmd = apply_cuda_build_args(cmd, zkvm_kind, &cuda_archs)?;
}
cmd.exec(&workspace_dir)?;

View File

@@ -1,3 +1,4 @@
use ere_common::zkVMKind;
use ere_server::client::{self, ParseError, TwirpErrorResponse};
use ere_zkvm_interface::CommonError;
use thiserror::Error;
@@ -19,6 +20,10 @@ pub enum Error {
CommonError(#[from] CommonError),
#[error(transparent)]
ParseUrl(#[from] ParseError),
#[error(
"Multiple CUDA architectures are not supported for {0:?}, CUDA_ARCHS set or detected: {1:?}"
)]
UnsupportedMultiCudaArchs(zkVMKind, Vec<u32>),
#[error("zkVM method error: {0}")]
zkVM(String),
#[error("Connection to zkVM server timeout after 5 minutes")]

View File

@@ -8,11 +8,8 @@ RUN rustup default nightly-2025-07-25
# Whether to enable CUDA feature or not.
ARG CUDA
# Default to build for RTX 50 series
ARG CUDA_ARCH=sm_120
# Env read by Airbender crate `gpu_prover`, only taking the numeric part
ARG CUDAARCHS=${CUDA_ARCH#sm_}
# Env read by Airbender crate `gpu_prover`, numeric cuda arch IDs (e.g. "120" or "89;120")
ARG CUDAARCHS=120
# Copy the Airbender SDK installer script from the workspace context
COPY --chmod=755 scripts/sdk_installers/install_airbender_sdk.sh /tmp/install_airbender_sdk.sh

View File

@@ -8,15 +8,6 @@ FROM $BASE_IMAGE
# Set default toolchain to nightly
RUN rustup default nightly
# Whether to enable CUDA feature or not.
ARG CUDA
# Default to build for RTX 50 series
ARG CUDA_ARCH=sm_120
# Env variable read by OpenVM crate `cuda-builder`, need to persist it for building `ere-openvm`.
ENV CUDA_ARCH=${CUDA_ARCH#sm_}
# Copy the OpenVM SDK installer script from the workspace context
COPY --chmod=755 scripts/sdk_installers/install_openvm_sdk.sh /tmp/install_openvm_sdk.sh

View File

@@ -14,6 +14,9 @@ WORKDIR /ere
ARG CUDA
ARG RUSTFLAGS
# Env variable read by OpenVM crate `openvm-cuda-builder`, comma-separated numeric arch IDs (e.g. "120" or "89,120")
ARG CUDA_ARCH=120
RUN cargo build --release --package ere-server --bin ere-server --features openvm${CUDA:+,cuda} \
&& mkdir bin && mv target/release/ere-server bin/ere-server \
&& cargo clean && rm -rf $CARGO_HOME/registry/

View File

@@ -13,10 +13,8 @@ RUN [ -n "$CUDA" ] && \
(curl -o protoc.zip -L https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
&& unzip protoc.zip -d /usr/local) || true
# Default to build for RTX 50 series
ARG CUDA_ARCH=sm_120
ARG NVCC_APPEND_FLAGS="--generate-code arch=compute_${CUDA_ARCH#sm_},code=sm_${CUDA_ARCH#sm_}"
ENV NVCC_APPEND_FLAGS=${NVCC_APPEND_FLAGS}
# Fully formed NVCC flags for CUDA arch targeting (e.g. "--generate-code arch=compute_120,code=sm_120")
ARG NVCC_APPEND_FLAGS="--generate-code arch=compute_120,code=sm_120"
# Copy and run the Risc0 SDK installer script
COPY --chmod=755 scripts/sdk_installers/install_risc0_sdk.sh /tmp/install_risc0_sdk.sh