Upgrade risc0 to 3.0.1 and use the new actor system prover (#103)

Co-authored-by: Ignacio Hagopian <jsign.uy@gmail.com>
Author: Han
Date: 2025-08-22 12:00:54 +08:00 (committed by GitHub)
Parent: 0505f0e6e9
Commit: 5556109753
14 changed files with 494 additions and 859 deletions

Cargo.lock (generated): 545 changed lines; diff suppressed because it is too large.


@@ -70,9 +70,8 @@ pico-vm = { git = "https://github.com/brevis-network/pico.git", tag = "v1.1.4" }
pico-sdk = { git = "https://github.com/brevis-network/pico.git", tag = "v1.1.4" }
# Risc0 dependencies
risc0-build = "2.3.1"
risc0-zkvm = { version = "2.3.1", default-features = false }
bonsai-sdk = { version = "1.4.0" }
risc0-build = "3.0.1"
risc0-zkvm = { version = "3.0.1", default-features = false }
# SP1 dependencies
sp1-sdk = "5.1.0"


@@ -374,10 +374,9 @@ impl zkVM for EreDockerizedzkVM {
// spin up, so we don't need to set here.
ErezkVM::SP1 => cmd.mount_docker_socket().network("host"),
ErezkVM::Risc0 => cmd
.mount_docker_socket()
.network("host")
.inherit_env("CUDA_VISIBLE_DEVICES")
.inherit_env("SEGMENT_SIZE")
.gpus("all")
.inherit_env("RISC0_DEFAULT_PROVER_NUM_GPUS")
.inherit_env("RISC0_SEGMENT_PO2")
.inherit_env("RISC0_KECCAK_PO2"),
ErezkVM::Zisk => cmd.gpus("all"),
_ => cmd,


@@ -7,20 +7,15 @@ license.workspace = true
[dependencies]
anyhow.workspace = true
bincode.workspace = true
borsh.workspace = true
bytemuck.workspace = true
cargo_metadata.workspace = true
serde = { workspace = true, features = ["derive", "rc"] }
serde_yaml.workspace = true
tempfile.workspace = true
thiserror.workspace = true
tracing.workspace = true
# Risc0 dependencies
risc0-build = { workspace = true, default-features = true, features = ["unstable"] }
risc0-build = { workspace = true, features = ["unstable"] }
risc0-zkvm = { workspace = true, default-features = true, features = ["unstable"] }
bonsai-sdk.workspace = true
# Local dependencies
zkvm-interface.workspace = true


@@ -1,226 +0,0 @@
# Copied and modified from https://github.com/risc0/risc0/blob/v2.3.1/compose.yml.
# MODIFIED: Use ere-risc0 as project name
name: ere-risc0
services:
redis:
hostname: ${REDIS_HOST}
image: ${REDIS_IMG}
restart: always
ports:
- 6379:6379
volumes:
- redis-data:/data
postgres:
hostname: ${POSTGRES_HOST}
image: ${POSTGRES_IMG}
restart: always
environment:
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
expose:
- '${POSTGRES_PORT}'
ports:
- '${POSTGRES_PORT}:${POSTGRES_PORT}'
volumes:
- postgres-data:/var/lib/postgresql/data
command: -p ${POSTGRES_PORT}
minio:
hostname: ${MINIO_HOST}
image: ${MINIO_IMG}
ports:
- '9000:9000'
- '9001:9001'
volumes:
- minio-data:/data
command: server /data --console-address ":9001"
environment:
- MINIO_ROOT_USER=${MINIO_ROOT_USER}
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASS}
- MINIO_DEFAULT_BUCKETS=${MINIO_BUCKET}
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# MODIFIED: Commented-out to avoid unnecessary resource waste.
# grafana:
# image: ${GRAFANA_IMG}
# restart: unless-stopped
# ports:
# - '3000:3000'
# environment:
# - GF_SECURITY_ADMIN_USER=admin
# - GF_SECURITY_ADMIN_PASSWORD=admin
# - GF_LOG_LEVEL=WARN
# - POSTGRES_HOST=${POSTGRES_HOST}
# - POSTGRES_DB=${POSTGRES_DB}
# - POSTGRES_PORT={POSTGRES_PORT}
# - POSTGRES_USER=${POSTGRES_USER}
# - POSTGRES_PASS=${POSTGRES_PASSWORD}
# volumes:
# - ./grafana:/etc/grafana/provisioning/
# - grafana-data:/var/lib/grafana
# depends_on:
# - postgres
# - redis
# - minio
exec_agent:
# MODIFIED: Specify the pre-built image.
image: ere-risc0/agent:${RISC0_VERSION}
runtime: nvidia
# MODIFIED: Added to avoid image pulling.
pull_policy: never
restart: always
depends_on:
- postgres
- redis
- minio
# MODIFIED: Commented-out because the image should be pre-built.
# build:
# context: .
# dockerfile: bento/dockerfiles/agent.dockerfile
# args:
# NVCC_APPEND_FLAGS: "--gpu-architecture=compute_86 --gpu-code=compute_86,sm_86 --generate-code arch=compute_86,code=sm_86"
# CUDA_OPT_LEVEL: 1
mem_limit: 4G
cpu_count: 4
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
REDIS_URL: redis://${REDIS_HOST}:6379
S3_URL: http://${MINIO_HOST}:9000
S3_BUCKET: ${MINIO_BUCKET}
# TODO: create a lower perm user on startup of minio to use for agents
S3_ACCESS_KEY: ${MINIO_ROOT_USER}
S3_SECRET_KEY: ${MINIO_ROOT_PASS}
RUST_LOG: ${RUST_LOG}
# MODIFIED: Added to set log color
NO_COLOR: ${NO_COLOR}
RUST_BACKTRACE: 1
RISC0_KECCAK_PO2: ${RISC0_KECCAK_PO2}
# Enable / disable along with gpu_*_agent*
# JOIN_STREAM: 1
# COPROC_STREAM: 1
entrypoint: /app/agent -t exec --segment-po2 ${SEGMENT_SIZE}
aux_agent:
# MODIFIED: Specify the pre-built image.
image: ere-risc0/agent:${RISC0_VERSION}
runtime: nvidia
pull_policy: never
restart: always
depends_on:
- postgres
- redis
- minio
mem_limit: 256M
cpu_count: 1
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
REDIS_URL: redis://${REDIS_HOST}:6379
S3_URL: http://${MINIO_HOST}:9000
S3_BUCKET: ${MINIO_BUCKET}
S3_ACCESS_KEY: ${MINIO_ROOT_USER}
S3_SECRET_KEY: ${MINIO_ROOT_PASS}
RUST_LOG: ${RUST_LOG}
# MODIFIED: Added to set log color
NO_COLOR: ${NO_COLOR}
RUST_BACKTRACE: 1
# NOTE: if adding more aux workers, only one needs --monitor-requeue (for task reaping)
entrypoint: /app/agent -t aux --monitor-requeue
# MODIFIED: Moved to `gpu_prover_agent.yml` as template.
# gpu_prove_agent0: &gpu
# image: agent
# runtime: nvidia
# pull_policy: never
# restart: always
# depends_on:
# - postgres
# - redis
# - minio
# mem_limit: 4G
# cpu_count: 4
# environment:
# DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# REDIS_URL: redis://${REDIS_HOST}:6379
# S3_URL: http://${MINIO_HOST}:9000
# S3_BUCKET: ${MINIO_BUCKET}
# S3_ACCESS_KEY: ${MINIO_ROOT_USER}
# S3_SECRET_KEY: ${MINIO_ROOT_PASS}
# RUST_LOG: ${RUST_LOG}
# RUST_BACKTRACE: 1
# entrypoint: /app/agent -t prove
# # comment-out if running in CPU proving mode
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# # TODO: how to scale this with N gpus?
# device_ids: ['0']
# capabilities: [gpu]
# MODIFIED: Commented-out because we don't need to wrap it for on-chain verification.
# snark_agent:
# image: agent
# runtime: nvidia
# pull_policy: never
# restart: always
# depends_on:
# - postgres
# - redis
# - minio
# environment:
# DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# REDIS_URL: redis://${REDIS_HOST}:6379
# S3_URL: http://${MINIO_HOST}:9000
# S3_BUCKET: ${MINIO_BUCKET}
# S3_ACCESS_KEY: ${MINIO_ROOT_USER}
# S3_SECRET_KEY: ${MINIO_ROOT_PASS}
# RUST_LOG: ${RUST_LOG}
# RUST_BACKTRACE: 1
# entrypoint: /app/agent -t snark
# ulimits:
# # Needed for stark_verify bin
# # TODO: this number was just guess and check found
# stack: 90000000
rest_api:
# MODIFIED: Specify the pre-built image.
image: ere-risc0/rest_api:${RISC0_VERSION}
restart: always
depends_on:
- postgres
- minio
# MODIFIED: Commented-out because the image should be pre-built.
# build:
# context: .
# dockerfile: bento/dockerfiles/rest_api.dockerfile
mem_limit: 1G
cpu_count: 1
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
S3_URL: http://${MINIO_HOST}:9000
S3_BUCKET: ${MINIO_BUCKET}
S3_ACCESS_KEY: ${MINIO_ROOT_USER}
S3_SECRET_KEY: ${MINIO_ROOT_PASS}
RUST_LOG: ${RUST_LOG}
# MODIFIED: Added to set log color
NO_COLOR: ${NO_COLOR}
RUST_BACKTRACE: 1
ports:
- '8081:8081'
entrypoint: /app/rest_api --bind-addr 0.0.0.0:8081
volumes:
redis-data:
postgres-data:
minio-data:
# MODIFIED: Commented-out to avoid unnecessary resource waste.
# grafana-data:


@@ -1,39 +0,0 @@
# Copied and modified from https://github.com/risc0/risc0/blob/v2.3.1/compose.yml.
# In runtime, `ere-risc0` will check env `CUDA_VISIBLE_DEVICES`:
# If it's not set, it will spin up a container of the following service with all
# GPUs access
# If it's set to some device ids, it will spin up containers and each will have
# 1 GPU access.
# MODIFIED: Rename to local image with namespace and version tag.
image: ere-risc0/agent:${RISC0_VERSION}
runtime: nvidia
pull_policy: never
restart: always
depends_on:
- postgres
- redis
- minio
mem_limit: 4G
cpu_count: 4
environment:
DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
REDIS_URL: redis://${REDIS_HOST}:6379
S3_URL: http://${MINIO_HOST}:9000
S3_BUCKET: ${MINIO_BUCKET}
S3_ACCESS_KEY: ${MINIO_ROOT_USER}
S3_SECRET_KEY: ${MINIO_ROOT_PASS}
RUST_LOG: ${RUST_LOG}
# MODIFIED: Added to set log color
NO_COLOR: ${NO_COLOR}
RUST_BACKTRACE: 1
entrypoint: /app/agent -t prove
# comment-out if running in CPU proving mode
deploy:
resources:
reservations:
devices:
- driver: nvidia
# TODO: how to scale this with N gpus?
device_ids: ['0']
capabilities: [gpu]


@@ -1,28 +0,0 @@
# Copied and modified from https://github.com/risc0/risc0/blob/v2.3.1/compose.yml.
# Prover node configs
RUST_LOG=info
# MODIFIED: Added to inherit from host.
NO_COLOR=
REDIS_HOST=redis
REDIS_IMG=redis:7.2.5-alpine3.19
POSTGRES_HOST=postgres
POSTGRES_IMG=postgres:16.3-bullseye
POSTGRES_DB=taskdb
POSTGRES_PORT=5432
POSTGRES_USER=worker
POSTGRES_PASSWORD=password
MINIO_HOST=minio
MINIO_IMG=minio/minio:RELEASE.2024-05-28T17-19-04Z
MINIO_ROOT_USER=admin
MINIO_ROOT_PASS=password
MINIO_BUCKET=workflow
GRAFANA_IMG=grafana/grafana:11.0.0
SEGMENT_SIZE=21
RISC0_KECCAK_PO2=17


@@ -1,13 +1,16 @@
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
use crate::{compile::compile_risc0_program, error::Risc0Error};
use crate::{
compile::{Risc0Program, compile_risc0_program},
error::Risc0Error,
};
use borsh::{BorshDeserialize, BorshSerialize};
use risc0_zkvm::{
ExecutorEnv, ExecutorEnvBuilder, InnerReceipt, Journal, Receipt, ReceiptClaim, SuccinctReceipt,
default_executor,
DEFAULT_MAX_PO2, DefaultProver, ExecutorEnv, ExecutorEnvBuilder, ExternalProver, InnerReceipt,
Journal, ProverOpts, Receipt, ReceiptClaim, SuccinctReceipt, default_executor, default_prover,
};
use serde::{Deserialize, Serialize};
use std::{path::Path, time::Instant};
use std::{env, ops::RangeInclusive, path::Path, rc::Rc, time::Instant};
use zkvm_interface::{
Compiler, Input, InputItem, ProgramExecutionReport, ProgramProvingReport, ProverResourceType,
zkVM, zkVMError,
@@ -17,9 +20,32 @@ include!(concat!(env!("OUT_DIR"), "/name_and_sdk_version.rs"));
mod compile;
mod error;
mod prove;
pub use compile::Risc0Program;
/// Default logarithmic segment size from [`DEFAULT_SEGMENT_LIMIT_PO2`].
///
/// [`DEFAULT_SEGMENT_LIMIT_PO2`]: https://github.com/risc0/risc0/blob/v3.0.1/risc0/circuit/rv32im/src/execute/mod.rs#L39.
const DEFAULT_SEGMENT_PO2: usize = 20;
/// Supported range of logarithmic segment size.
///
/// The minimum is bounded by [`MIN_LIFT_PO2`], so that segments can still be lifted.
///
/// The maximum is [`DEFAULT_MAX_PO2`]; the real maximum is `24`, but using it
/// would require setting the `control_ids` manually in the `ProverOpts`.
///
/// [`MIN_LIFT_PO2`]: https://github.com/risc0/risc0/blob/v3.0.1/risc0/circuit/recursion/src/control_id.rs#L19
/// [`DEFAULT_MAX_PO2`]: https://github.com/risc0/risc0/blob/v3.0.1/risc0/zkvm/src/receipt.rs#L884
const SEGMENT_PO2_RANGE: RangeInclusive<usize> = 14..=DEFAULT_MAX_PO2;
/// Default logarithmic keccak size from [`KECCAK_DEFAULT_PO2`].
///
/// [`KECCAK_DEFAULT_PO2`]: https://github.com/risc0/risc0/blob/v3.0.1/risc0/circuit/keccak/src/lib.rs#L27.
const DEFAULT_KECCAK_PO2: usize = 17;
/// Supported range of logarithmic keccak size from [`KECCAK_PO2_RANGE`].
///
/// [`KECCAK_PO2_RANGE`]: https://github.com/risc0/risc0/blob/v3.0.1/risc0/circuit/keccak/src/lib.rs#L29.
const KECCAK_PO2_RANGE: RangeInclusive<usize> = 14..=18;
#[allow(non_camel_case_types)]
pub struct RV32_IM_RISC0_ZKVM_ELF;
@@ -64,6 +90,8 @@ impl From<Risc0ProofWithPublicValues> for Receipt {
pub struct EreRisc0 {
program: <RV32_IM_RISC0_ZKVM_ELF as Compiler>::Program,
resource: ProverResourceType,
segment_po2: usize,
keccak_po2: usize,
}
impl EreRisc0 {
@@ -71,24 +99,33 @@ impl EreRisc0 {
program: <RV32_IM_RISC0_ZKVM_ELF as Compiler>::Program,
resource: ProverResourceType,
) -> Result<Self, zkVMError> {
match resource {
ProverResourceType::Cpu => {}
ProverResourceType::Gpu => {
// If not using Metal, we use the bento stack which requires
// Docker to spin up the proving services that use Cuda.
if !cfg!(feature = "metal") {
prove::bento::build_bento_images().map_err(zkVMError::other)?;
prove::bento::docker_compose_bento_up().map_err(zkVMError::other)?;
}
}
ProverResourceType::Network(_) => {
panic!(
"Network proving not yet implemented for RISC Zero. Use CPU or GPU resource type."
);
}
if matches!(resource, ProverResourceType::Network(_)) {
panic!(
"Network proving not yet implemented for RISC Zero. Use CPU or GPU resource type."
);
}
Ok(Self { program, resource })
let [segment_po2, keccak_po2] = [
("RISC0_SEGMENT_PO2", DEFAULT_SEGMENT_PO2, SEGMENT_PO2_RANGE),
("RISC0_KECCAK_PO2", DEFAULT_KECCAK_PO2, KECCAK_PO2_RANGE),
]
.map(|(key, default, range)| {
let val = env::var(key)
.ok()
.and_then(|po2| po2.parse::<usize>().ok())
.unwrap_or(default);
if !range.contains(&val) {
panic!("Unsupported po2 value {val} of {key}, expected in range {range:?}")
}
val
});
Ok(Self {
program,
resource,
segment_po2,
keccak_po2,
})
}
}
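
The constructor above resolves both po2 knobs from the environment before anything is proven. Below is a minimal, self-contained sketch of that lookup-and-validate pattern; `po2_from_env` is a hypothetical helper name, and the literal upper bound `21` is an assumed stand-in for risc0's `DEFAULT_MAX_PO2`.

use std::{env, ops::RangeInclusive};

// Sketch only: mirrors the po2 selection added in this commit. Read the env
// var, fall back to the default, and reject values outside the supported
// range. The bound 21 is an assumed stand-in for `DEFAULT_MAX_PO2`.
const DEFAULT_SEGMENT_PO2: usize = 20;
const SEGMENT_PO2_RANGE: RangeInclusive<usize> = 14..=21;

fn po2_from_env(key: &str, default: usize, range: RangeInclusive<usize>) -> usize {
    let val = env::var(key)
        .ok()
        .and_then(|po2| po2.parse::<usize>().ok())
        .unwrap_or(default);
    if !range.contains(&val) {
        panic!("Unsupported po2 value {val} of {key}, expected in range {range:?}");
    }
    val
}

fn main() {
    let segment_po2 = po2_from_env("RISC0_SEGMENT_PO2", DEFAULT_SEGMENT_PO2, SEGMENT_PO2_RANGE);
    println!("segment_po2 = {segment_po2}");
}

Both `RISC0_SEGMENT_PO2` and `RISC0_KECCAK_PO2` go through the same check, which is why the dockerized runner above now inherits those two env vars.
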
@@ -111,17 +148,20 @@ impl zkVM for EreRisc0 {
}
fn prove(&self, inputs: &Input) -> Result<(Vec<u8>, ProgramProvingReport), zkVMError> {
let (receipt, proving_time) = match self.resource {
ProverResourceType::Cpu => prove::default::prove(&self.program, inputs)?,
let prover = match self.resource {
ProverResourceType::Cpu => Rc::new(ExternalProver::new("ipc", "r0vm")),
ProverResourceType::Gpu => {
if cfg!(feature = "metal") {
// The default prover selects the prover depending on the
// feature flag, if non enabled, it executes the pre-installed
// binary to generate the proof; if `metal` is enabled, it
// uses the local built binary.
prove::default::prove(&self.program, inputs)?
// When `metal` is enabled, we use the `LocalProver` to do the
// proving, but since it's not public we use `default_prover`
// to instantiate it.
default_prover()
} else {
prove::bento::prove(&self.program, inputs)?
// The `DefaultProver` uses `r0vm-cuda` to spawn multiple
// workers for multi-GPU proving. It uses the env
// `RISC0_DEFAULT_PROVER_NUM_GPUS` to determine how many
// GPUs are available.
Rc::new(DefaultProver::new("r0vm-cuda").map_err(zkVMError::other)?)
}
}
ProverResourceType::Network(_) => {
@@ -131,9 +171,23 @@ impl zkVM for EreRisc0 {
}
};
let proof =
borsh::to_vec(&Risc0ProofWithPublicValues::from(receipt)).map_err(zkVMError::other)?;
let mut env = ExecutorEnv::builder();
serialize_inputs(&mut env, inputs).map_err(zkVMError::other)?;
let env = env
.segment_limit_po2(self.segment_po2 as _)
.keccak_max_po2(self.keccak_po2 as _)
.map_err(zkVMError::other)?
.build()
.map_err(zkVMError::other)?;
let now = std::time::Instant::now();
let prove_info = prover
.prove_with_opts(env, &self.program.elf, &ProverOpts::succinct())
.map_err(zkVMError::other)?;
let proving_time = now.elapsed();
let proof = borsh::to_vec(&Risc0ProofWithPublicValues::from(prove_info.receipt))
.map_err(zkVMError::other)?;
Ok((proof, ProgramProvingReport::new(proving_time)))
}
@@ -156,16 +210,6 @@ impl zkVM for EreRisc0 {
}
}
impl Drop for EreRisc0 {
fn drop(&mut self) {
if matches!(self.resource, ProverResourceType::Gpu) && !cfg!(feature = "metal") {
prove::bento::docker_compose_bento_down().unwrap_or_else(|err| {
tracing::error!("Failed to shutdown bento docker compose sevices\n{err}")
})
}
}
}
fn serialize_inputs(env: &mut ExecutorEnvBuilder, inputs: &Input) -> Result<(), anyhow::Error> {
for input in inputs.iter() {
match input {
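
With the Bento stack removed, prover selection in `prove` reduces to the mapping sketched below. This is a condensed illustration, assuming the risc0 3.x items named in the diff (`ExternalProver`, `DefaultProver`, `Prover`, `default_prover`) keep the signatures shown there and that `DefaultProver::new`'s error converts into `anyhow::Error`; the resource enum is simplified for the sketch.

use std::rc::Rc;

use risc0_zkvm::{DefaultProver, ExternalProver, Prover, default_prover};

// Simplified stand-in for `ProverResourceType`; the `metal` flag models the
// crate's `metal` feature gate.
enum Resource {
    Cpu,
    Gpu { metal: bool },
}

fn select_prover(resource: &Resource) -> anyhow::Result<Rc<dyn Prover>> {
    Ok(match resource {
        // CPU: talk to the pre-installed `r0vm` binary over IPC.
        Resource::Cpu => Rc::new(ExternalProver::new("ipc", "r0vm")),
        // Metal GPU: the local prover, obtained via `default_prover`.
        Resource::Gpu { metal: true } => default_prover(),
        // CUDA GPU: the actor-system prover spawns `r0vm-cuda` workers, one
        // per GPU reported through `RISC0_DEFAULT_PROVER_NUM_GPUS`.
        Resource::Gpu { metal: false } => Rc::new(DefaultProver::new("r0vm-cuda")?),
    })
}

Because the env builder and the prover now run in-process, the `Drop` impl that shut down the Bento compose services (deleted above) has nothing left to clean up.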


@@ -1,2 +0,0 @@
pub mod bento;
pub mod default;


@@ -1,265 +0,0 @@
use crate::{Risc0Program, SDK_VERSION};
use bonsai_sdk::blocking::Client;
use risc0_zkvm::{Receipt, VERSION, serde::to_vec};
use std::{
env,
ffi::OsStr,
io::{self, Write},
process::{Command, Output, Stdio},
time::Duration,
};
use tempfile::tempdir;
use zkvm_interface::{Input, InputItem, zkVMError};
const URL: &str = "http://localhost:8081";
const KEY: &str = "";
// Copied and modified from https://github.com/risc0/risc0/blob/main/bento/crates/bento-client/src/bento_cli.rs.
pub fn prove(program: &Risc0Program, inputs: &Input) -> Result<(Receipt, Duration), zkVMError> {
let client = Client::from_parts(URL.to_string(), KEY.to_string(), VERSION)
.map_err(|err| zkVMError::Other(err.into()))?;
// Serialize `inputs` in the same way `ExecutorEnv` does.
let mut input_bytes = Vec::new();
for input in inputs.iter() {
match input {
InputItem::Object(obj) => {
input_bytes.extend(bytemuck::cast_slice(&to_vec(obj).unwrap()));
}
InputItem::SerializedObject(bytes) => {
input_bytes.extend(bytes);
}
InputItem::Bytes(bytes) => {
input_bytes.extend((bytes.len() as u32).to_le_bytes());
input_bytes.extend(bytes);
}
}
}
client
.upload_img(&program.image_id.to_string(), program.elf.clone())
.map_err(|err| zkVMError::Other(err.into()))?;
let input_id = client
.upload_input(input_bytes)
.map_err(|err| zkVMError::Other(err.into()))?;
let now = std::time::Instant::now();
let session = client
.create_session(program.image_id.to_string(), input_id, vec![], false)
.map_err(|err| zkVMError::Other(err.into()))?;
loop {
// By interval check if the proving is still running or already succeeded.
// FIXME: The response `SessionStatusRes` has a field `elapsed_time` but
// currently always set to `None` because it's not implemented.
// So we setting 200ms to not make the proving time measurement too
// inaccurate, but if `RUST_LOG=debug` is set, we should be able to do
// `docker log {container} --since` and grep the following pattern:
// ```
// {timestamp} DEBUG workflow::tasks::resolve: Resolve complete for job_id: {session.uuid}.
// ```
const INTERVAL_MILLIS: u64 = 200;
let res = session
.status(&client)
.map_err(|err| zkVMError::Other(err.into()))?;
match res.status.as_ref() {
"RUNNING" => {
std::thread::sleep(Duration::from_millis(INTERVAL_MILLIS));
continue;
}
"SUCCEEDED" => {
let receipt_bytes = client
.receipt_download(&session)
.map_err(|err| zkVMError::Other(err.into()))?;
break Ok((bincode::deserialize(&receipt_bytes).unwrap(), now.elapsed()));
}
"FAILED" => {
return Err(zkVMError::Other(
format!(
"Job failed with error message: {}",
res.error_msg.unwrap_or_default()
)
.into(),
));
}
_ => {
return Err(zkVMError::Other(
format!("Unexpected proving status: {}", res.status).into(),
));
}
}
}
}
fn cmd_output_checked(cmd: &mut Command) -> Result<Output, io::Error> {
let output = cmd.output()?;
if !output.status.success() {
return Err(io::Error::other(format!("Failed to run `{cmd:?}`")));
}
Ok(output)
}
fn cmd_exec_checked(cmd: &mut Command) -> Result<(), io::Error> {
let status = cmd.status()?;
if !status.success() {
return Err(io::Error::other(format!("Failed to run `{cmd:?}`")));
}
Ok(())
}
fn docker_image_exists(image: impl AsRef<OsStr>) -> Result<bool, io::Error> {
let output = cmd_output_checked(
Command::new("docker")
.args(["images", "--quiet"])
.arg(image),
)?;
// If image exists, image id will be printed hence stdout will be non-empty.
Ok(!output.stdout.is_empty())
}
fn docker_image_tag(src: impl AsRef<OsStr>, dst: impl AsRef<OsStr>) -> Result<(), io::Error> {
cmd_exec_checked(
Command::new("docker")
.args(["image", "tag"])
.arg(src)
.arg(dst),
)
}
pub fn build_bento_images() -> Result<(), io::Error> {
let agent_tag = format!("ere-risc0/agent:{SDK_VERSION}");
let rest_api_tag = format!("ere-risc0/rest_api:{SDK_VERSION}");
if docker_image_exists(&agent_tag)? && docker_image_exists(&rest_api_tag)? {
return Ok(());
}
let tempdir = tempdir()?;
cmd_exec_checked(
Command::new("git")
.args([
"clone",
"--depth",
"1",
"--branch",
&format!("v{SDK_VERSION}"),
"https://github.com/risc0/risc0.git",
])
.arg(tempdir.path()),
)?;
cmd_exec_checked(
Command::new("docker")
.arg("compose")
.arg("--file")
.arg(tempdir.path().join("compose.yml"))
.arg("--env-file")
.arg(tempdir.path().join("bento/dockerfiles/sample.env"))
.arg("build"),
)?;
docker_image_tag("agent", agent_tag)?;
docker_image_tag("bento-rest_api", rest_api_tag)?;
Ok(())
}
const BENTO_ENV: &str = include_str!("../../sample.env");
const BENTO_COMPOSE: &str = include_str!("../../compose.yml");
const BENTO_GPU_PROVER_AGENT_TEMPLATE: &str = include_str!("../../gpu_prover_agent.yml");
fn bento_compose_file() -> String {
let cuda_visible_devices = env::var("CUDA_VISIBLE_DEVICES").unwrap_or_else(|_| "".to_string());
let cuda_visible_devices = cuda_visible_devices
.split(",")
.flat_map(|device_id| device_id.parse::<usize>().ok())
.collect::<Vec<_>>();
let mut compose: serde_yaml::Mapping = serde_yaml::from_str(BENTO_COMPOSE).unwrap();
let gpu_prover_agent: serde_yaml::Mapping =
serde_yaml::from_str(BENTO_GPU_PROVER_AGENT_TEMPLATE).unwrap();
if cuda_visible_devices.is_empty() {
// If env `CUDA_VISIBLE_DEVICES` is not specified, only spin up single prover with all GPUs.
let mut gpu_prover_agent = gpu_prover_agent.clone();
let device = gpu_prover_agent["deploy"]["resources"]["reservations"]["devices"][0]
.as_mapping_mut()
.unwrap();
device.remove("device_ids").unwrap();
device.insert("count".into(), "all".into());
compose["services"]
.as_mapping_mut()
.unwrap()
.insert("gpu_prove_agent0".into(), gpu_prover_agent.into());
} else {
// Otherwise spin up provers with each having 1 GPU.
for idx in cuda_visible_devices {
let mut gpu_prover_agent = gpu_prover_agent.clone();
let device = gpu_prover_agent["deploy"]["resources"]["reservations"]["devices"][0]
.as_mapping_mut()
.unwrap();
device["device_ids"][0] = idx.to_string().into();
compose["services"].as_mapping_mut().unwrap().insert(
format!("gpu_prove_agent{idx}").into(),
gpu_prover_agent.into(),
);
}
}
serde_yaml::to_string(&compose).unwrap()
}
/// Execute `docker compose ... {command}` with `bento_compose_file()`.
fn docker_compose_bento<I, S>(command: I) -> Result<(), io::Error>
where
I: Clone + IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
let envs = BENTO_ENV
.lines()
.flat_map(|line| {
line.split_once("=")
.map(|(key, val)| (key, env::var(key).unwrap_or_else(|_| val.to_string())))
})
.collect::<Vec<_>>();
let mut child = Command::new("docker")
.envs(envs)
.env("RISC0_VERSION", SDK_VERSION)
.args(["compose", "--file", "-"]) // Compose file from stdin.
.args(command.clone())
.stdin(Stdio::piped())
.spawn()?;
let mut stdin = child.stdin.take().unwrap();
stdin.write_all(bento_compose_file().as_bytes())?;
drop(stdin);
let output = child.wait_with_output()?;
if !output.status.success() {
return Err(io::Error::other(format!(
"Failed to spawn `docker compose --file - ${}`",
command
.into_iter()
.map(|s| s.as_ref().to_string_lossy().to_string())
.collect::<Vec<_>>()
.join(" ")
)));
}
Ok(())
}
/// Execute `docker compose ... up --detach` with `bento_compose_file()`.
pub fn docker_compose_bento_up() -> Result<(), io::Error> {
docker_compose_bento(["up", "--detach", "--wait"])
}
/// Execute `docker compose ... down --volumes` with `bento_compose_file()`.
pub fn docker_compose_bento_down() -> Result<(), io::Error> {
docker_compose_bento(["down", "--volumes"])
}


@@ -1,19 +0,0 @@
use crate::{Risc0Program, serialize_inputs};
use risc0_zkvm::{ExecutorEnv, ProverOpts, Receipt, default_prover};
use std::time::Duration;
use zkvm_interface::{Input, zkVMError};
pub fn prove(program: &Risc0Program, inputs: &Input) -> Result<(Receipt, Duration), zkVMError> {
let prover = default_prover();
let mut env = ExecutorEnv::builder();
serialize_inputs(&mut env, inputs).map_err(|err| zkVMError::Other(err.into()))?;
let env = env.build().map_err(|err| zkVMError::Other(err.into()))?;
let now = std::time::Instant::now();
let prove_info = prover
.prove_with_opts(env, &program.elf, &ProverOpts::succinct())
.map_err(|err| zkVMError::Other(err.into()))?;
let proving_time = now.elapsed();
Ok((prove_info.receipt, proving_time))
}


@@ -2,22 +2,47 @@ ARG BASE_IMAGE_TAG=ere-base:latest
FROM ${BASE_IMAGE_TAG}
# Whether the current environment is CI or not.
ARG CI
# Install CUDA Toolkit 12.9 and clang
# If argument `CI` is set, we skip the installation.
RUN [ -n "$CI" ] || \
(wget https://developer.download.nvidia.com/compute/cuda/repos/$(. /etc/os-release && echo "${ID}${VERSION_ID}" | tr -d '.')/$(uname -i)/cuda-keyring_1.1-1_all.deb && \
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm cuda-keyring_1.1-1_all.deb && \
apt update && \
apt install -y cuda-toolkit-12-9 clang && \
apt-get clean && rm -rf /var/lib/apt/lists/*)
# Install protoc with the same version as https://github.com/risc0/risc0/blob/v3.0.1/bento/dockerfiles/agent.dockerfile#L24-L26.
# If argument `CI` is set, we skip the installation.
RUN [ -n "$CI" ] || \
(curl -o protoc.zip -L https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip && \
unzip protoc.zip -d /usr/local)
# Build for L40S, RTX 40 series, RTX 50 series
ARG NVCC_APPEND_FLAGS="\
--generate-code arch=compute_89,code=sm_89 \
--generate-code arch=compute_120,code=sm_120"
ENV NVCC_APPEND_FLAGS=${NVCC_APPEND_FLAGS}
# Add nvcc to path
ENV PATH=/usr/local/cuda/bin:$PATH
# Copy and run the Risc0 SDK installer script
COPY scripts/sdk_installers/install_risc0_sdk.sh /tmp/install_risc0_sdk.sh
COPY --chmod=+x scripts/sdk_installers/install_risc0_sdk.sh /tmp/install_risc0_sdk.sh
# The install_risc0_sdk.sh script will respect these ENV variables.
ENV RISC0_CLI_VERSION="2.3.1" \
ENV RISC0_VERSION="3.0.1" \
RISC0_CPP_VERSION="2024.1.5" \
RISC0_R0VM_VERSION="2.3.1" \
RISC0_RUST_VERSION="1.88.0"
# Run the script without version arguments to install latest
RUN chmod +x /tmp/install_risc0_sdk.sh && /tmp/install_risc0_sdk.sh
# Run the Risc0 SDK installation script
# It will use the RISC0_VERSION, RISC0_CPP_VERSION and RISC0_RUST_VERSION defined above.
RUN /tmp/install_risc0_sdk.sh && rm /tmp/install_risc0_sdk.sh
# Verify Risc0 installation (script also does this, but good for Dockerfile sanity)
RUN echo "Verifying Risc0 installation in Dockerfile (post-script)..." && cargo risczero --version
# Get docker for `cargo risczero build`
RUN curl -fsSL https://get.docker.com | sh
CMD ["/bin/bash"]


@@ -63,16 +63,14 @@ fi
# Now that rzup is confirmed to be in PATH for this script, install the Risc0 toolchain
echo "Running 'rzup install' to install/update Risc0 toolchain..."
if [[ -n "$RISC0_CLI_VERSION" && -n "$RISC0_CPP_VERSION" && -n "$RISC0_R0VM_VERSION" && -n "$RISC0_RUST_VERSION" ]]; then
# If versions are specified, install each component by their version
rzup install cargo-risczero $RISC0_CLI_VERSION
rzup install cpp $RISC0_CPP_VERSION
rzup install r0vm $RISC0_R0VM_VERSION
rzup install rust $RISC0_RUST_VERSION
else
# Otherwise just install all components with by their latest version
rzup install
fi
RISC0_VERSION="${RISC0_VERSION:-3.0.1}"
RISC0_CPP_VERSION="${RISC0_CPP_VERSION:-2024.1.5}"
RISC0_RUST_VERSION="${RISC0_RUST_VERSION:-1.88.0}"
rzup install cargo-risczero "$RISC0_VERSION"
rzup install cpp "$RISC0_CPP_VERSION"
rzup install r0vm "$RISC0_VERSION"
rzup install rust "$RISC0_RUST_VERSION"
# Verify Risc0 installation
echo "Verifying Risc0 installation..."
@@ -82,4 +80,23 @@ cargo risczero --version || (echo "Error: cargo risczero command failed!" >&2 &&
echo "Risc0 Toolchain installation (latest release) successful."
echo "The rzup installer might have updated your shell configuration files (e.g., ~/.bashrc, ~/.zshrc)."
echo "To ensure rzup and Risc0 tools are available in your current shell session if this was a new installation,"
echo "you may need to source your shell profile (e.g., 'source ~/.bashrc') or open a new terminal."
# Build r0vm from source with CUDA features enabled (skip if in CI)
if [ -z "$CI" ]; then
CARGO_HOME="${CARGO_HOME:-$HOME/.cargo}"
RISC0_BIN_DIR="$HOME/.risc0/extensions/v$RISC0_VERSION-cargo-risczero-x86_64-unknown-linux-gnu"
TEMP_DIR=$(mktemp -d)
git clone https://github.com/risc0/risc0.git --depth 1 --branch "v$RISC0_VERSION" "$TEMP_DIR/risc0"
cd "$TEMP_DIR/risc0"
# Build with feature `cuda`
RUSTFLAGS="-C target-cpu=native" cargo build --release --features cuda --bin r0vm
# Copy the binary to the same directory as `cargo-risczero` and `r0vm`
cp ./target/release/r0vm "$RISC0_BIN_DIR/r0vm-cuda"
# Create a symbolic link in $CARGO_HOME/bin, alongside `cargo-risczero` and `r0vm`
ln -s "$RISC0_BIN_DIR/r0vm-cuda" "$CARGO_HOME/bin/r0vm-cuda"
rm -rf "$TEMP_DIR"
fi


@@ -6,5 +6,5 @@ edition = "2021"
[workspace]
[dependencies]
risc0-zkvm = { version = "2.3.1", default-features = false, features = ['std', 'unstable'] }
risc0-zkvm = { version = "3.0.1", default-features = false, features = ["std", "unstable"] }
test-utils = { path = "../../../crates/test-utils" }