Add openvm crates (#2714)

Based on commit 1dbe4db
- Split into two crates, lib and cli
- upgrade stwo, marked one stwo test `should_panic` @ShuangWu121 
- various clippy and fmt fixes linked to the rust version update
- bring all rust versions to 2025-05-14. CI still installs other
versions for openvm which uses them internally. The stable rust version
we test on is bumped to 1.85
- remove `examples` and related tests, which test the powdr crate on the
previous version of powdr (since it uses another nightly). Happy to
discuss this if it's important @leonardoalt
This commit is contained in:
Thibaut Schaeffer
2025-05-16 16:30:09 +02:00
committed by GitHub
parent 357894b539
commit 2d6708bbc5
96 changed files with 4285 additions and 337 deletions

View File

@@ -24,16 +24,16 @@ jobs:
##### The block below is shared between cache build and PR build workflows #####
- name: Install EStarkPolygon prover dependencies
run: sudo apt-get update && sudo apt-get install -y nlohmann-json3-dev libpqxx-dev nasm libgrpc++-dev uuid-dev
- name: Install Rust toolchain nightly-2024-12-17 (with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.81 (stable)
run: rustup toolchain install 1.81-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.85 (stable)
run: rustup toolchain install 1.85-x86_64-unknown-linux-gnu
- name: Set cargo to perform shallow clones
run: echo "CARGO_NET_GIT_FETCH_WITH_CLI=true" >> $GITHUB_ENV
- name: Format
run: cargo fmt --all --check --verbose
- name: Cargo check with Rust 1.81 (default features)
run: cargo +1.81-x86_64-unknown-linux-gnu check --all-targets
- name: Cargo check with Rust 1.85 (default features)
run: cargo +1.85-x86_64-unknown-linux-gnu check --all-targets
- name: Lint no default features
run: cargo clippy --all --all-targets --no-default-features --profile pr-tests --verbose -- -D warnings
- name: Lint all features

View File

@@ -59,14 +59,14 @@ jobs:
~/.cargo/git
target
key: ${{ runner.os }}-cargo-release-${{ hashFiles('**/Cargo.toml') }}
- name: Install Rust toolchain 1.81
run: rustup toolchain install 1.81-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.85
run: rustup toolchain install 1.85-x86_64-unknown-linux-gnu
- name: Install nightly
run: rustup toolchain install nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install std source
run: rustup component add rust-src --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install riscv target
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install test dependencies
run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld
- name: Install EStarkPolygon prover dependencies

View File

@@ -47,16 +47,16 @@ jobs:
##### The block below is shared between cache build and PR build workflows #####
- name: Install EStarkPolygon prover dependencies
run: sudo apt-get update && sudo apt-get install -y nlohmann-json3-dev libpqxx-dev nasm libgrpc++-dev uuid-dev
- name: Install Rust toolchain nightly-2024-12-17 (with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.81 (stable)
run: rustup toolchain install 1.81-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.85 (stable)
run: rustup toolchain install 1.85-x86_64-unknown-linux-gnu
- name: Set cargo to perform shallow clones
run: echo "CARGO_NET_GIT_FETCH_WITH_CLI=true" >> $GITHUB_ENV
- name: Format
run: cargo fmt --all --check --verbose
- name: Cargo check with Rust 1.81 (default features)
run: cargo +1.81-x86_64-unknown-linux-gnu check --all-targets
- name: Cargo check with Rust 1.85 (default features)
run: cargo +1.85-x86_64-unknown-linux-gnu check --all-targets
- name: Lint no default features
run: cargo clippy --all --all-targets --no-default-features --profile pr-tests --verbose -- -D warnings
- name: Lint all features
@@ -95,16 +95,18 @@ jobs:
uses: actions/download-artifact@v4
with:
name: tests_archive
- name: Install Rust toolchain nightly-2024-12-17 (with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install nightly-2024-08-01
run: rustup toolchain install nightly-2024-08-01-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install std source
run: rustup component add rust-src --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install riscv target
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install test dependencies
run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld
- name: Install Rust deps
run: rustup install nightly-2025-05-14 && rustup component add rust-src --toolchain nightly-2025-05-14
- name: Install Rust deps
run: rustup install nightly-2025-02-14 && rustup component add rust-src --toolchain nightly-2025-02-14
- uses: taiki-e/install-action@nextest
- name: Run default tests
run: cargo nextest run --archive-file tests.tar.zst --workspace-remap . --verbose --partition count:"${{ matrix.test }}"/2 --no-tests=warn
@@ -119,14 +121,12 @@ jobs:
with:
submodules: recursive
# Do not use the cache because we are compiling a different version of powdr.
- name: Install Rust toolchain nightly-2024-12-17 (with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install nightly
run: rustup toolchain install nightly-2024-08-01-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install std source
run: rustup component add rust-src --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install riscv target
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install test dependencies
run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld
- name: Run examples that cargo accepts as examples
@@ -135,8 +135,6 @@ jobs:
run: cd powdr-test/examples/serialized-inputs && cargo run -r
- name: Run crate example fibonacci with the given branch
run: cd powdr-test/examples/fibonacci && cargo run -r
- name: Run crate example fibonacci with the latest powdr release
run: cd examples/fibonacci && cargo run -r
test_estark_polygon:
needs: build
@@ -149,14 +147,12 @@ jobs:
uses: actions/download-artifact@v4
with:
name: tests_archive
- name: Install Rust toolchain nightly-2024-12-17(with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install nightly-2024-08-01
run: rustup toolchain install nightly-2024-08-01-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14(with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install std source
run: rustup component add rust-src --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install riscv target
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install EStarkPolygon prover system dependency
run: sudo apt-get update && sudo apt-get install -y nlohmann-json3-dev
- uses: taiki-e/install-action@nextest
@@ -199,16 +195,18 @@ jobs:
uses: actions/download-artifact@v4
with:
name: tests_archive
- name: Install Rust toolchain nightly-2024-12-17 (with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install test dependencies
run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld
- name: Install nightly-2024-08-01
run: rustup toolchain install nightly-2024-08-01-x86_64-unknown-linux-gnu
- name: Install Rust deps
run: rustup install nightly-2025-05-14 && rustup component add rust-src --toolchain nightly-2025-05-14
- name: Install Rust deps
run: rustup install nightly-2025-02-14 && rustup component add rust-src --toolchain nightly-2025-02-14
- name: Install std source
run: rustup component add rust-src --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install riscv target
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- uses: taiki-e/install-action@nextest
- name: Run slow tests
run: |
@@ -240,16 +238,14 @@ jobs:
target/
Cargo.lock
key: ${{ runner.os }}-cargo-pr-tests
- name: Install Rust toolchain nightly-2024-12-17 (with clippy and rustfmt)
run: rustup toolchain install nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2024-12-17-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.81
run: rustup toolchain install 1.81-x86_64-unknown-linux-gnu
- name: Install nightly
run: rustup toolchain install nightly-2024-08-01-x86_64-unknown-linux-gnu
- name: Install Rust toolchain nightly-2025-05-14 (with clippy and rustfmt)
run: rustup toolchain install nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add clippy --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu && rustup component add rustfmt --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install Rust toolchain 1.85
run: rustup toolchain install 1.85-x86_64-unknown-linux-gnu
- name: Install std source
run: rustup component add rust-src --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install riscv target
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2024-08-01-x86_64-unknown-linux-gnu
run: rustup target add riscv32imac-unknown-none-elf --toolchain nightly-2025-05-14-x86_64-unknown-linux-gnu
- name: Install test dependencies
run: sudo apt-get update && sudo apt-get install -y binutils-riscv64-unknown-elf lld
- name: Install EStarkPolygon prover dependencies

View File

@@ -33,6 +33,8 @@ members = [
"backend-utils",
"executor-utils",
"autoprecompiles",
"openvm",
"cli-openvm",
]
exclude = ["riscv-runtime"]
@@ -72,6 +74,8 @@ powdr-riscv = { path = "./riscv", version = "0.1.4" }
powdr-riscv-executor = { path = "./riscv-executor", version = "0.1.4" }
powdr-syscalls = { path = "./syscalls", version = "0.1.4" }
powdr-schemas = { path = "./schemas", version = "0.1.4" }
powdr-autoprecompiles = { path = "./autoprecompiles", version = "0.1.4" }
powdr-openvm = { path = "./openvm", version = "0.1.4" }
[profile.pr-tests]
inherits = "dev"

View File

@@ -138,9 +138,11 @@ impl Display for SubmachineDeclaration {
"{} {}{}",
self.ty,
self.name,
(!self.args.is_empty())
.then(|| format!("({})", self.args.iter().format(", ")))
.unwrap_or_default()
if !self.args.is_empty() {
format!("({})", self.args.iter().format(", "))
} else {
Default::default()
}
)
}
}

View File

@@ -5,7 +5,7 @@ use std::{
btree_map::{IntoIter, Iter, IterMut},
BTreeMap, BTreeSet, HashSet,
},
iter::{once, repeat},
iter::{once, repeat_n},
ops::ControlFlow,
};
@@ -164,13 +164,13 @@ impl FunctionStatements {
pub fn iter_batches(&self) -> impl Iterator<Item = BatchRef> {
match &self.batches {
Some(batches) => Either::Left(batches.iter()),
None => Either::Right(
repeat(&BatchMetadata {
None => Either::Right(repeat_n(
&BatchMetadata {
size: 1,
reason: None,
})
.take(self.inner.len()),
),
},
self.inner.len(),
)),
}
.scan(0, move |start, batch| {
let res = BatchRef {
@@ -191,13 +191,13 @@ impl FunctionStatements {
match self.batches {
Some(batches) => Either::Left(batches.into_iter()),
None => Either::Right(
repeat(BatchMetadata {
None => Either::Right(repeat_n(
BatchMetadata {
size: 1,
reason: None,
})
.take(len),
),
},
len,
)),
}
.map(move |batch| Batch {
reason: batch.reason,

View File

@@ -1,6 +1,6 @@
use std::{
fmt::{Display, Formatter},
iter::{empty, once, repeat},
iter::{empty, once, repeat_n},
str::FromStr,
};
@@ -52,6 +52,7 @@ pub struct SymbolDefinition {
pub value: SymbolValue,
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, PartialEq, Eq, From)]
pub enum SymbolValue {
/// A machine definition
@@ -304,17 +305,19 @@ impl AbsoluteSymbolPath {
let common_prefix_len = self.common_prefix(base).parts.len();
// Start with max(0, base.parts.len() - common_root.parts.len())
// repetitions of "super".
let parts = repeat(Part::Super)
.take(base.parts.len().saturating_sub(common_prefix_len))
// append the parts of self after the common root.
.chain(
self.parts
.iter()
.skip(common_prefix_len)
.cloned()
.map(Part::Named),
)
.collect();
let parts = repeat_n(
Part::Super,
base.parts.len().saturating_sub(common_prefix_len),
)
// append the parts of self after the common root.
.chain(
self.parts
.iter()
.skip(common_prefix_len)
.cloned()
.map(Part::Named),
)
.collect();
SymbolPath { parts }
}

View File

@@ -25,7 +25,7 @@ pub fn substitute_algebraic<T: Clone>(
expr: &mut AlgebraicExpression<T>,
sub: &BTreeMap<Column, AlgebraicExpression<T>>,
) {
expr.visit_expressions_mut(
let _ = expr.visit_expressions_mut(
&mut |expr| {
if let AlgebraicExpression::Reference(r) = expr {
if let Some(sub_expr) = sub.get(&Column::from(&*r)) {
@@ -40,7 +40,7 @@ pub fn substitute_algebraic<T: Clone>(
pub fn make_refs_zero<T: FieldElement>(expr: &mut AlgebraicExpression<T>) {
let zero = AlgebraicExpression::Number(T::zero());
expr.visit_expressions_mut(
let _ = expr.visit_expressions_mut(
&mut |expr| {
if let AlgebraicExpression::Reference(AlgebraicReference { .. }) = expr {
*expr = zero.clone();
@@ -63,7 +63,7 @@ pub fn has_ref<T: Clone + std::cmp::PartialEq>(
r: &AlgebraicExpression<T>,
) -> bool {
let mut seen = false;
expr.visit_expressions(
let _ = expr.visit_expressions(
&mut |expr| {
if expr == r {
seen = true;
@@ -85,7 +85,7 @@ pub fn substitute_algebraic_algebraic<T: Clone + std::cmp::Ord>(
expr: &mut AlgebraicExpression<T>,
sub: &BTreeMap<AlgebraicExpression<T>, AlgebraicExpression<T>>,
) {
expr.visit_expressions_mut(
let _ = expr.visit_expressions_mut(
&mut |expr| {
if let Some(sub_expr) = sub.get(expr) {
*expr = sub_expr.clone();
@@ -101,7 +101,7 @@ pub fn collect_cols_algebraic<T: Clone + Ord>(
expr: &AlgebraicExpression<T>,
) -> BTreeSet<AlgebraicExpression<T>> {
let mut cols: BTreeSet<AlgebraicExpression<T>> = Default::default();
expr.visit_expressions(
let _ = expr.visit_expressions(
&mut |expr| {
if let AlgebraicExpression::Reference(AlgebraicReference {
poly_id:
@@ -190,7 +190,7 @@ pub fn reassign_ids<T: FieldElement>(
.collect();
// Update the machine with the new global column names
machine.visit_expressions_mut(
let _ = machine.visit_expressions_mut(
&mut |e| {
if let AlgebraicExpression::Reference(r) = e {
let new_col = subs.get(&Column::from(&*r)).unwrap().clone();
@@ -221,7 +221,7 @@ pub fn reassign_ids<T: FieldElement>(
}
pub fn substitute(expr: &mut Expression, sub: &BTreeMap<String, Expression>) {
expr.visit_expressions_mut(
let _ = expr.visit_expressions_mut(
&mut |expr| {
match expr {
Expression::Reference(_, ref mut r) => {

View File

@@ -63,7 +63,7 @@ p3-commit = { git = "https://github.com/plonky3/Plonky3.git", rev = "2192432ddf2
], optional = true }
p3-matrix = { git = "https://github.com/plonky3/Plonky3.git", rev = "2192432ddf28e7359dd2c577447886463e6124f0", optional = true }
p3-uni-stark = { git = "https://github.com/plonky3/Plonky3.git", rev = "2192432ddf28e7359dd2c577447886463e6124f0", optional = true }
stwo-prover = { git = "https://github.com/starkware-libs/stwo.git", optional = true, rev = "81d1fe349b490089f65723ad49ef72b9d09495ba", features = [
stwo-prover = { git = "https://github.com/starkware-libs/stwo.git", optional = true, rev = "c26d2ab", features = [
"parallel",
] }

View File

@@ -159,16 +159,18 @@ impl<'a, F: FieldElement> ConnectionConstraintChecker<'a, F> {
.flatten()
.collect::<Vec<_>>();
(!errors.is_empty())
.then(|| {
if !errors.is_empty() {
{
let error = Errors {
connection_count: self.connections.len(),
errors,
};
log::error!("{error}");
Err(error)
})
.unwrap_or(Ok(()))
}
} else {
Ok(())
}
}
/// Checks a single connection.

View File

@@ -13,7 +13,7 @@ use tracing::{span, Level};
extern crate alloc;
use alloc::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
use std::iter::repeat;
use std::iter::repeat_n;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{fmt, io};
@@ -571,7 +571,8 @@ where
let constant_col_log_sizes = iter
.clone()
.flat_map(|(pil, machine_log_size)| {
repeat(machine_log_size).take(
repeat_n(
machine_log_size,
pil.constant_count()
+ get_constant_with_next_list(pil).len()
+ pil.publics_count(),
@@ -582,14 +583,14 @@ where
let stage0_witness_col_log_sizes = iter
.clone()
.flat_map(|(pil, machine_log_size)| {
repeat(machine_log_size).take(pil.stage_commitment_count(0))
repeat_n(machine_log_size, pil.stage_commitment_count(0))
})
.collect_vec();
let stage1_witness_col_log_sizes = iter
.clone()
.flat_map(|(pil, machine_log_size)| {
repeat(machine_log_size).take(pil.stage_commitment_count(1))
repeat_n(machine_log_size, pil.stage_commitment_count(1))
})
.collect_vec();

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-12-17"
channel = "nightly-2025-05-14"

29
cli-openvm/Cargo.toml Normal file
View File

@@ -0,0 +1,29 @@
[package]
name = "cli-openvm"
version.workspace = true
edition.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[[bin]]
name = "powdr_openvm"
path = "src/main.rs"
bench = false # See https://github.com/bheisler/criterion.rs/issues/458
[dependencies]
openvm-sdk = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-stark-sdk = { git = "https://github.com/powdr-labs/stark-backend.git", rev = "dacb25f", default-features = false }
openvm-stark-backend = { git = "https://github.com/powdr-labs/stark-backend.git", rev = "dacb25f", default-features = false }
powdr-openvm.workspace = true
eyre = "0.6.12"
tracing = "0.1.40"
clap = { version = "^4.3", features = ["derive"] }
serde_cbor = "0.11.2"
[lints]
workspace = true

30
cli-openvm/README.md Normal file
View File

@@ -0,0 +1,30 @@
# cli-openvm
Use command `execute` to run the program only, and `prove` to prove.
The `prove` command has a `mock` option to only check the constraints.
Examples:
```sh
# Run the original program
RUSTFLAGS='-C target-cpu=native' cargo run -r execute guest
# Prove the original program
RUSTFLAGS='-C target-cpu=native' cargo run -r prove guest
# Check the constraints and witness of the original program
RUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --mock
# Run the program with autoprecompiles
RUSTFLAGS='-C target-cpu=native' cargo run -r execute guest --skip 37 --autoprecompiles 1
# Run the program with optimized autoprecompiles
RUSTFLAGS='-C target-cpu=native' cargo run -r execute guest --skip 37 --autoprecompiles 1 --optimize
# Prove the program with autoprecompiles
RUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1
# Prove the program with optimized autoprecompiles
RUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1 --optimize
# Check the constraints and witness of the program with autoprecompiles
RUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1 --mock
# Check the constraints and witness of the program with optimized autoprecompiles
RUSTFLAGS='-C target-cpu=native' cargo run -r prove guest --skip 37 --autoprecompiles 1 --mock --optimize
```
It is recommended to use at least `RUST_LOG=info` for information, and `RUST_LOG=debug` for benchmarks.

170
cli-openvm/src/main.rs Normal file
View File

@@ -0,0 +1,170 @@
use eyre::Result;
use openvm_sdk::StdIn;
use openvm_stark_backend::p3_field::PrimeField32;
use openvm_stark_sdk::config::setup_tracing_with_log_level;
use powdr_openvm::CompiledProgram;
use clap::{CommandFactory, Parser, Subcommand};
use std::io;
use tracing::Level;
#[derive(Parser)]
#[command(name = "powdr-openvm", author, version, about, long_about = None)]
// NOTE: plain `//` comments on purpose — a `///` doc comment here would be
// picked up by clap and override the `about` text shown in `--help`.
struct Cli {
    // The subcommand to run; when absent, `main` prints the help text.
    #[command(subcommand)]
    command: Option<Commands>,
}
#[derive(Subcommand)]
// NOTE: plain `//` comments are used deliberately; `///` doc comments would be
// picked up by clap as per-subcommand help text and change the CLI's output.
enum Commands {
    // Compile the guest (optionally with autoprecompiles) and write the
    // resulting program to `<guest>_compiled.cbor` (see `run_command`).
    Compile {
        guest: String,
        #[arg(long, default_value_t = 0)]
        autoprecompiles: usize,
        #[arg(long, default_value_t = 0)]
        skip: usize,
        #[arg(long, default_value_t = false)]
        pgo: bool,
        #[arg(long)]
        input: Option<u32>,
    },
    // Compile and execute the guest without producing a proof.
    Execute {
        guest: String,
        #[arg(long, default_value_t = 0)]
        autoprecompiles: usize,
        #[arg(long, default_value_t = 0)]
        skip: usize,
        #[arg(long, default_value_t = false)]
        pgo: bool,
        #[arg(long)]
        input: Option<u32>,
    },
    // Run PGO on the plain openvm program only (no powdr extension);
    // exists to exercise the `powdr_openvm::pgo` API directly.
    Pgo {
        guest: String,
        #[arg(long)]
        input: Option<u32>,
    },
    // Compile and prove the guest; `--mock` only checks the constraints,
    // `--recursion` enables recursive proving.
    Prove {
        guest: String,
        #[arg(long, default_value_t = 0)]
        autoprecompiles: usize,
        #[arg(long, default_value_t = 0)]
        skip: usize,
        #[arg(long)]
        #[arg(default_value_t = false)]
        mock: bool,
        #[arg(long)]
        #[arg(default_value_t = false)]
        recursion: bool,
        #[arg(long, default_value_t = false)]
        pgo: bool,
        #[arg(long)]
        input: Option<u32>,
    },
}
/// Entry point: parse the CLI arguments, set up tracing at WARN level, and
/// dispatch to the selected subcommand. When no subcommand is given, the
/// help text is printed instead.
fn main() -> Result<(), io::Error> {
    let cli = Cli::parse();
    setup_tracing_with_log_level(Level::WARN);
    match cli.command {
        Some(cmd) => {
            run_command(cmd);
            Ok(())
        }
        None => Cli::command().print_help(),
    }
}
/// Dispatch a parsed subcommand to the corresponding `powdr_openvm` API call.
///
/// All guest-taking subcommands first compile the guest (optionally guided by
/// a PGO profile when `--pgo` is set), then perform their action. Errors are
/// surfaced via `unwrap`, aborting the process with the underlying message.
fn run_command(command: Commands) {
    match command {
        Commands::Compile {
            guest,
            autoprecompiles,
            skip,
            pgo,
            input,
        } => {
            // With --pgo, execute the guest first to collect a pc -> count
            // profile that guides autoprecompile selection.
            let pc_idx_count =
                pgo.then(|| powdr_openvm::get_pc_idx_count(&guest, stdin_from(input)));
            let program =
                powdr_openvm::compile_guest(&guest, autoprecompiles, skip, pc_idx_count).unwrap();
            // Persist the compiled program as CBOR, named after the guest.
            write_program_to_file(program, &format!("{guest}_compiled.cbor")).unwrap();
        }
        Commands::Execute {
            guest,
            autoprecompiles,
            skip,
            pgo,
            input,
        } => {
            let pc_idx_count =
                pgo.then(|| powdr_openvm::get_pc_idx_count(&guest, stdin_from(input)));
            let program =
                powdr_openvm::compile_guest(&guest, autoprecompiles, skip, pc_idx_count).unwrap();
            // Run the compiled program without producing a proof.
            powdr_openvm::execute(program, stdin_from(input)).unwrap();
        }
        Commands::Prove {
            guest,
            autoprecompiles,
            skip,
            mock,
            recursion,
            pgo,
            input,
        } => {
            let pc_idx_count =
                pgo.then(|| powdr_openvm::get_pc_idx_count(&guest, stdin_from(input)));
            let program =
                powdr_openvm::compile_guest(&guest, autoprecompiles, skip, pc_idx_count).unwrap();
            // `mock` only checks constraints/witness; `recursion` enables
            // recursive proving.
            powdr_openvm::prove(&program, mock, recursion, stdin_from(input)).unwrap();
        }
        // Run Pgo on the original openvm program (without powdr extension)
        // By default, Compile, Execute, and Prove all run Pgo first
        // This command is only used to test the powdr_openvm::pgo API
        Commands::Pgo { guest, input } => {
            let program = powdr_openvm::compile_openvm(&guest).unwrap();
            powdr_openvm::pgo(program, stdin_from(input)).unwrap();
        }
    }
}
fn write_program_to_file<F: PrimeField32>(
program: CompiledProgram<F>,
filename: &str,
) -> Result<(), io::Error> {
use std::fs::File;
let mut file = File::create(filename)?;
serde_cbor::to_writer(&mut file, &program).map_err(io::Error::other)?;
Ok(())
}
/// Build the guest's standard input: empty by default, containing the single
/// `u32` when `input` is provided.
fn stdin_from(input: Option<u32>) -> StdIn {
    let mut stdin = StdIn::default();
    match input {
        Some(value) => stdin.write(&value),
        None => {}
    }
    stdin
}

View File

@@ -64,7 +64,7 @@ struct Cli {
#[derive(Subcommand)]
enum Commands {
/// Compile rust code to Powdr assembly.
/// Needs `rustup component add rust-src --toolchain nightly-2024-08-01`.
/// Needs `rustup component add rust-src --toolchain nightly-2025-05-14`.
Compile {
/// input rust code, points to a crate dir or its Cargo.toml file
file: String,
@@ -409,8 +409,8 @@ fn execute_fast<F: FieldElement>(
);
let duration = start.elapsed();
log::info!("Executor done in: {:?}", duration);
log::info!("Execution trace length: {}", trace_len);
log::info!("Executor done in: {duration:?}");
log::info!("Execution trace length: {trace_len}");
Ok(())
}
@@ -458,7 +458,7 @@ fn execute<F: FieldElement>(
);
let duration = start.elapsed();
log::info!("Executor done in: {:?}", duration);
log::info!("Executor done in: {duration:?}");
log::info!("Execution trace length: {}", execution.trace_len);
let witness_cols: Vec<_> = pil

View File

@@ -1,18 +0,0 @@
[package]
name = "fibonacci"
version = "0.1.0"
edition = "2021"
[features]
default = []
simd = ["powdr/plonky3-simd"]
[dependencies]
powdr = { git = "https://github.com/powdr-labs/powdr", tag = "v0.1.3", features = [
"plonky3",
] }
env_logger = "0.10.2"
log = "0.4.27"
[workspace]

View File

@@ -1,9 +0,0 @@
# powdrVM Fibonacci example
This example computes Fibonacci in the guest and demonstrates
the case when the execution trace is split into multiple chunks.
If you're curious about what happens internally, run:
```console
RUST_LOG=info cargo run -r
```

View File

@@ -1,11 +0,0 @@
[package]
name = "powdr-guest"
version = "0.1.0"
edition = "2021"
[dependencies]
powdr-riscv-runtime = { git = "https://github.com/powdr-labs/powdr", tag = "v0.1.3", features = [
"std",
] }
[workspace]

View File

@@ -1,20 +0,0 @@
use powdr_riscv_runtime;
use powdr_riscv_runtime::commit;
use powdr_riscv_runtime::io::{read, write};
/// Return the n-th Fibonacci number, with fib(0) = 0 and fib(1) = 1.
fn fib(n: u32) -> u32 {
    // Iterative form: (prev, curr) holds (fib(i), fib(i + 1)).
    let (mut prev, mut curr) = (0u32, 1u32);
    for _ in 0..n {
        let next = prev + curr;
        prev = curr;
        curr = next;
    }
    prev
}
/// Guest entry point: read `n` from the host, compute fib(n), write the
/// result back and commit it as a public value.
fn main() {
    // Read input from stdin.
    let n: u32 = read(0);
    let r = fib(n);
    // Write result to stdout.
    write(1, r);
    // Commit the result as a public.
    commit::commit(r);
}

View File

@@ -1,2 +0,0 @@
[toolchain]
channel = "nightly-2024-12-17"

View File

@@ -1,19 +0,0 @@
use powdr::Session;
fn main() {
env_logger::init();
let n = 22;
let mut session = Session::builder()
.guest_path("./guest")
.out_path("powdr-target")
.chunk_size_log2(18)
.build()
// Compute Fibonacci of 21 in the guest.
.write(0, &n);
// Fast dry run to test execution.
session.run();
session.prove();
}

View File

@@ -1,19 +0,0 @@
[package]
name = "keccak"
version = "0.1.0"
edition = "2021"
[features]
default = []
simd = ["powdr/plonky3-simd"]
[dependencies]
powdr = { git = "https://github.com/powdr-labs/powdr", tag = "v0.1.3", features = [
"plonky3",
] }
hex = "0.4"
env_logger = "0.10.2"
log = "0.4.27"
[workspace]

View File

@@ -1,16 +0,0 @@
# powdrVM tinykeccak example
This example demonstrates how to use the `powdrVM` to run a simple
keccak hash function using the `tinykeccak` crate.
We want to prove that we know the pre-image of a hash.
The host takes in a 32-byte challenge hash `C` as a hex string,
and the pre-image string `P`, such that `Keccak(P) = C`.
The guest receives the same data from the host and verifies that the claim is true.
For a valid hash example, you can run:
```console
cargo run -r "56c73097b157bbe90a5b273a6bb93eb5e89ab1ac0364a73a4e107187c63f7256" "my powdr hash"
```

View File

@@ -1,12 +0,0 @@
[package]
name = "powdr-guest"
version = "0.1.0"
edition = "2021"
[dependencies]
powdr-riscv-runtime = { git = "https://github.com/powdr-labs/powdr", tag = "v0.1.3", features = [
"std",
] }
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
[workspace]

View File

@@ -1,16 +0,0 @@
use powdr_riscv_runtime;
use powdr_riscv_runtime::io::read;
use tiny_keccak::{Hasher, Keccak};
/// Guest entry point: verify that the provided pre-image hashes (Keccak-256)
/// to the provided 32-byte challenge, panicking otherwise.
fn main() {
    // The host writes the expected hash on channel 1 and the pre-image on
    // channel 2 (see the host's `.write(1, ...)` / `.write(2, ...)` calls).
    let challenge: [u8; 32] = read(1);
    let preimg: Vec<u8> = read(2);
    let mut output = [0u8; 32];
    let mut hasher = Keccak::v256();
    hasher.update(&preimg);
    hasher.finalize(&mut output);
    // The claim only holds if the pre-image hashes to the challenge.
    assert_eq!(output, challenge);
}

View File

@@ -1,2 +0,0 @@
[toolchain]
channel = "nightly-2024-12-17"

View File

@@ -1,45 +0,0 @@
use powdr::Session;
use hex::FromHex;
use std::convert::TryInto;
use std::env;
/// Host entry point: parse the challenge hash and pre-image from the command
/// line, hand both to the guest, and dry-run the session.
fn main() {
    env_logger::init();
    let args: Vec<String> = env::args().collect();
    if args.len() < 3 {
        panic!("Please provide two arguments: <challenge> <preimg>");
    }
    // args[1]: the 64-hex-char challenge hash; args[2]: the claimed pre-image.
    let challenge = parse_hash(&args[1]);
    let preimg = args[2].clone().into_bytes();
    let mut session = Session::builder()
        .guest_path("./guest")
        .out_path("powdr-target")
        .chunk_size_log2(18)
        .build()
        // The guest reads the challenge on channel 1, the pre-image on channel 2.
        .write(1, &challenge)
        .write(2, &preimg);
    // Fast dry run to test execution.
    session.run();
    // Uncomment to compute the proof.
    //session.prove();
}
/// Parse a 64-character hex string into a 32-byte array, panicking with a
/// descriptive message on invalid hex or wrong length.
fn parse_hash(s: &str) -> [u8; 32] {
    let bytes = match Vec::from_hex(s) {
        Ok(bytes) => bytes,
        Err(e) => panic!("Error parsing the pre-image as hex: {e}"),
    };
    if bytes.len() != 32 {
        panic!("The pre-image must be exactly 64 hex characters (32 bytes).");
    }
    bytes.try_into().expect("length checked to be 32")
}

View File

@@ -14,7 +14,7 @@ pub struct PaddedBitVec {
impl PaddedBitVec {
pub fn new(bits_per_row: usize) -> Self {
let words_per_row = (bits_per_row + 31) / 32;
let words_per_row = bits_per_row.div_ceil(32);
Self {
data: Vec::new(),
bits_per_row,

View File

@@ -85,6 +85,7 @@ impl<'a, T: FieldElement> Processor<'a, T> {
self
}
#[allow(clippy::result_large_err)]
pub fn generate_code<FixedEval: FixedEvaluator<T>>(
self,
can_process: impl CanProcessCall<T>,
@@ -125,6 +126,7 @@ impl<'a, T: FieldElement> Processor<'a, T> {
self.generate_code_for_branch(can_process, witgen, identity_queue, branch_depth)
}
#[allow(clippy::result_large_err)]
fn generate_code_for_branch<FixedEval: FixedEvaluator<T>>(
&self,
can_process: impl CanProcessCall<T>,

View File

@@ -168,6 +168,7 @@ impl<T> LookupCell<'_, T> {
/// All known implementations of [Machine].
/// We cannot use Box<dyn Machine<..>> because the trait is not object-safe,
/// since it has generic methods.
#[allow(clippy::large_enum_variant)]
pub enum KnownMachine<'a, T: FieldElement> {
SecondStageMachine(SecondStageMachine<'a, T>),
SortedWitnesses(SortedWitnesses<'a, T>),

24
openvm/.gitignore vendored Normal file
View File

@@ -0,0 +1,24 @@
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
#Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
# RustRover
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
guest/openvm
guest-keccak/target

88
openvm/Cargo.toml Normal file
View File

@@ -0,0 +1,88 @@
[package]
name = "powdr-openvm"
version.workspace = true
edition.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[dependencies]
openvm = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-build = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-rv32im-circuit = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-rv32im-transpiler = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-rv32im-guest = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049", default-features = false }
openvm-transpiler = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-circuit = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-circuit-derive = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-circuit-primitives = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-circuit-primitives-derive = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-instructions = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-instructions-derive = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-sdk = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-ecc-transpiler = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-keccak256-circuit = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-keccak256-transpiler = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-sha256-circuit = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-sha256-transpiler = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-algebra-transpiler = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049" }
openvm-native-circuit = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049", default-features = false }
openvm-native-recursion = { git = "https://github.com/powdr-labs/openvm.git", rev = "2868049", default-features = false }
openvm-stark-sdk = { git = "https://github.com/powdr-labs/stark-backend.git", rev = "dacb25f", default-features = false }
openvm-stark-backend = { git = "https://github.com/powdr-labs/stark-backend.git", rev = "dacb25f", default-features = false }
powdr.workspace = true
powdr-autoprecompiles.workspace = true
powdr-constraint-solver.workspace = true
eyre = "0.6.12"
serde = "1.0.217"
derive_more = { version = "2.0.1", default-features = false, features = [
"from",
] }
strum = "0.26.3"
itertools = "0.14.0"
tracing = "0.1.40"
tracing-subscriber = { version = "0.3.17", features = ["std", "env-filter"] }
clap = { version = "^4.3", features = ["derive"] }
log = "0.4.17"
serde_cbor = "0.11.2"
[dev-dependencies]
test-log = { version = "0.2.17", features = ["trace"] }
[lib]
bench = false # See https://github.com/bheisler/criterion.rs/issues/458
# Uncomment both patches below for local stark-backend and openvm.
# The local openvm also needs to have stark-backend patched so all types match.
#[patch."https://github.com/powdr-labs/stark-backend.git"]
#openvm-stark-sdk = { path = "../../stark-backend/crates/stark-sdk", default-features = false }
#openvm-stark-backend = { path = "../../stark-backend/crates/stark-backend", default-features = false }
#
#[patch."https://github.com/powdr-labs/openvm.git"]
#openvm-transpiler = { path = "../../openvm/crates/toolchain/transpiler" }
#openvm = { path = "../../openvm/crates/toolchain/openvm" }
#openvm-build = { path = "../../openvm/crates/toolchain/build" }
#openvm-rv32im-circuit = { path = "../../openvm/extensions/rv32im/circuit/" }
#openvm-rv32im-transpiler = { path = "../../openvm/extensions/rv32im/transpiler" }
#openvm-rv32im-guest = { path = "../../openvm/extensions/rv32im/guest" }
#openvm-circuit = { path = "../../openvm/crates/vm" }
#openvm-circuit-derive = { path = "../../openvm/crates/vm/derive" }
#openvm-circuit-primitives = { path = "../../openvm/crates/circuits/primitives" }
#openvm-circuit-primitives-derive = { path = "../../openvm/crates/circuits/primitives/derive" }
#openvm-instructions = { path = "../../openvm/crates/toolchain/instructions" }
#openvm-instructions-derive = { path = "../../openvm/crates/toolchain/instructions/derive" }
#openvm-sdk = { path = "../../openvm/crates/sdk" }
#openvm-ecc-transpiler = { path = "../../openvm/extensions/ecc/transpiler" }
#openvm-keccak256-circuit = { path = "../../openvm/extensions/keccak256/circuit" }
#openvm-keccak256-transpiler = { path = "../../openvm/extensions/keccak256/transpiler" }
#openvm-sha256-circuit = { path = "../../openvm/extensions/sha256/circuit" }
#openvm-sha256-transpiler = { path = "../../openvm/extensions/sha256/transpiler" }
#openvm-algebra-transpiler = { path = "../../openvm/extensions/algebra/transpiler" }
#openvm-native-circuit = { path = "../../openvm/extensions/native/circuit" }
#openvm-native-recursion = { path = "../../openvm/extensions/native/recursion" }

View File

@@ -0,0 +1,18 @@
[package]
name = "keccak-example"
version = "0.0.0"
edition = "2021"
[workspace]
members = []
[dependencies]
openvm = { git = "https://github.com/powdr-labs/openvm.git", branch = "powdr" }
openvm-platform = { git = "https://github.com/powdr-labs/openvm.git", branch = "powdr" }
openvm-keccak256-guest = { git = "https://github.com/powdr-labs/openvm.git", branch = "powdr" }
[features]
default = []
std = [
"openvm/std",
]

View File

@@ -0,0 +1,4 @@
[app_vm_config.rv32i]
[app_vm_config.rv32m]
[app_vm_config.io]
[app_vm_config.keccak]

View File

@@ -0,0 +1,22 @@
#![cfg_attr(not(feature = "std"), no_main)]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use core::hint::black_box;
use openvm::io::reveal;
use openvm_keccak256_guest::set_keccak256;
openvm::entry!(main);
const N: usize = 5_000;
/// Keccak-256 throughput benchmark: chain-hash a 32-byte buffer N times
/// (each round hashes the previous round's digest) and reveal the first byte.
pub fn main() {
    let mut output = [0u8; 32];
    for _ in 0..N {
        // black_box keeps the compiler from folding the whole chain away.
        set_keccak256(&black_box(output), &mut output);
    }
    // Reveal one byte of the final digest so the work cannot be elided.
    reveal(output[0] as u32, 0);
}

View File

@@ -0,0 +1,13 @@
[workspace]
[package]
name = "guest-keccak-stdin"
version = "0.0.0"
edition = "2021"
[dependencies]
openvm = { git = "https://github.com/powdr-labs/openvm.git", branch = "powdr" }
tiny-keccak = { version = "2.0.2", features = ["keccak"] }
[profile.release-with-debug]
inherits = "release"
debug = true

View File

@@ -0,0 +1,21 @@
#![cfg_attr(target_os = "zkvm", no_main)]
#![cfg_attr(target_os = "zkvm", no_std)]
openvm::entry!(main);
use core::hint::black_box;
use openvm::io::{reveal, read};
use tiny_keccak::{Hasher, Keccak};
/// Iterated Keccak-256 benchmark: read a round count `n` from the host,
/// hash a 32-byte buffer `n` times in a chain and reveal the first byte.
pub fn main() {
    let rounds: u32 = read();
    // black_box prevents the initial buffer from being constant-folded.
    let mut digest = black_box([0u8; 32]);
    (0..rounds).for_each(|_| {
        let mut keccak = Keccak::v256();
        keccak.update(&digest);
        keccak.finalize(&mut digest);
    });
    reveal(digest[0] as u32, 0);
}

View File

@@ -0,0 +1,12 @@
[workspace]
[package]
name = "powdr-openvm-matmul-test"
version = "0.0.0"
edition = "2021"
[dependencies]
openvm = { git = "https://github.com/powdr-labs/openvm.git", branch = "powdr" }
[profile.release-with-debug]
inherits = "release"
debug = true

View File

@@ -0,0 +1,275 @@
#![cfg_attr(target_os = "zkvm", no_main)]
#![cfg_attr(target_os = "zkvm", no_std)]
openvm::entry!(main);
use openvm::io::reveal;
/// Guest entry point; all work happens in `loop_test_matrix`.
pub fn main() {
    loop_test_matrix();
}
const SIZE: usize = 6;
type Mat = [[i32; SIZE]; SIZE];
/// Fully unrolled 6x6 i32 matrix multiplication: `c = a * b`.
///
/// Every one of the 36 output entries is written as an explicit sum of six
/// products. NOTE(review): the unrolling (together with `#[inline(never)]`)
/// appears intentional, presumably to produce one long straight-line basic
/// block in the compiled guest — confirm before collapsing this into loops.
#[inline(never)]
fn matrix_multiply_unrolled(a: &Mat, b: &Mat, c: &mut Mat) {
    // Row 0.
    c[0][0] = a[0][0] * b[0][0]
        + a[0][1] * b[1][0]
        + a[0][2] * b[2][0]
        + a[0][3] * b[3][0]
        + a[0][4] * b[4][0]
        + a[0][5] * b[5][0];
    c[0][1] = a[0][0] * b[0][1]
        + a[0][1] * b[1][1]
        + a[0][2] * b[2][1]
        + a[0][3] * b[3][1]
        + a[0][4] * b[4][1]
        + a[0][5] * b[5][1];
    c[0][2] = a[0][0] * b[0][2]
        + a[0][1] * b[1][2]
        + a[0][2] * b[2][2]
        + a[0][3] * b[3][2]
        + a[0][4] * b[4][2]
        + a[0][5] * b[5][2];
    c[0][3] = a[0][0] * b[0][3]
        + a[0][1] * b[1][3]
        + a[0][2] * b[2][3]
        + a[0][3] * b[3][3]
        + a[0][4] * b[4][3]
        + a[0][5] * b[5][3];
    c[0][4] = a[0][0] * b[0][4]
        + a[0][1] * b[1][4]
        + a[0][2] * b[2][4]
        + a[0][3] * b[3][4]
        + a[0][4] * b[4][4]
        + a[0][5] * b[5][4];
    c[0][5] = a[0][0] * b[0][5]
        + a[0][1] * b[1][5]
        + a[0][2] * b[2][5]
        + a[0][3] * b[3][5]
        + a[0][4] * b[4][5]
        + a[0][5] * b[5][5];
    // Row 1.
    c[1][0] = a[1][0] * b[0][0]
        + a[1][1] * b[1][0]
        + a[1][2] * b[2][0]
        + a[1][3] * b[3][0]
        + a[1][4] * b[4][0]
        + a[1][5] * b[5][0];
    c[1][1] = a[1][0] * b[0][1]
        + a[1][1] * b[1][1]
        + a[1][2] * b[2][1]
        + a[1][3] * b[3][1]
        + a[1][4] * b[4][1]
        + a[1][5] * b[5][1];
    c[1][2] = a[1][0] * b[0][2]
        + a[1][1] * b[1][2]
        + a[1][2] * b[2][2]
        + a[1][3] * b[3][2]
        + a[1][4] * b[4][2]
        + a[1][5] * b[5][2];
    c[1][3] = a[1][0] * b[0][3]
        + a[1][1] * b[1][3]
        + a[1][2] * b[2][3]
        + a[1][3] * b[3][3]
        + a[1][4] * b[4][3]
        + a[1][5] * b[5][3];
    c[1][4] = a[1][0] * b[0][4]
        + a[1][1] * b[1][4]
        + a[1][2] * b[2][4]
        + a[1][3] * b[3][4]
        + a[1][4] * b[4][4]
        + a[1][5] * b[5][4];
    c[1][5] = a[1][0] * b[0][5]
        + a[1][1] * b[1][5]
        + a[1][2] * b[2][5]
        + a[1][3] * b[3][5]
        + a[1][4] * b[4][5]
        + a[1][5] * b[5][5];
    // Row 2.
    c[2][0] = a[2][0] * b[0][0]
        + a[2][1] * b[1][0]
        + a[2][2] * b[2][0]
        + a[2][3] * b[3][0]
        + a[2][4] * b[4][0]
        + a[2][5] * b[5][0];
    c[2][1] = a[2][0] * b[0][1]
        + a[2][1] * b[1][1]
        + a[2][2] * b[2][1]
        + a[2][3] * b[3][1]
        + a[2][4] * b[4][1]
        + a[2][5] * b[5][1];
    c[2][2] = a[2][0] * b[0][2]
        + a[2][1] * b[1][2]
        + a[2][2] * b[2][2]
        + a[2][3] * b[3][2]
        + a[2][4] * b[4][2]
        + a[2][5] * b[5][2];
    c[2][3] = a[2][0] * b[0][3]
        + a[2][1] * b[1][3]
        + a[2][2] * b[2][3]
        + a[2][3] * b[3][3]
        + a[2][4] * b[4][3]
        + a[2][5] * b[5][3];
    c[2][4] = a[2][0] * b[0][4]
        + a[2][1] * b[1][4]
        + a[2][2] * b[2][4]
        + a[2][3] * b[3][4]
        + a[2][4] * b[4][4]
        + a[2][5] * b[5][4];
    c[2][5] = a[2][0] * b[0][5]
        + a[2][1] * b[1][5]
        + a[2][2] * b[2][5]
        + a[2][3] * b[3][5]
        + a[2][4] * b[4][5]
        + a[2][5] * b[5][5];
    // Row 3.
    c[3][0] = a[3][0] * b[0][0]
        + a[3][1] * b[1][0]
        + a[3][2] * b[2][0]
        + a[3][3] * b[3][0]
        + a[3][4] * b[4][0]
        + a[3][5] * b[5][0];
    c[3][1] = a[3][0] * b[0][1]
        + a[3][1] * b[1][1]
        + a[3][2] * b[2][1]
        + a[3][3] * b[3][1]
        + a[3][4] * b[4][1]
        + a[3][5] * b[5][1];
    c[3][2] = a[3][0] * b[0][2]
        + a[3][1] * b[1][2]
        + a[3][2] * b[2][2]
        + a[3][3] * b[3][2]
        + a[3][4] * b[4][2]
        + a[3][5] * b[5][2];
    c[3][3] = a[3][0] * b[0][3]
        + a[3][1] * b[1][3]
        + a[3][2] * b[2][3]
        + a[3][3] * b[3][3]
        + a[3][4] * b[4][3]
        + a[3][5] * b[5][3];
    c[3][4] = a[3][0] * b[0][4]
        + a[3][1] * b[1][4]
        + a[3][2] * b[2][4]
        + a[3][3] * b[3][4]
        + a[3][4] * b[4][4]
        + a[3][5] * b[5][4];
    c[3][5] = a[3][0] * b[0][5]
        + a[3][1] * b[1][5]
        + a[3][2] * b[2][5]
        + a[3][3] * b[3][5]
        + a[3][4] * b[4][5]
        + a[3][5] * b[5][5];
    // Row 4.
    c[4][0] = a[4][0] * b[0][0]
        + a[4][1] * b[1][0]
        + a[4][2] * b[2][0]
        + a[4][3] * b[3][0]
        + a[4][4] * b[4][0]
        + a[4][5] * b[5][0];
    c[4][1] = a[4][0] * b[0][1]
        + a[4][1] * b[1][1]
        + a[4][2] * b[2][1]
        + a[4][3] * b[3][1]
        + a[4][4] * b[4][1]
        + a[4][5] * b[5][1];
    c[4][2] = a[4][0] * b[0][2]
        + a[4][1] * b[1][2]
        + a[4][2] * b[2][2]
        + a[4][3] * b[3][2]
        + a[4][4] * b[4][2]
        + a[4][5] * b[5][2];
    c[4][3] = a[4][0] * b[0][3]
        + a[4][1] * b[1][3]
        + a[4][2] * b[2][3]
        + a[4][3] * b[3][3]
        + a[4][4] * b[4][3]
        + a[4][5] * b[5][3];
    c[4][4] = a[4][0] * b[0][4]
        + a[4][1] * b[1][4]
        + a[4][2] * b[2][4]
        + a[4][3] * b[3][4]
        + a[4][4] * b[4][4]
        + a[4][5] * b[5][4];
    c[4][5] = a[4][0] * b[0][5]
        + a[4][1] * b[1][5]
        + a[4][2] * b[2][5]
        + a[4][3] * b[3][5]
        + a[4][4] * b[4][5]
        + a[4][5] * b[5][5];
    // Row 5.
    c[5][0] = a[5][0] * b[0][0]
        + a[5][1] * b[1][0]
        + a[5][2] * b[2][0]
        + a[5][3] * b[3][0]
        + a[5][4] * b[4][0]
        + a[5][5] * b[5][0];
    c[5][1] = a[5][0] * b[0][1]
        + a[5][1] * b[1][1]
        + a[5][2] * b[2][1]
        + a[5][3] * b[3][1]
        + a[5][4] * b[4][1]
        + a[5][5] * b[5][1];
    c[5][2] = a[5][0] * b[0][2]
        + a[5][1] * b[1][2]
        + a[5][2] * b[2][2]
        + a[5][3] * b[3][2]
        + a[5][4] * b[4][2]
        + a[5][5] * b[5][2];
    c[5][3] = a[5][0] * b[0][3]
        + a[5][1] * b[1][3]
        + a[5][2] * b[2][3]
        + a[5][3] * b[3][3]
        + a[5][4] * b[4][3]
        + a[5][5] * b[5][3];
    c[5][4] = a[5][0] * b[0][4]
        + a[5][1] * b[1][4]
        + a[5][2] * b[2][4]
        + a[5][3] * b[3][4]
        + a[5][4] * b[4][4]
        + a[5][5] * b[5][4];
    c[5][5] = a[5][0] * b[0][5]
        + a[5][1] * b[1][5]
        + a[5][2] * b[2][5]
        + a[5][3] * b[3][5]
        + a[5][4] * b[4][5]
        + a[5][5] * b[5][5];
}
/// Multiply two fixed 6x6 matrices, sanity-check one entry, and reveal two
/// entries as public outputs.
#[inline(never)]
fn test_matrix() {
    let a: Mat = [
        [1, 2, 3, 4, 5, 6],
        [7, 8, 9, 10, 11, 12],
        [13, 14, 15, 16, 17, 18],
        [19, 20, 21, 22, 23, 24],
        [25, 26, 27, 28, 29, 30],
        [31, 32, 33, 34, 35, 36],
    ];
    let b: Mat = [
        [37, 38, 39, 40, 41, 42],
        [43, 44, 45, 46, 47, 48],
        [49, 50, 51, 52, 53, 54],
        [55, 56, 57, 58, 59, 60],
        [61, 62, 63, 64, 65, 66],
        [67, 68, 69, 70, 71, 72],
    ];
    let mut c: Mat = [[0; SIZE]; SIZE];
    matrix_multiply_unrolled(&a, &b, &mut c);
    // c[0][0] = 1*37 + 2*43 + 3*49 + 4*55 + 5*61 + 6*67 = 1197.
    assert_eq!(c[0][0], 1197);
    reveal(c[0][0] as u32, 0);
    reveal(c[5][5] as u32, 1);
}
/// Run the matrix test 8000 times to generate a long execution trace.
#[inline(never)]
fn loop_test_matrix() {
    for _ in 0..8000 {
        test_matrix();
    }
}

12
openvm/guest/Cargo.toml Normal file
View File

@@ -0,0 +1,12 @@
[workspace]
[package]
name = "powdr-openvm-guest-stdin-test"
version = "0.0.0"
edition = "2021"
[dependencies]
openvm = { git = "https://github.com/powdr-labs/openvm.git", branch = "powdr" }
[profile.release-with-debug]
inherits = "release"
debug = true

22
openvm/guest/src/main.rs Normal file
View File

@@ -0,0 +1,22 @@
#![cfg_attr(target_os = "zkvm", no_main)]
#![cfg_attr(target_os = "zkvm", no_std)]
openvm::entry!(main);
use openvm::io::{reveal, read};
/// Fibonacci guest: read `n` from the host, iteratively compute F(n - 1)
/// (with F(0) = 0, F(1) = 1) and reveal it.
pub fn main() {
    let n: u32 = read();
    let mut a: u32 = 0;
    let mut b: u32 = 1;
    // Runs n - 1 iterations; for n <= 1 the loop body never executes.
    for _ in 1..n {
        let sum = a + b;
        a = b;
        b = sum;
    }
    // NOTE(review): `a` is still 0 for n <= 1, so trivially small inputs
    // panic — presumably a deliberate guard for this test guest; confirm.
    if a == 0 {
        panic!();
    }
    reveal(a, 0);
}

88
openvm/src/air_builder.rs Normal file
View File

@@ -0,0 +1,88 @@
use std::sync::Arc;
use openvm_stark_backend::air_builders::symbolic::get_symbolic_builder;
use openvm_stark_backend::air_builders::symbolic::SymbolicRapBuilder;
use openvm_stark_backend::config::Com;
use openvm_stark_backend::config::StarkGenericConfig;
use openvm_stark_backend::config::Val;
use openvm_stark_backend::interaction::RapPhaseSeqKind;
use openvm_stark_backend::keygen::types::ProverOnlySinglePreprocessedData;
use openvm_stark_backend::keygen::types::TraceWidth;
use openvm_stark_backend::keygen::types::VerifierSinglePreprocessedData;
use openvm_stark_backend::p3_commit::Pcs;
use openvm_stark_backend::p3_matrix::Matrix;
use openvm_stark_backend::rap::AnyRap;
/// Preprocessed-trace commitment data for a single AIR, split into the part
/// only the prover needs and the part the verifier needs.
pub struct PrepKeygenData<SC: StarkGenericConfig> {
    // Kept for completeness but currently unread, hence the underscore.
    pub _verifier_data: Option<VerifierSinglePreprocessedData<Com<SC>>>,
    pub prover_data: Option<ProverOnlySinglePreprocessedData<SC>>,
}
/// Keygen helper bundling an AIR with its preprocessed-trace commitment.
pub struct AirKeygenBuilder<SC: StarkGenericConfig> {
    air: Arc<dyn AnyRap<SC>>,
    prep_keygen_data: PrepKeygenData<SC>,
}
/// Commit to an AIR's preprocessed trace (if it has one) and package both
/// the prover-side and the verifier-side preprocessed data.
fn compute_prep_data_for_air<SC: StarkGenericConfig>(
    pcs: &SC::Pcs,
    air: &dyn AnyRap<SC>,
) -> PrepKeygenData<SC> {
    match air.preprocessed_trace() {
        Some(trace) => {
            // Commit to the preprocessed trace over its natural domain.
            let domain = pcs.natural_domain_for_degree(trace.height());
            let (commit, data) = pcs.commit(vec![(domain, trace.clone())]);
            PrepKeygenData {
                prover_data: Some(ProverOnlySinglePreprocessedData {
                    trace: Arc::new(trace),
                    data: Arc::new(data),
                }),
                _verifier_data: Some(VerifierSinglePreprocessedData { commit }),
            }
        }
        // No preprocessed trace: nothing to commit to on either side.
        None => PrepKeygenData {
            prover_data: None,
            _verifier_data: None,
        },
    }
}
impl<SC: StarkGenericConfig> AirKeygenBuilder<SC> {
    /// Create a builder for `air`, eagerly committing to its preprocessed
    /// trace (if any) via `compute_prep_data_for_air`.
    pub fn new(pcs: &SC::Pcs, air: Arc<dyn AnyRap<SC>>) -> Self {
        let prep_keygen_data = compute_prep_data_for_air(pcs, air.as_ref());
        AirKeygenBuilder {
            air,
            prep_keygen_data,
        }
    }
    /// Build a symbolic RAP builder for this AIR.
    ///
    /// No challenge phases are used (empty challenge/exposed-value slices,
    /// `RapPhaseSeqKind::None`); a `max_constraint_degree` of `None` is
    /// passed through as 0.
    pub fn get_symbolic_builder(
        &self,
        max_constraint_degree: Option<usize>,
    ) -> SymbolicRapBuilder<Val<SC>> {
        // Preprocessed width comes from the committed preprocessed data;
        // there are no after-challenge columns.
        let width = TraceWidth {
            preprocessed: self.prep_keygen_data.width(),
            cached_mains: self.air.cached_main_widths(),
            common_main: self.air.common_main_width(),
            after_challenge: vec![],
        };
        get_symbolic_builder(
            self.air.as_ref(),
            &width,
            &[],
            &[],
            RapPhaseSeqKind::None,
            max_constraint_degree.unwrap_or(0),
        )
    }
}
impl<SC: StarkGenericConfig> PrepKeygenData<SC> {
    /// Width (number of columns) of the preprocessed trace, if there is one.
    pub fn width(&self) -> Option<usize> {
        match &self.prover_data {
            Some(data) => Some(data.trace.width()),
            None => None,
        }
    }
}

View File

@@ -0,0 +1,151 @@
use powdr::{FieldElement, LargeInt};
use powdr_constraint_solver::range_constraint::RangeConstraint;
use super::byte_constraint;
/// Derive range constraints for the bitwise-lookup bus payload `(x, y, z, op)`.
///
/// Bus semantics:
/// - op == 0: x and y are bytes, z must be 0 (pure range check)
/// - op == 1: x and y are bytes, z = x ^ y
pub fn handle_bitwise_lookup<T: FieldElement>(
    payload: &[RangeConstraint<T>],
) -> Vec<RangeConstraint<T>> {
    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/bitwise_op_lookup/bus.rs
    let [x, y, _z, op] = payload else {
        panic!("Expected arguments (x, y, z, op)");
    };
    let op_value = op
        .try_to_single_value()
        .map(|v| v.to_integer().try_into_u64().unwrap());
    if op_value == Some(0) {
        // Pure range check: x and y are bytes; z is forced to zero.
        vec![
            byte_constraint(),
            byte_constraint(),
            RangeConstraint::from_value(T::zero()),
            RangeConstraint::from_value(T::zero()),
        ]
    } else if op_value == Some(1) {
        match (x.try_to_single_value(), y.try_to_single_value()) {
            (Some(x), Some(y)) => {
                // Both operands known: evaluate the XOR concretely.
                let z = T::from(
                    x.to_integer().try_into_u64().unwrap() ^ y.to_integer().try_into_u64().unwrap(),
                );
                vec![
                    RangeConstraint::from_value(x),
                    RangeConstraint::from_value(y),
                    RangeConstraint::from_value(z),
                    RangeConstraint::from_value(T::one()),
                ]
            }
            _ => {
                // The XOR result is a byte and can only have bits that are
                // set in either x or y.
                let z_constraint = x.disjunction(y).conjunction(&byte_constraint());
                vec![
                    byte_constraint(),
                    byte_constraint(),
                    z_constraint,
                    RangeConstraint::from_value(T::one()),
                ]
            }
        }
    } else if op_value.is_none() {
        // Operation unknown: x, y and z are bytes and op is a single bit.
        vec![
            byte_constraint(),
            byte_constraint(),
            byte_constraint(),
            RangeConstraint::from_mask(0x1u64),
        ]
    } else {
        panic!("Invalid operation")
    }
}
#[cfg(test)]
mod tests {
    use crate::bus_interaction_handler::{
        test_utils::*, OpenVmBusInteractionHandler, BITWISE_LOOKUP,
    };
    use super::*;
    use powdr::number::BabyBearField;
    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};
    /// Run the handler on a bitwise-lookup interaction with payload
    /// (x, y, z, op) and return the refined payload constraints.
    fn run(
        x: RangeConstraint<BabyBearField>,
        y: RangeConstraint<BabyBearField>,
        z: RangeConstraint<BabyBearField>,
        op: RangeConstraint<BabyBearField>,
    ) -> Vec<RangeConstraint<BabyBearField>> {
        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();
        let bus_interaction = BusInteraction {
            bus_id: RangeConstraint::from_value(BITWISE_LOOKUP.into()),
            multiplicity: value(1),
            payload: vec![x, y, z, op],
        };
        let result = handler.handle_bus_interaction(bus_interaction);
        result.payload
    }
    #[test]
    fn test_byte_constraint() {
        // op == 0: pure byte range check, z forced to zero.
        let result = run(default(), default(), default(), value(0));
        assert_eq!(result.len(), 4);
        assert_eq!(result[0], mask(0xff));
        assert_eq!(result[1], mask(0xff));
        assert_eq!(result[2], value(0));
        assert_eq!(result[3], value(0));
    }
    #[test]
    fn test_xor_known() {
        // op == 1 with both operands known: result computed concretely.
        let result = run(value(0b10101010), value(0b11001100), default(), value(1));
        assert_eq!(result.len(), 4);
        assert_eq!(result[0], value(0b10101010));
        assert_eq!(result[1], value(0b11001100));
        assert_eq!(result[2], value(0b01100110));
        assert_eq!(result[3], value(1));
    }
    #[test]
    fn test_xor_unknown() {
        // op == 1 with unknown operands: everything constrained to bytes.
        let result = run(default(), default(), default(), value(1));
        assert_eq!(result.len(), 4);
        assert_eq!(result[0], mask(0xff));
        assert_eq!(result[1], mask(0xff));
        assert_eq!(result[2], mask(0xff));
        assert_eq!(result[3], value(1));
    }
    #[test]
    fn test_xor_one_unknown() {
        let result = run(mask(0xabcd), value(0), default(), value(1));
        assert_eq!(result.len(), 4);
        // Note that this constraint could be tighter (0xcd), but the solver
        // will get to this by intersecting the result with the input
        // constraints.
        assert_eq!(result[0], mask(0xff));
        // Same here
        assert_eq!(result[1], mask(0xff));
        // We won't be able to compute the result, but we know that the range
        // constraint of `x` also applies to `z`.
        assert_eq!(result[2], mask(0xcd));
        assert_eq!(result[3], value(1));
    }
    #[test]
    fn test_unknown_operation() {
        // op unknown: x, y, z are bytes; op is a single bit.
        let result = run(default(), default(), default(), default());
        assert_eq!(result.len(), 4);
        assert_eq!(result[0], mask(0xff));
        assert_eq!(result[1], mask(0xff));
        assert_eq!(result[2], mask(0xff));
        assert_eq!(result[3], mask(0x1));
    }
}

View File

@@ -0,0 +1,99 @@
use openvm_instructions::riscv::{RV32_MEMORY_AS, RV32_REGISTER_AS};
use powdr::{FieldElement, LargeInt};
use powdr_constraint_solver::range_constraint::RangeConstraint;
use super::byte_constraint;
/// Derive range constraints for the memory bus payload
/// `(address_space, pointer, data.., timestamp)`.
pub fn handle_memory<T: FieldElement>(
    payload: &[RangeConstraint<T>],
    multiplicity: T,
) -> Vec<RangeConstraint<T>> {
    // See: https://github.com/openvm-org/openvm/blob/main/crates/vm/src/system/memory/offline_checker/bus.rs
    if payload.len() < 4 {
        panic!("Expected at least 4 arguments");
    }
    // Multiplicity -1 marks a receive, +1 a send; anything else is invalid.
    let is_receive = match multiplicity {
        m if m == -T::one() => true,
        m if m == T::one() => false,
        _ => panic!("Expected multiplicity to be 1 or -1, got: {multiplicity}"),
    };
    // Payload layout: address space, pointer, data words, timestamp.
    let (address_space, rest) = payload.split_first().unwrap();
    let (pointer, rest) = rest.split_first().unwrap();
    let (timestamp, data) = rest.split_last().unwrap();
    let address_space_value = address_space
        .try_to_single_value()
        .map(|v| v.to_integer().try_into_u32().unwrap());
    let is_checked_write = !is_receive
        && matches!(
            address_space_value,
            Some(RV32_REGISTER_AS) | Some(RV32_MEMORY_AS)
        );
    if is_checked_write {
        // By the assumption that all data written to registers or memory are
        // range-checked, we can return a byte constraint per data element.
        let mut refined = vec![address_space.clone(), pointer.clone()];
        refined.extend(data.iter().map(|_| byte_constraint()));
        refined.push(timestamp.clone());
        refined
    } else {
        // Otherwise, we can't improve the constraints.
        payload.to_vec()
    }
}
#[cfg(test)]
mod tests {
    use crate::bus_interaction_handler::{test_utils::*, OpenVmBusInteractionHandler, MEMORY};
    use super::*;
    use powdr::number::BabyBearField;
    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};
    /// Assemble a memory bus interaction from its parts and return the
    /// refined payload constraints.
    fn run(
        address_space: RangeConstraint<BabyBearField>,
        pointer: RangeConstraint<BabyBearField>,
        data: Vec<RangeConstraint<BabyBearField>>,
        timestamp: RangeConstraint<BabyBearField>,
        multiplicity: u64,
    ) -> Vec<RangeConstraint<BabyBearField>> {
        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();
        let bus_interaction = BusInteraction {
            bus_id: RangeConstraint::from_value(MEMORY.into()),
            multiplicity: value(multiplicity),
            payload: std::iter::once(address_space)
                .chain(std::iter::once(pointer))
                .chain(data)
                .chain(std::iter::once(timestamp))
                .collect(),
        };
        let result = handler.handle_bus_interaction(bus_interaction);
        result.payload
    }
    #[test]
    fn test_read() {
        // Send (multiplicity 1) into main memory: data becomes byte-constrained.
        let address_space = value(RV32_MEMORY_AS as u64);
        let pointer = value(0x1234);
        let data = vec![default(); 4];
        let timestamp = value(0x5678);
        let result = run(address_space, pointer, data, timestamp, 1);
        assert_eq!(result.len(), 7);
        assert_eq!(result[0], value(RV32_MEMORY_AS as u64));
        assert_eq!(result[1], value(0x1234));
        assert_eq!(result[2], byte_constraint());
        assert_eq!(result[3], byte_constraint());
        assert_eq!(result[4], byte_constraint());
        assert_eq!(result[5], byte_constraint());
        assert_eq!(result[6], value(0x5678));
    }
}

View File

@@ -0,0 +1,159 @@
use bitwise_lookup::handle_bitwise_lookup;
use memory::handle_memory;
use powdr::{FieldElement, LargeInt};
use powdr_autoprecompiles::optimizer::{
ConcreteBusInteractionHandler, ConcreteBusInteractionResult,
};
use powdr_constraint_solver::{
constraint_system::{BusInteraction, BusInteractionHandler},
range_constraint::RangeConstraint,
};
use tuple_range_checker::handle_tuple_range_checker;
use variable_range_checker::handle_variable_range_checker;
mod bitwise_lookup;
mod memory;
mod tuple_range_checker;
mod variable_range_checker;
// OpenVM bus ids as used by this handler.
// NOTE(review): ids 4 and 5 are unassigned here — confirm against the
// openvm VM configuration that these numeric values stay in sync.
const EXECUTION_BRIDGE: u64 = 0;
const MEMORY: u64 = 1;
const PC_LOOKUP: u64 = 2;
const VARIABLE_RANGE_CHECKER: u64 = 3;
const BITWISE_LOOKUP: u64 = 6;
const TUPLE_RANGE_CHECKER: u64 = 7;
/// The kind of OpenVM bus a bus interaction talks to.
pub enum BusType {
    ExecutionBridge,
    Memory,
    PcLookup,
    VariableRangeChecker,
    BitwiseLookup,
    TupleRangeChecker,
}
/// Map a numeric bus id to its `BusType`.
///
/// Panics on ids that are not listed in the constants above.
pub fn bus_type(bus_id: u64) -> BusType {
    match bus_id {
        EXECUTION_BRIDGE => BusType::ExecutionBridge,
        MEMORY => BusType::Memory,
        PC_LOOKUP => BusType::PcLookup,
        VARIABLE_RANGE_CHECKER => BusType::VariableRangeChecker,
        BITWISE_LOOKUP => BusType::BitwiseLookup,
        TUPLE_RANGE_CHECKER => BusType::TupleRangeChecker,
        _ => panic!("Unknown bus ID: {bus_id}"),
    }
}
/// Stateless bus-interaction handler encoding the OpenVM bus semantics.
#[derive(Default, Clone)]
pub struct OpenVmBusInteractionHandler<T: FieldElement> {
    // Only carries the field-element type; the handler holds no state.
    _phantom: std::marker::PhantomData<T>,
}
impl<T: FieldElement> BusInteractionHandler<T> for OpenVmBusInteractionHandler<T> {
    /// Refine the payload range constraints of a bus interaction using the
    /// known semantics of each OpenVM bus. Returns the interaction unchanged
    /// when the bus id or multiplicity is not a single known value, or when
    /// the multiplicity is zero.
    fn handle_bus_interaction(
        &self,
        bus_interaction: BusInteraction<RangeConstraint<T>>,
    ) -> BusInteraction<RangeConstraint<T>> {
        // Bus semantics only apply if we know which bus is used and with
        // which multiplicity.
        let (Some(bus_id), Some(multiplicity)) = (
            bus_interaction.bus_id.try_to_single_value(),
            bus_interaction.multiplicity.try_to_single_value(),
        ) else {
            return bus_interaction;
        };
        // A zero multiplicity means the interaction is inactive.
        if multiplicity.is_zero() {
            return bus_interaction;
        }
        let payload_constraints = match bus_type(bus_id.to_integer().try_into_u64().unwrap()) {
            // Sends / receives (pc, timestamp) pairs. They could have any value.
            BusType::ExecutionBridge => bus_interaction.payload,
            // Sends a (pc, opcode, args..) tuple. In theory, we could refine the range constraints
            // of the args here, but for auto-precompiles, only the PC will be unknown, which could
            // have any value.
            BusType::PcLookup => bus_interaction.payload,
            BusType::BitwiseLookup => handle_bitwise_lookup(&bus_interaction.payload),
            BusType::Memory => handle_memory(&bus_interaction.payload, multiplicity),
            BusType::VariableRangeChecker => {
                handle_variable_range_checker(&bus_interaction.payload)
            }
            BusType::TupleRangeChecker => handle_tuple_range_checker(&bus_interaction.payload),
        };
        BusInteraction {
            payload: payload_constraints,
            ..bus_interaction
        }
    }
}
/// Range constraint for a single byte, i.e. the range [0, 0xff].
fn byte_constraint<T: FieldElement>() -> RangeConstraint<T> {
    RangeConstraint::from_mask(0xffu64)
}
impl<T: FieldElement> ConcreteBusInteractionHandler<T> for OpenVmBusInteractionHandler<T> {
    /// Classify a bus interaction whose values are all concrete: removable
    /// (always satisfied), must be kept (has side effects), or invalid
    /// (violates the bus rules).
    fn handle_concrete_bus_interaction(
        &self,
        bus_interaction: BusInteraction<T>,
    ) -> ConcreteBusInteractionResult {
        // If multiplicity is zero, can remove without inspecting
        if bus_interaction.multiplicity.is_zero() {
            return ConcreteBusInteractionResult::AlwaysSatisfied;
        }
        match bus_type(bus_interaction.bus_id.to_integer().try_into_u64().unwrap()) {
            BusType::ExecutionBridge => {
                // Execution bridge could have any value.
                ConcreteBusInteractionResult::HasSideEffects
            }
            BusType::PcLookup => {
                // For auto-precompiles, the PC will be unknown, which could have any value.
                unreachable!("PC can't be known at compile time, so shouldn't become a bus interaction with concrete values!")
            }
            BusType::Memory => {
                // Memory read/write will always have side effects
                // so we can't remove the bus interaction without changing the statement being proven.
                ConcreteBusInteractionResult::HasSideEffects
            }
            BusType::BitwiseLookup | BusType::VariableRangeChecker | BusType::TupleRangeChecker => {
                // Fixed lookups can always be satisfied unless the bus rules are violated.
                // This can be checked via BusInteractionHandler::handle_bus_interaction_checked.
                let range_constraints = BusInteraction::from_iter(
                    bus_interaction
                        .iter()
                        .map(|v| RangeConstraint::from_value(*v)),
                );
                if self
                    .handle_bus_interaction_checked(range_constraints)
                    .is_err()
                {
                    ConcreteBusInteractionResult::ViolatesBusRules
                } else {
                    ConcreteBusInteractionResult::AlwaysSatisfied
                }
            }
        }
    }
}
#[cfg(test)]
mod test_utils {
    use super::*;
    use powdr::number::BabyBearField;
    /// Range constraint fixing a single concrete value.
    pub fn value(value: u64) -> RangeConstraint<BabyBearField> {
        RangeConstraint::from_value(BabyBearField::from(value))
    }
    /// Range constraint from a bit mask.
    pub fn mask(mask: u64) -> RangeConstraint<BabyBearField> {
        RangeConstraint::from_mask(mask)
    }
    /// Inclusive range constraint [start, end].
    pub fn range(start: u64, end: u64) -> RangeConstraint<BabyBearField> {
        RangeConstraint::from_range(BabyBearField::from(start), BabyBearField::from(end))
    }
    /// Unconstrained (default) range constraint.
    pub fn default() -> RangeConstraint<BabyBearField> {
        RangeConstraint::default()
    }
}

View File

@@ -0,0 +1,63 @@
use powdr::FieldElement;
use powdr_constraint_solver::range_constraint::RangeConstraint;
/// Maximum value of the first element,
/// see https://github.com/openvm-org/openvm/blob/main/extensions/rv32im/circuit/src/extension.rs#L124
// TODO: This should be configurable
const MAX_0: u64 = (1u64 << 8) - 1;
/// Maximum value of the second element,
/// see https://github.com/openvm-org/openvm/blob/main/extensions/rv32im/circuit/src/extension.rs#L124
// TODO: This should be configurable
const MAX_1: u64 = (8 * (1 << 8)) - 1;
/// Derive range constraints for the tuple-range-checker payload `(x, y)`:
/// `x` lies in `[0, MAX_0]` and `y` in `[0, MAX_1]`.
pub fn handle_tuple_range_checker<T: FieldElement>(
    payload: &[RangeConstraint<T>],
) -> Vec<RangeConstraint<T>> {
    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/range_tuple/bus.rs
    if payload.len() != 2 {
        panic!("Expected arguments (x, y)");
    }
    // The incoming constraints are ignored: the bus itself fixes the ranges.
    [MAX_0, MAX_1]
        .into_iter()
        .map(|max| RangeConstraint::from_range(T::from(0u64), T::from(max)))
        .collect()
}
#[cfg(test)]
mod tests {
    use crate::bus_interaction_handler::{
        test_utils::*, OpenVmBusInteractionHandler, TUPLE_RANGE_CHECKER,
    };
    use super::*;
    use powdr::number::BabyBearField;
    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};
    /// Run the handler on a tuple-range-checker interaction with payload
    /// (x, y) and return the refined payload constraints.
    fn run(
        x: RangeConstraint<BabyBearField>,
        y: RangeConstraint<BabyBearField>,
    ) -> Vec<RangeConstraint<BabyBearField>> {
        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();
        let bus_interaction = BusInteraction {
            bus_id: RangeConstraint::from_value(TUPLE_RANGE_CHECKER.into()),
            multiplicity: value(1),
            payload: vec![x, y],
        };
        let result = handler.handle_bus_interaction(bus_interaction);
        result.payload
    }
    #[test]
    fn test_unknown() {
        let x = default();
        let y = default();
        let result = run(x, y);
        assert_eq!(result.len(), 2);
        assert_eq!(result[0], range(0, MAX_0));
        // Stray trailing comma removed for consistency with the other asserts.
        assert_eq!(result[1], range(0, MAX_1));
    }
}

View File

@@ -0,0 +1,79 @@
use powdr::{FieldElement, LargeInt};
use powdr_constraint_solver::range_constraint::RangeConstraint;
/// The maximum number of bits that can be checked by the variable range checker.
// TODO: This should be configurable
const MAX_BITS: u64 = 25;
/// Returns the range constraints implied by a variable-range-checker bus
/// interaction with payload `(x, bits)`: `x` lies in `[0, 2^bits - 1]`.
/// If `bits` is not known to be a single value, the result is the loosest
/// possibility: `x` in `[0, 2^MAX_BITS - 1]` and `bits` in `[0, MAX_BITS]`.
/// Panics unless the payload has exactly two elements, or if a known `bits`
/// exceeds `MAX_BITS`.
pub fn handle_variable_range_checker<T: FieldElement>(
    payload: &[RangeConstraint<T>],
) -> Vec<RangeConstraint<T>> {
    // See: https://github.com/openvm-org/openvm/blob/v1.0.0/crates/circuits/primitives/src/var_range/bus.rs
    // Expects (x, bits), where `x` is in the range [0, 2^bits - 1]
    let [_x, bits] = payload else {
        panic!("Expected arguments (x, bits)");
    };
    if let Some(known_bits) = bits.try_to_single_value() {
        let known_bits = known_bits.to_integer().try_into_u64().unwrap();
        assert!(known_bits <= MAX_BITS);
        vec![
            RangeConstraint::from_mask((1u64 << known_bits) - 1),
            bits.clone(),
        ]
    } else {
        vec![
            RangeConstraint::from_mask((1u64 << MAX_BITS) - 1),
            RangeConstraint::from_range(T::from(0), T::from(MAX_BITS)),
        ]
    }
}
#[cfg(test)]
mod tests {
    use crate::bus_interaction_handler::{
        test_utils::*, OpenVmBusInteractionHandler, VARIABLE_RANGE_CHECKER,
    };
    use super::*;
    use powdr::number::BabyBearField;
    use powdr_constraint_solver::constraint_system::{BusInteraction, BusInteractionHandler};
    /// Helper: routes a variable-range-checker interaction with payload
    /// `(x, bits)` through the handler and returns the refined payload.
    fn run(
        x: RangeConstraint<BabyBearField>,
        bits: RangeConstraint<BabyBearField>,
    ) -> Vec<RangeConstraint<BabyBearField>> {
        let handler = OpenVmBusInteractionHandler::<BabyBearField>::default();
        let bus_interaction = BusInteraction {
            bus_id: RangeConstraint::from_value(VARIABLE_RANGE_CHECKER.into()),
            multiplicity: value(1),
            payload: vec![x, bits],
        };
        let result = handler.handle_bus_interaction(bus_interaction);
        result.payload
    }
    #[test]
    fn test_unknown_bits() {
        // Unknown `bits`: fall back to the widest mask and [0, MAX_BITS].
        let x = default();
        let bits = default();
        let result = run(x, bits);
        assert_eq!(result.len(), 2);
        assert_eq!(
            result[0],
            RangeConstraint::from_mask((1u64 << MAX_BITS) - 1)
        );
        assert_eq!(result[1], range(0, MAX_BITS));
    }
    #[test]
    fn test_known_bits() {
        // Known `bits = 12`: x is constrained to a 12-bit mask.
        let x = default();
        let bits = value(12);
        let result = run(x, bits);
        assert_eq!(result.len(), 2);
        assert_eq!(result[0], mask(0xfff));
        assert_eq!(result[1], value(12));
    }
}

513
openvm/src/customize_exe.rs Normal file
View File

@@ -0,0 +1,513 @@
use std::collections::{BTreeMap, BTreeSet, HashMap};
use itertools::Itertools;
use openvm_algebra_transpiler::{Fp2Opcode, Rv32ModularArithmeticOpcode};
use openvm_ecc_transpiler::Rv32WeierstrassOpcode;
use openvm_instructions::LocalOpcode;
use openvm_instructions::{exe::VmExe, instruction::Instruction, program::Program, VmOpcode};
use openvm_keccak256_transpiler::Rv32KeccakOpcode;
use openvm_rv32im_transpiler::{Rv32HintStoreOpcode, Rv32LoadStoreOpcode};
use openvm_sdk::config::SdkVmConfig;
use openvm_sha256_transpiler::Rv32Sha256Opcode;
use openvm_stark_backend::{interaction::SymbolicInteraction, p3_field::PrimeField32};
use powdr::ast::analyzed::AlgebraicExpression;
use powdr::{FieldElement, LargeInt};
use powdr_autoprecompiles::powdr::UniqueColumns;
use powdr_autoprecompiles::{
Autoprecompiles, BusInteractionKind, InstructionKind, SymbolicBusInteraction,
SymbolicConstraint, SymbolicInstructionStatement, SymbolicMachine,
};
use crate::bus_interaction_handler::OpenVmBusInteractionHandler;
use crate::instruction_formatter::openvm_instruction_formatter;
use crate::{
powdr_extension::{OriginalInstruction, PowdrExtension, PowdrOpcode, PowdrPrecompile},
utils::symbolic_to_algebraic,
};
/// Maximum constraint degree passed to the autoprecompile builder.
const OPENVM_DEGREE_BOUND: usize = 5;
/// Base opcode for generated autoprecompiles; the i-th apc uses POWDR_OPCODE + i.
const POWDR_OPCODE: usize = 0x10ff;
/// Rewrites `exe` to use autoprecompiles: selects up to `autoprecompiles`
/// basic blocks (after skipping the first `skip` candidates), replaces each
/// selected block in the program with one new instruction carrying opcode
/// `POWDR_OPCODE + i` followed by no-ops (keeping the program length fixed),
/// and returns the modified exe together with the `PowdrExtension` that
/// implements the generated precompiles.
///
/// - `labels`: ELF text labels, used as basic-block boundaries.
/// - `airs`: symbolic machine per global opcode, used to build the apcs.
/// - `pc_idx_count`: when present (PGO mode), execution frequency per program
///   index; block selection is then by cost = block length * frequency.
pub fn customize<F: PrimeField32>(
    mut exe: VmExe<F>,
    base_config: SdkVmConfig,
    labels: &BTreeSet<u32>,
    airs: &BTreeMap<usize, SymbolicMachine<powdr::number::BabyBearField>>,
    autoprecompiles: usize,
    skip: usize,
    pc_idx_count: Option<HashMap<u32, u32>>,
) -> (VmExe<F>, PowdrExtension<F>) {
    // The following opcodes shall never be accelerated and therefore always put in its own basic block.
    // Currently this contains OpenVm opcodes: Rv32HintStoreOpcode::HINT_STOREW (0x260) and Rv32HintStoreOpcode::HINT_BUFFER (0x261)
    // which are the only two opcodes from the Rv32HintStore, the air responsible for reading host states via stdin.
    // We don't want these opcodes because they create air constraints with next references, which powdr-openvm does not support yet.
    let opcodes_no_apc = vec![
        Rv32HintStoreOpcode::HINT_STOREW.global_opcode().as_usize(),
        Rv32HintStoreOpcode::HINT_BUFFER.global_opcode().as_usize(),
        Rv32LoadStoreOpcode::LOADB.global_opcode().as_usize(),
        Rv32LoadStoreOpcode::LOADH.global_opcode().as_usize(),
        Rv32WeierstrassOpcode::EC_ADD_NE.global_opcode().as_usize(),
        Rv32WeierstrassOpcode::SETUP_EC_ADD_NE
            .global_opcode()
            .as_usize(),
        Rv32WeierstrassOpcode::EC_DOUBLE.global_opcode().as_usize(),
        Rv32WeierstrassOpcode::SETUP_EC_DOUBLE
            .global_opcode()
            .as_usize(),
        // The `+ 4` variants presumably address a second curve's opcode
        // class offset — TODO confirm against the OpenVM ecc extension.
        Rv32WeierstrassOpcode::EC_ADD_NE.global_opcode().as_usize() + 4,
        Rv32WeierstrassOpcode::SETUP_EC_ADD_NE
            .global_opcode()
            .as_usize()
            + 4,
        Rv32WeierstrassOpcode::EC_DOUBLE.global_opcode().as_usize() + 4,
        Rv32WeierstrassOpcode::SETUP_EC_DOUBLE
            .global_opcode()
            .as_usize()
            + 4,
        Rv32KeccakOpcode::KECCAK256.global_opcode().as_usize(),
        Rv32Sha256Opcode::SHA256.global_opcode().as_usize(),
        Rv32ModularArithmeticOpcode::ADD.global_opcode().as_usize(),
        Rv32ModularArithmeticOpcode::SUB.global_opcode().as_usize(),
        Rv32ModularArithmeticOpcode::SETUP_ADDSUB
            .global_opcode()
            .as_usize(),
        Rv32ModularArithmeticOpcode::MUL.global_opcode().as_usize(),
        Rv32ModularArithmeticOpcode::DIV.global_opcode().as_usize(),
        Rv32ModularArithmeticOpcode::SETUP_MULDIV
            .global_opcode()
            .as_usize(),
        Rv32ModularArithmeticOpcode::IS_EQ
            .global_opcode()
            .as_usize(),
        Rv32ModularArithmeticOpcode::SETUP_ISEQ
            .global_opcode()
            .as_usize(),
        Fp2Opcode::ADD.global_opcode().as_usize(),
        Fp2Opcode::SUB.global_opcode().as_usize(),
        Fp2Opcode::SETUP_ADDSUB.global_opcode().as_usize(),
        Fp2Opcode::MUL.global_opcode().as_usize(),
        Fp2Opcode::DIV.global_opcode().as_usize(),
        Fp2Opcode::SETUP_MULDIV.global_opcode().as_usize(),
        0x510, // not sure yet what this is
        0x513, // not sure yet what this is
        0x51c, // not sure yet what this is
        0x523, // not sure yet what this is
    ];
    let mut blocks = collect_basic_blocks(&exe.program, labels, &opcodes_no_apc);
    tracing::info!("Got {} basic blocks", blocks.len());
    if let Some(pgo_program_idx_count) = pc_idx_count {
        // sort the blocks by block_len * frequency (the count of start_idx in pgo_program_idx_count)
        blocks.sort_by(|a, b| {
            // not all start index of a basic block can be found in pc_idx_count, because a basic block might not be executed at all
            // in this case, they will just default to 0
            let a_count = pgo_program_idx_count
                .get(&(a.start_idx as u32))
                .unwrap_or(&0);
            let b_count = pgo_program_idx_count
                .get(&(b.start_idx as u32))
                .unwrap_or(&0);
            let a_opcode_no_apc = if !a.statements.is_empty() {
                opcodes_no_apc.contains(&a.statements[0].opcode.as_usize())
            } else {
                true
            };
            let b_opcode_no_apc = if !b.statements.is_empty() {
                opcodes_no_apc.contains(&b.statements[0].opcode.as_usize())
            } else {
                true
            };
            // if a basic block starts with an opcode that is in opcodes_no_apc, put it at the bottom of the list of blocks to order
            // otherwise, order by descending cost = instruction count * execution frequency
            match (a_opcode_no_apc, b_opcode_no_apc) {
                (true, false) => std::cmp::Ordering::Greater,
                (false, true) => std::cmp::Ordering::Less,
                _ => (b_count * (b.statements.len() as u32))
                    .cmp(&(a_count * (a.statements.len() as u32))),
            }
        });
        // print block start_idx, cost = block_len * frequency, block_len, and frequency, sorted by descending cost
        for block in &blocks {
            let start_idx = block.start_idx;
            let block_len = block.statements.len();
            let count = pgo_program_idx_count.get(&(start_idx as u32)).unwrap_or(&0);
            let cost = count * (block_len as u32);
            tracing::info!(
                "Basic block start_idx: {}, cost: {}, block_len: {}, frequency: {}",
                start_idx,
                cost,
                block_len,
                count
            );
        }
    } else {
        // if pgo option is not set, sort by descending order of block length
        blocks.sort_by(|a, b| (b.statements.len()).cmp(&a.statements.len()));
    }
    let program = &mut exe.program.instructions_and_debug_infos;
    // Filler instruction used to pad out replaced blocks; the opcode value is
    // arbitrary but must not collide with any real opcode.
    let noop = Instruction {
        opcode: VmOpcode::from_usize(0xdeadaf),
        a: F::ZERO,
        b: F::ZERO,
        c: F::ZERO,
        d: F::ZERO,
        e: F::ZERO,
        f: F::ZERO,
        g: F::ZERO,
    };
    let mut extensions = Vec::new();
    let n_acc = autoprecompiles;
    let n_skip = skip;
    tracing::info!("Generating {n_acc} autoprecompiles");
    for (i, acc_block) in blocks.iter().skip(n_skip).take(n_acc).enumerate() {
        tracing::debug!(
            "Accelerating block {i} of length {} and start idx {}",
            acc_block.statements.len(),
            acc_block.start_idx
        );
        tracing::debug!(
            "Acc block: {}",
            acc_block.pretty_print(openvm_instruction_formatter)
        );
        let apc_opcode = POWDR_OPCODE + i;
        let new_instr = Instruction {
            opcode: VmOpcode::from_usize(apc_opcode),
            a: F::ZERO,
            b: F::ZERO,
            c: F::ZERO,
            d: F::ZERO,
            e: F::ZERO,
            f: F::ZERO,
            g: F::ZERO,
        };
        let pc = acc_block.start_idx as usize;
        // NOTE: shadows the outer `n_acc`; here it is the block length.
        let n_acc = acc_block.statements.len();
        // Collect the original instructions and build the replacement run:
        // the apc instruction first, then no-ops to keep indices stable.
        let (acc, new_instrs): (Vec<_>, Vec<_>) = program[pc..pc + n_acc]
            .iter()
            .enumerate()
            .map(|(i, x)| {
                let instr = x.as_ref().unwrap();
                let instr = instr.0.clone();
                if i == 0 {
                    (instr, new_instr.clone())
                } else {
                    (instr, noop.clone())
                }
            })
            .collect();
        let new_instrs = new_instrs.into_iter().map(|x| Some((x, None)));
        let len_before = program.len();
        program.splice(pc..pc + n_acc, new_instrs);
        assert_eq!(program.len(), len_before);
        let (autoprecompile, subs) =
            generate_autoprecompile::<F, powdr::number::BabyBearField>(acc_block, airs, apc_opcode);
        let is_valid_column = autoprecompile
            .unique_columns()
            .find(|c| c.name == "is_valid")
            .unwrap();
        let opcodes_in_acc = acc
            .iter()
            .map(|x| x.opcode.as_usize())
            .unique()
            .collect_vec();
        extensions.push(PowdrPrecompile::new(
            format!("PowdrAutoprecompile_{i}"),
            PowdrOpcode {
                class_offset: apc_opcode,
            },
            transpose_symbolic_machine(autoprecompile),
            acc.into_iter()
                .zip_eq(subs)
                .map(|(instruction, subs)| OriginalInstruction::new(instruction, subs))
                .collect(),
            airs.iter()
                .filter(|(i, _)| opcodes_in_acc.contains(*i))
                .map(|(i, air)| (*i, transpose_symbolic_machine(air.clone())))
                .collect(),
            is_valid_column,
        ));
    }
    (exe, PowdrExtension::new(extensions, base_config))
}
// TODO collect properly from opcode enums
/// Global opcodes that terminate a basic block. Per the formatter in
/// `instruction_formatter`, these are the RV32 conditional branches
/// (0x220/0x221/0x225..0x228 = BEQ/BNE/BLT/BLTU/BGE/BGEU) plus
/// 0x230 (JAL), 0x231 and 0x235 (JALR).
/// NOTE(review): 0x231 (= 561) is LUI per the formatter, not a jump —
/// presumably included because it shares an opcode class with JAL; confirm.
const BRANCH_OPCODES: [usize; 9] = [
    0x220, 0x221, 0x225, 0x226, 0x227, 0x228, 0x230, 0x231, 0x235,
];
/// Returns true iff the opcode is one of the control-flow opcodes above.
pub fn is_jump(instruction: &VmOpcode) -> bool {
    let opcode = instruction.as_usize();
    BRANCH_OPCODES.contains(&opcode)
}
/// A run of instructions treated as a unit for autoprecompile generation.
#[derive(Debug, Clone)]
pub struct BasicBlock<F> {
    /// Index of the block's first instruction in the program (an index, not a pc).
    pub start_idx: u64,
    /// The instructions of the block, in program order.
    pub statements: Vec<Instruction<F>>,
}
impl<F: PrimeField32> BasicBlock<F> {
    /// Renders the block for debug logging: a header carrying the start
    /// index, then one formatted instruction per line, then a closing bracket.
    fn pretty_print(&self, instr_formatter: impl Fn(&Instruction<F>) -> String) -> String {
        let mut lines = Vec::with_capacity(self.statements.len() + 2);
        lines.push(format!(
            "BasicBlock(start_idx: {}, statements: [",
            self.start_idx
        ));
        for (i, instr) in self.statements.iter().enumerate() {
            lines.push(format!(" instr {i:>3}: {}", instr_formatter(instr)));
        }
        lines.push("])".to_string());
        lines.join("\n")
    }
}
/// Partitions `program` into basic blocks. A block ends at a jump/branch
/// (`is_jump`), before a label target in `labels`, and around any opcode in
/// `opcodes_no_apc`, which is isolated into its own single-instruction block.
///
/// Fix over the previous version: empty blocks are never emitted. Previously
/// the in-loop pushes did not check for emptiness (only the final push did),
/// so an excluded opcode following a branch, or a label landing right after a
/// branch, produced zero-length blocks that downstream sorting had to guard
/// against.
pub fn collect_basic_blocks<F: PrimeField32>(
    program: &Program<F>,
    labels: &BTreeSet<u32>,
    opcodes_no_apc: &[usize],
) -> Vec<BasicBlock<F>> {
    let mut blocks: Vec<BasicBlock<F>> = Vec::new();
    let mut curr_block = BasicBlock {
        start_idx: 0,
        statements: Vec::new(),
    };
    // Base pc the program is loaded at; labels are absolute pcs, instructions
    // are 4 bytes apart.
    let init_pc = 0x0020_0800;
    for (i, instr) in program.instructions_and_debug_infos.iter().enumerate() {
        let instr = instr.as_ref().unwrap().0.clone();
        let adjusted_pc = init_pc + (i as u32) * 4;
        let is_target = labels.contains(&adjusted_pc);
        let is_branch = is_jump(&instr.opcode);
        // If this opcode cannot be in an apc, we make sure it's alone in a BB.
        if opcodes_no_apc.contains(&instr.opcode.as_usize()) {
            // Close the current block (if non-empty), emit the excluded
            // instruction as a singleton block, and restart after it.
            if !curr_block.statements.is_empty() {
                blocks.push(curr_block);
            }
            blocks.push(BasicBlock {
                start_idx: i as u64,
                statements: vec![instr],
            });
            curr_block = BasicBlock {
                start_idx: (i + 1) as u64,
                statements: Vec::new(),
            };
        } else {
            // If the instruction is a jump target, close the previous block
            // as-is and start a new block from this instruction.
            if is_target {
                if !curr_block.statements.is_empty() {
                    blocks.push(curr_block);
                }
                curr_block = BasicBlock {
                    start_idx: i as u64,
                    statements: Vec::new(),
                };
            }
            curr_block.statements.push(instr);
            // A branch closes the block *including* the branch itself;
            // the next block starts at the following instruction.
            if is_branch {
                blocks.push(curr_block);
                curr_block = BasicBlock {
                    start_idx: (i + 1) as u64,
                    statements: Vec::new(),
                };
            }
        }
    }
    if !curr_block.statements.is_empty() {
        blocks.push(curr_block);
    }
    blocks
}
// OpenVM relevant bus ids:
// 0: execution bridge -> [pc, timestamp]
// 1: memory -> [address space, pointer, data, timestamp, 1]
// 2: pc lookup -> [...]
// 3: range tuple -> [col, bits]
// 5: bitwise xor ->
// [a, b, 0, 0] byte range checks for a and b
// [a, b, c, 1] c = xor(a, b)
/// Builds one autoprecompile for `block`: translates each instruction into a
/// symbolic statement (classifying it as `ConditionalBranch` or `Normal` via
/// `is_jump`), pairs it with its chip's symbolic machine from `airs`, and runs
/// the powdr `Autoprecompiles` builder. Returns the combined machine and, per
/// instruction, the column substitutions used.
fn generate_autoprecompile<F: PrimeField32, P: FieldElement>(
    block: &BasicBlock<F>,
    airs: &BTreeMap<usize, SymbolicMachine<P>>,
    apc_opcode: usize,
) -> (SymbolicMachine<P>, Vec<Vec<u64>>) {
    tracing::debug!(
        "Generating autoprecompile for block at index {}",
        block.start_idx
    );
    let mut instruction_kind = BTreeMap::new();
    let mut instruction_machines = BTreeMap::new();
    let program = block
        .statements
        .iter()
        .map(|instr| {
            let instr_name = format!("{}", instr.opcode);
            // Every opcode in the block must have a known machine;
            // panics otherwise.
            let symb_machine = airs.get(&instr.opcode.as_usize()).unwrap();
            let symb_instr = SymbolicInstructionStatement {
                name: instr_name.clone(),
                opcode: instr.opcode.as_usize(),
                args: [
                    instr.a, instr.b, instr.c, instr.d, instr.e, instr.f, instr.g,
                ]
                .iter()
                .map(|f| to_powdr_field::<F, P>(*f))
                .collect(),
            };
            if is_jump(&instr.opcode) {
                instruction_kind.insert(instr_name.clone(), InstructionKind::ConditionalBranch);
            } else {
                instruction_kind.insert(instr_name.clone(), InstructionKind::Normal);
            };
            instruction_machines.insert(instr_name.clone(), symb_machine.clone());
            symb_instr
        })
        .collect();
    let autoprecompiles = Autoprecompiles {
        program,
        instruction_kind,
        instruction_machines,
    };
    let (precompile, subs) = autoprecompiles.build(
        OpenVmBusInteractionHandler::default(),
        OPENVM_DEGREE_BOUND,
        apc_opcode as u32,
    );
    // Check that substitution values are unique over all instructions
    assert!(subs.iter().flatten().all_unique());
    tracing::debug!(
        "Done generating autoprecompile for block at index {}",
        block.start_idx
    );
    (precompile, subs)
}
/// Converts an OpenVM symbolic bus interaction into powdr's representation,
/// translating the multiplicity and message expressions over the given
/// column names.
pub fn openvm_bus_interaction_to_powdr<F: PrimeField32, P: FieldElement>(
    interaction: &SymbolicInteraction<F>,
    columns: &[String],
) -> SymbolicBusInteraction<P> {
    SymbolicBusInteraction {
        // TODO: derive from `interaction.interaction_type`
        // (InteractionType::Send => Send, InteractionType::Receive => Receive);
        // for now every interaction is modeled as a Send.
        kind: BusInteractionKind::Send,
        id: interaction.bus_index as u64,
        mult: symbolic_to_algebraic(&interaction.count, columns),
        args: interaction
            .message
            .iter()
            .map(|arg| symbolic_to_algebraic(arg, columns))
            .collect(),
    }
}
/// Converts an OpenVM field element to a powdr field element via its
/// canonical u32 representation.
fn to_powdr_field<F: PrimeField32, P: FieldElement>(f: F) -> P {
    P::from(f.as_canonical_u32())
}
/// Converts a powdr field element to an OpenVM field element; panics if the
/// value does not fit in a u32.
fn to_ovm_field<F: PrimeField32, P: FieldElement>(f: P) -> F {
    let value = f.to_integer().try_into_u32().unwrap();
    F::from_canonical_u32(value)
}
// Transpose an algebraic expression from the powdr field to openvm field
fn transpose_algebraic_expression<F: PrimeField32, P: FieldElement>(
expr: AlgebraicExpression<P>,
) -> AlgebraicExpression<F> {
match expr {
AlgebraicExpression::Number(n) => AlgebraicExpression::Number(to_ovm_field(n)),
AlgebraicExpression::Reference(reference) => AlgebraicExpression::Reference(reference),
AlgebraicExpression::PublicReference(reference) => {
AlgebraicExpression::PublicReference(reference)
}
AlgebraicExpression::Challenge(challenge) => AlgebraicExpression::Challenge(challenge),
AlgebraicExpression::BinaryOperation(algebraic_binary_operation) => {
let left = transpose_algebraic_expression(*algebraic_binary_operation.left);
let right = transpose_algebraic_expression(*algebraic_binary_operation.right);
AlgebraicExpression::BinaryOperation(powdr::ast::analyzed::AlgebraicBinaryOperation {
left: Box::new(left),
right: Box::new(right),
op: algebraic_binary_operation.op,
})
}
AlgebraicExpression::UnaryOperation(algebraic_unary_operation) => {
AlgebraicExpression::UnaryOperation(powdr::ast::analyzed::AlgebraicUnaryOperation {
op: algebraic_unary_operation.op,
expr: Box::new(transpose_algebraic_expression(
*algebraic_unary_operation.expr,
)),
})
}
}
}
// Transpose a symbolic machine from the powdr field to openvm field, by
// transposing every constraint expression and every bus interaction's
// multiplicity and arguments.
fn transpose_symbolic_machine<F: PrimeField32, P: FieldElement>(
    machine: SymbolicMachine<P>,
) -> SymbolicMachine<F> {
    let SymbolicMachine {
        constraints,
        bus_interactions,
    } = machine;
    SymbolicMachine {
        constraints: constraints
            .into_iter()
            .map(|c| SymbolicConstraint {
                expr: transpose_algebraic_expression(c.expr),
            })
            .collect(),
        bus_interactions: bus_interactions
            .into_iter()
            .map(|bi| SymbolicBusInteraction {
                kind: bi.kind,
                id: bi.id,
                mult: transpose_algebraic_expression(bi.mult),
                args: bi
                    .args
                    .into_iter()
                    .map(|arg| transpose_algebraic_expression(arg))
                    .collect(),
            })
            .collect(),
    }
}

View File

@@ -0,0 +1,91 @@
use openvm_instructions::instruction::Instruction;
use openvm_stark_backend::p3_field::PrimeField32;
/// Human-readable rendering of an OpenVM instruction for debug logging,
/// mapping known global opcode numbers to mnemonics; unknown opcodes fall
/// back to a raw dump of all operands.
pub fn openvm_instruction_formatter<F: PrimeField32>(instruction: &Instruction<F>) -> String {
    let Instruction {
        opcode,
        a,
        b,
        c,
        d,
        e,
        f,
        g,
    } = instruction;
    // Opcodes taken from:
    // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/transpiler/src/instructions.rs
    match opcode.as_usize() {
        // Alu instructions, see:
        // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/circuit/src/adapters/alu.rs#L197-L201
        512..=521 => {
            // The ALU adapter fixes d = 1 and f = g = 0.
            assert_eq!(d, &F::ONE);
            assert_eq!(f, &F::ZERO);
            assert_eq!(g, &F::ZERO);
            let opcode = match opcode.as_usize() {
                // Rv32BaseAluChip
                512 => "ADD",
                513 => "SUB",
                514 => "XOR",
                515 => "OR",
                516 => "AND",
                // Rv32ShiftChip
                517 => "SLL",
                518 => "SRL",
                519 => "SRA",
                // Rv32LessThanChip
                520 => "SLT",
                521 => "SLTU",
                _ => unreachable!(),
            };
            format!("{opcode} rd_ptr = {a}, rs1_ptr = {b}, rs2 = {c}, rs2_as = {e}")
        }
        // Load/Store instructions, see:
        // https://github.com/openvm-org/openvm/blob/v1.0.0/extensions/rv32im/circuit/src/adapters/loadstore.rs#L340-L346
        528..=535 => {
            assert_eq!(d, &F::ONE);
            let opcode = match opcode.as_usize() {
                528 => "LOADW",
                529 => "LOADBU",
                530 => "LOADHU",
                531 => "STOREW",
                532 => "STOREH",
                533 => "STOREB",
                534 => "LOADB",
                535 => "LOADH",
                _ => unreachable!(),
            };
            format!("{opcode} rd_rs2_ptr = {a}, rs1_ptr = {b}, imm = {c}, mem_as = {e}, needs_write = {f}, imm_sign = {g}")
        }
        // Branches, jumps, mul/div and hint opcodes: raw operand dump with
        // the mnemonic only.
        544 => format!("BEQ {a} {b} {c} {d} {e}"),
        545 => format!("BNE {a} {b} {c} {d} {e}"),
        549 => format!("BLT {a} {b} {c} {d} {e}"),
        550 => format!("BLTU {a} {b} {c} {d} {e}"),
        551 => format!("BGE {a} {b} {c} {d} {e}"),
        552 => format!("BGEU {a} {b} {c} {d} {e}"),
        560 => format!("JAL {a} {b} {c} {d} {e}"),
        561 => format!("LUI {a} {b} {c} {d} {e}"),
        565 => format!("JALR {a} {b} {c} {d} {e}"),
        576 => format!("AUIPC {a} {b} {c} {d} {e}"),
        592 => format!("MUL {a} {b} {c} {d} {e}"),
        593 => format!("MULH {a} {b} {c} {d} {e}"),
        594 => format!("MULHSU {a} {b} {c} {d} {e}"),
        595 => format!("MULHU {a} {b} {c} {d} {e}"),
        596 => format!("DIV {a} {b} {c} {d} {e}"),
        597 => format!("DIVU {a} {b} {c} {d} {e}"),
        598 => format!("REM {a} {b} {c} {d} {e}"),
        599 => format!("REMU {a} {b} {c} {d} {e}"),
        608 => format!("HINT_STOREW {a} {b} {c} {d} {e}"),
        609 => format!("HINT_BUFFER {a} {b} {c} {d} {e}"),
        _ => format!("<opcode {opcode}> {a} {b} {c} {d} {e} {f} {g}"),
    }
}

777
openvm/src/lib.rs Normal file
View File

@@ -0,0 +1,777 @@
use eyre::Result;
use itertools::{multiunzip, Itertools};
use openvm_build::{
build_guest_package, find_unique_executable, get_package, GuestOptions, TargetFilter,
};
use openvm_circuit::arch::{
instructions::exe::VmExe, Streams, SystemConfig, VirtualMachine, VmChipComplex, VmConfig,
VmInventoryError,
};
use openvm_native_recursion::halo2::utils::CacheHalo2ParamsReader;
use openvm_stark_backend::{
air_builders::symbolic::SymbolicConstraints, engine::StarkEngine, rap::AnyRap,
};
use openvm_stark_sdk::{config::fri_params::SecurityParameters, engine::StarkFriEngine};
use powdr::FieldElement;
use powdr_autoprecompiles::SymbolicMachine;
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, Mutex},
};
use utils::get_pil;
use crate::customize_exe::openvm_bus_interaction_to_powdr;
use crate::utils::symbolic_to_algebraic;
use openvm_circuit_primitives_derive::ChipUsageGetter;
use openvm_sdk::{
config::{AggConfig, AppConfig, SdkVmConfig, SdkVmConfigExecutor, SdkVmConfigPeriphery},
DefaultStaticVerifierPvHandler, Sdk, StdIn,
};
use openvm_stark_backend::{config::StarkGenericConfig, Chip};
use openvm_stark_sdk::config::{
baby_bear_poseidon2::{config_from_perm, default_perm, BabyBearPoseidon2Engine},
FriParameters,
};
use openvm_stark_sdk::{
config::baby_bear_poseidon2::BabyBearPoseidon2Config,
openvm_stark_backend::p3_field::{Field, PrimeField32},
p3_baby_bear::BabyBear,
};
use powdr_extension::{PowdrExecutor, PowdrExtension, PowdrPeriphery};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
mod air_builder;
use air_builder::AirKeygenBuilder;
use derive_more::From;
use openvm_circuit::{
circuit_derive::Chip,
derive::{AnyEnum, InstructionExecutor as InstructionExecutorDerive},
};
mod utils;
use tracing::dispatcher::Dispatch;
use tracing::field::Field as TracingField;
use tracing::{Event, Subscriber};
use tracing_subscriber::{
layer::Context,
prelude::*,
registry::{LookupSpan, Registry},
Layer,
};
/// Stark configuration used throughout: BabyBear with Poseidon2.
type SC = BabyBearPoseidon2Config;
/// The VM's base field.
pub type F = BabyBear;
/// We do not use the transpiler, instead we customize an already transpiled program
mod customize_exe;
// A module for our extension
mod powdr_extension;
mod bus_interaction_handler;
mod instruction_formatter;
#[allow(dead_code)]
mod plonk;
/// A custom VmConfig that wraps the SdkVmConfig, adding our custom extension.
#[derive(Serialize, Deserialize, Clone)]
#[serde(bound = "F: Field")]
pub struct SpecializedConfig<F: PrimeField32> {
    // The standard OpenVM SDK configuration (system + standard extensions).
    sdk_config: SdkVmConfig,
    // The powdr extension carrying the generated autoprecompiles.
    powdr: PowdrExtension<F>,
}
/// Executor enum for `SpecializedConfig`: either a standard SDK executor or
/// a powdr autoprecompile executor.
#[allow(clippy::large_enum_variant)]
#[derive(ChipUsageGetter, Chip, InstructionExecutorDerive, From, AnyEnum)]
pub enum SpecializedExecutor<F: PrimeField32> {
    #[any_enum]
    SdkExecutor(SdkVmConfigExecutor<F>),
    #[any_enum]
    PowdrExecutor(PowdrExecutor<F>),
}
/// Periphery enum for `SpecializedConfig`: either standard SDK periphery or
/// the powdr periphery.
// NOTE(review): the name is inconsistent with `SpecializedExecutor`
// (`SpecializedPeriphery` would match); renaming is a public API change.
#[derive(From, ChipUsageGetter, Chip, AnyEnum)]
pub enum MyPeriphery<F: PrimeField32> {
    #[any_enum]
    SdkPeriphery(SdkVmConfigPeriphery<F>),
    #[any_enum]
    PowdrPeriphery(PowdrPeriphery<F>),
}
impl<F: PrimeField32> VmConfig<F> for SpecializedConfig<F> {
    type Executor = SpecializedExecutor<F>;
    type Periphery = MyPeriphery<F>;
    // System configuration is delegated to the wrapped SDK config.
    fn system(&self) -> &SystemConfig {
        VmConfig::<F>::system(&self.sdk_config)
    }
    fn system_mut(&mut self) -> &mut SystemConfig {
        VmConfig::<F>::system_mut(&mut self.sdk_config)
    }
    /// Builds the SDK chip complex and extends it with the powdr extension's
    /// chips.
    fn create_chip_complex(
        &self,
    ) -> Result<VmChipComplex<F, Self::Executor, Self::Periphery>, VmInventoryError> {
        let chip = self.sdk_config.create_chip_complex()?;
        let chip = chip.extend(&self.powdr)?;
        Ok(chip)
    }
}
impl<F: Default + PrimeField32> SpecializedConfig<F> {
fn from_base_and_extension(sdk_config: SdkVmConfig, powdr: PowdrExtension<F>) -> Self {
Self { sdk_config, powdr }
}
}
/// Builds the guest package at `pkg_dir` with the given options and returns
/// the path to the unique matching executable. Errors if the build fails or
/// was skipped (OPENVM_SKIP_BUILD).
pub fn build_elf_path<P: AsRef<Path>>(
    guest_opts: GuestOptions,
    pkg_dir: P,
    target_filter: &Option<TargetFilter>,
) -> Result<PathBuf> {
    let pkg = get_package(pkg_dir.as_ref());
    match build_guest_package(&pkg, &guest_opts, None, target_filter) {
        Ok(target_dir) => find_unique_executable(pkg_dir, target_dir, target_filter),
        Err(Some(code)) => Err(eyre::eyre!("Failed to build guest: code = {}", code)),
        Err(None) => Err(eyre::eyre!(
            "Failed to build guest (OPENVM_SKIP_BUILD is set)"
        )),
    }
}
// compile the original openvm program without powdr extension
/// Builds the guest crate at `<CARGO_MANIFEST_DIR>/<guest>` with the standard
/// SDK extensions (rv32i/rv32m/io/keccak) and transpiles the resulting ELF to
/// a `VmExe`. Returns both the exe and the SDK config used.
pub fn compile_openvm(
    guest: &str,
) -> Result<OriginalCompiledProgram<F>, Box<dyn std::error::Error>> {
    // wrap the sdk config (with the standard extensions) in our custom config (with our custom extension)
    let sdk_vm_config = SdkVmConfig::builder()
        .system(Default::default())
        .rv32i(Default::default())
        .rv32m(Default::default())
        .io(Default::default())
        .keccak(Default::default())
        .build();
    let sdk = Sdk::default();
    // Build the ELF with guest options and a target filter.
    // We need these extra Rust flags to get the labels.
    let guest_opts = GuestOptions::default();
    let guest_opts = guest_opts.with_rustc_flags(vec!["-C", "link-arg=--emit-relocs"]);
    // Point to our local guest
    use std::path::PathBuf;
    let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).to_path_buf();
    path.push(guest);
    let target_path = path.to_str().unwrap();
    let elf = sdk.build(guest_opts, target_path, &Default::default())?;
    // Transpile the ELF into a VmExe. Note that this happens using the sdk transpiler only, our extension does not use a transpiler.
    let exe = sdk.transpile(elf, sdk_vm_config.transpiler())?;
    Ok(OriginalCompiledProgram { exe, sdk_vm_config })
}
/// Compiles the guest with the plain OpenVM toolchain, then applies the
/// powdr customization (autoprecompile generation) via `compile_exe`.
pub fn compile_guest(
    guest: &str,
    autoprecompiles: usize,
    skip: usize,
    pgo_data: Option<HashMap<u32, u32>>,
) -> Result<CompiledProgram<F>, Box<dyn std::error::Error>> {
    let original = compile_openvm(guest)?;
    compile_exe(
        guest,
        original.exe,
        original.sdk_vm_config,
        autoprecompiles,
        skip,
        pgo_data,
    )
}
/// Customizes an already-transpiled `exe`: rebuilds the guest ELF to recover
/// text labels, derives the symbolic AIRs per opcode, runs the
/// autoprecompile customization, and wraps everything in a
/// `SpecializedConfig`. Also dumps the resulting PIL to "debug.pil".
pub fn compile_exe(
    guest: &str,
    exe: VmExe<F>,
    sdk_vm_config: SdkVmConfig,
    autoprecompiles: usize,
    skip: usize,
    pgo_data: Option<HashMap<u32, u32>>,
) -> Result<CompiledProgram<F>, Box<dyn std::error::Error>> {
    // Build the ELF with guest options and a target filter.
    // We need these extra Rust flags to get the labels.
    let guest_opts = GuestOptions::default();
    let guest_opts = guest_opts.with_rustc_flags(vec!["-C", "link-arg=--emit-relocs"]);
    // Point to our local guest
    use std::path::PathBuf;
    let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).to_path_buf();
    path.push(guest);
    let target_path = path.to_str().unwrap();
    let elf_binary = build_elf_path(guest_opts.clone(), target_path, &Default::default())?;
    let elf_powdr = powdr::riscv::elf::load_elf(&elf_binary);
    let airs =
        instructions_to_airs::<_, powdr::number::BabyBearField>(exe.clone(), sdk_vm_config.clone());
    let (exe, extension) = customize_exe::customize(
        exe,
        sdk_vm_config.clone(),
        &elf_powdr.text_labels,
        &airs,
        autoprecompiles,
        skip,
        pgo_data,
    );
    // Generate the custom config based on the generated instructions
    // NOTE(review): the pil path and max width are hardcoded here.
    export_pil(vm_config.clone(), "debug.pil", 1000);
    Ok(CompiledProgram { exe, vm_config })
}
/// An exe plus the specialized (powdr-extended) config needed to run and
/// prove it.
#[derive(Serialize, Deserialize, Clone)]
#[serde(bound = "F: Field")]
pub struct CompiledProgram<F: PrimeField32> {
    pub exe: VmExe<F>,
    pub vm_config: SpecializedConfig<F>,
}
// the original openvm program and config without powdr extension
pub struct OriginalCompiledProgram<F: PrimeField32> {
    pub exe: VmExe<F>,
    pub sdk_vm_config: SdkVmConfig,
}
/// Size metrics of a single AIR, as reported by `powdr_airs_metrics`.
pub struct AirMetrics {
    pub name: String,
    // Number of columns.
    pub width: usize,
    // Number of polynomial constraints.
    pub constraints: usize,
    // Number of bus interactions.
    pub bus_interactions: usize,
}
impl CompiledProgram<F> {
    /// Collects width/constraint/interaction counts for every powdr-generated
    /// AIR in the chip complex (identified by the "PowdrAir" type name).
    pub fn powdr_airs_metrics(&self) -> Vec<AirMetrics> {
        let chip_complex: VmChipComplex<_, _, _> = self.vm_config.create_chip_complex().unwrap();
        chip_complex
            .inventory
            .executors()
            .iter()
            .filter_map(|executor| {
                let air = executor.air();
                let width = air.width();
                let name = air.name();
                // We actually give name "powdr_air_for_opcode_<opcode>" to the AIRs,
                // but OpenVM uses the actual Rust type (PowdrAir) as the name in this method.
                // TODO this is hacky but not sure how to do it better rn.
                if name.starts_with("PowdrAir") {
                    let constraints = get_constraints(air);
                    Some(AirMetrics {
                        name: name.to_string(),
                        width,
                        constraints: constraints.constraints.len(),
                        bus_interactions: constraints.interactions.len(),
                    })
                } else {
                    None
                }
            })
            .collect()
    }
}
/// Runs the compiled program on `inputs` and logs the public values output.
pub fn execute(
    program: CompiledProgram<F>,
    inputs: StdIn,
) -> Result<(), Box<dyn std::error::Error>> {
    let sdk = Sdk::default();
    let output = sdk.execute(program.exe.clone(), program.vm_config.clone(), inputs)?;
    tracing::info!("Public values output: {:?}", output);
    Ok(())
}
/// Profile-guided data collection: executes the original (non-customized)
/// program under a local tracing subscriber that records every executed pc,
/// and returns a map from program index ((pc - pc_base) / step) to its
/// execution count.
pub fn pgo(
    program: OriginalCompiledProgram<F>,
    inputs: StdIn,
) -> Result<HashMap<u32, u32>, Box<dyn std::error::Error>> {
    // in memory collector storage
    let collected = Arc::new(Mutex::new(Vec::new()));
    let collector_layer = PgoCollector {
        pc: collected.clone(),
    };
    // build subscriber
    let subscriber = Registry::default().with(collector_layer);
    // prepare for execute
    let OriginalCompiledProgram { exe, sdk_vm_config } = program;
    let sdk = Sdk::default();
    // dispatch constructs a local subscriber at trace level that is invoked during pgo but doesn't override the global one at info level
    let dispatch = Dispatch::new(subscriber);
    tracing::dispatcher::with_default(&dispatch, || {
        sdk.execute(exe.clone(), sdk_vm_config.clone(), inputs)
            .unwrap();
    });
    // collect the pc's during execution
    let pc = collected.lock().unwrap().clone();
    // create pc_index map to times executed, where pc_index = (pc - pc_base) / step
    let pc_base = exe.program.pc_base;
    let step = exe.program.step;
    // NOTE(review): assumes every recorded pc >= pc_base — the subtraction
    // would wrap otherwise; pc_min is logged below as a sanity check.
    let pc_index_count = pc
        .iter()
        .fold(std::collections::HashMap::new(), |mut acc, pc| {
            let pc_index = (*pc as u32 - pc_base) / step;
            *acc.entry(pc_index).or_insert(0u32) += 1;
            acc
        });
    // the smallest pc is the same as the base_pc if there's no stdin
    let pc_min = pc.iter().min().unwrap();
    tracing::info!("pc_min: {}; pc_base: {}", pc_min, pc_base);
    // print the total and by pc counts at the warn level (default level in powdr-openvm)
    tracing::warn!("Pgo captured {} pc's", pc.len());
    // print pc_index map in descending order of pc_index count
    let mut pc_index_count_sorted: Vec<_> = pc_index_count.iter().collect();
    pc_index_count_sorted.sort_by(|a, b| b.1.cmp(a.1));
    pc_index_count_sorted.iter().for_each(|(pc, count)| {
        tracing::warn!("pc_index {}: {}", pc, count);
    });
    Ok(pc_index_count)
}
/// Proves the compiled program on `inputs`.
///
/// - `mock`: run the mock prover (debug constraints and witness per segment)
///   instead of producing a real proof.
/// - `recursion`: when true, produce an EVM-verifiable (halo2) proof via
///   aggregation; when false, produce and verify an app-level STARK proof.
pub fn prove(
    program: &CompiledProgram<F>,
    mock: bool,
    recursion: bool,
    inputs: StdIn,
) -> Result<(), Box<dyn std::error::Error>> {
    let CompiledProgram { exe, vm_config } = program;
    let sdk = Sdk::default();
    // Set app configuration
    let app_log_blowup = 2;
    let app_fri_params = FriParameters::standard_with_100_bits_conjectured_security(app_log_blowup);
    let app_config = AppConfig::new(app_fri_params, vm_config.clone());
    // Commit the exe
    let app_committed_exe = sdk.commit_app_exe(app_fri_params, exe.clone())?;
    // Generate an AppProvingKey
    let app_pk = Arc::new(sdk.app_keygen(app_config)?);
    if mock {
        tracing::info!("Checking constraints and witness in Mock prover...");
        let engine = BabyBearPoseidon2Engine::new(
            FriParameters::standard_with_100_bits_conjectured_security(app_log_blowup),
        );
        let vm = VirtualMachine::new(engine, vm_config.clone());
        let pk = vm.keygen();
        let streams = Streams::from(inputs);
        let mut result = vm.execute_and_generate(exe.clone(), streams).unwrap();
        let _final_memory = Option::take(&mut result.final_memory);
        let global_airs = vm.config().create_chip_complex().unwrap().airs();
        // Debug each segment against its airs and proving keys.
        for proof_input in &result.per_segment {
            let (airs, pks, air_proof_inputs): (Vec<_>, Vec<_>, Vec<_>) =
                multiunzip(proof_input.per_air.iter().map(|(air_id, air_proof_input)| {
                    (
                        global_airs[*air_id].clone(),
                        pk.per_air[*air_id].clone(),
                        air_proof_input.clone(),
                    )
                }));
            vm.engine.debug(&airs, &pks, &air_proof_inputs);
        }
    } else {
        if !recursion {
            // Generate a proof
            tracing::info!("Generating proof...");
            let proof =
                sdk.generate_app_proof(app_pk.clone(), app_committed_exe.clone(), inputs.clone())?;
            tracing::info!("Proof generation done.");
            tracing::info!(
                "Public values: {:?}",
                proof.user_public_values.public_values
            );
            // Verify
            let app_vk = app_pk.get_app_vk();
            sdk.verify_app_proof(&app_vk, &proof)?;
            tracing::info!("Proof verification done.");
        } else {
            // Generate the aggregation proving key
            const DEFAULT_PARAMS_DIR: &str = concat!(env!("HOME"), "/.openvm/params/");
            let halo2_params_reader = CacheHalo2ParamsReader::new(DEFAULT_PARAMS_DIR);
            let agg_config = AggConfig::default();
            tracing::info!("Generating aggregation proving key...");
            let agg_pk = sdk.agg_keygen(
                agg_config,
                &halo2_params_reader,
                &DefaultStaticVerifierPvHandler,
            )?;
            tracing::info!("Generating SNARK verifier...");
            // Generate the SNARK verifier smart contract
            let verifier = sdk.generate_halo2_verifier_solidity(&halo2_params_reader, &agg_pk)?;
            tracing::info!("Generating EVM proof...");
            // Generate an EVM proof
            let proof = sdk.generate_evm_proof(
                &halo2_params_reader,
                app_pk,
                app_committed_exe,
                agg_pk,
                inputs,
            )?;
            tracing::info!("Verifying EVM proof...");
            // Verify the EVM proof
            sdk.verify_evm_halo2_proof(&verifier, proof)?;
        }
        tracing::info!("All done.");
    }
    Ok(())
}
/// Runs the guest under the PGO tracer and returns how many times each
/// program index was executed, where index = (pc - base_pc) / step.
/// The counts are used to decide which basic blocks are worth turning
/// into autoprecompiles.
pub fn get_pc_idx_count(guest: &str, inputs: StdIn) -> HashMap<u32, u32> {
    let compiled_program = compile_openvm(guest).unwrap();
    pgo(compiled_program, inputs).unwrap()
}
/// For every distinct opcode appearing in `exe`, builds the powdr symbolic
/// machine (constraints + bus interactions) of the AIR that executes it.
///
/// Returns a map from the opcode's global usize id to its `SymbolicMachine`.
/// Opcodes without an executor in the chip complex are silently skipped
/// (`get_mut_executor` returning `None` drops them via `filter_map`).
pub fn instructions_to_airs<VC: VmConfig<F>, P: FieldElement>(
    exe: VmExe<F>,
    vm_config: VC,
) -> BTreeMap<usize, SymbolicMachine<P>>
where
    VC::Executor: Chip<SC>,
    VC::Periphery: Chip<SC>,
{
    let mut chip_complex: VmChipComplex<_, _, _> = vm_config.create_chip_complex().unwrap();
    exe.program
        .instructions_and_debug_infos
        .iter()
        // panics if an instruction slot is empty; assumes a fully-populated
        // program — TODO confirm against how `VmExe` is produced
        .map(|instr| instr.as_ref().unwrap().0.opcode)
        .unique()
        .filter_map(|op| {
            chip_complex
                .inventory
                .get_mut_executor(&op)
                .map(|executor| {
                    let air = executor.air();
                    let columns = get_columns(air.clone());
                    let constraints = get_constraints(air);
                    // convert openvm symbolic constraints to powdr algebraic expressions
                    let powdr_exprs = constraints
                        .constraints
                        .iter()
                        .map(|expr| symbolic_to_algebraic::<F, P>(expr, &columns).into())
                        .collect::<Vec<_>>();
                    let powdr_bus_interactions = constraints
                        .interactions
                        .iter()
                        .map(|expr| openvm_bus_interaction_to_powdr(expr, &columns))
                        .collect();
                    let symb_machine = SymbolicMachine {
                        constraints: powdr_exprs,
                        bus_interactions: powdr_bus_interactions,
                    };
                    (op.as_usize(), symb_machine)
                })
        })
        .collect()
}
/// Writes the PIL rendering of every executor AIR in `vm_config` to `path`.
///
/// AIRs wider than `max_width` columns are skipped with a warning to keep
/// the output manageable. The per-AIR PIL snippets are joined with blank
/// lines. Panics if the file cannot be written.
pub fn export_pil<VC: VmConfig<F>>(vm_config: VC, path: &str, max_width: usize)
where
    VC::Executor: Chip<SC>,
    VC::Periphery: Chip<SC>,
{
    let chip_complex: VmChipComplex<_, _, _> = vm_config.create_chip_complex().unwrap();
    let pil = chip_complex
        .inventory
        .executors()
        .iter()
        .filter_map(|executor| {
            let air = executor.air();
            let width = air.width();
            let name = air.name();
            // skip overly wide AIRs instead of failing
            if width > max_width {
                log::warn!("Skipping {name} (width: {width})");
                return None;
            }
            let columns = get_columns(air.clone());
            let constraints = get_constraints(air);
            // no fixed columns are passed (empty vec)
            Some(get_pil(&name, &constraints, &columns, vec![]))
        })
        .join("\n\n\n");
    println!("Writing PIL...");
    std::fs::write(path, pil).unwrap();
    println!("Exported PIL to {path}");
}
/// Returns the column names of `air`, one per trace column.
///
/// If the AIR reports names, they are used (after checking the count matches
/// the AIR width); otherwise placeholder names `unknown_0..unknown_{width-1}`
/// are generated.
fn get_columns(air: Arc<dyn AnyRap<SC>>) -> Vec<String> {
    let width = air.width();
    match air.columns() {
        Some(columns) => {
            // the reported names must cover every column exactly
            assert_eq!(columns.len(), width);
            columns
        }
        None => (0..width).map(|i| format!("unknown_{i}")).collect(),
    }
}
/// Extracts the symbolic constraints of `air` by running openvm's keygen
/// machinery with a standard-fast BabyBear/Poseidon2 configuration.
/// Only the symbolic builder output is used; no proving key is produced.
fn get_constraints(air: Arc<dyn AnyRap<SC>>) -> SymbolicConstraints<F> {
    let perm = default_perm();
    let security_params = SecurityParameters::standard_fast();
    let config = config_from_perm(&perm, security_params);
    let air_keygen_builder = AirKeygenBuilder::new(config.pcs(), air);
    // `None` — no preprocessed trace height hint
    let builder = air_keygen_builder.get_symbolic_builder(None);
    builder.constraints()
}
// Holds the basic-type fields of execution objects captured in the trace by
// the subscriber. Only the program counter is collected today.
#[derive(Default)]
struct PgoData {
    // program counter of the traced instruction, if the event carried one
    pc: Option<usize>,
}
impl tracing::field::Visit for PgoData {
    /// Capture u64 trace fields; only the `pc` field is of interest.
    fn record_u64(&mut self, field: &tracing::field::Field, value: u64) {
        match field.name() {
            "pc" => self.pc = Some(value as usize),
            _ => {}
        }
    }

    /// Required by the trait. In practice only u64 fields arrive, because the
    /// fields are determined by the instruction trace print-out of our openvm
    /// fork during execution — so non-u64 fields are ignored.
    fn record_debug(&mut self, _: &TracingField, _: &dyn std::fmt::Debug) {}
}
// A tracing Layer that collects the data we want for PGO from trace fields.
// Cloned handles share the same underlying vector via Arc<Mutex<_>>.
#[derive(Clone)]
struct PgoCollector {
    // every pc observed in trace events, in arrival order
    pc: Arc<Mutex<Vec<usize>>>,
}
impl<S> Layer<S> for PgoCollector
where
    S: Subscriber + for<'a> LookupSpan<'a>,
{
    /// Record the pc of every matching trace event into the shared vector.
    fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) {
        // build a visitor to parse and hold the trace fields we are interested in
        let mut visitor = PgoData::default();
        event.record(&mut visitor);
        // because our subscriber is at the trace level, for trace print outs that
        // don't match PgoData the visitor can't parse them, and those cases are
        // filtered out automatically (visitor.pc stays None)
        if let Some(pc) = visitor.pc {
            self.pc.lock().unwrap().push(pc);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use test_log::test;

    /// Compile the guest with `apc` autoprecompiles (skipping the first `skip`
    /// candidate blocks) and run the prover in the requested mode.
    fn compile_and_prove(
        guest: &str,
        apc: usize,
        skip: usize,
        mock: bool,
        recursion: bool,
        stdin: StdIn,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let program = compile_guest(guest, apc, skip, None).unwrap();
        prove(&program, mock, recursion, stdin)
    }

    // End-to-end proof (no mock, no recursion).
    fn prove_simple(guest: &str, apc: usize, skip: usize, stdin: StdIn) {
        let result = compile_and_prove(guest, apc, skip, false, false, stdin);
        assert!(result.is_ok());
    }

    // Mock prover only: checks constraints and witness, no real proof.
    fn prove_mock(guest: &str, apc: usize, skip: usize, stdin: StdIn) {
        let result = compile_and_prove(guest, apc, skip, true, false, stdin);
        assert!(result.is_ok());
    }

    // Recursion/EVM pipeline; kept (underscore-prefixed) for the ignored tests below.
    fn _prove_recursion(guest: &str, apc: usize, skip: usize, stdin: StdIn) {
        let result = compile_and_prove(guest, apc, skip, false, true, stdin);
        assert!(result.is_ok());
    }

    // Test fixtures: guest names and tuning constants.
    const GUEST: &str = "guest";
    const GUEST_ITER: u32 = 1 << 10;
    const GUEST_APC: usize = 1;
    const GUEST_SKIP: usize = 39;
    const GUEST_SKIP_PGO: usize = 0;
    const GUEST_KECCAK: &str = "guest-keccak";
    const GUEST_KECCAK_ITER: u32 = 1000;
    const GUEST_KECCAK_ITER_SMALL: u32 = 10;
    const GUEST_KECCAK_APC: usize = 1;
    const GUEST_KECCAK_SKIP: usize = 0;

    #[test]
    fn guest_prove_simple() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_ITER);
        prove_simple(GUEST, GUEST_APC, GUEST_SKIP, stdin);
    }

    #[test]
    fn guest_prove_mock() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_ITER);
        prove_mock(GUEST, GUEST_APC, GUEST_SKIP, stdin);
    }

    // #[test]
    // #[ignore = "Too much RAM"]
    // // TODO: This test currently panics because the kzg params are not set up correctly. Fix this.
    // #[should_panic = "No such file or directory"]
    // fn guest_prove_recursion() {
    //     let mut stdin = StdIn::default();
    //     stdin.write(&GUEST_ITER);
    //     prove_recursion(GUEST, GUEST_APC, GUEST_SKIP, stdin);
    // }

    #[test]
    fn keccak_small_prove_simple() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_KECCAK_ITER_SMALL);
        prove_simple(GUEST_KECCAK, GUEST_KECCAK_APC, GUEST_KECCAK_SKIP, stdin);
    }

    #[test]
    #[ignore = "Too long"]
    fn keccak_prove_simple() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_KECCAK_ITER);
        prove_simple(GUEST_KECCAK, GUEST_KECCAK_APC, GUEST_KECCAK_SKIP, stdin);
    }

    #[test]
    fn keccak_small_prove_mock() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_KECCAK_ITER_SMALL);
        prove_mock(GUEST_KECCAK, GUEST_KECCAK_APC, GUEST_KECCAK_SKIP, stdin);
    }

    #[test]
    #[ignore = "Too long"]
    fn keccak_prove_mock() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_KECCAK_ITER);
        prove_mock(GUEST_KECCAK, GUEST_KECCAK_APC, GUEST_KECCAK_SKIP, stdin);
    }

    // #[test]
    // #[ignore = "Too much RAM"]
    // // TODO: This test currently panics because the kzg params are not set up correctly. Fix this.
    // #[should_panic = "No such file or directory"]
    // fn keccak_prove_recursion() {
    //     let mut stdin = StdIn::default();
    //     stdin.write(&GUEST_KECCAK_ITER);
    //     prove_recursion(GUEST_KECCAK, GUEST_KECCAK_APC, GUEST_KECCAK_SKIP, stdin);
    // }

    // The following are compilation tests only: they pin the metrics of the
    // generated autoprecompile machine (width / constraints / bus interactions).
    fn test_keccak_machine(pc_idx_count: Option<HashMap<u32, u32>>) {
        let machines = compile_guest(
            GUEST_KECCAK,
            GUEST_KECCAK_APC,
            GUEST_KECCAK_SKIP,
            pc_idx_count,
        )
        .unwrap()
        .powdr_airs_metrics();
        assert_eq!(machines.len(), 1);
        let m = &machines[0];
        assert_eq!(m.width, 7786);
        assert_eq!(m.constraints, 506);
        assert_eq!(m.bus_interactions, 6485);
    }

    #[test]
    fn guest_machine() {
        let machines = compile_guest(GUEST, GUEST_APC, GUEST_SKIP, None)
            .unwrap()
            .powdr_airs_metrics();
        assert_eq!(machines.len(), 1);
        let m = &machines[0];
        // TODO we need to find a new block because this one is not executed anymore.
        assert_eq!(m.width, 157);
        assert_eq!(m.constraints, 36);
        assert_eq!(m.bus_interactions, 120);
    }

    #[test]
    fn guest_machine_pgo() {
        // Input via StdIn
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_ITER);
        // Guest machine should have more optimized results with pgo
        // because we didn't accelerate the "costliest block" in the non-pgo version.
        let pc_idx_count = get_pc_idx_count(GUEST, stdin);
        // We don't skip any sorted basic block here to accelerate the "costliest" block.
        let machines = compile_guest(GUEST, GUEST_APC, GUEST_SKIP_PGO, Some(pc_idx_count))
            .unwrap()
            .powdr_airs_metrics();
        assert_eq!(machines.len(), 1);
        let m = &machines[0];
        assert_eq!(m.width, 68);
        assert_eq!(m.constraints, 21);
        assert_eq!(m.bus_interactions, 51);
    }

    #[test]
    fn keccak_machine() {
        test_keccak_machine(None);
    }

    #[test]
    fn keccak_machine_pgo() {
        let mut stdin = StdIn::default();
        stdin.write(&GUEST_KECCAK_ITER);
        // Keccak machine should have the same results with pgo
        // because we already accelerate the "costliest" block with the non-pgo version.
        let pc_idx_count = get_pc_idx_count(GUEST_KECCAK, stdin);
        test_keccak_machine(Some(pc_idx_count));
    }
}

31
openvm/src/plonk/mod.rs Normal file
View File

@@ -0,0 +1,31 @@
/// A variable in a PlonK gate.
enum Variable<V> {
    /// A variable from the input constraint system.
    /// At run-time, we can get the concrete values from the APC witness generation.
    Witness(V),
    /// A temporary variable (represented by an ID). Assuming there is at most one
    /// temporary variable in a gate, we can solve for its value at run-time.
    Tmp(usize),
}
/// A PlonK gate. For each gate, the following equation must hold:
/// q_l * a + q_r * b + q_o * c + q_mul * a * b + q_const = 0
/// where q_l, q_r, q_o, q_mul, and q_const are fixed coefficients
/// and a, b, c are variables.
/// If the same variable appears in multiple gates, a copy constraint
/// must be enforced.
struct Gate<T, V> {
    // fixed selector coefficients
    q_l: T,
    q_r: T,
    q_o: T,
    q_mul: T,
    q_const: T,
    // the three wire variables of the gate
    a: Variable<V>,
    b: Variable<V>,
    c: Variable<V>,
}
/// The PlonK circuit, which is just a collection of gates.
struct PlonkCircuit<T, V> {
    gates: Vec<Gate<T, V>>,
}

View File

@@ -0,0 +1,841 @@
// Mostly taken from [this openvm extension](https://github.com/openvm-org/openvm/blob/1b76fd5a900a7d69850ee9173969f70ef79c4c76/extensions/rv32im/circuit/src/auipc/core.rs#L1)
use std::{
collections::{BTreeMap, HashMap},
sync::{Arc, Mutex},
};
use crate::utils::algebraic_to_symbolic;
use super::{
opcode::PowdrOpcode,
vm::{OriginalInstruction, SdkVmInventory},
PowdrPrecompile,
};
use itertools::Itertools;
use openvm_circuit::{arch::VmConfig, system::memory::MemoryController};
use openvm_circuit::{
arch::{
ExecutionState, InstructionExecutor, Result as ExecutionResult, VmChipComplex,
VmInventoryError,
},
system::memory::OfflineMemory,
utils::next_power_of_two_or_zero,
};
use openvm_circuit_primitives::{
bitwise_op_lookup::SharedBitwiseOperationLookupChip, range_tuple::SharedRangeTupleCheckerChip,
var_range::SharedVariableRangeCheckerChip,
};
use openvm_instructions::{instruction::Instruction, LocalOpcode};
use openvm_native_circuit::CastFExtension;
use openvm_sdk::config::{SdkVmConfig, SdkVmConfigExecutor, SdkVmConfigPeriphery};
use openvm_stark_backend::{
air_builders::symbolic::{
symbolic_expression::{SymbolicEvaluator, SymbolicExpression},
symbolic_variable::{Entry, SymbolicVariable},
},
interaction::BusIndex,
p3_air::{Air, BaseAir},
p3_field::FieldAlgebra,
p3_matrix::dense::RowMajorMatrix,
p3_maybe_rayon::prelude::{
IndexedParallelIterator, IntoParallelIterator, ParallelIterator, ParallelSliceMut,
},
rap::ColumnsAir,
};
use openvm_stark_backend::{
config::{StarkGenericConfig, Val},
interaction::InteractionBuilder,
p3_field::{Field, PrimeField32},
p3_matrix::Matrix,
prover::types::AirProofInput,
rap::{AnyRap, BaseAirWithPublicValues, PartitionedBaseAir},
Chip, ChipUsageGetter,
};
use powdr_autoprecompiles::powdr::{Column, UniqueColumns};
use serde::{Deserialize, Serialize};
/// A chip implementing one autoprecompile: it executes the original basic
/// block's instructions and produces a single row of the combined APC trace.
pub struct PowdrChip<F: PrimeField32> {
    // human-readable name, also used as the opcode name
    pub name: String,
    // the (globally unique) opcode this chip handles
    pub opcode: PowdrOpcode,
    /// An "executor" for this chip, based on the original instructions in the basic block
    pub executor: PowdrExecutor<F>,
    // the combined symbolic AIR of the precompile
    pub air: Arc<PowdrAir<F>>,
    // chips shared with the rest of the VM (range checkers, bitwise lookup)
    pub periphery: SharedChips,
}
// Extracted from openvm, extended to create an inventory with the correct memory.
// The body mirrors `SdkVmConfig::create_chip_complex`; the only changes are the
// two injections marked CHANGE below. The extension order must match openvm's,
// since it determines executor/periphery ordering in the inventory.
fn create_chip_complex_with_memory<F: PrimeField32>(
    memory: Arc<Mutex<OfflineMemory<F>>>,
    range_checker: SharedVariableRangeCheckerChip,
    base_config: SdkVmConfig,
) -> std::result::Result<
    VmChipComplex<F, SdkVmConfigExecutor<F>, SdkVmConfigPeriphery<F>>,
    VmInventoryError,
> {
    use openvm_keccak256_circuit::Keccak256;
    use openvm_native_circuit::Native;
    use openvm_rv32im_circuit::{Rv32I, Rv32Io};
    use openvm_sha256_circuit::Sha256;
    let this = base_config;
    let mut complex = this.system.config.create_chip_complex()?.transmute();
    // CHANGE: inject the correct memory here to be passed to the chips, to be accessible in their get_proof_input
    complex.base.memory_controller.offline_memory = memory.clone();
    complex.base.range_checker_chip = range_checker;
    // END CHANGE
    if this.rv32i.is_some() {
        complex = complex.extend(&Rv32I)?;
    }
    if this.io.is_some() {
        complex = complex.extend(&Rv32Io)?;
    }
    if this.keccak.is_some() {
        complex = complex.extend(&Keccak256)?;
    }
    if this.sha256.is_some() {
        complex = complex.extend(&Sha256)?;
    }
    if this.native.is_some() {
        complex = complex.extend(&Native)?;
    }
    if this.castf.is_some() {
        complex = complex.extend(&CastFExtension)?;
    }
    // rv32m and bigint share range-tuple checker sizes: each takes the
    // element-wise max so a single checker can serve both extensions
    if let Some(rv32m) = this.rv32m {
        let mut rv32m = rv32m;
        if let Some(ref bigint) = this.bigint {
            rv32m.range_tuple_checker_sizes[0] =
                rv32m.range_tuple_checker_sizes[0].max(bigint.range_tuple_checker_sizes[0]);
            rv32m.range_tuple_checker_sizes[1] =
                rv32m.range_tuple_checker_sizes[1].max(bigint.range_tuple_checker_sizes[1]);
        }
        complex = complex.extend(&rv32m)?;
    }
    if let Some(bigint) = this.bigint {
        let mut bigint = bigint;
        if let Some(ref rv32m) = this.rv32m {
            bigint.range_tuple_checker_sizes[0] =
                rv32m.range_tuple_checker_sizes[0].max(bigint.range_tuple_checker_sizes[0]);
            bigint.range_tuple_checker_sizes[1] =
                rv32m.range_tuple_checker_sizes[1].max(bigint.range_tuple_checker_sizes[1]);
        }
        complex = complex.extend(&bigint)?;
    }
    if let Some(ref modular) = this.modular {
        complex = complex.extend(modular)?;
    }
    if let Some(ref fp2) = this.fp2 {
        complex = complex.extend(fp2)?;
    }
    if let Some(ref pairing) = this.pairing {
        complex = complex.extend(pairing)?;
    }
    if let Some(ref ecc) = this.ecc {
        complex = complex.extend(ecc)?;
    }
    Ok(complex)
}
/// A struct which holds the state of the execution based on the original
/// instructions in this block and a dummy inventory.
pub struct PowdrExecutor<F: PrimeField32> {
    // the original instructions of the accelerated basic block, in order
    instructions: Vec<OriginalInstruction<F>>,
    // symbolic machine of the original AIR, keyed by global opcode id
    air_by_opcode_id: BTreeMap<usize, SymbolicMachine<F>>,
    // poly id of the APC's is_valid column (used to locate it in the trace)
    is_valid_poly_id: u64,
    // dummy inventory whose chips execute the original instructions
    inventory: SdkVmInventory<F>,
    // number of times this precompile has been executed so far
    current_trace_height: usize,
}
impl<F: PrimeField32> PowdrExecutor<F> {
    /// Builds an executor over the basic block's original `instructions`,
    /// backed by a dummy chip inventory that shares the VM's offline `memory`
    /// and `range_checker` so chip side effects land in the right place.
    fn new(
        instructions: Vec<OriginalInstruction<F>>,
        air_by_opcode_id: BTreeMap<usize, SymbolicMachine<F>>,
        is_valid_column: Column,
        memory: Arc<Mutex<OfflineMemory<F>>>,
        range_checker: &SharedVariableRangeCheckerChip,
        base_config: SdkVmConfig,
    ) -> Self {
        Self {
            instructions,
            air_by_opcode_id,
            is_valid_poly_id: is_valid_column.id.id,
            inventory: create_chip_complex_with_memory(
                memory,
                range_checker.clone(),
                // fix: `base_config` is owned and not used afterwards, so the
                // previous `.clone()` here was redundant (clippy: redundant_clone)
                base_config,
            )
            .unwrap()
            .inventory,
            current_trace_height: 0,
        }
    }

    /// Executes the original instructions one by one, threading the execution
    /// state through, and bumps the trace height by one row.
    ///
    /// Returns the final state, or the first error encountered. Note the
    /// height is incremented even when an instruction fails.
    fn execute(
        &mut self,
        memory: &mut MemoryController<F>,
        from_state: ExecutionState<u32>,
    ) -> ExecutionResult<ExecutionState<u32>> {
        // execute the original instructions one by one
        let res = self
            .instructions
            .iter()
            .try_fold(from_state, |execution_state, instruction| {
                let executor = self
                    .inventory
                    .get_mut_executor(&instruction.opcode())
                    .unwrap();
                executor.execute(memory, instruction.as_ref(), execution_state)
            });
        self.current_trace_height += 1;
        res
    }
}
/// The shared chips which can be used by the PowdrChip.
pub struct SharedChips {
    // 8-bit bitwise operation lookup (range / xor requests)
    bitwise_lookup_8: SharedBitwiseOperationLookupChip<8>,
    // variable range checker
    range_checker: SharedVariableRangeCheckerChip,
    // optional 2-tuple range checker (present only for some VM configs)
    tuple_range_checker: Option<SharedRangeTupleCheckerChip<2>>,
}
impl SharedChips {
    /// Bundles the periphery chips shared between the VM and the PowdrChips.
    pub fn new(
        bitwise_lookup_8: SharedBitwiseOperationLookupChip<8>,
        range_checker: SharedVariableRangeCheckerChip,
        tuple_range_checker: Option<SharedRangeTupleCheckerChip<2>>,
    ) -> Self {
        Self {
            bitwise_lookup_8,
            range_checker,
            tuple_range_checker,
        }
    }
}
impl SharedChips {
    /// Sends concrete values to the shared chips using a given bus id.
    /// Panics if the bus id doesn't match any of the chips' bus ids.
    ///
    /// Note the match arms are ordered: the specific chip buses are tested
    /// first, and only then bus ids 0..=2 (execution bridge, memory, pc
    /// lookup) fall through to a no-op.
    fn apply(&self, bus_id: u16, mult: u32, args: &[u32]) {
        match bus_id {
            id if id == self.bitwise_lookup_8.bus().inner.index => {
                // bitwise operation lookup
                // interpret the arguments, see `Air<AB> for BitwiseOperationLookupAir<NUM_BITS>`
                let [x, y, x_xor_y, selector] = args.try_into().unwrap();
                for _ in 0..mult {
                    match selector {
                        0 => {
                            // range-check request; xor result unused
                            self.bitwise_lookup_8.request_range(x, y);
                        }
                        1 => {
                            let res = self.bitwise_lookup_8.request_xor(x, y);
                            debug_assert_eq!(res, x_xor_y);
                        }
                        _ => {
                            unreachable!("Invalid selector");
                        }
                    }
                }
            }
            id if id == self.range_checker.bus().index() => {
                // interpret the arguments, see `Air<AB> for VariableRangeCheckerAir`
                let [value, max_bits] = args.try_into().unwrap();
                for _ in 0..mult {
                    self.range_checker.add_count(value, max_bits as usize);
                }
            }
            id if Some(id)
                == self
                    .tuple_range_checker
                    .as_ref()
                    .map(|c| c.bus().inner.index) =>
            {
                // tuple range checker
                // We pass a slice. It is checked inside `add_count`.
                for _ in 0..mult {
                    self.tuple_range_checker.as_ref().unwrap().add_count(args);
                }
            }
            0..=2 => {
                // execution bridge, memory, pc lookup
                // do nothing
            }
            _ => {
                unreachable!("Bus interaction {} not implemented", bus_id);
            }
        }
    }
}
impl<F: PrimeField32> PowdrChip<F> {
    /// Builds the chip for one precompile: constructs its combined AIR from
    /// the precompile's symbolic machine and an executor over the original
    /// instructions, sharing `memory` and the periphery's range checker.
    pub(crate) fn new(
        precompile: PowdrPrecompile<F>,
        memory: Arc<Mutex<OfflineMemory<F>>>,
        base_config: SdkVmConfig,
        periphery: SharedChips,
    ) -> Self {
        let air: PowdrAir<F> = PowdrAir::new(precompile.machine);
        // convert the powdr symbolic machines to the local representation
        let original_airs = precompile
            .original_airs
            .into_iter()
            .map(|(k, v)| (k, v.into()))
            .collect();
        let executor = PowdrExecutor::new(
            precompile.original_instructions,
            original_airs,
            precompile.is_valid_column,
            memory,
            &periphery.range_checker,
            base_config,
        );
        let name = precompile.name;
        let opcode = precompile.opcode;
        Self {
            name,
            opcode,
            air: Arc::new(air),
            executor,
            periphery,
        }
    }

    /// Returns the index of the is_valid of this air.
    fn get_is_valid_index(&self) -> usize {
        self.air.column_index_by_poly_id[&self.executor.is_valid_poly_id]
    }
}
impl<F: PrimeField32> InstructionExecutor<F> for PowdrChip<F> {
    /// Executes the precompile instruction by replaying the original basic
    /// block's instructions; asserts the instruction's opcode matches this chip.
    fn execute(
        &mut self,
        memory: &mut MemoryController<F>,
        instruction: &Instruction<F>,
        from_state: ExecutionState<u32>,
    ) -> ExecutionResult<ExecutionState<u32>> {
        let &Instruction { opcode, .. } = instruction;
        assert_eq!(opcode.as_usize(), self.opcode.global_opcode().as_usize());
        let execution_state = self.executor.execute(memory, from_state)?;
        Ok(execution_state)
    }

    // Every local opcode of this chip maps to the same (single) name.
    fn get_opcode_name(&self, _: usize) -> String {
        self.name.clone()
    }
}
impl<F: PrimeField32> ChipUsageGetter for PowdrChip<F> {
    /// AIR name, unique per opcode since every precompile has its own AIR.
    fn air_name(&self) -> String {
        // fix: `format!` already produces a `String`; the trailing
        // `.to_string()` was a redundant allocation (clippy would flag it)
        format!("powdr_air_for_opcode_{}", self.opcode.global_opcode())
    }

    /// Number of rows used so far (one per execution of the precompile).
    fn current_trace_height(&self) -> usize {
        self.executor.current_trace_height
    }

    /// Trace width = number of columns of the combined AIR.
    fn trace_width(&self) -> usize {
        self.air.width()
    }
}
impl<SC: StarkGenericConfig> Chip<SC> for PowdrChip<Val<SC>>
where
    Val<SC>: PrimeField32,
{
    fn air(&self) -> Arc<dyn AnyRap<SC>> {
        self.air.clone()
    }

    /// Builds the APC trace: for each executed record, the rows produced by the
    /// dummy chips for the original instructions are stitched into a single
    /// APC row, range-checker side effects of the dummy rows are removed, and
    /// the APC row's own bus interactions are replayed on the periphery.
    fn generate_air_proof_input(self) -> AirProofInput<SC> {
        tracing::trace!("Generating air proof input for PowdrChip {}", self.name);
        let is_valid_index = self.get_is_valid_index();
        let num_records = self.current_trace_height();
        // trace heights must be powers of two (or zero)
        let height = next_power_of_two_or_zero(num_records);
        let width = self.air.width();
        let mut values = Val::<SC>::zero_vec(height * width);
        // for each original opcode, the name of the dummy air it corresponds to
        let air_name_by_opcode = self
            .executor
            .instructions
            .iter()
            .map(|instruction| instruction.opcode())
            .unique()
            .map(|opcode| {
                (
                    opcode,
                    self.executor
                        .inventory
                        .get_executor(opcode)
                        .unwrap()
                        .air_name(),
                )
            })
            .collect::<HashMap<_, _>>();
        // materialize every dummy chip's common main trace, keyed by air name
        let dummy_trace_by_air_name: HashMap<_, _> = self
            .executor
            .inventory
            .executors
            .into_iter()
            .map(|executor| {
                (
                    executor.air_name(),
                    Chip::<SC>::generate_air_proof_input(executor)
                        .raw
                        .common_main
                        .unwrap(),
                )
            })
            .collect();
        // for each instruction index, which dummy table it reads and the
        // how-many-th occurrence of that table within one record it is
        let instruction_index_to_table_offset = self
            .executor
            .instructions
            .iter()
            .enumerate()
            .scan(
                HashMap::default(),
                |counts: &mut HashMap<&str, usize>, (index, instruction)| {
                    let air_name = air_name_by_opcode.get(&instruction.opcode()).unwrap();
                    let count = counts.entry(air_name).or_default();
                    let current_count = *count;
                    *count += 1;
                    Some((index, (air_name, current_count)))
                },
            )
            .collect::<HashMap<_, _>>();
        // how many instructions of one record use each dummy table
        let occurrences_by_table_name: HashMap<&String, usize> = self
            .executor
            .instructions
            .iter()
            .map(|instruction| air_name_by_opcode.get(&instruction.opcode()).unwrap())
            .counts();
        // A vector of HashMap<dummy_trace_index, apc_trace_index> by instruction, empty HashMap if none maps to apc
        let dummy_trace_index_to_apc_index_by_instruction: Vec<HashMap<usize, usize>> = self
            .executor
            .instructions
            .iter()
            .map(|instruction| {
                // look up how many dummy cells this AIR produces:
                let air_width = dummy_trace_by_air_name
                    .get(air_name_by_opcode.get(&instruction.opcode()).unwrap())
                    .unwrap()
                    .width();
                // build a map only of the (dummy_index -> apc_index) pairs
                let mut map = HashMap::with_capacity(air_width);
                for dummy_trace_index in 0..air_width {
                    if let Ok(apc_index) = global_index(
                        dummy_trace_index,
                        instruction,
                        &self.air.column_index_by_poly_id,
                    ) {
                        if map.insert(dummy_trace_index, apc_index).is_some() {
                            panic!(
                                "duplicate dummy_trace_index {} for instruction opcode {:?}",
                                dummy_trace_index,
                                instruction.opcode()
                            );
                        }
                    }
                }
                map
            })
            .collect();
        assert_eq!(
            self.executor.instructions.len(),
            dummy_trace_index_to_apc_index_by_instruction.len()
        );
        // for each record, the slice of each dummy table belonging to it
        let dummy_values = (0..num_records).into_par_iter().map(|record_index| {
            (0..self.executor.instructions.len())
                .map(|index| {
                    // get the air name and offset for this instruction (by index)
                    let (air_name, offset) = instruction_index_to_table_offset.get(&index).unwrap();
                    // get the table
                    let table = dummy_trace_by_air_name.get(*air_name).unwrap();
                    // get how many times this table is used per record
                    let occurrences_per_record = occurrences_by_table_name.get(air_name).unwrap();
                    // get the width of each occurrence
                    let width = table.width();
                    // start after the previous record ended, and offset by the correct offset
                    let start = (record_index * occurrences_per_record + offset) * width;
                    // end at the start + width
                    let end = start + width;
                    &table.values[start..end]
                })
                .collect_vec()
        });
        // go through the final table and fill in the values
        values
            // a record is `width` values
            .par_chunks_mut(width)
            .zip(dummy_values)
            .for_each(|(row_slice, dummy_values)| {
                // map the dummy rows to the autoprecompile row
                for (instruction_id, (instruction, dummy_row)) in self
                    .executor
                    .instructions
                    .iter()
                    .zip_eq(dummy_values)
                    .enumerate()
                {
                    let evaluator = RowEvaluator::new(dummy_row, None);
                    // first remove the side effects of this row on the main periphery
                    // (bus id 3 is the variable range checker — TODO confirm)
                    for range_checker_send in self
                        .executor
                        .air_by_opcode_id
                        .get(&instruction.as_ref().opcode.as_usize())
                        .unwrap()
                        .bus_interactions
                        .iter()
                        .filter(|i| i.id == 3)
                    {
                        let mult = evaluator
                            .eval_expr(&range_checker_send.mult)
                            .as_canonical_u32();
                        let args = range_checker_send
                            .args
                            .iter()
                            .map(|arg| evaluator.eval_expr(arg).as_canonical_u32())
                            .collect_vec();
                        let [value, max_bits] = args.try_into().unwrap();
                        for _ in 0..mult {
                            self.periphery
                                .range_checker
                                .remove_count(value, max_bits as usize);
                        }
                    }
                    write_dummy_to_autoprecompile_row(
                        row_slice,
                        dummy_row,
                        &dummy_trace_index_to_apc_index_by_instruction[instruction_id],
                    );
                }
                // Set the is_valid column to 1
                row_slice[is_valid_index] = <Val<SC>>::ONE;
                let evaluator =
                    RowEvaluator::new(row_slice, Some(&self.air.column_index_by_poly_id));
                // replay the side effects of this row on the main periphery
                for bus_interaction in self.air.machine.bus_interactions.iter() {
                    let mult = evaluator
                        .eval_expr(&bus_interaction.mult)
                        .as_canonical_u32();
                    let args = bus_interaction
                        .args
                        .iter()
                        .map(|arg| evaluator.eval_expr(arg).as_canonical_u32())
                        .collect_vec();
                    self.periphery.apply(bus_interaction.id, mult, &args);
                }
            });
        let trace = RowMajorMatrix::new(values, width);
        AirProofInput::simple(trace, vec![])
    }
}
/// Copies the mapped cells of one dummy-chip row into the autoprecompile row.
/// Only indices present in `dummy_trace_index_to_apc_index` are transferred;
/// all other APC cells are left untouched.
fn write_dummy_to_autoprecompile_row<F: PrimeField32>(
    row_slice: &mut [F],
    dummy_row: &[F],
    dummy_trace_index_to_apc_index: &HashMap<usize, usize>,
) {
    dummy_trace_index_to_apc_index
        .iter()
        .for_each(|(&dummy_index, &apc_index)| row_slice[apc_index] = dummy_row[dummy_index]);
}
// Why a dummy-trace column could not be mapped into the autoprecompile trace.
enum IndexError {
    // the local index is out of range of the instruction's substitution list
    NotInDummy,
    // the substituted poly id has no column in the autoprecompile AIR
    NotInAutoprecompile,
}
/// Maps the index of a column in the original AIR of a given instruction to
/// the corresponding index in the autoprecompile AIR.
///
/// Fails with `NotInDummy` if `local_index` has no substitution for this
/// instruction, and with `NotInAutoprecompile` if the substituted poly id
/// has no column in the autoprecompile.
fn global_index<F>(
    local_index: usize,
    instruction: &OriginalInstruction<F>,
    autoprecompile_index_by_poly_id: &BTreeMap<u64, usize>,
) -> Result<usize, IndexError> {
    // local index -> poly id in the autoprecompile
    let poly_id = instruction
        .subs
        .get(local_index)
        .ok_or(IndexError::NotInDummy)?;
    // poly id -> column index in the autoprecompile
    autoprecompile_index_by_poly_id
        .get(poly_id)
        .copied()
        .ok_or(IndexError::NotInAutoprecompile)
}
/// The combined AIR of one autoprecompile.
pub struct PowdrAir<F> {
    /// The columns in arbitrary order
    columns: Vec<Column>,
    /// The mapping from poly_id id to the index in the list of columns.
    /// The values are always unique and contiguous
    column_index_by_poly_id: BTreeMap<u64, usize>,
    // the symbolic constraints and bus interactions of the precompile
    machine: SymbolicMachine<F>,
}
impl<F: PrimeField32> ColumnsAir<F> for PowdrAir<F> {
    /// Column names, in the AIR's internal column order.
    fn columns(&self) -> Option<Vec<String>> {
        let mut names = Vec::with_capacity(self.columns.len());
        for column in &self.columns {
            names.push(column.name.clone());
        }
        Some(names)
    }
}
/// Evaluates symbolic expressions against a single concrete trace row.
pub struct RowEvaluator<'a, F: PrimeField32> {
    // the concrete cell values of one row
    pub row: &'a [F],
    // optional indirection: poly id -> index in `row`; when `None`, the
    // symbolic variable's own index is used directly
    pub witness_id_to_index: Option<&'a BTreeMap<u64, usize>>,
}

impl<'a, F: PrimeField32> RowEvaluator<'a, F> {
    /// Creates a row evaluator; see the field docs for `witness_id_to_index`.
    pub fn new(row: &'a [F], witness_id_to_index: Option<&'a BTreeMap<u64, usize>>) -> Self {
        Self {
            row,
            witness_id_to_index,
        }
    }
}
impl<F: PrimeField32> SymbolicEvaluator<F, F> for RowEvaluator<'_, F> {
    fn eval_const(&self, c: F) -> F {
        c
    }

    /// Looks a main-trace variable up in the row, optionally translating its
    /// index through `witness_id_to_index`. Anything other than the current
    /// rotation of main partition 0 panics.
    fn eval_var(&self, symbolic_var: SymbolicVariable<F>) -> F {
        match symbolic_var.entry {
            Entry::Main {
                part_index: 0,
                offset: 0,
            } => {
                let index = if let Some(witness_id_to_index) = self.witness_id_to_index {
                    witness_id_to_index[&(symbolic_var.index as u64)]
                } else {
                    symbolic_var.index
                };
                self.row[index]
            }
            // currently only the current rotation of the main is supported
            // next rotation is not supported because this is a single row evaluator
            _ => unreachable!(),
        }
    }

    // Row-position selectors are meaningless for a single-row evaluator.
    fn eval_is_first_row(&self) -> F {
        unreachable!()
    }

    fn eval_is_last_row(&self) -> F {
        unreachable!()
    }

    fn eval_is_transition(&self) -> F {
        unreachable!()
    }
}
// Local (openvm-expression-based) mirror of `powdr_autoprecompiles::SymbolicMachine`.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "F: Field")]
pub struct SymbolicMachine<F> {
    // the unique columns referenced by the constraints/interactions
    columns: Vec<Column>,
    constraints: Vec<SymbolicConstraint<F>>,
    bus_interactions: Vec<SymbolicBusInteraction<F>>,
}
impl<F: PrimeField32> From<powdr_autoprecompiles::SymbolicMachine<F>> for SymbolicMachine<F> {
    /// Converts a powdr symbolic machine into the local openvm-expression
    /// representation, collecting its unique columns along the way.
    fn from(machine: powdr_autoprecompiles::SymbolicMachine<F>) -> Self {
        // collect columns before destructuring consumes the machine
        let columns = machine.unique_columns().collect();
        let powdr_autoprecompiles::SymbolicMachine {
            constraints,
            bus_interactions,
        } = machine;
        Self {
            columns,
            constraints: constraints
                .into_iter()
                .map(SymbolicConstraint::from)
                .collect(),
            bus_interactions: bus_interactions
                .into_iter()
                .map(SymbolicBusInteraction::from)
                .collect(),
        }
    }
}
// A single polynomial identity, stored as an openvm symbolic expression
// (must evaluate to zero on every row).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "F: Field")]
struct SymbolicConstraint<F> {
    expr: SymbolicExpression<F>,
}
impl<F: PrimeField32> From<powdr_autoprecompiles::SymbolicConstraint<F>> for SymbolicConstraint<F> {
    /// Translates a powdr algebraic constraint into openvm's symbolic form.
    fn from(constraint: powdr_autoprecompiles::SymbolicConstraint<F>) -> Self {
        Self {
            expr: algebraic_to_symbolic(&constraint.expr),
        }
    }
}
// A bus interaction (lookup/permutation send) in openvm-expression form.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "F: Field")]
struct SymbolicBusInteraction<F> {
    // which bus this interaction targets
    id: BusIndex,
    // multiplicity expression (how many times the tuple is sent)
    mult: SymbolicExpression<F>,
    // the tuple of argument expressions sent over the bus
    args: Vec<SymbolicExpression<F>>,
    // weight used by openvm for trace-height accounting
    count_weight: u32,
}
impl<F: PrimeField32> From<powdr_autoprecompiles::SymbolicBusInteraction<F>>
    for SymbolicBusInteraction<F>
{
    /// Translates a powdr bus interaction into openvm's symbolic form,
    /// converting multiplicity and arguments expression-by-expression.
    fn from(bus_interaction: powdr_autoprecompiles::SymbolicBusInteraction<F>) -> Self {
        let powdr_autoprecompiles::SymbolicBusInteraction { id, mult, args, .. } = bus_interaction;
        let mult = algebraic_to_symbolic(&mult);
        let args = args.iter().map(algebraic_to_symbolic).collect();
        Self {
            id: id as BusIndex,
            mult,
            args,
            // TODO: Is this correct?
            count_weight: 1,
        }
    }
}
impl<F: PrimeField32> PowdrAir<F> {
    /// Builds the AIR from a powdr symbolic machine. Column order is the
    /// machine's unique-column order; `column_index_by_poly_id` maps each
    /// column's poly id to its position in that order.
    pub fn new(machine: powdr_autoprecompiles::SymbolicMachine<F>) -> Self {
        let (column_index_by_poly_id, columns): (BTreeMap<_, _>, Vec<_>) = machine
            .unique_columns()
            .enumerate()
            .map(|(index, c)| ((c.id.id, index), c.clone()))
            .unzip();
        Self {
            columns,
            column_index_by_poly_id,
            machine: machine.into(),
        }
    }
}
impl<F: PrimeField32> BaseAir<F> for PowdrAir<F> {
    /// Trace width equals the number of columns; an autoprecompile AIR is
    /// never empty, which the assertion enforces.
    fn width(&self) -> usize {
        let width = self.columns.len();
        assert!(width > 0);
        width
    }
}
// No public values, but the trait is implemented (default methods suffice).
impl<F: PrimeField32> BaseAirWithPublicValues<F> for PowdrAir<F> {}
impl<AB: InteractionBuilder> Air<AB> for PowdrAir<AB::F>
where
    AB::F: PrimeField32,
{
    /// Asserts every symbolic constraint to be zero and pushes every bus
    /// interaction, evaluating expressions against the current main row.
    fn eval(&self, builder: &mut AB) {
        let main = builder.main();
        let witnesses = main.row_slice(0);
        // TODO: cache?
        // map each column's poly id to its builder variable; zip_eq panics if
        // the row width does not match the column count
        let witness_values: BTreeMap<u64, AB::Var> = self
            .columns
            .iter()
            .map(|c| c.id.id)
            .zip_eq(witnesses.iter().cloned())
            .collect();
        let witness_evaluator = WitnessEvaluator::<AB>::new(&witness_values);
        for constraint in &self.machine.constraints {
            let e = witness_evaluator.eval_expr(&constraint.expr);
            builder.assert_zero(e);
        }
        for interaction in &self.machine.bus_interactions {
            let SymbolicBusInteraction {
                id,
                mult,
                args,
                count_weight,
            } = interaction;
            let mult = witness_evaluator.eval_expr(mult);
            let args = args
                .iter()
                .map(|arg| witness_evaluator.eval_expr(arg))
                .collect_vec();
            builder.push_interaction(*id, args, mult, *count_weight);
        }
    }
}
/// Evaluates symbolic expressions to builder expressions, resolving each
/// variable through a poly-id -> builder-variable map.
pub struct WitnessEvaluator<'a, AB: InteractionBuilder> {
    // poly id -> the builder variable for that column
    pub witness: &'a BTreeMap<u64, AB::Var>,
}

impl<'a, AB: InteractionBuilder> WitnessEvaluator<'a, AB> {
    /// Wraps the given poly-id -> variable map.
    pub fn new(witness: &'a BTreeMap<u64, AB::Var>) -> Self {
        Self { witness }
    }
}
impl<AB: InteractionBuilder> SymbolicEvaluator<AB::F, AB::Expr> for WitnessEvaluator<'_, AB> {
    fn eval_const(&self, c: AB::F) -> AB::Expr {
        c.into()
    }

    /// Resolves a main-trace variable (partition 0, current rotation only)
    /// to its builder expression; every other entry kind is unsupported.
    fn eval_var(&self, symbolic_var: SymbolicVariable<AB::F>) -> AB::Expr {
        match symbolic_var.entry {
            Entry::Main { part_index, offset } => {
                assert_eq!(part_index, 0);
                assert_eq!(offset, 0);
                (*self.witness.get(&(symbolic_var.index as u64)).unwrap()).into()
            }
            Entry::Public => unreachable!("Public variables are not supported"),
            Entry::Challenge => unreachable!("Challenges are not supported"),
            Entry::Exposed => unreachable!("Exposed values are not supported"),
            Entry::Preprocessed { .. } => {
                unimplemented!("Preprocessed values are not supported yet")
            }
            Entry::Permutation { .. } => unreachable!("Permutation values are not supported"),
        }
    }

    // Row-position selectors are not expected in autoprecompile expressions.
    fn eval_is_first_row(&self) -> AB::Expr {
        unimplemented!()
    }

    fn eval_is_last_row(&self) -> AB::Expr {
        unimplemented!()
    }

    fn eval_is_transition(&self) -> AB::Expr {
        unimplemented!()
    }
}
// Marker impl: the AIR uses a single (non-partitioned) main trace.
impl<F: PrimeField32> PartitionedBaseAir<F> for PowdrAir<F> {}

View File

@@ -0,0 +1,9 @@
/// The core logic of our extension
pub mod chip;
/// The opcodes for the powdr instructions, which is used in the chip implementation and contains the opcode ID
pub mod opcode;
/// The integration of our extension with the VM
mod vm;
pub use opcode::PowdrOpcode;
pub use vm::{OriginalInstruction, PowdrExecutor, PowdrExtension, PowdrPeriphery, PowdrPrecompile};

View File

@@ -0,0 +1,29 @@
use openvm_instructions::LocalOpcode;
use serde::{Deserialize, Serialize};
/// An opcode of the powdr extension, identified solely by its per-instance
/// `class_offset` (each opcode instance has its own air, so there is no
/// shared class-level offset).
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
pub struct PowdrOpcode {
    // Used directly as the global opcode id (see the `LocalOpcode` impl).
    pub class_offset: usize,
}
impl LocalOpcode for PowdrOpcode {
// This offset must not be accessed, since we want many opcodes of the same type to have different class_offsets.
// This is because each opcode has its own air.
const CLASS_OFFSET: usize = unreachable!();
fn from_usize(value: usize) -> Self {
Self {
class_offset: value,
}
}
// The local offset is always 0, since we want to have many opcodes over the same air.
fn local_usize(&self) -> usize {
0
}
// The global opcode is based on `class_offset`, *NOT* on the static `CLASS_OFFSET`.
fn global_opcode(&self) -> openvm_instructions::VmOpcode {
openvm_instructions::VmOpcode::from_usize(self.class_offset)
}
}

View File

@@ -0,0 +1,156 @@
// Mostly taken from [this openvm extension](https://github.com/openvm-org/openvm/blob/1b76fd5a900a7d69850ee9173969f70ef79c4c76/extensions/rv32im/circuit/src/extension.rs#L185) and simplified to only handle a single opcode with its necessary dependencies
use std::collections::BTreeMap;
use std::iter::once;
use derive_more::From;
use openvm_circuit::arch::VmInventoryError;
use openvm_circuit::{
arch::{VmExtension, VmInventory},
circuit_derive::{Chip, ChipUsageGetter},
derive::{AnyEnum, InstructionExecutor},
system::phantom::PhantomChip,
};
use openvm_circuit_primitives::bitwise_op_lookup::SharedBitwiseOperationLookupChip;
use openvm_circuit_primitives::range_tuple::SharedRangeTupleCheckerChip;
use openvm_circuit_primitives::var_range::SharedVariableRangeCheckerChip;
use openvm_instructions::VmOpcode;
use openvm_instructions::{instruction::Instruction, LocalOpcode};
use openvm_sdk::config::{SdkVmConfig, SdkVmConfigExecutor, SdkVmConfigPeriphery};
use openvm_stark_backend::p3_field::{Field, PrimeField32};
use powdr_autoprecompiles::powdr::Column;
use powdr_autoprecompiles::SymbolicMachine;
use serde::{Deserialize, Serialize};
use super::chip::SharedChips;
use super::{chip::PowdrChip, PowdrOpcode};
/// Convenience alias for the inventory of the base `SdkVmConfig` (its executors and periphery).
pub type SdkVmInventory<F> = VmInventory<SdkVmConfigExecutor<F>, SdkVmConfigPeriphery<F>>;
/// The powdr VM extension: a set of autoprecompiles layered on top of a base
/// SDK VM configuration.
#[derive(Clone, Deserialize, Serialize)]
#[serde(bound = "F: Field")]
pub struct PowdrExtension<F: PrimeField32> {
    /// One entry per generated precompile (each with its own opcode and machine).
    pub precompiles: Vec<PowdrPrecompile<F>>,
    /// The base configuration; cloned into every precompile chip in `VmExtension::build`.
    pub base_config: SdkVmConfig,
}
/// An original (pre-acceleration) instruction together with the
/// autoprecompile columns it is substituted into.
#[derive(Clone, Serialize, Deserialize)]
pub struct OriginalInstruction<F> {
    /// The wrapped instruction, also reachable via `AsRef<Instruction<F>>`.
    pub instruction: Instruction<F>,
    /// The autoprecompile poly_ids that the instruction points to, in the same order as the corresponding original columns
    pub subs: Vec<u64>,
}
impl<F> OriginalInstruction<F> {
pub fn new(instruction: Instruction<F>, subs: Vec<u64>) -> Self {
Self { instruction, subs }
}
pub fn opcode(&self) -> VmOpcode {
self.instruction.opcode
}
}
// Expose the wrapped `Instruction` by reference.
impl<F> AsRef<Instruction<F>> for OriginalInstruction<F> {
    fn as_ref(&self) -> &Instruction<F> {
        &self.instruction
    }
}
/// A single autoprecompile: its opcode, its symbolic machine, and the
/// original instructions/airs it replaces.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound = "F: Field")]
pub struct PowdrPrecompile<F> {
    /// Human-readable name of the precompile.
    pub name: String,
    /// The opcode that dispatches to this precompile.
    pub opcode: PowdrOpcode,
    /// The symbolic machine implementing the precompile.
    pub machine: SymbolicMachine<F>,
    /// The original instructions replaced by this precompile.
    pub original_instructions: Vec<OriginalInstruction<F>>,
    /// The original airs, keyed by index.
    pub original_airs: BTreeMap<usize, SymbolicMachine<F>>,
    // NOTE(review): presumably flags valid (non-padding) rows of the
    // precompile trace — inferred from the name; confirm against the chip.
    pub is_valid_column: Column,
}
impl<F> PowdrPrecompile<F> {
pub fn new(
name: String,
opcode: PowdrOpcode,
machine: SymbolicMachine<F>,
original_instructions: Vec<OriginalInstruction<F>>,
original_airs: BTreeMap<usize, SymbolicMachine<F>>,
is_valid_column: Column,
) -> Self {
Self {
name,
opcode,
machine,
original_instructions,
original_airs,
is_valid_column,
}
}
}
impl<F: PrimeField32> PowdrExtension<F> {
pub fn new(precompiles: Vec<PowdrPrecompile<F>>, base_config: SdkVmConfig) -> Self {
Self {
precompiles,
base_config,
}
}
}
/// Executor enum for the powdr extension; currently a single chip variant.
#[derive(ChipUsageGetter, Chip, InstructionExecutor, From, AnyEnum)]
pub enum PowdrExecutor<F: PrimeField32> {
    Powdr(PowdrChip<F>),
}
/// Periphery chips available to the powdr extension: the base SDK periphery
/// plus the phantom chip.
#[derive(From, ChipUsageGetter, Chip, AnyEnum)]
pub enum PowdrPeriphery<F: PrimeField32> {
    Sdk(SdkVmConfigPeriphery<F>),
    Phantom(PhantomChip<F>),
}
impl<F: PrimeField32> VmExtension<F> for PowdrExtension<F> {
    type Executor = PowdrExecutor<F>;
    type Periphery = PowdrPeriphery<F>;

    /// Registers one `PowdrChip` per precompile in a fresh inventory, wiring
    /// each chip to the shared lookup/range-checker chips found in the
    /// builder's periphery.
    ///
    /// Panics if the bitwise-lookup or variable-range-checker chip is absent
    /// (the `unwrap`s below); the tuple range checker is optional.
    fn build(
        &self,
        builder: &mut openvm_circuit::arch::VmInventoryBuilder<F>,
    ) -> Result<VmInventory<Self::Executor, Self::Periphery>, VmInventoryError> {
        let mut inventory = VmInventory::new();
        let offline_memory = builder.system_base().offline_memory();
        // TODO: here we make assumptions about the existence of some chips in the periphery. Make this more flexible
        let bitwise_lookup = *builder
            .find_chip::<SharedBitwiseOperationLookupChip<8>>()
            .first()
            .unwrap();
        let range_checker = *builder
            .find_chip::<SharedVariableRangeCheckerChip>()
            .first()
            .unwrap();
        // May be absent; threaded through as an `Option`.
        let tuple_range_checker = builder
            .find_chip::<SharedRangeTupleCheckerChip<2>>()
            .first()
            .cloned();
        for precompile in &self.precompiles {
            let powdr_chip: PowdrChip<F> = PowdrChip::new(
                precompile.clone(),
                offline_memory.clone(),
                self.base_config.clone(),
                SharedChips::new(
                    bitwise_lookup.clone(),
                    range_checker.clone(),
                    tuple_range_checker.cloned(),
                ),
            );
            // Each chip handles exactly one (global) opcode.
            inventory.add_executor(powdr_chip, once(precompile.opcode.global_opcode()))?;
        }
        Ok(inventory)
    }
}

299
openvm/src/utils.rs Normal file
View File

@@ -0,0 +1,299 @@
use std::{collections::BTreeMap, sync::Arc};
use itertools::Itertools;
use openvm_stark_backend::{
air_builders::symbolic::{
symbolic_expression::SymbolicExpression,
symbolic_variable::{Entry, SymbolicVariable},
SymbolicConstraints,
},
interaction::Interaction,
p3_field::PrimeField32,
};
use powdr::number::FieldElement;
use powdr::{
ast::analyzed::{
AlgebraicBinaryOperation, AlgebraicBinaryOperator, AlgebraicExpression, AlgebraicReference,
AlgebraicUnaryOperation, AlgebraicUnaryOperator, PolyID, PolynomialType,
},
number::BabyBearField,
};
/// Converts a powdr `AlgebraicExpression` into an openvm `SymbolicExpression`.
///
/// - `Pow` is unrolled into repeated multiplication; the exponent must be a
///   constant `Number` (anything else hits `unimplemented!`).
/// - Committed references map to `Entry::Main`, constant (fixed) references
///   to `Entry::Preprocessed`; the `next` flag becomes the row offset (0/1).
/// - Every constructed node carries `degree_multiple: 0`.
///   NOTE(review): confirm downstream consumers do not rely on accurate
///   degree tracking.
pub fn algebraic_to_symbolic<T: PrimeField32>(
    expr: &AlgebraicExpression<T>,
) -> SymbolicExpression<T> {
    match expr {
        AlgebraicExpression::Number(n) => SymbolicExpression::Constant(*n),
        AlgebraicExpression::BinaryOperation(binary) => match binary.op {
            AlgebraicBinaryOperator::Add => SymbolicExpression::Add {
                x: Arc::new(algebraic_to_symbolic(&binary.left)),
                y: Arc::new(algebraic_to_symbolic(&binary.right)),
                degree_multiple: 0,
            },
            AlgebraicBinaryOperator::Sub => SymbolicExpression::Sub {
                x: Arc::new(algebraic_to_symbolic(&binary.left)),
                y: Arc::new(algebraic_to_symbolic(&binary.right)),
                degree_multiple: 0,
            },
            AlgebraicBinaryOperator::Mul => SymbolicExpression::Mul {
                x: Arc::new(algebraic_to_symbolic(&binary.left)),
                y: Arc::new(algebraic_to_symbolic(&binary.right)),
                degree_multiple: 0,
            },
            AlgebraicBinaryOperator::Pow => {
                // Assuming the right operand is a constant number
                let base = algebraic_to_symbolic(&binary.left);
                let exp = match *binary.right {
                    AlgebraicExpression::Number(n) => n,
                    _ => unimplemented!(),
                };
                if exp == T::ZERO {
                    // x^0 == 1
                    SymbolicExpression::Constant(T::ONE)
                } else {
                    // Unroll x^e into e-1 multiplications by counting the
                    // exponent down in the field. Linear in the exponent, so
                    // only suitable for small constant exponents.
                    let mut result = base.clone();
                    let mut remaining = exp - T::ONE;
                    while remaining != T::ZERO {
                        result = SymbolicExpression::Mul {
                            x: Arc::new(result),
                            y: Arc::new(base.clone()),
                            degree_multiple: 0,
                        };
                        remaining -= T::ONE;
                    }
                    result
                }
            }
        },
        AlgebraicExpression::UnaryOperation(unary) => match unary.op {
            AlgebraicUnaryOperator::Minus => SymbolicExpression::Neg {
                x: Arc::new(algebraic_to_symbolic(&unary.expr)),
                degree_multiple: 0,
            },
        },
        AlgebraicExpression::Reference(algebraic_reference) => {
            let poly_id = algebraic_reference.poly_id;
            // A next-row reference becomes rotation offset 1.
            let next = algebraic_reference.next as usize;
            match poly_id.ptype {
                PolynomialType::Committed => SymbolicExpression::Variable(SymbolicVariable::new(
                    Entry::Main {
                        part_index: 0,
                        offset: next,
                    },
                    poly_id.id as usize,
                )),
                PolynomialType::Constant => SymbolicExpression::Variable(SymbolicVariable::new(
                    Entry::Preprocessed { offset: next },
                    poly_id.id as usize,
                )),
                PolynomialType::Intermediate => todo!(),
            }
        }
        AlgebraicExpression::PublicReference(_) => {
            unimplemented!()
        }
        AlgebraicExpression::Challenge(ch) => SymbolicExpression::Variable(SymbolicVariable::new(
            Entry::Challenge,
            ch.id.try_into().unwrap(),
        )),
    }
}
/// Converts an openvm `SymbolicExpression` back into a powdr
/// `AlgebraicExpression`, resolving `Entry::Main` variables to column names
/// via `columns` (indexed by the variable's `index`).
///
/// `IsFirstRow` / `IsLastRow` / `IsTransition` are rendered as references to
/// fixed columns with hard-coded poly ids 0/1/2 — these must stay in sync
/// with the preamble emitted by `get_pil`.
///
/// # Panics
/// Panics if a variable's `index` is out of bounds for `columns`, if a
/// non-`Main` variable is encountered, or if a `Main` offset other than
/// 0 or 1 appears.
pub fn symbolic_to_algebraic<T: PrimeField32, P: FieldElement>(
    expr: &SymbolicExpression<T>,
    columns: &[String],
) -> AlgebraicExpression<P> {
    match expr {
        SymbolicExpression::Constant(c) => {
            // Round-trip the value through its canonical u32 representation.
            AlgebraicExpression::Number(P::from_bytes_le(&c.as_canonical_u32().to_le_bytes()))
        }
        SymbolicExpression::Add { x, y, .. } => {
            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {
                left: Box::new(symbolic_to_algebraic(x, columns)),
                right: Box::new(symbolic_to_algebraic(y, columns)),
                op: AlgebraicBinaryOperator::Add,
            })
        }
        SymbolicExpression::Sub { x, y, .. } => {
            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {
                left: Box::new(symbolic_to_algebraic(x, columns)),
                right: Box::new(symbolic_to_algebraic(y, columns)),
                op: AlgebraicBinaryOperator::Sub,
            })
        }
        SymbolicExpression::Mul { x, y, .. } => {
            AlgebraicExpression::BinaryOperation(AlgebraicBinaryOperation {
                left: Box::new(symbolic_to_algebraic(x, columns)),
                right: Box::new(symbolic_to_algebraic(y, columns)),
                op: AlgebraicBinaryOperator::Mul,
            })
        }
        SymbolicExpression::Neg { x, .. } => {
            AlgebraicExpression::UnaryOperation(AlgebraicUnaryOperation {
                expr: Box::new(symbolic_to_algebraic(x, columns)),
                op: AlgebraicUnaryOperator::Minus,
            })
        }
        SymbolicExpression::Variable(SymbolicVariable { entry, index, .. }) => match entry {
            Entry::Main { offset, part_index } => {
                // Only the single main-trace part is supported.
                assert_eq!(*part_index, 0);
                // Offset 1 means a next-row reference.
                let next = match offset {
                    0 => false,
                    1 => true,
                    _ => unimplemented!(),
                };
                let name = columns.get(*index).unwrap_or_else(|| {
                    panic!("Column index out of bounds: {index}\nColumns: {columns:?}");
                });
                AlgebraicExpression::Reference(AlgebraicReference {
                    name: name.clone(),
                    poly_id: PolyID {
                        id: *index as u64,
                        ptype: PolynomialType::Committed,
                    },
                    next,
                })
            }
            _ => unimplemented!(),
        },
        SymbolicExpression::IsFirstRow => AlgebraicExpression::Reference(AlgebraicReference {
            name: "is_first_row".to_string(),
            poly_id: PolyID {
                id: 0,
                ptype: PolynomialType::Constant,
            },
            next: false,
        }),
        SymbolicExpression::IsLastRow => AlgebraicExpression::Reference(AlgebraicReference {
            name: "is_last_row".to_string(),
            poly_id: PolyID {
                id: 1,
                ptype: PolynomialType::Constant,
            },
            next: false,
        }),
        SymbolicExpression::IsTransition => AlgebraicExpression::Reference(AlgebraicReference {
            name: "is_transition".to_string(),
            poly_id: PolyID {
                id: 2,
                ptype: PolynomialType::Constant,
            },
            next: false,
        }),
    }
}
/// Renders the given symbolic constraints as powdr PIL source.
///
/// The output contains, in order: a preamble of fixed row-indicator columns,
/// named bus-id constants, witness column declarations, bus interactions
/// grouped and sorted by bus index, and the algebraic constraints.
///
/// # Panics
/// Panics if an interaction references a bus index missing from the
/// hard-coded `bus_id_to_name` table, or (via `format_expr`) if an
/// expression references a column index outside `columns`.
pub fn get_pil<F: PrimeField32>(
    name: &str,
    constraints: &SymbolicConstraints<F>,
    // `&[String]` rather than `&Vec<String>` (clippy `ptr_arg`); existing
    // callers passing `&Vec<String>` continue to work via deref coercion.
    columns: &[String],
    // TODO: public values are accepted but not rendered yet (see `format_expr`).
    public_values: Vec<String>,
) -> String {
    let mut pil = format!(
        "
namespace {name};
// Preamble
col fixed is_first_row = [1] + [0]*;
col fixed is_last_row = [0] + [1]*;
col fixed is_transition = [0] + [1]* + [0];
"
    );
    // Known bus ids; must cover every bus index used by `constraints`.
    let bus_id_to_name = [
        (0, "EXECUTION_BRIDGE"),
        (1, "MEMORY"),
        (2, "PC_LOOKUP"),
        (3, "VARIABLE_RANGE_CHECKER"),
        (6, "BITWISE_LOOKUP"),
        (7, "TUPLE_RANGE_CHECKER"),
    ]
    .into_iter()
    .collect::<BTreeMap<_, _>>();
    pil.push_str(
        &bus_id_to_name
            .iter()
            .map(|(id, name)| format!(" let {name} = {id};"))
            .join("\n"),
    );
    pil.push_str(
        "
// Witness columns
",
    );
    // Declare witness columns
    for column in columns {
        pil.push_str(&format!(" col witness {column};\n"));
    }
    // Group interactions by bus; BTreeMap keeps the output sorted by bus index.
    let bus_interactions_by_bus = constraints
        .interactions
        .iter()
        .map(|interaction| (interaction.bus_index, interaction))
        .into_group_map()
        .into_iter()
        // Use BTreeMap to sort by bus_index
        .collect::<BTreeMap<_, _>>();
    pil.push_str(
        "
// Bus interactions (bus_index, fields, count)\n",
    );
    for (bus_index, interactions) in bus_interactions_by_bus {
        let bus_name = bus_id_to_name
            .get(&bus_index)
            .unwrap_or_else(|| panic!("Bus index {bus_index} not found in bus_id_to_name"));
        for interaction in interactions {
            format_bus_interaction(&mut pil, interaction, columns, &public_values, bus_name);
        }
        pil.push('\n');
    }
    pil.push_str(" // Constraints\n");
    for constraint in &constraints.constraints {
        pil.push_str(&format!(
            " {} = 0;\n",
            format_expr(constraint, columns, &public_values)
        ));
    }
    pil
}
/// Appends one `std::protocols::bus::bus_interaction(...)` line to `pil`
/// for the given interaction.
fn format_bus_interaction<F: PrimeField32>(
    pil: &mut String,
    interaction: &Interaction<SymbolicExpression<F>>,
    columns: &[String],
    public_values: &[String],
    bus_name: &str,
) {
    let Interaction { message, count, .. } = interaction;
    // We do not know what is a send or a receive
    let function_name = "bus_interaction";
    // Render the message fields and the multiplicity before assembling the line.
    let fields = message
        .iter()
        .map(|value| format_expr(value, columns, public_values))
        .collect::<Vec<String>>()
        .join(", ");
    let multiplicity = format_expr(count, columns, public_values);
    pil.push_str(&format!(
        " std::protocols::bus::{}({bus_name}, [{}], {});\n",
        function_name, fields, multiplicity
    ));
}
/// Renders a symbolic expression as PIL text by converting it to an
/// algebraic expression over `BabyBearField` and using its `Display` impl.
fn format_expr<F: PrimeField32>(
    expr: &SymbolicExpression<F>,
    columns: &[String],
    // TODO: Implement public references
    _public_values: &[String],
) -> String {
    let algebraic = symbolic_to_algebraic::<_, BabyBearField>(expr, columns);
    algebraic.to_string()
}

View File

@@ -49,10 +49,11 @@ fn build_tests(kind: &str, dir: &str, sub_dir: &str, name: &str) {
.strip_suffix(&format!(".{kind}"))
{
println!("cargo:rerun-if-changed={full_dir}/{relative_name}");
let ignore = SLOW_LIST
.contains(&test_name)
.then_some("#[ignore = \"Too slow\"]")
.unwrap_or_default();
let ignore = if SLOW_LIST.contains(&test_name) {
"#[ignore = \"Too slow\"]"
} else {
Default::default()
};
write!(
test_file,
r#"

View File

@@ -169,8 +169,8 @@ pub fn gen_estark_proof_with_backend_variant(
let publics: Vec<GoldilocksField> = pipeline
.publics()
.iter()
.map(|(_name, v)| v.expect("all publics should be known since we created a proof"))
.values()
.map(|v| v.expect("all publics should be known since we created a proof"))
.collect();
pipeline.verify(&proof, &[publics]).unwrap();
@@ -270,8 +270,8 @@ pub fn gen_halo2_proof(pipeline: Pipeline<Bn254Field>, backend: BackendVariant)
let publics: Vec<Bn254Field> = pipeline
.publics()
.iter()
.map(|(_name, v)| v.expect("all publics should be known since we created a proof"))
.values()
.map(|v| v.expect("all publics should be known since we created a proof"))
.collect();
pipeline.verify(&proof, &[publics]).unwrap();
@@ -304,8 +304,8 @@ pub fn test_plonky3_with_backend_variant<T: FieldElement>(
let publics: Vec<T> = pipeline
.publics()
.iter()
.map(|(_name, v)| v.expect("all publics should be known since we created a proof"))
.values()
.map(|v| v.expect("all publics should be known since we created a proof"))
.collect();
pipeline.verify(&proof, &[publics.clone()]).unwrap();
@@ -351,8 +351,8 @@ pub fn test_plonky3_pipeline<T: FieldElement>(pipeline: Pipeline<T>) {
let publics: Vec<T> = pipeline
.publics()
.iter()
.map(|(_name, v)| v.expect("all publics should be known since we created a proof"))
.values()
.map(|v| v.expect("all publics should be known since we created a proof"))
.collect();
pipeline.verify(&proof, &[publics.clone()]).unwrap();

View File

@@ -263,6 +263,7 @@ fn stwo_fixed_columns() {
}
#[test]
#[should_panic = "The composition polynomial OODS value does not match the trace OODS values"]
fn stwo_stage1_publics() {
let f = "pil/stage1_publics.pil";
test_stwo_stage1_public(

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-12-17"
channel = "nightly-2025-05-14"

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-12-17"
channel = "nightly-2025-05-14"

View File

@@ -254,8 +254,8 @@ impl Session {
let pubs: Vec<u32> = self
.pipeline
.publics()
.iter()
.map(|(_, v)| v.unwrap().to_integer().try_into_u32().unwrap())
.values()
.map(|v| v.unwrap().to_integer().try_into_u32().unwrap())
.collect();
pubs.try_into().expect("There should be exactly 8 publics")
}

View File

@@ -264,7 +264,7 @@ impl<F: FieldElement> SubmachineTrace<F> {
fn push_row(&mut self) {
self.selectors.values_mut().for_each(|v| v.push(0.into()));
self.values
.extend(std::iter::repeat(F::from(0)).take(self.cols.len()));
.extend(std::iter::repeat_n(F::from(0), self.cols.len()));
}
/// Push a dummy block to the trace.

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf", "riscv32im-risc0-zkvm-elf"]
profile = "minimal"

View File

@@ -1,6 +1,5 @@
#![no_std]
#![feature(
start,
alloc_error_handler,
maybe_uninit_write_slice,
round_char_boundary,

View File

@@ -48,7 +48,7 @@ fn render_memory_hash<F: FieldElement>(hash: &[F]) -> String {
/// # Arguments
/// - `pipeline`: The pipeline that should be the starting point for all the chunks.
/// - `pipeline_callback`: A function that will be called for each chunk. It will be passed a prepared `pipeline`,
/// with all chunk-specific information set (witness, fixed cols, inputs, optimized pil)
/// with all chunk-specific information set (witness, fixed cols, inputs, optimized pil)
/// - `bootloader_inputs`: The inputs to the bootloader and the index of the row at which the shutdown routine
/// is supposed to execute, for each chunk, as returned by `rust_continuations_dry_run`.
pub fn rust_continuations<F: FieldElement, PipelineCallback, E>(

View File

@@ -367,7 +367,7 @@ fn build_cargo_command(
let mut args: Vec<&OsStr> = as_ref![
OsStr;
"+nightly-2024-08-01",
"+nightly-2025-05-14",
"build",
"--release",
"--target-dir",

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32im-risc0-zkvm-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32im-risc0-zkvm-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,4 +1,4 @@
[toolchain]
channel = "nightly-2024-08-01"
channel = "nightly-2025-05-14"
targets = ["riscv32imac-unknown-none-elf"]
profile = "minimal"

View File

@@ -1,2 +1,2 @@
[toolchain]
channel = "nightly-2024-12-17"
channel = "nightly-2025-05-14"