Merge pull request #335 from powdr-org/heap-placement-hack

Heap placement hack
This commit is contained in:
chriseth
2023-06-15 17:02:00 +02:00
committed by GitHub
3 changed files with 31 additions and 6 deletions

View File

@@ -9,7 +9,7 @@ use crate::witgen::affine_expression::AffineResult;
use crate::witgen::util::is_simple_poly_of_name;
use crate::witgen::{EvalError, EvalResult, FixedData};
use crate::witgen::{EvalValue, IncompleteCause};
use number::FieldElement;
use number::{DegreeType, FieldElement};
use pil_analyzer::{Expression, Identity, IdentityKind, PolynomialReference, SelectedExpressions};
@@ -17,6 +17,7 @@ use pil_analyzer::{Expression, Identity, IdentityKind, PolynomialReference, Sele
#[derive(Default)]
pub struct DoubleSortedWitnesses<T> {
degree: DegreeType,
//key_col: String,
/// Position of the witness columns in the data.
/// The key column has a position of usize::max
@@ -33,7 +34,7 @@ struct Operation<T> {
impl<T: FieldElement> DoubleSortedWitnesses<T> {
pub fn try_new(
_fixed_data: &FixedData<T>,
fixed_data: &FixedData<T>,
_identities: &[&Identity<T>],
witness_cols: &HashSet<&PolynomialReference>,
) -> Option<Box<Self>> {
@@ -54,7 +55,10 @@ impl<T: FieldElement> DoubleSortedWitnesses<T> {
.next()
.is_none()
{
Some(Box::default())
Some(Box::new(Self {
degree: fixed_data.degree,
..Default::default()
}))
} else {
None
}
@@ -174,6 +178,13 @@ impl<T: FieldElement> DoubleSortedWitnesses<T> {
left[0], right.expressions[0]
)
})?;
if addr.to_degree() >= self.degree {
return Err(format!(
"Memory access to too large address: 0x{addr:x} (must be less than 0x{:x})",
self.degree
)
.into());
}
let step = left[1]
.constant_value()
.ok_or_else(|| format!("Step must be known: {} = {}", left[1], right.expressions[1]))?;

View File

@@ -8,9 +8,12 @@ use core::{
ptr::{self, addr_of},
};
// Force C representation so that the large buffer is at the end.
// This might avoid access to memory with large gaps.
#[repr(C)]
struct FixedMemoryAllocator<const SIZE: usize> {
mem_buffer: [u8; SIZE],
next_available: Cell<usize>,
mem_buffer: [u8; SIZE],
}
impl<const SIZE: usize> FixedMemoryAllocator<SIZE> {

View File

@@ -13,7 +13,7 @@ pub fn compile_riscv_asm(mut assemblies: BTreeMap<String, String>) -> String {
// stack grows towards zero
let stack_start = 0x10000;
// data grows away from zero
let data_start = 0x20000;
let data_start = 0x10100;
assert!(assemblies
.insert("__runtime".to_string(), runtime().to_string())
@@ -25,7 +25,7 @@ pub fn compile_riscv_asm(mut assemblies: BTreeMap<String, String>) -> String {
.map(|(name, contents)| (name, parser::parse_asm(&contents)))
.collect(),
);
let (mut objects, object_order) = data_parser::extract_data_objects(&statements);
let (mut objects, mut object_order) = data_parser::extract_data_objects(&statements);
// Reduce to the code that is actually reachable from main
// (and the objects that are referred from there)
@@ -35,6 +35,17 @@ pub fn compile_riscv_asm(mut assemblies: BTreeMap<String, String>) -> String {
replace_dynamic_label_references(&mut statements, &objects);
// Sort the objects according to the order of the names in object_order.
// With the single exception: If there is a large object, put that at the end.
// The idea behind this is that there might be a single gigantic object representing the heap
// and putting that at the end should keep memory addresses small.
let mut large_objects = objects
.iter()
.filter(|(_name, data)| data.iter().map(|d| d.size()).sum::<usize>() > 0x2000);
if let (Some((heap, _)), None) = (large_objects.next(), large_objects.next()) {
let heap_pos = object_order.iter().position(|o| o == heap).unwrap();
object_order.remove(heap_pos);
object_order.push(heap.clone());
};
let sorted_objects = object_order
.into_iter()
.filter_map(|n| {