Files
linea-monorepo/prover/utils/csvtraces/csvtraces.go
AlexandreBelling 7325f38c88 Prover: beta v1.2 integration changes (#692)
* bump go-corset

* fix compile errors

* constraints: bump to v0.1.0-rc1 for beta-v1.2

* bump to latest go-corset

* constraints: bump to beta-v1.2 / v0.1.0-rc2

* bump go-corset

* bump zkevm bin

* use next power of two value for non-power of two size columns (e.g., MMIO)

* remove a check for the power of two size

* bump corset to 9.7.18

* bump zkevm.bin

* bump corset to v9.7.18

* update zkevm.bin

* added interleaved to the compilediop columns

* adjusted size for corset columns

* Prover/Codehash Non Power of Two Column Size (#618)

* Revert "adjusted size for corset columns"

This reverts commit b1a7319fa586319a04ba57f421f10b55492124ff.

* fixed bug and added panic message for a non power of two size column

* removing panic

* reinstated the panic

---------

Co-authored-by: gusiri <dreamerty@postech.ac.kr>

* adjusted size for corset columns

* constraints: bump to beta v1.2/v0.1.0-rc3

* update constraints version to rc3

* bump to latest go-corset

* apply hotfix for BLOCKDATA

* move NextPowerOfTwo unit test to utils

* add logs for adjusted columns with non-power of two size

* turn off trace version check

* fix golangci-lint

* Prover/fix public input timestamps from new blockdata (#644)

* updated timestamp fetcher and arithmetization mock data for unit testing.

* fix(codehash): uses 0x0 for the codehash of non-existing accounts instead of the default EOA codehash

* fix(mimccodehash): unimport the rom codehash for initialization code

* fixup(execDataHash): revert the exec-data hash check

* timestamp byte change

* fix(execdatahash): adds the real blockhash in the execdata hash instead of 0x0

* fixup previous commit

* fixup(build): removes imports

* Revert "fixup(execDataHash): revert the exec-data hash check"

This reverts commit eb8d984e13fab627a853dc98b2c94980a7eed0b3.

* fix(consistency): adds a smaller size to the consistency module

* feat(mimc): alex -- mimc simplification -- start

* optimize factorExpression

* feat(exec): uses the ProveCheck in the execution proof

* Revert "feat(mimc): alex -- mimc simplification -- start"

This reverts commit 184771b92746070dedb5ca356ed81f989a3daea5.

* fix (public-input): changed the hashing method to match compression

* perf(mem): adds a detector for constant regular column.

* fixup(mem): support the edge-case for smartvectors of size 1

* fix(codehash): support the case where the ROM is empty

* feat(csv): adds a feature to rename columns when using fmtcsv

* fixup(codehash): supports the case where no codehash are available

* test(codehash): adds test for empty rom or statesummary

* fix(ss-connect): skip the integration connector test

---------

Co-authored-by: gusiri <dreamerty@postech.ac.kr>
Co-authored-by: Soleimani193 <azam.soleimanian@ens.fr>
Co-authored-by: Arijit Dutta <37040536+arijitdutta67@users.noreply.github.com>
Co-authored-by: Bogdan Ursu <bogdanursuoffice@gmail.com>
2025-02-17 12:46:07 +01:00

300 lines
6.9 KiB
Go

// Package csvtraces provides a way to read and write traces in CSV format.
package csvtraces
import (
"encoding/csv"
"fmt"
"io"
"os"
"strings"
"github.com/consensys/linea-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/linea-monorepo/prover/maths/field"
"github.com/consensys/linea-monorepo/prover/protocol/ifaces"
"github.com/consensys/linea-monorepo/prover/protocol/wizard"
"github.com/consensys/linea-monorepo/prover/utils"
)
// cfg collects the settings applied by the [Option] functions when
// reading a trace or printing one as CSV.
type cfg struct {
	// The number of rows in the trace
	nbRows int
	// skipPrePaddingZero asks the printer to omit the all-zero rows
	// appearing before the first non-zero row.
	skipPrePaddingZero bool
	// filterOn, when non-nil, makes the printer drop the rows on which
	// this column is zero.
	filterOn ifaces.Column
	// inHex asks the printer to render all values in hexadecimal.
	inHex bool
	// renameCols, when non-nil, overrides the CSV header names, in
	// column order.
	renameCols []string
}

// Option mutates the configuration and may return an error when the
// requested setting is invalid.
type Option func(*cfg) error
// WithNbRows sets the number of rows in the trace
func WithNbRows(n int) Option {
	return func(conf *cfg) error {
		conf.nbRows = n
		return nil
	}
}
// SkipPrepaddingZero skips the zeroes at the beginning of the file
func SkipPrepaddingZero(conf *cfg) error {
	conf.skipPrePaddingZero = true
	return nil
}
// FilterOn sets the CSV printer to ignore rows where the provided filter
// column is zero.
func FilterOn(filter ifaces.Column) Option {
	return func(conf *cfg) error {
		conf.filterOn = filter
		return nil
	}
}
// InHex sets the CSV printer to print the values in hexadecimal
func InHex(conf *cfg) error {
	conf.inHex = true
	return nil
}
// RenameCols rename the columns in the csv. The replacement names are
// taken in the same order as the printed columns.
func RenameCols(names ...string) Option {
	return func(conf *cfg) error {
		conf.renameCols = names
		return nil
	}
}
// CsvTrace holds the content of a parsed CSV trace file: one vector of
// field elements per column, keyed by the CSV header name.
type CsvTrace struct {
	// mapped stores the parsed column values, keyed by header name.
	mapped map[string][]field.Element
	// nbRows is the number of data rows in the trace (possibly extended
	// via [WithNbRows]).
	nbRows int
}
// MustOpenCsvFile reads a CSV trace from the file located at fName and
// panics if the file cannot be opened or parsed. It is intended for
// tests and debugging utilities.
func MustOpenCsvFile(fName string) *CsvTrace {
	file, err := os.Open(fName)
	if err != nil {
		utils.Panic("%v", err.Error())
	}
	defer file.Close()

	trace, err := NewCsvTrace(file)
	if err != nil {
		utils.Panic("could not parse CSV: %v", err.Error())
	}

	return trace
}
// FmtCsv is a utility function that can be used in order to print a set of
// columns in a csv format so that debugging and testcase generation are
// simpler. The options allow renaming the headers ([RenameCols]), dropping
// rows ([FilterOn]), skipping the all-zero prefix ([SkipPrepaddingZero])
// and printing in hexadecimal ([InHex]).
//
// It returns an error if one of the provided options fails to apply.
func FmtCsv(w io.Writer, run *wizard.ProverRuntime, cols []ifaces.Column, options []Option) error {

	var (
		header       = []string{}
		assignment   = [][]field.Element{}
		cfg          = cfg{}
		foundNonZero = false
		filterCol    []field.Element
	)

	for _, op := range options {
		// Bug fix: option errors were previously discarded even though
		// the function returns an error. Propagate them instead.
		if err := op(&cfg); err != nil {
			return err
		}
	}

	if cfg.renameCols != nil && len(cfg.renameCols) != len(cols) {
		utils.Panic("provided %v columns, but also provided %v name replacements", len(cols), len(cfg.renameCols))
	}

	for i := range cols {
		if cfg.renameCols != nil {
			header = append(header, cfg.renameCols[i])
		} else {
			header = append(header, string(cols[i].GetColID()))
		}
		assignment = append(assignment, cols[i].GetColAssignment(run).IntoRegVecSaveAlloc())
	}

	fmt.Fprintf(w, "%v\n", strings.Join(header, ","))

	// Guard against an empty column list: there are no rows to print and
	// indexing assignment[0] below would panic.
	if len(assignment) == 0 {
		return nil
	}

	if cfg.filterOn != nil {
		filterCol = cfg.filterOn.GetColAssignment(run).IntoRegVecSaveAlloc()
	}

	for r := range assignment[0] {

		var (
			fmtVals   = []string{}
			allZeroes = true
		)

		for c := range assignment {
			if !assignment[c][r].IsZero() {
				allZeroes = false
			}
			fmtVals = append(fmtVals, fmtFieldElement(cfg.inHex, assignment[c][r]))
		}

		// foundNonZero latches once the first non-zero row is seen; it is
		// what allows zero rows *after* the data to still be printed when
		// skipPrePaddingZero is set.
		if !allZeroes {
			foundNonZero = true
		}

		if filterCol != nil && filterCol[r].IsZero() {
			continue
		}

		if !cfg.skipPrePaddingZero || !allZeroes || foundNonZero {
			fmt.Fprintf(w, "%v\n", strings.Join(fmtVals, ","))
		}
	}

	return nil
}
// NewCsvTrace parses a CSV stream into a [CsvTrace]. The first record is
// taken as the header and every following record contributes one field
// element per column. The options may extend the row count via
// [WithNbRows]; requesting fewer rows than the file contains is an error.
func NewCsvTrace(r io.Reader, opts ...Option) (*CsvTrace, error) {

	conf := &cfg{}
	for _, opt := range opts {
		if err := opt(conf); err != nil {
			return nil, err
		}
	}

	reader := csv.NewReader(r)
	// Every record must have the same number of fields as the header.
	reader.FieldsPerRecord = 0

	header, err := reader.Read()
	if err != nil {
		return nil, fmt.Errorf("read header row: %w", err)
	}

	data := map[string][]field.Element{}
	for _, name := range header {
		data[name] = []field.Element{}
	}

	nbRows := 0
	for {
		record, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("read row: %w", err)
		}
		for i, name := range header {
			data[name] = append(data[name], field.NewFromString(record[i]))
		}
		nbRows++
	}

	// A user-provided row count may only extend the trace, never shrink it.
	if conf.nbRows != 0 {
		if conf.nbRows < nbRows {
			return nil, fmt.Errorf("invalid number of rows: %d", conf.nbRows)
		}
		nbRows = conf.nbRows
	}

	return &CsvTrace{mapped: data, nbRows: nbRows}, nil
}
// Get returns the parsed values of the named column and panics if the
// column does not exist in the CSV.
func (c *CsvTrace) Get(name string) []field.Element {
	values, found := c.mapped[name]
	if !found {
		utils.Panic("column not found %s", name)
	}
	return values
}
// GetCommit registers and returns a committed column named after the CSV
// column, sized to the next power of two of the trace length. It panics
// if the column is absent from the CSV.
func (c *CsvTrace) GetCommit(b *wizard.Builder, name string) ifaces.Column {
	if _, found := c.mapped[name]; !found {
		utils.Panic("column not found %s", name)
	}
	return b.RegisterCommit(ifaces.ColID(name), utils.NextPowerOfTwo(c.nbRows))
}
// Assign assigns each of the named columns in the prover runtime,
// right-padding the CSV values with zeroes up to the next power of two.
// It panics when a requested column is missing from the CSV.
func (c *CsvTrace) Assign(run *wizard.ProverRuntime, names ...string) {
	paddedLen := utils.NextPowerOfTwo(c.nbRows)
	for _, name := range names {
		values, found := c.mapped[name]
		if !found {
			utils.Panic("column not found %s", name)
		}
		run.AssignColumn(ifaces.ColID(name), smartvectors.RightZeroPadded(values, paddedLen))
	}
}
// CheckAssignment verifies that each named column was assigned in the
// runtime consistently with the CSV data, panicking on any mismatch.
func (c *CsvTrace) CheckAssignment(run *wizard.ProverRuntime, names ...string) {
	for _, n := range names {
		c.checkAssignment(run, n)
	}
}
// checkAssignment resolves the column handle from its name and delegates
// the comparison to [CsvTrace.CheckAssignmentColumn].
func (c *CsvTrace) checkAssignment(run *wizard.ProverRuntime, name string) {
	handle := run.Spec.Columns.GetHandle(ifaces.ColID(name))
	c.CheckAssignmentColumn(run, name, handle)
}
// CheckAssignmentColumn compares the runtime assignment of col with the
// CSV values stored under name: the first c.nbRows entries must match the
// CSV exactly and every entry beyond that must be zero padding. Any
// discrepancy triggers a panic.
func (c *CsvTrace) CheckAssignmentColumn(run *wizard.ProverRuntime, name string, col ifaces.Column) {

	var (
		stored, found = c.mapped[name]
		assigned      = col.GetColAssignment(run)
		fullLength    = utils.NextPowerOfTwo(c.nbRows)
	)

	if !found {
		utils.Panic("column not found in CSV: %s", name)
	}

	if assigned.Len() < fullLength {
		utils.Panic("column %s has not been assigned with the expected length, found %v in CSV and %v in wizard", name, fullLength, assigned.Len())
	}

	vec := assigned.IntoRegVecSaveAlloc()

	// The meaningful prefix must match the CSV row by row.
	for row := 0; row < c.nbRows; row++ {
		if vec[row].Cmp(&stored[row]) != 0 {
			utils.Panic("column %s has not been assigned correctly: row %d CSV=%s got Wizard=%s", name, row, stored[row].String(), vec[row].String())
		}
	}

	// Everything past the CSV data must be zero.
	for row := c.nbRows; row < assigned.Len(); row++ {
		if !vec[row].IsZero() {
			utils.Panic("column %s is not properly zero-padded", name)
		}
	}
}
// Len returns the number of data rows in the trace, before any padding.
func (c *CsvTrace) Len() int {
	return c.nbRows
}
// LenPadded returns the padded trace length, i.e. the next power of two
// of the number of data rows.
func (c *CsvTrace) LenPadded() int {
	return utils.NextPowerOfTwo(c.nbRows)
}
// WriteExplicit formats value-provided columns into a csv file. Unlike
// [FmtCsv] it does not need the columns to be registered as the assignment
// of a wizard. It is suitable for test-case generation.
//
// All the columns are expected to have the same length. An empty column
// list produces only the (possibly empty) header line.
func WriteExplicit(w io.Writer, names []string, cols [][]field.Element, inHex bool) {

	fmt.Fprintf(w, "%v\n", strings.Join(names, ","))

	// Bug fix: with no columns there are no rows to print and indexing
	// cols[0] below would panic.
	if len(cols) == 0 {
		return
	}

	for i := range cols[0] {
		row := make([]string, 0, len(cols))
		for j := range cols {
			row = append(row, fmtFieldElement(inHex, cols[j][i]))
		}
		fmt.Fprintf(w, "%v\n", strings.Join(row, ","))
	}
}
// fmtFieldElement renders x for CSV output. Small uint64 values (< 1024)
// are printed in decimal for readability, unless hexadecimal output was
// explicitly requested; every other value is printed as 0x-prefixed hex.
//
// Bug fix: the condition was inverted — when inHex was true the function
// returned the decimal x.String(), contradicting the [InHex] option which
// asks for hexadecimal output.
func fmtFieldElement(inHex bool, x field.Element) string {
	if !inHex && x.IsUint64() && x.Uint64() < 1<<10 {
		return x.String()
	}
	return "0x" + x.Text(16)
}