Prover/ExecutionDataCollector (#3768)

* Update MIMC_hasher.go
* hashNum and Ct columns
* converse transitions
* clean: deimport the importation module and do a bit of renaming
* remove execdatahashing

---------

Co-authored-by: AlexandreBelling <alexandrebelling8@gmail.com>
Bogdan Ursu authored on 2024-08-06 15:28:45 +02:00, committed by GitHub
parent 8fb5c34cef
commit b92a86bc70
17 changed files with 800 additions and 443 deletions

View File

@@ -32,7 +32,7 @@ type ImportAndPadInputs struct {
// columns.
type importation struct {
// Inputs tracks the input structure used for instantiating this [importation]
// Inputs tracks the input structure used for instantiating this [Importation]
Inputs ImportAndPadInputs
HashNum ifaces.Column // identifier for the hash the current limb belongs to
Limbs ifaces.Column // limbs declared by the current row
@@ -51,7 +51,7 @@ type importation struct {
}
// importationAssignmentBuilder is a utility struct used to build an assignment
// for the importation module. It is an internal of the package and is called
// for the Importation module. It is an internal of the package and is called
// in the [importation.Run] function.
type importationAssignmentBuilder struct {
HashNum *common.VectorBuilder
@@ -75,7 +75,7 @@ type padderAssignmentBuilder interface {
padAndAssign(run *wizard.ProverRuntime)
}
// ImportAndPad defines and constrains the importation and the padding of a
// ImportAndPad defines and constrains the Importation and the padding of a
// group of generic byte modules following a prespecified padding strategy.
func ImportAndPad(comp *wizard.CompiledIOP, inp ImportAndPadInputs, numRows int) *importation {
@@ -180,7 +180,7 @@ func ImportAndPad(comp *wizard.CompiledIOP, inp ImportAndPadInputs, numRows int)
return res
}
// Run performs the assignment of the importation module.
// Run performs the assignment of the Importation module.
func (imp *importation) Run(run *wizard.ProverRuntime) {
var (
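Taken together, a caller wires this module up roughly as follows (a minimal sketch mirroring the test at the end of this diff; the column handles hashNum, ct, isActive, nBytes and limb are assumed to already exist in the caller's CompiledIOP):

	inp := importpad.ImportAndPadInputs{
		Name: "EXAMPLE",
		Src: generic.GenericByteModule{Data: generic.GenDataModule{
			HashNum: hashNum,  // identifier of the hash each limb belongs to
			Index:   ct,       // row counter
			TO_HASH: isActive, // filter selecting the meaningful rows
			NBytes:  nBytes,   // meaningful byte length of each limb
			Limb:    limb,     // 16-byte, left-aligned limbs
		}},
		PaddingStrategy: generic.MiMCUsecase,
	}
	imported := importpad.ImportAndPad(comp, inp, numRows)
	// later, in the prover runtime:
	imported.Run(run)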

View File

@@ -7,8 +7,8 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
)
// It prints the importation columns; it is not safe to use directly in tests.
// Rather, the CSV file for the importation columns should be filled in manually with the expected values.
// It prints the Importation columns; it is not safe to use directly in tests.
// Rather, the CSV file for the Importation columns should be filled in manually with the expected values.
// The function is kept here for reproducibility.
//
//lint:ignore U1000 Ignore unused function temporarily for debugging

View File

@@ -56,7 +56,7 @@ func makeTestCaseCLDModule(uc generic.HashingUsecase) (
// assign the importation columns
assignImportationColumns(run, &imported, numHash, blockSize, size)
// assign the whole packing module.
// assign the whole Packing module.
ctx.assignCleanLimbs(run)
decomposed.Assign(run)
}

View File

@@ -1,4 +1,4 @@
// Packing package implements the utilities for packing
// Packing package implements the utilities for Packing
// the limbs of variable length to the lanes of fixed length.
package packing
@@ -28,16 +28,16 @@ const (
BLOCK = "BLOCK"
)
// Importation implements the set of required columns for launching the packing module.
// Importation implements the set of required columns for launching the Packing module.
type Importation struct {
// The set of the limbs that are subject to packing (i.e., should be pushed into the pack).
// The set of the limbs that are subject to Packing (i.e., should be pushed into the pack).
// Limbs are 16 bytes, left aligned.
Limb ifaces.Column
// It is 1 if the associated limb is the first limb of the new hash.
IsNewHash ifaces.Column
// NByte is the meaningful length of limbs in byte.
// \Sum NByte[i] should be a multiple of the blockSize,
// otherwise a padding step is required before calling the packing module
// otherwise a padding step is required before calling the Packing module
NByte ifaces.Column
// The active part of the columns in the Importation module
IsActive ifaces.Column
@@ -54,26 +54,26 @@ type PackingInput struct {
Imported Importation
}
// packing implements the [wizard.ProverAction] receiving the limbs and relevant parameters,
// Packing implements the [wizard.ProverAction] receiving the limbs and relevant parameters,
//
// it repacks them into lanes of the same size.
type packing struct {
type Packing struct {
Inputs PackingInput
// submodules
Cleaning cleaningCtx
LookUps lookUpTables
Decomposed decomposition
// it stores the result of the packing
// it stores the result of the Packing
// limbs are repacked in Lane column.
Repacked laneRepacking
Block block
}
/*
NewPack creates a packing object.
NewPack creates a Packing object.
The lanes and relevant metadata can be accessed via packing.Repacked.
The lanes and relevant metadata can be accessed via Packing.Repacked.
It panics if the columns in Imported have a size different from;
@@ -81,7 +81,7 @@ It panics if the columns in Imported have a size different from;
It also panics if the number of generated blocks passes the limit inp.maxNumBlocks
*/
func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *packing {
func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *Packing {
var (
isNewHash = inp.Imported.IsNewHash
lookup = NewLookupTables(comp)
@@ -92,7 +92,7 @@ func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *packing {
block = newBlock(comp, getBlockInputs(lanes, inp.PackingParam))
)
return &packing{
return &Packing{
Inputs: inp,
Cleaning: cleaning,
Decomposed: decomposed,
@@ -101,8 +101,8 @@ func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *packing {
}
}
// Run assigns the packing module.
func (pck *packing) Run(run *wizard.ProverRuntime) {
// Run assigns the Packing module.
func (pck *Packing) Run(run *wizard.ProverRuntime) {
// assign subModules
pck.Cleaning.Assign(run)
@@ -111,7 +111,7 @@ func (pck *packing) Run(run *wizard.ProverRuntime) {
pck.Block.Assign(run)
}
// it stores the inputs/outputs of the spaghettifier used in the packing module.
// it stores the inputs/outputs of the spaghettifier used in the Packing module.
type spaghettiCtx struct {
// ContentSpaghetti
decLimbSp, decLenSp, decLenPowerSp ifaces.Column
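A minimal usage sketch of the Packing module, mirroring the test at the end of this diff (the imported columns are assumed to come from an import-and-pad step as above):

	packingInp := pack.PackingInput{
		MaxNumBlocks: maxNumBlocks, // assumed upper bound on the number of generated blocks
		PackingParam: generic.MiMCUsecase,
		Imported: pack.Importation{
			Limb:      imported.Limbs,
			NByte:     imported.NBytes,
			IsNewHash: imported.IsNewHash,
			IsActive:  imported.IsActive,
		},
	}
	packingMod := pack.NewPack(comp, packingInp)
	// the repacked lanes are then available as packingMod.Repacked.Lanes,
	// and the assignment is performed with packingMod.Run(run).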

View File

@@ -29,7 +29,7 @@ func makeTestCasePackingModule(uc generic.HashingUsecase) (
)
imported := Importation{}
pck := &packing{}
pck := &Packing{}
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
@@ -50,7 +50,7 @@ func makeTestCasePackingModule(uc generic.HashingUsecase) (
// assign the importation columns
assignImportationColumns(run, &imported, numHash, blockSize, size)
// assign the whole packing module.
// assign the whole Packing module.
pck.Run(run)
}
return define, prover

View File

@@ -1,190 +0,0 @@
package execdatahashing
import (
"fmt"
"strings"
"github.com/consensys/zkevm-monorepo/prover/crypto/mimc"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/query"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
)
// ExecDataHashingInput collects the inputs of the presently-specified [execDataHashing]
// module.
type ExecDataHashingInput struct {
// Data is a column storing the execution data bytes in the following format:
// - Each field element stores 31 bytes; this is a requirement for the MiMC
// hash function.
// - The input is prepadded.
Data ifaces.Column
// Selector is a binary indicator column indicating whether the current row
// of Data actually corresponds to a 31-byte limb. The rows marked with a
// zero are ignored by the present hashing module.
Selector ifaces.Column
}
// execData stores the prover context for hashing the execution data. Its role
// is to coordinate the MiMC module. The structure is also responsible for
// assigning its internal columns; it implements the [wizard.ProverAction]
// interface.
//
// The structure is to be constructed by the caller of the [HashExecutionData]
// function and all provided columns must have the same length.
type execData struct {
// Inputs stores the [ExecDataHashingInput] used to generate the module
Inputs *ExecDataHashingInput
// InitState stores the initial state of the compression function of MiMC
InitState ifaces.Column
// NextState stores the result of the compression function
NextState ifaces.Column
// FinalHash stores the latest value of the digest and can be locally opened
// on the last position to obtain the final value.
FinalHash ifaces.Column
// FinalOpening opens the last position of FinalHash and returns the
// resulting value of the digest.
FinalHashOpening query.LocalOpening
}
// HashExecutionData generates the execution data hashing components and the relevant
// constraints, and returns a [wizard.ProverAction] object that can be used to
// assign the internal columns.
func HashExecutionData(comp *wizard.CompiledIOP, edhi *ExecDataHashingInput) (ed *execData) {
// size denotes the size of the internal columns
var size = edhi.Data.Size()
ed = &execData{
Inputs: edhi,
InitState: comp.InsertCommit(0, deriveName[ifaces.ColID]("INIT_STATE"), size),
NextState: comp.InsertCommit(0, deriveName[ifaces.ColID]("NEW_STATE"), size),
FinalHash: comp.InsertCommit(0, deriveName[ifaces.ColID]("FINAL_HASH"), size),
}
// When the selector is zero, then the newState and the oldState are the
// same as the previous row.
comp.InsertGlobal(0,
deriveName[ifaces.QueryID]("OLD_STATE_ON_UNSELECTED"),
sym.Sub(
ed.InitState,
sym.Mul(
sym.Sub(1, column.Shift(ed.Inputs.Selector, -1)),
column.Shift(ed.InitState, -1),
),
sym.Mul(
column.Shift(ed.Inputs.Selector, -1),
column.Shift(ed.NextState, -1),
),
),
)
comp.InsertGlobal(0,
deriveName[ifaces.QueryID]("FINAL_HASH_CORRECTNESS"),
sym.Sub(
ed.FinalHash,
sym.Mul(
sym.Sub(1, ed.Inputs.Selector),
column.Shift(ed.FinalHash, -1),
),
sym.Mul(
ed.Inputs.Selector,
ed.NextState,
),
),
)
comp.InsertMiMC(0,
deriveName[ifaces.QueryID]("MIMC_COMPRESSION"),
ed.Inputs.Data,
ed.InitState,
ed.NextState,
)
ed.FinalHashOpening = comp.InsertLocalOpening(0,
deriveName[ifaces.QueryID]("FINAL_OPENING"),
column.Shift(ed.FinalHash, -1),
)
comp.InsertLocal(0,
deriveName[ifaces.QueryID]("INITIAL_INIT_STATE"),
sym.NewVariable(ed.InitState),
)
return ed
}
// Run implements the [wizard.ProverAction] interface. It is responsible for
// assigning the internal columns of ed.
func (ed *execData) Run(run *wizard.ProverRuntime) {
var (
selector = ed.Inputs.Selector.GetColAssignment(run)
data = ed.Inputs.Data.GetColAssignment(run)
size = ed.Inputs.Data.Size()
initState = make([]field.Element, 0, size)
newState = make([]field.Element, 0, size)
finalHash = make([]field.Element, 0, size)
)
for row := 0; row < size; row++ {
if row == 0 {
initState = append(initState, field.Zero())
}
if row > 0 {
prevSel := selector.Get(row - 1)
if prevSel.IsOne() {
initState = append(initState, newState[row-1])
}
if prevSel.IsZero() {
initState = append(initState, initState[row-1])
}
}
newState = append(newState,
mimc.BlockCompression(
initState[row],
data.Get(row),
),
)
currSel := selector.Get(row)
if currSel.IsZero() && row == 0 {
// We use zero as the initial value; this is just a filler that is
// unconstrained. It would also work with any other value, but this is fine
// as long as the execution data is never empty, which is our case.
finalHash = append(finalHash, field.Zero())
}
if currSel.IsZero() && row > 0 {
finalHash = append(finalHash, finalHash[row-1])
}
if currSel.IsOne() {
finalHash = append(finalHash, newState[row])
}
}
run.AssignColumn(ed.InitState.GetColID(), smartvectors.NewRegular(initState))
run.AssignColumn(ed.NextState.GetColID(), smartvectors.NewRegular(newState))
run.AssignColumn(ed.FinalHash.GetColID(), smartvectors.NewRegular(finalHash))
run.AssignLocalPoint(ed.FinalHashOpening.ID, finalHash[size-1])
}
func deriveName[S ~string](args ...any) S {
fmtArgs := []string{"HASHING_EXECUTION_DATA"}
for _, arg := range args {
fmtArgs = append(fmtArgs, fmt.Sprintf("%v", arg))
}
return S(strings.Join(fmtArgs, "_"))
}
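For illustration, deriveName simply joins its arguments under the "HASHING_EXECUTION_DATA" prefix, so that, for instance:

	id := deriveName[ifaces.QueryID]("FINAL_OPENING")
	// id == "HASHING_EXECUTION_DATA_FINAL_OPENING"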

View File

@@ -1,69 +0,0 @@
package execdatahashing
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/crypto/mimc"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils/csvtraces"
"github.com/stretchr/testify/assert"
)
func TestCorrectness(t *testing.T) {
var (
inp *ExecDataHashingInput
mod *execData
inpCt = csvtraces.MustOpenCsvFile("testdata/input.csv")
)
comp := wizard.Compile(func(build *wizard.Builder) {
inp = &ExecDataHashingInput{
Data: inpCt.GetCommit(build, "DATA"),
Selector: inpCt.GetCommit(build, "SELECTOR"),
}
mod = HashExecutionData(build.CompiledIOP, inp)
}, dummy.Compile)
proof := wizard.Prove(comp, func(run *wizard.ProverRuntime) {
inpCt.Assign(run, "DATA", "SELECTOR")
mod.Run(run)
var (
fHash = manuallyComputeFinalHash(run, inp)
fHash2 = run.GetLocalPointEvalParams(mod.FinalHashOpening.ID).Y
)
assert.Equalf(t, fHash.String(), fHash2.String(), "the hash computed by the module is not the correct one")
})
if err := wizard.Verify(comp, proof); err != nil {
t.Fatal("proof failed", err)
}
t.Log("proof succeeded")
}
func manuallyComputeFinalHash(
run *wizard.ProverRuntime,
inp *ExecDataHashingInput,
) field.Element {
var (
data = inp.Data.GetColAssignment(run).IntoRegVecSaveAlloc()
selector = inp.Selector.GetColAssignment(run).IntoRegVecSaveAlloc()
actualInputs = make([]field.Element, 0)
)
for row := range selector {
if selector[row].IsOne() {
actualInputs = append(actualInputs, data[row])
}
}
return mimc.HashVec(actualInputs)
}

View File

@@ -1,17 +0,0 @@
DATA,SELECTOR
"456589098098","0"
"978575897","1"
"98908789","1"
"908908","0"
"32534657689","1"
"567563","1"
"5757698","1"
"0","0"
"0","0"
"0","0"
"0","0"
"0","0"
"0","0"
"0","0"
"0","0"
"0","0"

View File

@@ -146,61 +146,91 @@ const (
powBlockHash = "1" // 16 bytes when loading BlockHash, 2^(128-16*8)
powSenderAddrHi = "79228162514264337593543950336" // 4 bytes when loading SENDER ADDR HI, 2^(128-4*8)
powSenderAddrLo = "1" // 16 bytes when loading SenderAddrLo, 2^(128-16*8)
hashNum = 1 // the constant hashNum value needed as an input for padding and packing
)
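These powers encode left alignment into a 16-byte limb: a value of b bytes is multiplied by 2^(128-8*b). For the 4-byte SenderAddrHi this gives 2^96 = 79228162514264337593543950336, matching powSenderAddrHi above, while 16-byte values need no shift, hence the power "1".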
// The ExecutionDataCollector contains columns that encode the computation and fetching of appropriate data
// from several arithmetization fetchers.
type ExecutionDataCollector struct {
// BlockId ranges from 1 to the maximal number of blocks,
// AbsTxID is the absolute transaction ID, which is unique among all blocks in the conflation
// and also starts from 1.
// AbsTxIDMax is the ID of the last transaction in the conflated batch.
BlockID, AbsTxID, AbsTxIDMax ifaces.Column
Limb, NoBytes ifaces.Column
UnalignedLimb, AlignedPow ifaces.Column
TotalNoTxBlock ifaces.Column
IsActive ifaces.Column
// The Limb data and the number of bytes in the limb.
Limb, NoBytes ifaces.Column
// UnalignedLimb contains the raw data from the corresponding arithmetization fetcher.
// AlignedPow is the power by which UnalignedLimb is multiplied in order to obtain the Limb,
// which is done to ensure consistent formatting.
UnalignedLimb, AlignedPow ifaces.Column
// the total number of transactions inside the current block.
TotalNoTxBlock ifaces.Column
// indicator column, specifying when the module contains useful data
IsActive ifaces.Column
// indicator columns that light up as 1 when the corresponding value type is being loaded
IsNoTx, IsTimestamp, IsBlockHashHi, IsBlockHashLo, IsAddrHi, IsAddrLo, IsTxRLP ifaces.Column
// counter column, increases by 1 for every new active row. Not needed inside this module, but required
// for invoking the padding and packing modules.
Ct ifaces.Column
// HashNum is a constant column only needed for invoking the padding and packing modules.
HashNum ifaces.Column
// FirstAbsTxIDBlock/LastAbsTxIDBlock contain the first/last absolute transaction IDs inside the current block.
FirstAbsTxIDBlock, LastAbsTxIDBlock ifaces.Column
// lights up as 1 on the row that contains the last RLP limb of the current transaction.
EndOfRlpSegment ifaces.Column
// Selector columns
// SelectorBlockDiff[i]=1 if (edc.BlockId[i] = edc.BlockId[i+1]), used to enforce constancies when
// inside a block segment
SelectorBlockDiff ifaces.Column
ComputeSelectorBlockDiff wizard.ProverAction
// SelectorLastTxBlock[i]=1 if (edc.AbsTxID[i]=edc.LastAbsTxIDBlock[i]), used to enforce constraints
// to transition to the next block.
SelectorLastTxBlock ifaces.Column
ComputeSelectorLastTxBlock wizard.ProverAction
FirstAbsTxIDBlock, LastAbsTxIDBlock ifaces.Column
// SelectorEndOfAllTx[i]=1 if (edc.AbsTxID[i]=edc.AbsTxIDMax[i]), used to transition to the inactive
// part of the module.
SelectorEndOfAllTx ifaces.Column
ComputeSelectorEndOfAllTx wizard.ProverAction
EndOfRlpSegment ifaces.Column
// SelectorAbsTxIDDiff[i]=1 if (edc.AbsTxID[i]=edc.AbsTxID[i+1]), used to enforce constant constraints inside a transaction segment.
SelectorAbsTxIDDiff ifaces.Column
ComputeSelectorAbsTxIDDiff wizard.ProverAction
}
func NewLimbSummary(comp *wizard.CompiledIOP, name string, size int) ExecutionDataCollector {
// NewExecutionDataCollector instantiates an ExecutionDataCollector with unconstrained columns.
func NewExecutionDataCollector(comp *wizard.CompiledIOP, name string, size int) ExecutionDataCollector {
res := ExecutionDataCollector{
BlockID: util.CreateCol(name, "BLOCK_ID", size, comp),
AbsTxID: util.CreateCol(name, "ABS_TX_ID", size, comp),
AbsTxIDMax: util.CreateCol(name, "ABS_TX_ID_MAX", size, comp),
FirstAbsTxIDBlock: util.CreateCol(name, "FIRST_ABS_TX_ID_BLOCK", size, comp),
LastAbsTxIDBlock: util.CreateCol(name, "LAST_ABS_TX_ID_BLOCK", size, comp),
Limb: util.CreateCol(name, "LIMB", size, comp),
NoBytes: util.CreateCol(name, "NO_BYTES", size, comp),
UnalignedLimb: util.CreateCol(name, "UNALIGNED_LIMB", size, comp),
AlignedPow: util.CreateCol(name, "ALIGNED_POW", size, comp),
TotalNoTxBlock: util.CreateCol(name, "TOTAL_NO_TX_BLOCK", size, comp),
IsActive: util.CreateCol(name, "IS_ACTIVE", size, comp),
IsNoTx: util.CreateCol(name, "IS_NO_TX", size, comp),
IsBlockHashHi: util.CreateCol(name, "IS_BLOCK_HASH_HI", size, comp),
IsBlockHashLo: util.CreateCol(name, "IS_BLOCK_HASH_LO", size, comp),
IsTimestamp: util.CreateCol(name, "IS_TIMESTAMP", size, comp),
IsTxRLP: util.CreateCol(name, "IS_TX_RLP", size, comp),
IsAddrHi: util.CreateCol(name, "IS_ADDR_HI", size, comp),
IsAddrLo: util.CreateCol(name, "IS_ADDR_LO", size, comp),
EndOfRlpSegment: util.CreateCol(name, "END_OF_RLP_SEGMENT", size, comp),
Limb: util.CreateCol(name, "LIMB", size, comp),
NoBytes: util.CreateCol(name, "NO_BYTES", size, comp),
UnalignedLimb: util.CreateCol(name, "UNALIGNED_LIMB", size, comp),
AlignedPow: util.CreateCol(name, "ALIGNED_POW", size, comp),
TotalNoTxBlock: util.CreateCol(name, "TOTAL_NO_TX_BLOCK", size, comp),
IsActive: util.CreateCol(name, "IS_ACTIVE", size, comp),
IsNoTx: util.CreateCol(name, "IS_NO_TX", size, comp),
IsBlockHashHi: util.CreateCol(name, "IS_BLOCK_HASH_HI", size, comp),
IsBlockHashLo: util.CreateCol(name, "IS_BLOCK_HASH_LO", size, comp),
IsTimestamp: util.CreateCol(name, "IS_TIMESTAMP", size, comp),
IsTxRLP: util.CreateCol(name, "IS_TX_RLP", size, comp),
IsAddrHi: util.CreateCol(name, "IS_ADDR_HI", size, comp),
IsAddrLo: util.CreateCol(name, "IS_ADDR_LO", size, comp),
Ct: util.CreateCol(name, "CT", size, comp),
HashNum: util.CreateCol(name, "HASH_NUM", size, comp),
EndOfRlpSegment: util.CreateCol(name, "END_OF_RLP_SEGMENT", size, comp),
}
return res
}
func GetSummarySize(btm *fetch.BlockTxnMetadata, bdc *fetch.BlockDataCols, td *fetch.TxnData, rt *fetch.RlpTxn) int {
// GetSummarySize estimates a necessary upper bound on the size of the ExecutionDataCollector columns.
// We currently ignore the following modules: btm *fetch.BlockTxnMetadata, bdc *fetch.BlockDataCols.
func GetSummarySize(td *fetch.TxnData, rt *fetch.RlpTxn) int {
// number of transactions, block timestamp, blockhash + for every transaction, sender address + transaction RLP limbs
size := td.Ct.Size()
if size < rt.Limb.Size() {
@@ -209,9 +239,11 @@ func GetSummarySize(btm *fetch.BlockTxnMetadata, bdc *fetch.BlockDataCols, td *f
return size
}
// DefineBlockIdCounterConstraints enforces that the BlockID starts from 1. BlockID can either increase by 1 or stay the same.
// Finally, BlockID is more finely constrained to only increase when edc.IsActive = 1, edc.SelectorLastTxBlock = 1,
// edc.EndOfRlpSegment = 1 and edc.SelectorEndOfAllTx = 0.
func DefineBlockIdCounterConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// Counter constraints
// First, the counter starts from 0
// BlockID starts from 1
comp.InsertLocal(0, ifaces.QueryIDf("%s_%v_COUNTER_START_LOCAL_CONSTRAINT", name, edc.BlockID.GetColID()),
sym.Mul(
edc.IsActive,
@@ -257,9 +289,11 @@ func DefineBlockIdCounterConstraints(comp *wizard.CompiledIOP, edc *ExecutionDat
)
}
// DefineAbsTxIdCounterConstraints concerns AbsTxId, which starts from 1 and subsequently can either stay the same or increase by 1.
// AbsTxId will increase when edc.IsActive = 1 and edc.EndOfRlpSegment = 1 and edc.SelectorEndOfAllTx = 0.
// AbsTxId remains the same when edc.IsActive = 0 and edc.EndOfRlpSegment = 0 and edc.SelectorEndOfAllTx = 0.
func DefineAbsTxIdCounterConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// Counter constraints
// First, the counter starts from 1
// Counter constraints: first, the AbsTxID counter starts from 1
comp.InsertLocal(0, ifaces.QueryIDf("%s_%v_COUNTER_START_LOCAL_CONSTRAINT", name, edc.AbsTxID.GetColID()),
sym.Mul(
edc.IsActive,
@@ -322,8 +356,39 @@ func DefineAbsTxIdCounterConstraints(comp *wizard.CompiledIOP, edc *ExecutionDat
)
}
// DefineCtCounterConstraints constrains edc.Ct to start from 0 and to increase by 1 on every active row.
func DefineCtCounterConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// First, the counter starts from 0.
comp.InsertLocal(0, ifaces.QueryIDf("%s_%v_COUNTER_START_LOCAL_CONSTRAINT", name, edc.Ct.GetColID()),
ifaces.ColumnAsVariable(edc.Ct),
)
// Secondly, the counter increases by 1 on every active row.
comp.InsertGlobal(0, ifaces.QueryIDf("%s_%s", name, "COUNTER_GLOBAL"),
sym.Mul(
edc.IsActive,
sym.Sub(edc.Ct,
column.Shift(edc.Ct, -1),
1,
),
),
)
}
// DefineHashNumConstraints requires that edc.HashNum is constantly equal to hashNum, currently hardcoded to 1.
func DefineHashNumConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
comp.InsertGlobal(0, ifaces.QueryIDf("%s_%s", name, "HASH_NUM_GLOBAL"),
sym.Mul(
edc.IsActive,
sym.Sub(edc.HashNum,
hashNum,
),
),
)
}
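Note how this ties in with the assignment at the end of this diff: HashNum is assigned as a constant column equal to hashNum, so IsActive * (HashNum - hashNum) = 0 holds on every row, and the column is accordingly excluded from the zeroization constraints in DefineZeroizationConstraints.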
// DefineIndicatorOrder constrains the order of load operations.
func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// start with IsNoTx = 1
// the module starts with IsNoTx[0] = 1
comp.InsertLocal(0,
ifaces.QueryIDf("%s_START_WITH_IS_NO_TX", name),
sym.Sub(
@@ -333,6 +398,8 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
)
// IsNbTx -> isTimestamp
// From IsNbTx[i]=1, we can only transition to isTimestamp[i+1]=1 on the next row.
// Conversely, we have that isTimestamp[i+1]=1 implies that IsNbTx[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_NB_TX_TO_IS_TIMESTAMP", name),
sym.Sub(
@@ -342,6 +409,8 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
)
// isTimestamp -> isBlockHashHi
// From IsTimestamp[i]=1, we can only transition to IsBlockHashHi[i+1]=1 on the next row.
// Conversely, we have that IsBlockHashHi[i+1]=1 implies that IsTimestamp[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_NB_TX_TO_IS_BLOCKHASH_HI", name),
sym.Sub(
@@ -351,6 +420,8 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
)
// isBlockHashHi->isBlockHashLo
// From isBlockHashHi[i]=1, we can only transition to isBlockHashLo[i+1]=1 on the next row.
// Conversely, we have that isBlockHashLo[i+1]=1 implies that isBlockHashHi[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_BLOCKHASH_HI_TO_BLOCKHASH_LO", name),
sym.Sub(
@@ -359,7 +430,10 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
),
)
// isBlockHashLO->IsAddr
// isBlockHashLO->IsAddrHi
// From IsBlockHashLo[i]=1, we can only transition to IsAddrHi[i+1]=1 on the next row.
// The converse direction does not necessarily hold,
// we do NOT have that IsAddrHi[i+1]=1 implies that IsBlockHashLo[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_BLOCKHASH_LO_TO_IS_ADDR_HI", name),
sym.Mul(
@@ -372,6 +446,8 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
)
// IsAddrHi -> IsAddrLo
// From IsAddrHi[i]=1, we can only transition to IsAddrLo[i+1]=1 on the next row.
// Conversely, we have that IsAddrLo[i+1]=1 implies that IsAddrHi[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_ADDR_HI_TO_IS_ADDR_LO", name),
sym.Sub(
@@ -381,6 +457,9 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
)
// IsAddrLo -> IsTxRLP
// From IsAddrLo[i]=1, we can only transition to IsTxRLP[i+1]=1 on the next row.
// The converse direction does not necessarily hold,
// we do NOT have that IsTxRLP[i+1]=1 implies that IsAddrLo[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_ADDR_LO_TO_IS_TX_RLP", name),
sym.Mul(
@@ -393,6 +472,8 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
)
// IsTxRLP -> IsTxRLP while inside the transaction RLP segment
// If IsTxRLP[i]=1 and EndOfRlpSegment[i]=0, we can transition to IsTxRLP[i+1]=1 on the next row.
// we do NOT have that IsTxRLP[i+1]=1 implies that IsTxRLP[i]=1.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_TX_RLP_REMAINS_1_INSIDE_RLP_SEGMENT", name),
sym.Mul(
@@ -402,11 +483,14 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
edc.IsTxRLP, // if IsTxRLP is 1 then on the next row IsTxRLP will be 1 if we are inside the block
sym.Sub(
1,
column.Shift(edc.IsTxRLP, 1),
column.Shift(edc.IsTxRLP, 1), // IsTxRLP=1 on the next row
),
),
)
// IsTxRLP -> IsNoTx, moving to the next block.
// If IsTxRLP[i]=1 and SelectorLastTxBlock[i]=1 and EndOfRlpSegment[i]=1 and SelectorEndOfAllTx[i]=0,
// we can transition to IsNoTx[i+1]=1 on the next row.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_TX_RLP_TO_NEXT_BLOCK", name),
sym.Mul(
@@ -425,6 +509,9 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
),
)
// IsTxRLP -> IsAddrHi, moving to the next transaction segment and loading the next sender address.
// If IsTxRLP[i]=1 and SelectorLastTxBlock[i]=0 and EndOfRlpSegment[i]=1,
// we can transition to IsAddrHi[i+1]=1 on the next row.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_TX_RLP_TO_IS_ADDR_HI_GLOBAL_CONSTRAINT", name),
sym.Mul(
@@ -442,7 +529,7 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
),
)
// direct transition to inactive part when there are no overflow bytes
// from IsTxRLP=1, we can directly transition to the inactive part.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_IS_TX_RLP_DIRECTLY_TO_IS_INACTIVE_GLOBAL_CONSTRAINT", name),
sym.Mul(
@@ -452,9 +539,66 @@ func DefineIndicatorOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector,
column.Shift(edc.IsActive, 1), // all the above forces isActive to be 0 on the next position
),
)
}
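Schematically, the two constraint shapes used above differ in implication strength (a sketch, with X and Y standing for any two indicator columns):

	// two-way: forces X[i] = Y[i+1], so each implies the other
	sym.Sub(X, column.Shift(Y, 1))
	// one-way: X[i] = 1 (together with any other listed factors) forces Y[i+1] = 1
	sym.Mul(X, sym.Sub(1, column.Shift(Y, 1)))

The converse directions of the one-way transitions are handled separately in DefineIndicatorConverseOrder below.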
// DefineIndicatorConverseOrder constrains the converse order of load operations.
// These constraints might not be needed in order for the module to be secure.
func DefineIndicatorConverseOrder(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// IsNoTx[i+1]=1 -> IsTxRLP[i]=1
// The converse has additional conditions and is treated above.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_CONVERSE_IS_TX_RLP_TO_IS_NO_TX_NEXT_BLOCK", name),
sym.Mul(
column.Shift(edc.IsNoTx, 1),
sym.Sub(
edc.IsTxRLP,
column.Shift(edc.IsNoTx, 1),
),
),
)
// IsTxRLP[i+1]=1 -> (IsTxRLP[i]=1 or IsAddrLo[i]=1)
// The converse has additional conditions and is treated above.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_CONVERSE_IS_TX_RLP/IS_ADDR_LO_TO_IS_TX_RLP", name),
sym.Mul(
column.Shift(edc.IsTxRLP, 1),
sym.Sub(
column.Shift(edc.IsTxRLP, 1),
edc.IsTxRLP,
edc.IsAddrLo,
),
),
)
// IsAddrHi[i+1]=1 -> (IsTxRLP[i]=1 or IsBlockHashLo[i]=1)
// The converse has additional conditions and is treated above.
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_CONVERSE_IS_ADDR_HI/IS_TX_RLP_TO_IS_BLOCKHASH_LO", name),
sym.Mul(
column.Shift(edc.IsAddrHi, 1),
sym.Sub(
column.Shift(edc.IsAddrHi, 1),
edc.IsTxRLP,
edc.IsBlockHashLo,
),
),
)
// isActive[i+1]=0 and isActive[i]=1 ->IsTxRLP[i]=1
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_CONVERSE_IS_ACTIVE_TO_IS_TX_RLP", name),
sym.Mul(
sym.Sub(1, edc.IsActive),
column.Shift(edc.IsActive, -1),
sym.Sub(
1,
column.Shift(edc.IsTxRLP, -1),
),
),
)
}
// DefineIndicatorExclusion enforces that indicators for different load operations cannot light up simultaneously.
func DefineIndicatorExclusion(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// IsNoTx + IsBlockHashHi + IsBlockHashLo + IsTimestamp + IsTxRLP + IsAddrHi + IsAddrLo = 1
comp.InsertGlobal(0,
@@ -472,9 +616,9 @@ func DefineIndicatorExclusion(comp *wizard.CompiledIOP, edc *ExecutionDataCollec
),
),
)
}
// DefineAlignmentPowers enforces the correct alignment exponent values for each value/row type.
func DefineAlignmentPowers(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// Value of the alignment exponent, isNoTx
comp.InsertGlobal(0,
@@ -547,8 +691,13 @@ func DefineAlignmentPowers(comp *wizard.CompiledIOP, edc *ExecutionDataCollector
),
),
)
// We skip a constraint for the value of the alignment exponent when IS_TX_RLP=1,
// since the unaligned limb does not matter in that case.
}
// DefineNumberOfBytesConstraints defines the number of bytes loaded for each operation type.
// The RLP bytes are checked separately, in the ProjectionQueries function.
func DefineNumberOfBytesConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// noOfBytes isNoTx
comp.InsertGlobal(0,
@@ -621,10 +770,12 @@ func DefineNumberOfBytesConstraints(comp *wizard.CompiledIOP, edc *ExecutionData
),
)
// noOfBytes limbs in the RLP transaction data
// this must be checked with a projection query
// We must also enforce the number of bytes for the limbs in the RLP transaction data;
// this will be checked with a projection query in the ProjectionQueries function.
}
// ProjectionQueries computes projection queries to each arithmetization fetcher:
// fetch.TimestampFetcher, fetch.BlockTxnMetadata, fetch.TxnDataFetcher and fetch.RlpTxnFetcher.
func ProjectionQueries(comp *wizard.CompiledIOP,
edc *ExecutionDataCollector,
name string,
@@ -633,14 +784,16 @@ func ProjectionQueries(comp *wizard.CompiledIOP,
txnData fetch.TxnDataFetcher,
rlp fetch.RlpTxnFetcher) {
// Prepare the projection query to the BlockData fetcher
// compute the fetcher table, directly tied to the arithmetization.
metadataTable := []ifaces.Column{
metadata.BlockID,
metadata.TotalNoTxnBlock,
metadata.FirstAbsTxId,
metadata.LastAbsTxId,
}
lsMetadataTable := []ifaces.Column{
// compute the ExecutionDataCollector table.
edcMetadataTable := []ifaces.Column{
edc.BlockID,
edc.TotalNoTxBlock,
edc.FirstAbsTxIDBlock,
@@ -649,38 +802,46 @@ func ProjectionQueries(comp *wizard.CompiledIOP,
projection.InsertProjection(comp,
ifaces.QueryIDf("%s_BLOCK_METADATA_PROJECTION", name),
lsMetadataTable,
edcMetadataTable,
metadataTable,
edc.IsNoTx,
edc.IsNoTx, // We filter on rows where the blockdata is loaded.
metadata.FilterFetched,
)
// !!! need to add segment constancies, since I am only using isNoTx in BLOCK_METADATA_PROJECTION
// Because we filtered on edc.IsNoTx=1, we also ensure that FirstAbsTxIDBlock and LastAbsTxIDBlock
// remain constant in the DefineConstantConstraints function.
// we do not need to also check the constancy of TotalNoTxBlock, as it is only used when IsNoTx=1
// Prepare the projection query to the BlockData fetcher, but concerning timestamps
// compute the fetcher table, directly tied to the arithmetization.
timestampTable := []ifaces.Column{
timestamps.RelBlock,
timestamps.Data,
}
lsTimestamps := []ifaces.Column{
// compute the ExecutionDataCollector table.
edcTimestamps := []ifaces.Column{
edc.BlockID,
edc.UnalignedLimb,
}
projection.InsertProjection(comp,
ifaces.QueryIDf("%s_TIMESTAMP_PROJECTION", name),
lsTimestamps,
edcTimestamps,
timestampTable,
edc.IsTimestamp,
edc.IsTimestamp, // filter on IsTimestamp=1
timestamps.FilterFetched,
)
// Prepare a projection query to the TxnData fetcher, to check the Hi part of the sender address.
// compute the fetcher table, directly tied to the arithmetization.
txnDataTableHi := []ifaces.Column{
txnData.RelBlock,
txnData.AbsTxNum,
txnData.FromHi,
txnData.FromHi, // checks that the Hi part of the sender address is fetched correctly.
}
lsTxnSenderAddressTableHi := []ifaces.Column{
// compute the ExecutionDataCollector table.
edcTxnSenderAddressTableHi := []ifaces.Column{
edc.BlockID,
edc.AbsTxID,
edc.UnalignedLimb,
@@ -688,18 +849,21 @@ func ProjectionQueries(comp *wizard.CompiledIOP,
projection.InsertProjection(comp,
ifaces.QueryIDf("%s_SENDER_ADDRESS_HI_PROJECTION", name),
lsTxnSenderAddressTableHi,
edcTxnSenderAddressTableHi,
txnDataTableHi,
edc.IsAddrHi,
edc.IsAddrHi, // filter on IsAddrHi=1
txnData.FilterFetched,
)
// Prepare the projection query to the TxnData fetcher, to check the Lo part of the sender address.
// compute the fetcher table, directly tied to the arithmetization.
txnDataTableLo := []ifaces.Column{
txnData.RelBlock,
txnData.AbsTxNum,
txnData.FromLo,
}
lsTxnSenderAddressTableLo := []ifaces.Column{
// compute the ExecutionDataCollector table.
edcTxnSenderAddressTableLo := []ifaces.Column{
edc.BlockID,
edc.AbsTxID,
edc.UnalignedLimb,
@@ -707,12 +871,15 @@ func ProjectionQueries(comp *wizard.CompiledIOP,
projection.InsertProjection(comp,
ifaces.QueryIDf("%s_SENDER_ADDRESS_LO_PROJECTION", name),
lsTxnSenderAddressTableLo,
edcTxnSenderAddressTableLo,
txnDataTableLo,
edc.IsAddrLo,
edc.IsAddrLo, // filter on IsAddrLo=1
txnData.FilterFetched,
)
// Prepare the projection query to the RlpTxn fetcher, to check:
// AbsTxNum, AbsTxNumMax, Limb, NBytes and EndOfRlpSegment.
// first compute the fetcher table, directly tied to the arithmetization.
rlpDataTable := []ifaces.Column{
rlp.AbsTxNum,
rlp.AbsTxNumMax,
@@ -720,23 +887,27 @@ func ProjectionQueries(comp *wizard.CompiledIOP,
rlp.NBytes,
rlp.EndOfRlpSegment,
}
lsRlpDataTable := []ifaces.Column{
edc.AbsTxID,
edc.AbsTxIDMax,
edc.UnalignedLimb,
edc.NoBytes,
// compute the ExecutionDataCollector table.
edcRlpDataTable := []ifaces.Column{
edc.AbsTxID, // Check correctness of the AbsTxID.
edc.AbsTxIDMax, // The fact that it is constant is enforced in DefineConstantConstraints.
edc.UnalignedLimb, // Check correctness of the limbs.
edc.NoBytes, // Check correctness of the number of bytes.
edc.EndOfRlpSegment, // This constrains EndOfRlpSegment on rows where edc.IsTxRLP = 1, but we still need to constrain it elsewhere
// EndOfRlpSegment is also constrained in DefineSelectorConstraints, which requires that EndOfRlpSegment=0 when AbsTxID is constant.
// EndOfRlpSegment is also constrained in DefineZeroizationConstraints, with respect to IsActive.
}
projection.InsertProjection(comp,
ifaces.QueryIDf("%s_RLP_LIMB_DATA_PROJECTION", name),
lsRlpDataTable,
edcRlpDataTable,
rlpDataTable,
edc.IsTxRLP,
edc.IsTxRLP, // filter on IsTxRLP=1
rlp.FilterFetched,
)
}
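Informally, each of these projection queries asserts that the rows of the ExecutionDataCollector table selected by its filter equal, in order, the rows of the fetcher table selected by the fetcher's filter. A minimal out-of-framework sketch of that check (assuming this reading of InsertProjection):

	// rows[i] is one table row; filter[i] keeps it when equal to one.
	filterRows := func(rows [][]field.Element, filter []field.Element) [][]field.Element {
		var kept [][]field.Element
		for i := range filter {
			if filter[i].IsOne() {
				kept = append(kept, rows[i])
			}
		}
		return kept
	}
	// the projection holds if filterRows(edcTable, edcFilter) and
	// filterRows(fetcherTable, fetcherFilter) agree entry by entry.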
// EnforceZeroOnInactiveFilter is a generic helper function that enforces that targetCol is 0 when filterExpr = 0.
func EnforceZeroOnInactiveFilter(comp *wizard.CompiledIOP, filterExpr *sym.Expression, targetCol ifaces.Column, name, subname string) {
comp.InsertGlobal(
0,
@@ -751,7 +922,9 @@ func EnforceZeroOnInactiveFilter(comp *wizard.CompiledIOP, filterExpr *sym.Expre
)
}
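A typical invocation, as used by DefineZeroizationConstraints further below:

	// force edc.BlockID to zero wherever edc.IsActive is zero
	EnforceZeroOnInactiveFilter(comp, ifaces.ColumnAsVariable(edc.IsActive), edc.BlockID, name, "IS_ACTIVE")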
// DefineSelectorConstraints constrains the selectors, but also the EndOfRlpSegment column.
func DefineSelectorConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// We first compute the prover actions
edc.SelectorLastTxBlock, edc.ComputeSelectorLastTxBlock = dedicated.IsZero(
comp,
sym.Sub(
@@ -785,6 +958,7 @@ func DefineSelectorConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataColle
)
// edc.EndOfRlpSegment is partially constrained in the projection queries, on areas where edc.IsTxRLP = 1
// it is also constrained in DefineZeroizationConstraints.
// here we require that when edc.IsTxRLP = 0, we have EndOfRlpSegment = 0
comp.InsertGlobal(
0,
@@ -798,6 +972,9 @@ func DefineSelectorConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataColle
),
)
// Recall that edc.EndOfRlpSegment is partially constrained in the projection queries, on areas where edc.IsTxRLP = 1
// it is also constrained in DefineZeroizationConstraints.
// Here we ask that whenever the AbsTxID is constant, EndOfRlpSegment cannot light up (we are inside the same transaction segment).
comp.InsertGlobal(
0,
ifaces.QueryIDf("%s_END_OF_RLP_SEGMENT_IS_0_WHENEVER_TX_ID_IS_CONSTANT", name),
@@ -809,17 +986,21 @@ func DefineSelectorConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataColle
}
// DefineCounterConstraints enforces counter constraints for BlockId, AbsTxId and Ct.
func DefineCounterConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
DefineBlockIdCounterConstraints(comp, edc, name)
DefineAbsTxIdCounterConstraints(comp, edc, name)
DefineCtCounterConstraints(comp, edc, name)
}
// DefineZeroizationConstraints enforces that multiple columns are zero when the IsActive filter is zero.
func DefineZeroizationConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// enforce zero fields when isActive is not set to 1
var emptyWhenInactive = [...]ifaces.Column{
edc.BlockID,
edc.AbsTxID,
edc.AbsTxIDMax,
edc.Ct,
edc.TotalNoTxBlock,
edc.IsNoTx,
edc.IsBlockHashHi,
@@ -835,6 +1016,7 @@ func DefineZeroizationConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCo
edc.NoBytes,
edc.UnalignedLimb,
edc.AlignedPow,
// exclude edc.HashNum, as it is a fully constant column
}
for _, col := range emptyWhenInactive {
@@ -842,6 +1024,8 @@ func DefineZeroizationConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCo
EnforceZeroOnInactiveFilter(comp, ifaces.ColumnAsVariable(edc.IsActive), col, name, "IS_ACTIVE")
}
}
// DefineIndicatorsMustBeBinary enforces that various indicator columns are binary.
func DefineIndicatorsMustBeBinary(comp *wizard.CompiledIOP, edc *ExecutionDataCollector) {
util.MustBeBinary(comp, edc.IsActive)
util.MustBeBinary(comp, edc.IsNoTx)
@@ -853,10 +1037,11 @@ func DefineIndicatorsMustBeBinary(comp *wizard.CompiledIOP, edc *ExecutionDataCo
util.MustBeBinary(comp, edc.IsTxRLP)
}
// DefineConstantConstraints requires that FirstAbsTxIDBlock/LastAbsTxIDBlock remain constant inside the block,
// and that AbsTxIDMax is constant on the active part of the module.
func DefineConstantConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// in order for SelectorLastTxBlock to be constrained properly, the values of FirstAbsTxIDBlock and LastAbsTxIDBlock
// must be constant for the entire segment defined by the block (otherwise SelectorLastTxBlock is meaningless).
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_%v_CONSTANT_FIRST_ABS_TX_ID_BLOCK_INSIDE_THE_BLOCK_SEGMENT", name, edc.FirstAbsTxIDBlock.GetColID()),
sym.Mul(
@@ -881,7 +1066,6 @@ func DefineConstantConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataColle
// we do not constrain that FirstAbsTxIDBlock/LastAbsTxIDBlock increase only by 1; this is
// constrained in the corresponding fetcher and enforced by the projection query (in conjunction
// with the constancy property)
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_%v_CONSTANT_ABS_TX_ID_MAX", name, edc.AbsTxIDMax.GetColID()),
sym.Mul(
@@ -894,26 +1078,25 @@ func DefineConstantConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataColle
)
}
func DefineLimbSummary(comp *wizard.CompiledIOP,
edc *ExecutionDataCollector,
name string,
timestamps fetch.TimestampFetcher,
metadata fetch.BlockTxnMetadata,
txnData fetch.TxnDataFetcher,
rlp fetch.RlpTxnFetcher) {
DefineSelectorConstraints(comp, edc, name)
DefineIndicatorExclusion(comp, edc, name)
DefineAlignmentPowers(comp, edc, name)
DefineIndicatorOrder(comp, edc, name)
DefineIndicatorsMustBeBinary(comp, edc)
DefineNumberOfBytesConstraints(comp, edc, name)
DefineZeroizationConstraints(comp, edc, name)
DefineConstantConstraints(comp, edc, name)
// isActive constraints
// require that isActive is binary
// DefineLimbAlignmentConstraints constrains that Limb=UnalignedLimb*AlignedPow.
func DefineLimbAlignmentConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// unaligned limb --- aligned limb constraints
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_UNALIGNED_LIMB_AND_ALIGNED_LIMB_CONSTRAINT", name),
sym.Sub(
edc.Limb,
sym.Mul(
edc.UnalignedLimb,
edc.AlignedPow,
),
),
)
}
// DefineIsActiveConstraints requires that IsActive has the proper shape, never transitioning from 0 to 1.
// the fact that IsActive is binary is enforced in DefineIndicatorsMustBeBinary.
func DefineIsActiveConstraints(comp *wizard.CompiledIOP, edc *ExecutionDataCollector, name string) {
// we require that isActive is binary in DefineIndicatorsMustBeBinary
// require that the isActive filter only contains 1s followed by 0s
comp.InsertGlobal(
0,
@@ -926,30 +1109,64 @@ func DefineLimbSummary(comp *wizard.CompiledIOP,
),
),
)
}
// unaligned limb --- aligned limb constraints
comp.InsertGlobal(0,
ifaces.QueryIDf("%s_UNALIGNED_LIMB_AND_ALIGNED_LIMB_CONSTRAINT", name),
sym.Sub(
edc.Limb,
sym.Mul(
edc.UnalignedLimb,
edc.AlignedPow,
),
),
)
// DefineExecutionDataCollector is the main function that defines the constraints of the ExecutionDataCollector.
func DefineExecutionDataCollector(comp *wizard.CompiledIOP,
edc *ExecutionDataCollector,
name string,
timestamps fetch.TimestampFetcher,
metadata fetch.BlockTxnMetadata,
txnData fetch.TxnDataFetcher,
rlp fetch.RlpTxnFetcher) {
// selector constraints cover the prover actions which use dedicated.IsZero, but also
// constrain the EndOfRlpSegment column.
// these prover actions must be defined first, or dependent constraints will fail.
DefineSelectorConstraints(comp, edc, name)
// Indicator constraints, which concern the indicators: isActive,
// but also IsNoTx, IsTimestamp, IsBlockHashHi, IsBlockHashLo,
// IsAddrHi, IsAddrLo and IsTxRLP.
DefineIndicatorExclusion(comp, edc, name)
DefineIndicatorOrder(comp, edc, name)
DefineIndicatorConverseOrder(comp, edc, name)
DefineIndicatorsMustBeBinary(comp, edc)
DefineIsActiveConstraints(comp, edc, name)
// constraints that concern the limbs, unaligned limbs, the alignment powers
// and the number of bytes
DefineAlignmentPowers(comp, edc, name)
DefineLimbAlignmentConstraints(comp, edc, name)
DefineNumberOfBytesConstraints(comp, edc, name)
// zeroization constraints when the isActive filter is set to 0
DefineZeroizationConstraints(comp, edc, name)
// some columns must remain constant on the corresponding block/module segment, concerns:
// edc.FirstAbsTxIDBlock, edc.LastAbsTxIDBlock, edc.AbsTxIDMax
DefineConstantConstraints(comp, edc, name)
// simple constraint asking that HashNum is constant
DefineHashNumConstraints(comp, edc, name)
// Constraints for the counters edc.Ct, edc.AbsTxID, edc.BlockID
DefineCounterConstraints(comp, edc, name)
// Enforce data consistency with the arithmetization fetchers using projection queries
ProjectionQueries(comp, edc, name, timestamps, metadata, txnData, rlp)
}
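The intended lifecycle, as exercised by the tests below (a sketch; the fetchers are assumed to be defined beforehand):

	edc := NewExecutionDataCollector(comp, "EXECUTION_DATA_COLLECTOR", size)
	DefineExecutionDataCollector(comp, &edc, "EXECUTION_DATA_COLLECTOR", timestamps, metadata, txnData, rlp)
	// and, in the prover runtime:
	AssignExecutionDataCollector(run, edc, timestamps, metadata, txnData, rlp)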
func AssignLimbSummary(run *wizard.ProverRuntime,
// AssignExecutionDataCollector assigns the data in the ExecutionDataCollector, using
// the arithmetization fetchers fetch.TimestampFetcher, fetch.BlockTxnMetadata,
// fetch.TxnDataFetcher, and fetch.RlpTxnFetcher.
func AssignExecutionDataCollector(run *wizard.ProverRuntime,
edc ExecutionDataCollector,
timestamps fetch.TimestampFetcher,
metadata fetch.BlockTxnMetadata,
txnData fetch.TxnDataFetcher,
rlp fetch.RlpTxnFetcher) {
size := edc.Limb.Size()
// generate a helper struct that instantiates field element vectors for all our columns
vect := NewExecutionDataCollectorVectors(size)
fetchedAbsTxIdMax := rlp.AbsTxNumMax.GetColAssignmentAt(run, 0)
@@ -969,6 +1186,10 @@ func AssignLimbSummary(run *wizard.ProverRuntime,
lastAbsTxIDBlock := metadata.LastAbsTxId.GetColAssignmentAt(run, blockCt)
fetchNoTx := metadata.TotalNoTxnBlock.GetColAssignmentAt(run, blockCt)
// genericLoadFunction computes most of the data
// that is derived in a similar way for each type of row.
// opType is the type of row, and the field element value is the unaligned
// limb value
genericLoadFunction := func(opType int, value field.Element) {
vect.IsActive[totalCt].SetOne()
vect.SetLimbAndUnalignedLimb(totalCt, value, opType)
@@ -1050,13 +1271,17 @@ func AssignLimbSummary(run *wizard.ProverRuntime,
}
} // end of the block for loop
// assign the columns to the ExecutionDataCollector
AssignExecutionDataColumns(run, edc, vect)
// assign the selectors
edc.ComputeSelectorBlockDiff.Run(run)
edc.ComputeSelectorLastTxBlock.Run(run)
edc.ComputeSelectorEndOfAllTx.Run(run)
edc.ComputeSelectorAbsTxIDDiff.Run(run)
}
// AssignExecutionDataColumns uses the helper struct ExecutionDataCollectorVectors to assign the columns of
// the ExecutionDataCollector
func AssignExecutionDataColumns(run *wizard.ProverRuntime, edc ExecutionDataCollector, vect *ExecutionDataCollectorVectors) {
run.AssignColumn(edc.BlockID.GetColID(), smartvectors.NewRegular(vect.BlockID))
run.AssignColumn(edc.AbsTxID.GetColID(), smartvectors.NewRegular(vect.AbsTxID))
@@ -1073,6 +1298,8 @@ func AssignExecutionDataColumns(run *wizard.ProverRuntime, edc ExecutionDataColl
run.AssignColumn(edc.IsTxRLP.GetColID(), smartvectors.NewRegular(vect.IsTxRLP))
run.AssignColumn(edc.IsAddrHi.GetColID(), smartvectors.NewRegular(vect.IsAddrHi))
run.AssignColumn(edc.IsAddrLo.GetColID(), smartvectors.NewRegular(vect.IsAddrLo))
run.AssignColumn(edc.Ct.GetColID(), smartvectors.NewRegular(vect.Ct))
run.AssignColumn(edc.HashNum.GetColID(), smartvectors.NewConstant(field.NewElement(hashNum), len(vect.Ct)))
run.AssignColumn(edc.AbsTxIDMax.GetColID(), smartvectors.NewRegular(vect.AbsTxIDMax))
run.AssignColumn(edc.EndOfRlpSegment.GetColID(), smartvectors.NewRegular(vect.EndOfRlpSegment))
run.AssignColumn(edc.FirstAbsTxIDBlock.GetColID(), smartvectors.NewRegular(vect.FirstAbsTxIDBlock))

View File

@@ -0,0 +1,272 @@
package publicInput
import (
"fmt"
"strings"
"testing"
"github.com/consensys/zkevm-monorepo/prover/crypto/mimc"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/importpad"
pack "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing"
fetch "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput/fetchers_arithmetization"
util "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput/utilities"
"github.com/stretchr/testify/assert"
)
// fixedTestDataRlpLimbs stores the RLP limbs corresponding to each transaction.
// This data is consistent with the CSV testdata. The slice is indexed by the absolute transaction ID.
var fixedTestDataRlpLimbs = [][]string{
{"f9", "ccc0"},
{"123456", "f0"},
{"47d5f9a0"},
{"477aaaa0", "aaaa", "cc"},
{"bb"},
{"aaaaaaaaaa"},
{"ff"},
{"aaa0"},
{"aaa0"},
{"eee0", "1a2a3a4a5a", "b1b2b3", "10"},
}
// ComputeMiMCHashFixedTestData computes the execution data hash that corresponds to the test
// data in our CSV files. It concatenates all the bytes, in the same order as the ExecutionDataCollector,
// into a large string that represents the bytes in hexadecimal.
// It then pads the output with 0s until its length is a multiple of 62 hex characters (this corresponds to 31 bytes, which is the
// maximum number of bytes that the packing module fits into a single field element).
// We then split the large string into subslices of length 62, and use them to instantiate new field elements.
// Finally, the field elements are hashed using MiMC.
func ComputeMiMCHashFixedTestData() field.Element {
// 6 byte timestamp values for each block
var timestamps = [...]string{"00000000000a", "0000000000ab", "0000000000bc", "0000000000cd"}
// the total number of transactions for each block is stored in 2 bytes
var noTxString = [...]string{"0003", "0004", "0002", "0001"}
// the total number of transactions for each block, stored as int
var noTx = [...]int{3, 4, 2, 1}
// 4 bytes for the Hi part of the sender address of each transaction
var senderAddrHi = [...]string{"aaaaaaaa", "bbbbbbbb", "cccccccc", "dddddddd", "eeeeeeee", "ffffffff", "aaaaaaaa", "bbbbbbbb", "cccccccc", "dddddddd"}
// 16 bytes for the Lo part of the sender address of each transaction
var senderAddrLo = [...]string{"ffffffffffffffffffffffffffffffff", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "cccccccccccccccccccccccccccccccc", "dddddddddddddddddddddddddddddddd", "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", "ffffffffffffffffffffffffffffffff", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "cccccccccccccccccccccccccccccccc"}
// prepare a strings Builder to concatenate the test data in the proper manner
var fullString strings.Builder
absTxId := 1 // a counter that will iterate over the absolute transaction ID.
// iterate over the blocks
for blockId := 0; blockId < 4; blockId++ {
// concatenate the bytes for the number of transactions, the block timestamp and the blockhash
fullString.WriteString(noTxString[blockId])
fullString.WriteString(timestamps[blockId]) // 6 bytes, timestamps
fullString.WriteString("00000000000000000000000000000000") // 16 bytes, BlockHash HI
fullString.WriteString("00000000000000000000000000000000") // 16 bytes, BlockHash LO
// absTxIdLimit is the absolute transaction ID of the first transaction in the next block
absTxIdLimit := absTxId + noTx[blockId]
// iterate over the IDs of the transactions in the current block
for absTxId < absTxIdLimit {
// concatenate the sender address of the transaction with absTxId
fullString.WriteString(senderAddrHi[absTxId-1])
fullString.WriteString(senderAddrLo[absTxId-1])
// get the RLP limbs for the transaction and concatenate them
rlpLimbs := fixedTestDataRlpLimbs[absTxId-1]
for _, rlpLimb := range rlpLimbs {
fullString.WriteString(rlpLimb)
}
// done with the data for this transaction (sender address + RLP limbs), move on to the next
absTxId++
}
absTxId = absTxIdLimit
}
// padding with 0s until the length of the large concatenated string is a multiple of 62
// (corresponding to 31 bytes)
for fullString.Len()%62 > 0 {
fullString.WriteString("0") // pad with 0s
}
// pack the bytes in the large string into a vector vectFieldElem of field elements
vectFieldElem := make([]field.Element, fullString.Len()/62)
finalString := fullString.String()
for i := 0; i < fullString.Len()/62; i++ {
// compute stringSlice, a 62-character/31-byte subslice of the large string
stringSlice := fmt.Sprintf("0x%s", finalString[i*62:(i+1)*62])
// instantiate a field element from the subslice and add it to the vector
elem := field.NewFromString(stringSlice)
vectFieldElem[i].Set(&elem)
}
// compute and return the MiMC hash of the packed field elements
finalHash := mimc.HashVec(vectFieldElem)
return finalHash
}
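For instance, since one field element packs 31 bytes = 62 hex characters, a concatenated string of 100 characters is padded with 24 zero characters up to 124 = 2*62 and then split into two field elements.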
// TestExecutionDataCollectorAndHash instantiates mock arithmetization modules
// BlockData, TxnData and RlpTxn from CSV test files. It then defines and assigns
// an ExecutionDataCollector, then pads, packs and MiMC-hashes its limbs.
func TestExecutionDataCollectorAndHash(t *testing.T) {
ctBlockData := util.InitializeCsv("testdata/blockdata_mock.csv", t)
ctTxnData := util.InitializeCsv("testdata/txndata_mock.csv", t)
ctRlpTxn := util.InitializeCsv("testdata/rlp_txn_mock.csv", t)
var (
execDataCollector ExecutionDataCollector
blockTxnMeta fetch.BlockTxnMetadata
timestampFetcher fetch.TimestampFetcher
txnDataFetcher fetch.TxnDataFetcher
rlpTxnFetcher fetch.RlpTxnFetcher
txnDataCols *fetch.TxnData
blockDataCols *fetch.BlockDataCols
rlpTxn *fetch.RlpTxn
mimcHasher *MIMCHasher
importInp importpad.ImportAndPadInputs
importMod wizard.ProverAction
packingInp pack.PackingInput
packingMod *pack.Packing
)
define := func(b *wizard.Builder) {
blockDataCols = &fetch.BlockDataCols{
RelBlock: ctBlockData.GetCommit(b, "REL_BLOCK"),
Inst: ctBlockData.GetCommit(b, "INST"),
Ct: ctBlockData.GetCommit(b, "CT"),
DataHi: ctBlockData.GetCommit(b, "DATA_HI"),
DataLo: ctBlockData.GetCommit(b, "DATA_LO"),
}
txnDataCols = &fetch.TxnData{
AbsTxNum: ctTxnData.GetCommit(b, "TD.ABS_TX_NUM"),
AbsTxNumMax: ctTxnData.GetCommit(b, "TD.ABS_TX_NUM_MAX"),
RelTxNum: ctTxnData.GetCommit(b, "TD.REL_TX_NUM"),
RelTxNumMax: ctTxnData.GetCommit(b, "TD.REL_TX_NUM_MAX"),
Ct: ctTxnData.GetCommit(b, "TD.CT"),
FromHi: ctTxnData.GetCommit(b, "TD.FROM_HI"),
FromLo: ctTxnData.GetCommit(b, "TD.FROM_LO"),
IsLastTxOfBlock: ctTxnData.GetCommit(b, "TD.IS_LAST_TX_OF_BLOCK"),
RelBlock: ctTxnData.GetCommit(b, "TD.REL_BLOCK"),
}
rlpTxn = &fetch.RlpTxn{
AbsTxNum: ctRlpTxn.GetCommit(b, "RT.ABS_TX_NUM"),
AbsTxNumMax: ctRlpTxn.GetCommit(b, "RT.ABS_TX_NUM_MAX"),
ToHashByProver: ctRlpTxn.GetCommit(b, "RL.TO_HASH_BY_PROVER"),
Limb: ctRlpTxn.GetCommit(b, "RL.LIMB"),
NBytes: ctRlpTxn.GetCommit(b, "RL.NBYTES"),
}
blockTxnMeta = fetch.NewBlockTxnMetadata(b.CompiledIOP, "BLOCK_TX_METADATA", txnDataCols)
fetch.DefineBlockTxnMetaData(b.CompiledIOP, &blockTxnMeta, "BLOCK_TX_METADATA", txnDataCols)
// create a new timestamp fetcher
timestampFetcher = fetch.NewTimestampFetcher(b.CompiledIOP, "TIMESTAMP_FETCHER_FROM_ARITH", blockDataCols)
// constrain the timestamp fetcher
fetch.DefineTimestampFetcher(b.CompiledIOP, &timestampFetcher, "TIMESTAMP_FETCHER_FROM_ARITH", blockDataCols)
txnDataFetcher = fetch.NewTxnDataFetcher(b.CompiledIOP, "TXN_DATA_FETCHER_FROM_ARITH", txnDataCols)
fetch.DefineTxnDataFetcher(b.CompiledIOP, &txnDataFetcher, "TXN_DATA_FETCHER_FROM_ARITH", txnDataCols)
rlpTxnFetcher = fetch.NewRlpTxnFetcher(b.CompiledIOP, "RLP_TXN_FETCHER_FROM_ARITH", rlpTxn)
// constrain the fetcher
fetch.DefineRlpTxnFetcher(b.CompiledIOP, &rlpTxnFetcher, "RLP_TXN_FETCHER_FROM_ARITH", rlpTxn)
limbColSize := GetSummarySize(txnDataCols, rlpTxn)
// we need to artificially blow up the column size by 2, or padding will fail
limbColSize = 2 * limbColSize
// create a new ExecutionDataCollector
execDataCollector = NewExecutionDataCollector(b.CompiledIOP, "EXECUTION_DATA_COLLECTOR", limbColSize)
// define the ExecutionDataCollector
DefineExecutionDataCollector(b.CompiledIOP, &execDataCollector, "EXECUTION_DATA_COLLECTOR", timestampFetcher, blockTxnMeta, txnDataFetcher, rlpTxnFetcher)
// create a padding module for the ExecutionDataCollector
importInp = importpad.ImportAndPadInputs{
Name: "TESTING",
Src: generic.GenericByteModule{Data: generic.GenDataModule{
HashNum: execDataCollector.HashNum,
Index: execDataCollector.Ct,
TO_HASH: execDataCollector.IsActive,
NBytes: execDataCollector.NoBytes,
Limb: execDataCollector.Limb,
}},
PaddingStrategy: generic.MiMCUsecase,
}
// define the padding module. The import and pad module is first assigned
// to a new variable because we need to access its fields although the
// struct itself is private.
imported := importpad.ImportAndPad(b.CompiledIOP, importInp, limbColSize)
importMod = imported
// create an input for the packing module
packingInp = pack.PackingInput{
MaxNumBlocks: execDataCollector.BlockID.Size(),
PackingParam: generic.MiMCUsecase,
Imported: pack.Importation{
Limb: imported.Limbs,
NByte: imported.NBytes,
IsNewHash: imported.IsNewHash,
IsActive: imported.IsActive,
},
}
// create a new packing module
packingMod = pack.NewPack(b.CompiledIOP, packingInp)
// create a MiMC hasher
mimcHasher = NewMIMCHasher(b.CompiledIOP, packingMod.Repacked.Lanes, packingMod.Repacked.IsLaneActive, "MIMC_HASHER")
// define the hasher
mimcHasher.DefineHasher(b.CompiledIOP, "EXECUTION_DATA_COLLECTOR_MIMC_HASHER")
}
prove := func(run *wizard.ProverRuntime) {
// assign the CSV data for the mock BlockData, TxnData and RlpTxn arithmetization modules
ctBlockData.Assign(
run,
"REL_BLOCK",
"INST",
"CT",
"DATA_HI",
"DATA_LO",
)
ctTxnData.Assign(
run,
"TD.ABS_TX_NUM",
"TD.ABS_TX_NUM_MAX",
"TD.REL_TX_NUM",
"TD.REL_TX_NUM_MAX",
"TD.CT",
"TD.FROM_HI",
"TD.FROM_LO",
"TD.IS_LAST_TX_OF_BLOCK",
"TD.REL_BLOCK",
)
ctRlpTxn.Assign(
run,
"RT.ABS_TX_NUM",
"RT.ABS_TX_NUM_MAX",
"RL.TO_HASH_BY_PROVER",
"RL.LIMB",
"RL.NBYTES",
)
// assign the mock arithmetization modules
fetch.AssignTimestampFetcher(run, timestampFetcher, blockDataCols)
fetch.AssignBlockTxnMetadata(run, blockTxnMeta, txnDataCols)
fetch.AssignTxnDataFetcher(run, txnDataFetcher, txnDataCols)
fetch.AssignRlpTxnFetcher(run, &rlpTxnFetcher, rlpTxn)
// assign the ExecutionDataCollector
AssignExecutionDataCollector(run, execDataCollector, timestampFetcher, blockTxnMeta, txnDataFetcher, rlpTxnFetcher)
// assign the padding module
importMod.Run(run)
// assign the packing module
packingMod.Run(run)
// assign the hasher
mimcHasher.AssignHasher(run)
// compute the MiMC hash of the fixed TestData
fixedHash := ComputeMiMCHashFixedTestData()
// assert that we are computing the hash correctly
assert.Equal(t, fixedHash, mimcHasher.hashFinal.GetColAssignmentAt(run, 0), "Final Hash Value is Incorrect")
}
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prove)
err := wizard.Verify(comp, proof)
if err != nil {
t.Fatalf("verification failed: %v", err)
}
}

View File

@@ -8,7 +8,9 @@ import (
"testing"
)
func TestExecutionDataCollector(t *testing.T) {
// TestAssignmentExecutionDataCollector tests whether the execution data collector
// defines its constraints and assigns its columns without any errors.
func TestAssignmentExecutionDataCollector(t *testing.T) {
ctBlockData := utilities.InitializeCsv("testdata/blockdata_mock.csv", t)
ctTxnData := utilities.InitializeCsv("testdata/txndata_mock.csv", t)
ctRlpTxn := utilities.InitializeCsv("testdata/rlp_txn_mock.csv", t)
@@ -64,9 +66,9 @@ func TestExecutionDataCollector(t *testing.T) {
// constrain the fetcher
fetch.DefineRlpTxnFetcher(b.CompiledIOP, &rlpTxnFetcher, "RLP_TXN_FETCHER_FROM_ARITH", rt)
limbColSize := GetSummarySize(&btm, bdc, txd, rt)
edc = NewLimbSummary(b.CompiledIOP, "LIMB_SUMMARY", limbColSize)
DefineLimbSummary(b.CompiledIOP, &edc, "LIMB_SUMMARY", timestampFetcher, btm, txnDataFetcher, rlpTxnFetcher)
limbColSize := GetSummarySize(txd, rt)
edc = NewExecutionDataCollector(b.CompiledIOP, "EXECUTION_DATA_COLLECTOR", limbColSize)
DefineExecutionDataCollector(b.CompiledIOP, &edc, "EXECUTION_DATA_COLLECTOR", timestampFetcher, btm, txnDataFetcher, rlpTxnFetcher)
}
prove := func(run *wizard.ProverRuntime) {
@@ -102,7 +104,7 @@ func TestExecutionDataCollector(t *testing.T) {
fetch.AssignBlockTxnMetadata(run, btm, txd)
fetch.AssignTxnDataFetcher(run, txnDataFetcher, txd)
fetch.AssignRlpTxnFetcher(run, &rlpTxnFetcher, rt)
AssignLimbSummary(run, edc, timestampFetcher, btm, txnDataFetcher, rlpTxnFetcher)
AssignExecutionDataCollector(run, edc, timestampFetcher, btm, txnDataFetcher, rlpTxnFetcher)
}
comp := wizard.Compile(define, dummy.Compile)

View File

@@ -13,6 +13,8 @@ type ExecutionDataCollectorVectors struct {
IsActive []field.Element
IsNoTx, IsBlockHashHi, IsBlockHashLo, IsTimestamp, IsTxRLP, IsAddrHi, IsAddrLo []field.Element
Ct []field.Element
FirstAbsTxIDBlock, LastAbsTxIDBlock []field.Element
EndOfRlpSegment []field.Element
@@ -36,6 +38,7 @@ func NewExecutionDataCollectorVectors(size int) *ExecutionDataCollectorVectors {
IsTxRLP: make([]field.Element, size),
IsAddrHi: make([]field.Element, size),
IsAddrLo: make([]field.Element, size),
Ct: make([]field.Element, size),
FirstAbsTxIDBlock: make([]field.Element, size),
LastAbsTxIDBlock: make([]field.Element, size),
EndOfRlpSegment: make([]field.Element, size),
@@ -69,6 +72,7 @@ func (vect *ExecutionDataCollectorVectors) SetLimbAndUnalignedLimb(totalCt int,
}
func (vect *ExecutionDataCollectorVectors) SetCounters(totalCt, blockCt, absTxCt, absTxIdMax int) {
vect.Ct[totalCt].SetInt64(int64(totalCt))
vect.BlockID[totalCt].SetInt64(int64(blockCt + 1))
vect.AbsTxID[totalCt].SetInt64(int64(absTxCt))
vect.AbsTxIDMax[totalCt].SetInt64(int64(absTxIdMax))
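// Descriptive note (derived from the assignment above): row totalCt now
// records Ct = totalCt, the plain 0-based row counter added by this commit,
// alongside the 1-indexed BlockID (blockCt+1), AbsTxID and AbsTxIDMax.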

View File

@@ -61,6 +61,9 @@ func DefineHasher(comp *wizard.CompiledIOP, hasher LogHasher, name string, fetch
),
)
// inter, the old state column, is initially zero
comp.InsertLocal(0, ifaces.QueryIDf("%s_%s", name, "INTER_LOCAL"), ifaces.ColumnAsVariable(hasher.inter))
// Counter constraints
// First, the counter starts from 0
comp.InsertLocal(0, ifaces.QueryIDf("%s_%s", name, "COUNTER_LOCAL"), ifaces.ColumnAsVariable(hasher.counter))
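// Note (an assumption, not visible in this hunk): COUNTER_LOCAL only pins
// counter[0] = 0; a matching global constraint is presumably defined
// elsewhere to enforce counter[i+1] = counter[i] + 1 on active rows.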

View File

@@ -0,0 +1,125 @@
package publicInput
import (
"github.com/consensys/zkevm-monorepo/prover/crypto/mimc"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
publicInput "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput/utilities"
)
type MIMCHasher struct {
// a typical isActive binary column, provided as an input to the module
isActive ifaces.Column
// the data to be hashed; this column is provided as an input to the module
data ifaces.Column
// this column stores the MiMC hashes
hash ifaces.Column
// a constant column that stores the last relevant value of the hash
hashFinal ifaces.Column
// inter is an intermediary column used to enforce the MiMC constraints
inter ifaces.Column
}
func NewMIMCHasher(comp *wizard.CompiledIOP, data, isActive ifaces.Column, name string) *MIMCHasher {
size := data.Size()
res := &MIMCHasher{
data: data,
isActive: isActive,
hash: publicInput.CreateCol(name, "HASH", size, comp),
hashFinal: publicInput.CreateCol(name, "HASH_FINAL", size, comp),
inter: publicInput.CreateCol(name, "INTER", size, comp),
}
return res
}
// DefineHasher defines the constraints of the MIMCHasher.
// Its isActive and data columns are assumed to be already constrained in another module, so they need not be constrained again.
func (hasher *MIMCHasher) DefineHasher(comp *wizard.CompiledIOP, name string) {
// MiMC constraints
comp.InsertMiMC(0, ifaces.QueryIDf("%s_%s", name, "MIMC_CONSTRAINT"), hasher.data, hasher.inter, hasher.hash)
// intermediary state integrity: inter on the next row must equal hash on the current row
comp.InsertGlobal(0, ifaces.QueryIDf("%s_%s", name, "CONSISTENCY_INTER_AND_HASH_LAST"),
sym.Sub(hasher.hash,
column.Shift(hasher.inter, 1),
),
)
// inter, the old state column, is initially zero
comp.InsertLocal(0, ifaces.QueryIDf("%s_%s", name, "INTER_LOCAL"), ifaces.ColumnAsVariable(hasher.inter))
// below, constrain the hashFinal column
// two cases: Case 1: isActive is not completely full; then hashFinal must equal hash at the last cell where isActive is 1
comp.InsertGlobal(0, ifaces.QueryIDf("%s_%s", name, "HASH_FINAL_GLOBAL_CONSTRAINT"),
sym.Mul(
hasher.isActive,
sym.Sub(1,
column.Shift(hasher.isActive, 1),
),
sym.Sub(
hasher.hash,
hasher.hashFinal,
),
),
)
// Case 2: isActive is completely full, in which case we ask that isActive[size-1]*(hash[size-1]-hashFinal[size-1]) = 0,
// i.e. at the last row, hash is equal to hashFinal
comp.InsertLocal(0, ifaces.QueryIDf("%s_%s", name, "HASH_FINAL_LOCAL_CONSTRAINT"),
sym.Mul(
column.Shift(hasher.isActive, -1),
sym.Sub(
column.Shift(hasher.hash, -1),
column.Shift(hasher.hashFinal, -1),
),
),
)
}
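// Reference recurrence (a minimal sketch, not part of this commit): the
// constraints above pin down exactly the plain-Go chain below. The helper
// name runningMiMC is hypothetical and for illustration only; it reuses
// mimc.BlockCompression, the same primitive invoked by AssignHasher.
func runningMiMC(data, isActive []field.Element) (hash []field.Element, final field.Element) {
hash = make([]field.Element, len(data))
state := field.Zero() // INTER_LOCAL: the initial state is zero
for i := range data {
// MIMC_CONSTRAINT: hash[i] is the compression of the previous state with data[i]
state = mimc.BlockCompression(state, data[i])
hash[i].Set(&state)
// CONSISTENCY_INTER_AND_HASH_LAST: the next row's state is hash[i]
if isActive[i].IsOne() {
// HASH_FINAL constraints (Cases 1 and 2): hashFinal equals the
// hash at the last active row
final.Set(&hash[i])
}
}
return hash, final
}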
// AssignHasher assigns the columns of the MIMCHasher. The data and isActive columns are fetched from another module.
func (hasher *MIMCHasher) AssignHasher(run *wizard.ProverRuntime) {
size := hasher.data.Size()
hash := make([]field.Element, size)
inter := make([]field.Element, size)
var finalHash field.Element
state := field.Zero() // the initial state is zero
for i := 0; i < len(hash); i++ {
// absorb the current data limb into the running MiMC state
mimcBlock := hasher.data.GetColAssignmentAt(run, i)
state = mimc.BlockCompression(state, mimcBlock)
hash[i].Set(&state)
// the current hash initializes the next row's state, stored in the inter column
if i+1 < len(hash) {
inter[i+1] = hash[i]
}
isActive := hasher.isActive.GetColAssignmentAt(run, i)
if isActive.IsOne() {
// keep track of the hash at the last active row
finalHash.Set(&hash[i])
}
}
// assign the hasher columns
run.AssignColumn(hasher.hash.GetColID(), smartvectors.NewRegular(hash))
run.AssignColumn(hasher.hashFinal.GetColID(), smartvectors.NewConstant(finalHash, size))
run.AssignColumn(hasher.inter.GetColID(), smartvectors.NewRegular(inter))
}

View File

@@ -1,4 +1,4 @@
RT.ABS_TX_NUM,RT.ABS_TX_NUM_MAX,RL.TO_HASH_BY_PROVER,RL.LIMB,RL.NBYTES
RT.ABS_TX_NUM,RT.ABS_TX_NUM_MAX,RL.TO_HASH_BY_PROVER,RL.NBYTES,RL.LIMB
0,0,0,0,0
0,0,0,0,0
0,0,0,0,0
@@ -24,42 +24,42 @@ RT.ABS_TX_NUM,RT.ABS_TX_NUM_MAX,RL.TO_HASH_BY_PROVER,RL.LIMB,RL.NBYTES
0,0,0,0,0
0,0,0,0,0
1,10,0,0,0
1,10,1,0xf9000000000000000000000000000000,1
1,10,1,1,0xf9000000000000000000000000000000
1,10,0,0,0
1,10,1,0xccc00000000000000000000000000000,2
2,10,1,0x12345600000000000000000000000000,3
1,10,1,2,0xccc00000000000000000000000000000
2,10,1,3,0x12345600000000000000000000000000
2,10,0,0,0
2,10,1,0xf0000000000000000000000000000000,1
2,10,1,1,0xf0000000000000000000000000000000
2,10,0,0,0
2,10,0,0,0
3,10,0,0,0
3,10,1,0x47d5f9a000000000000000000000000,4
3,10,1,4,0x47d5f9a0000000000000000000000000
3,10,0,0,0
4,10,0,0,0
4,10,0,0,0
4,10,1,0x477aaaa000000000000000000000000,4
4,10,1,0xaaaa000000000000000000000000000,2
4,10,1,0xcc00000000000000000000000000000,1
4,10,1,4,0x477aaaa0000000000000000000000000
4,10,1,2,0xaaaa0000000000000000000000000000
4,10,1,1,0xcc000000000000000000000000000000
4,10,0,0,0
5,10,0,0,0
5,10,0,0,0
5,10,1,0xbb00000000000000000000000000000,1
5,10,1,1,0xbb000000000000000000000000000000
6,10,0,0,0
6,10,1,0xaaaaaaaaaa000000000000000000000,5
6,10,1,5,0xaaaaaaaaaa0000000000000000000000
6,10,0,0,0
7,10,0,0,0
7,10,1,0xff00000000000000000000000000000,1
7,10,1,1,0xff000000000000000000000000000000
8,10,0,0,0
8,10,0,0,0
8,10,1,0xaaa0000000000000000000000000000,2
8,10,1,2,0xaaa00000000000000000000000000000
8,10,0,0,0
9,10,0,0,0
9,10,1,0xaaa0000000000000000000000000000,2
9,10,1,2,0xaaa00000000000000000000000000000
10,10,0,0,0
10,10,0,0,0
10,10,1,0xeee0000000000000000000000000000,2
10,10,1,0x1a2a3a4a5a000000000000000000000,5
10,10,1,2,0xeee00000000000000000000000000000
10,10,1,5,0x1a2a3a4a5a0000000000000000000000
10,10,0,0,0
10,10,1,0xb1b2b30000000000000000000000000,3
10,10,1,0x1000000000000000000000000000000,1
10,10,1,3,0xb1b2b300000000000000000000000000
10,10,1,1,0x10000000000000000000000000000000
10,10,0,0,0

View File

@@ -36,30 +36,30 @@ TD.ABS_TX_NUM,TD.ABS_TX_NUM_MAX,TD.REL_TX_NUM,TD.REL_TX_NUM_MAX,TD.CT,TD.FROM_HI
0,0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0,0
0,0,0,0,0,0,0,0,0
1,10,1,3,0,21,32,0,1
1,10,1,3,1,21,32,0,1
2,10,2,3,0,45,4222,0,1
2,10,2,3,1,45,4222,0,1
3,10,3,3,0,23,3,1,1
3,10,3,3,1,23,3,1,1
3,10,3,3,2,23,3,1,1
4,10,1,4,0,7,211,0,2
4,10,1,4,1,7,211,0,2
5,10,2,4,0,234,2,0,2
5,10,2,4,1,234,2,0,2
6,10,3,4,0,12,144,0,2
6,10,3,4,1,12,144,0,2
7,10,4,4,0,9997,6,1,2
7,10,4,4,1,9997,6,1,2
8,10,1,2,0,1,3331,0,3
8,10,1,2,1,1,3331,0,3
9,10,2,2,0,33,7,1,3
9,10,2,2,1,33,7,1,3
10,10,1,1,0,1,999,1,4
10,10,1,1,1,1,999,1,4
10,10,1,1,2,1,999,1,4
10,10,1,1,3,1,999,1,4
10,10,1,1,4,1,999,1,4
10,10,1,1,5,1,999,1,4
10,10,1,1,6,1,999,1,4
10,10,1,1,7,1,999,1,4
1,10,1,3,0,0xaaaaaaaa,0xffffffffffffffffffffffffffffffff,0,1
1,10,1,3,1,0xaaaaaaaa,0xffffffffffffffffffffffffffffffff,0,1
2,10,2,3,0,0xbbbbbbbb,0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,0,1
2,10,2,3,1,0xbbbbbbbb,0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,0,1
3,10,3,3,0,0xcccccccc,0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,1,1
3,10,3,3,1,0xcccccccc,0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,1,1
3,10,3,3,2,0xcccccccc,0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,1,1
4,10,1,4,0,0xdddddddd,0xcccccccccccccccccccccccccccccccc,0,2
4,10,1,4,1,0xdddddddd,0xcccccccccccccccccccccccccccccccc,0,2
5,10,2,4,0,0xeeeeeeee,0xdddddddddddddddddddddddddddddddd,0,2
5,10,2,4,1,0xeeeeeeee,0xdddddddddddddddddddddddddddddddd,0,2
6,10,3,4,0,0xffffffff,0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee,0,2
6,10,3,4,1,0xffffffff,0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee,0,2
7,10,4,4,0,0xaaaaaaaa,0xffffffffffffffffffffffffffffffff,1,2
7,10,4,4,1,0xaaaaaaaa,0xffffffffffffffffffffffffffffffff,1,2
8,10,1,2,0,0xbbbbbbbb,0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,0,3
8,10,1,2,1,0xbbbbbbbb,0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa,0,3
9,10,2,2,0,0xcccccccc,0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,1,3
9,10,2,2,1,0xcccccccc,0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb,1,3
10,10,1,1,0,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,1,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,2,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,3,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,4,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,5,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,6,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4
10,10,1,1,7,0xdddddddd,0xcccccccccccccccccccccccccccccccc,1,4