Prover: complete the implementation of the keccak module (#3764)

* reorganize the structure of the keccak module
* rewrite the interface between keccak and the interconnection module
* rewrite the interface between keccak and the ecdsa module

---------

Co-authored-by: AlexandreBelling <alexandrebelling8@gmail.com>
Azam Soleimanian
2024-08-06 12:12:36 +02:00
committed by GitHub
parent 84b6f8921c
commit 8fb5c34cef
85 changed files with 948 additions and 5076 deletions
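For orientation before the per-file diffs, here is a condensed sketch of the renamed interface, assembled from the first hunks below: keccak.KeccakOverBlocks replaces wKeccak.CustomizedkeccakHash. This is a sketch, not a standalone program; comp, run, size, maxNbKeccakF and providers are assumed to come from the surrounding monorepo code.

    // definition time
    inp := keccak.KeccakOverBlockInputs{
        LaneInfo: keccak.LaneInfo{
            Lanes:                comp.InsertCommit(0, "Lane", size),
            IsFirstLaneOfNewHash: comp.InsertCommit(0, "IsFirstLaneOfNewHash", size),
            IsLaneActive:         comp.InsertCommit(0, "IsLaneActive", size),
        },
        MaxNumKeccakF: maxNbKeccakF,
    }
    m := keccak.NewKeccakOverBlocks(comp, inp)

    // proving time
    keccak.AssignLaneInfo(run, &m.Inputs.LaneInfo, providers)
    m.Inputs.Provider = providers
    m.Run(run)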

View File

@@ -5,11 +5,11 @@ import (
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
wKeccak "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak"
)
type module struct {
keccak wKeccak.CustomizedkeccakHash
keccak keccak.KeccakOverBlocks
}
// NewCustomizedKeccak declares the columns and the constraints for proving the hash over the expected blocks.
@@ -19,15 +19,15 @@ func NewCustomizedKeccak(comp *wizard.CompiledIOP, maxNbKeccakF int) *module {
var (
size = utils.NextPowerOfTwo(generic.KeccakUsecase.NbOfLanesPerBlock() * maxNbKeccakF)
inp = wKeccak.CustomizedKeccakInputs{
LaneInfo: wKeccak.LaneInfo{
inp = keccak.KeccakOverBlockInputs{
LaneInfo: keccak.LaneInfo{
Lanes: comp.InsertCommit(0, "Lane", size),
IsFirstLaneOfNewHash: comp.InsertCommit(0, "IsFirstLaneOfNewHash", size),
IsLaneActive: comp.InsertCommit(0, "IsLaneActive", size),
},
MaxNumKeccakF: maxNbKeccakF,
}
m = wKeccak.NewCustomizedKeccak(comp, inp)
m = keccak.NewKeccakOverBlocks(comp, inp)
)
return &module{
keccak: *m,
@@ -38,7 +38,7 @@ func NewCustomizedKeccak(comp *wizard.CompiledIOP, maxNbKeccakF int) *module {
func (m *module) AssignCustomizedKeccak(run *wizard.ProverRuntime, providers [][]byte) {
// assign Lane-Info
wKeccak.AssignLaneInfo(run, &m.keccak.Inputs.LaneInfo, providers)
keccak.AssignLaneInfo(run, &m.keccak.Inputs.LaneInfo, providers)
// assign keccak
m.keccak.Inputs.Provider = providers
m.keccak.Run(run)

View File

@@ -177,3 +177,11 @@ func (state *State) XorIn(block *Block, traces *PermTraces) {
func (state *State) ExtractDigest() Digest {
return castDigest(state[0][0], state[1][0], state[2][0], state[3][0])
}
// GenerateTrace generates [PermTraces] from the given streams.
func GenerateTrace(streams [][]byte) (t PermTraces) {
for _, stream := range streams {
Hash(stream, &t)
}
return t
}
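A hedged usage sketch of the new helper (the import path and the HashOutPut spelling are as they appear in this repo; the example streams are made up):

    import "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"

    streams := [][]byte{[]byte("abc"), []byte("de")}
    t := keccak.GenerateTrace(streams)
    // t.HashOutPut holds one digest per input stream; t also carries the
    // permutation traces that the keccakf wizard module consumes.
    _ = t.HashOutPut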

View File

@@ -31,13 +31,22 @@ func MustBeBinary(comp *wizard.CompiledIOP, c ifaces.Column) {
// MustBeActivationColumns constrains all the columns of the form "IsActive" to have
// the correct form: the column is binary and it cannot transition from 0 to 1.
func MustBeActivationColumns(comp *wizard.CompiledIOP, c ifaces.Column) {
func MustBeActivationColumns(comp *wizard.CompiledIOP, c ifaces.Column, option ...any) {
MustBeBinary(comp, c)
// the column must keep the activation form wherever the option column is non-zero.
var res *sym.Expression
if len(option) > 0 {
res = sym.Mul(column.Shift(c, 1), option[0])
} else {
res = ifaces.ColumnAsVariable(column.Shift(c, 1))
}
comp.InsertGlobal(
0,
ifaces.QueryIDf("%v_CANNOT_TRANSITION_FROM_0_TO_1", c.GetColID()),
sym.Sub(sym.Mul(column.Shift(c, -1), c), c),
sym.Sub(res,
sym.Mul(c, column.Shift(c, 1))),
)
}
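In the no-option case res = Shift(c,1), so the query is Shift(c,1) - c*Shift(c,1) = Shift(c,1)*(1 - c), which vanishes exactly when the column never goes from 0 back to 1. A self-contained sanity check of that reading, with plain integers standing in for field elements (an assumption made purely for illustration):

    package main

    import "fmt"

    // violates reports whether the activation constraint
    // Shift(c,1) - c*Shift(c,1) == 0 fails at some row.
    func violates(c []int) bool {
        for i := 0; i+1 < len(c); i++ {
            if c[i+1]-c[i]*c[i+1] != 0 { // Shift(c,1)*(1-c) != 0
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(violates([]int{1, 1, 1, 0, 0})) // false: ones then zeroes
        fmt.Println(violates([]int{1, 0, 1, 0, 0})) // true: transitions 0 -> 1
    }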

View File

@@ -6,15 +6,14 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/acc_module"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic/testdata"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak"
"github.com/stretchr/testify/assert"
)
func TestAddress(t *testing.T) {
m := keccak.Module{}
c := testCases
limits := &Limits{
MaxNbEcRecover: 1,
MaxNbTx: 4,
@@ -24,7 +23,8 @@ func TestAddress(t *testing.T) {
var uaGnark *UnalignedGnarkData
var ecRec *EcRecover
var td *txnData
gbmGnark := generic.GenericByteModule{}
gbmGnark := generic.GenDataModule{}
m := &keccak.KeccakSingleProvider{}
size := limits.sizeAntichamber()
@@ -37,16 +37,17 @@ func TestAddress(t *testing.T) {
comp := b.CompiledIOP
// generate a gbm and use it to represent gnark-columns
gbmGnark = acc_module.CommitGBM(comp, 0, generic.SHAKIRA, size)
gbmGnark = testdata.CreateGenDataModule(comp, "UnGNARK", size)
ac = &Antichamber{
Limits: limits,
ID: gbmGnark.Data.HashNum,
ID: gbmGnark.HashNum,
}
uaGnark = &UnalignedGnarkData{
GnarkData: gbmGnark.Data.Limb,
GnarkPublicKeyIndex: gbmGnark.Data.Index,
IsPublicKey: gbmGnark.Data.TO_HASH,
GnarkData: gbmGnark.Limb,
GnarkPublicKeyIndex: gbmGnark.Index,
IsPublicKey: gbmGnark.TO_HASH,
}
ac.UnalignedGnarkData = uaGnark
// commit to txnData and ecRecover
td, ecRec = commitEcRecTxnData(comp, sizeTxnData, size, ac)
@@ -54,17 +55,18 @@ func TestAddress(t *testing.T) {
// native columns and constraints
addr = newAddress(comp, size, ecRec, ac, td)
// prepare the provider for keccak
provider := addr.GetProvider(comp, ac, uaGnark)
// define keccak (columns and constraints)
m.Define(comp, []generic.GenericByteModule{provider}, nbKeccakF)
keccakInp := keccak.KeccakSingleProviderInput{
Provider: addr.provider,
MaxNumKeccakF: nbKeccakF,
}
m = keccak.NewKeccakSingleProvider(comp, keccakInp)
}, dummy.Compile)
proof := wizard.Prove(compiled, func(run *wizard.ProverRuntime) {
// assign GnarkColumns via gbmGnark
acc_module.AssignGBMfromTable(run, &gbmGnark, size, limits.MaxNbEcRecover+limits.MaxNbTx, false)
testdata.GenerateAndAssignGenDataModule(run, &gbmGnark, c.HashNum, c.ToHash, false)
// it assigns mock data to EcRec and txn_data
AssignEcRecTxnData(run, gbmGnark, limits.MaxNbEcRecover, limits.MaxNbTx, sizeTxnData, size, td, ecRec, ac)
@@ -72,7 +74,17 @@ func TestAddress(t *testing.T) {
addr.assignAddress(run, limits.MaxNbEcRecover, size, ac, ecRec, uaGnark, td)
// assign keccak columns via provider that is embedded in the receiver
m.AssignKeccak(run)
m.Run(run)
})
assert.NoError(t, wizard.Verify(compiled, proof))
}
type makeTestCase struct {
HashNum []int
ToHash []int
}
var testCases = makeTestCase{
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5},
ToHash: []int{1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0},
}
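The test above shows the wiring pattern that recurs for every keccak consumer after this refactor; condensed (names taken verbatim from the hunks, assuming the monorepo types, not standalone):

    // definition time
    keccakInp := keccak.KeccakSingleProviderInput{
        Provider:      provider, // a generic.GenericByteModule
        MaxNumKeccakF: nbKeccakF,
    }
    m := keccak.NewKeccakSingleProvider(comp, keccakInp)

    // proving time
    m.Run(run)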

View File

@@ -1,17 +1,20 @@
package antichamber
import (
"fmt"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/byte32cmp"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/dedicated"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
@@ -42,9 +45,15 @@ type Addresses struct {
// a column of all 16 indicating that all 16 bytes of public key should be hashed.
col16 ifaces.Column
// used as the hashID for hashing by keccak.
hashNum ifaces.Column
// columns for decomposition and trimming the HashHi to AddressHi
limbColumnsUntrimmed byte32cmp.LimbColumns
computeLimbColumnsUntrimmed wizard.ProverAction
// provider for keccak; it contains the inputs and outputs of the keccak hash.
provider generic.GenericByteModule
}
// AddressHi is the trimming of HashHi, taking its last 4 bytes.
@@ -65,6 +74,7 @@ func newAddress(comp *wizard.CompiledIOP, size int, ecRec *EcRecover, ac *Antich
isAddressHiEcRec: comp.InsertCommit(0, ifaces.ColIDf("ISADRESS_HI_ECREC"), ecRecSize),
isAddressFromEcRec: createCol("ISADRESS_FROM_ECREC"),
isAddressFromTxnData: createCol("ISADRESS_FROM_TXNDATA"),
hashNum: createCol("HASH_NUM"),
}
// addresses are fetched from two arithmetization modules (ecRecover and txn-data)
@@ -76,6 +86,7 @@ func newAddress(comp *wizard.CompiledIOP, size int, ecRec *EcRecover, ac *Antich
mustBeBinary(comp, addr.isAddressFromEcRec)
mustBeBinary(comp, addr.isAddressFromTxnData)
isZeroWhenInactive(comp, addr.isAddress, ac.IsActive)
isZeroWhenInactive(comp, addr.hashNum, ac.IsActive)
// check the trimming of hashHi to the addressHi
addr.csAddressTrimming(comp)
@@ -94,6 +105,7 @@ func newAddress(comp *wizard.CompiledIOP, size int, ecRec *EcRecover, ac *Antich
[]ifaces.Column{ecRec.Limb}, []ifaces.Column{addr.addressLo},
column.Shift(addr.isAddressHiEcRec, -1), addr.isAddressFromEcRec,
)
td.csTxnData(comp)
// projection from txn-data to address columns
projection.InsertProjection(comp, ifaces.QueryIDf("Project_AddressHi_TxnData"),
@@ -105,6 +117,14 @@ func newAddress(comp *wizard.CompiledIOP, size int, ecRec *EcRecover, ac *Antich
[]ifaces.Column{td.fromLo}, []ifaces.Column{addr.addressLo},
td.isFrom, addr.isAddressFromTxnData,
)
comp.InsertGlobal(0, ifaces.QueryIDf("Hash_NUM_IS_ID"),
sym.Mul(ac.IsActive,
sym.Sub(addr.hashNum, ac.ID, 1)),
)
// set the keccak provider
addr.provider = addr.GetProvider(comp, addr.hashNum, ac.UnalignedGnarkData)
return addr
}
@@ -144,16 +164,16 @@ func (addr *Addresses) csAddressTrimming(comp *wizard.CompiledIOP) {
// It builds a provider from the public key extracted from Gnark-Data (as hash input) and the addresses (as hash output).
// The consistency check is then deferred to the keccak module.
func (addr *Addresses) GetProvider(comp *wizard.CompiledIOP, ac *Antichamber, uaGnark *UnalignedGnarkData) generic.GenericByteModule {
func (addr *Addresses) GetProvider(comp *wizard.CompiledIOP, id ifaces.Column, uaGnark *UnalignedGnarkData) generic.GenericByteModule {
// generate a generic byte Module as keccak provider.
provider := addr.buildGenericModule(ac, uaGnark)
provider := addr.buildGenericModule(id, uaGnark)
return provider
}
// It builds a GenericByteModule from Address columns and Public-Key/GnarkData columns.
func (addr *Addresses) buildGenericModule(ac *Antichamber, uaGnark *UnalignedGnarkData) (pkModule generic.GenericByteModule) {
func (addr *Addresses) buildGenericModule(id ifaces.Column, uaGnark *UnalignedGnarkData) (pkModule generic.GenericByteModule) {
pkModule.Data = generic.GenDataModule{
HashNum: ac.ID,
HashNum: id,
Limb: uaGnark.GnarkData,
// a column of all 16, since all the bytes of public key are used in hashing
@@ -180,8 +200,28 @@ func (addr *Addresses) assignAddress(
uaGnark *UnalignedGnarkData,
td *txnData,
) {
td.assignTxnData(run)
addr.assignMainColumns(run, nbEcRecover, size, ac, uaGnark)
// assign td.isFrom
td.pa_IsZero.Run(run)
// assign HashNum
var (
one = field.One()
id = ac.ID.GetColAssignment(run).IntoRegVecSaveAlloc()
isActive = ac.IsActive.GetColAssignment(run).IntoRegVecSaveAlloc()
hashNum = common.NewVectorBuilder(addr.hashNum)
)
for row := range id {
if isActive[row].IsOne() {
f := *new(field.Element).Add(&id[row], &one)
hashNum.PushField(f)
} else {
hashNum.PushInt(0)
}
}
hashNum.PadAndAssign(run)
addr.assignMainColumns(run, nbEcRecover, size, uaGnark)
addr.assignHelperColumns(run, ecRec)
}
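Taken together, the new Hash_NUM_IS_ID query and the assignment loop above state the same rule from the constraint side and from the witness side (a reading of the hunks, not new code):

    // constraint:  IsActive * (hashNum - ID - 1) == 0
    // assignment:  hashNum[row] = ID[row] + 1  if IsActive[row] == 1, else 0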
@@ -189,12 +229,9 @@ func (addr *Addresses) assignAddress(
func (addr *Addresses) assignMainColumns(
run *wizard.ProverRuntime,
nbEcRecover, size int,
ac *Antichamber,
uaGnark *UnalignedGnarkData,
) {
pkModule := addr.buildGenericModule(ac, uaGnark)
// since we use it just for trace generating, we have to turn-off the info-module (hash output).
pkModule.Info = generic.GenInfoModule{}
pkModule := addr.buildGenericModule(addr.hashNum, uaGnark)
split := splitAt(nbEcRecover)
n := nbRowsPerEcRec
@@ -203,10 +240,7 @@ func (addr *Addresses) assignMainColumns(
hashHi, hashLo, isHash, trimmedHi []field.Element
)
permTrace := keccak.PermTraces{}
genTrace := generic.GenTrace{}
pkModule.AppendTraces(run, &genTrace, &permTrace)
permTrace := keccak.GenerateTrace(pkModule.Data.ScanStreams(run))
var v, w, u field.Element
for _, digest := range permTrace.HashOutPut {
@@ -272,23 +306,9 @@ func splitAt(nbEcRecover int) int {
}
func (td *txnData) csTxnData(comp *wizard.CompiledIOP) {
td.isFrom = comp.InsertCommit(0, ifaces.ColIDf("%v_IsFrom", NAME_ADDRESSES), td.ct.Size())
// check that isFrom == 1 iff ct==1
dedicated.InsertIsTargetValue(comp, 0, ifaces.QueryIDf("IsFrom_IsCorrect"), field.One(), td.ct, td.isFrom)
}
func (td *txnData) assignTxnData(run *wizard.ProverRuntime) {
// assign isFrom via CT
var isFrom []field.Element
ct := td.ct.GetColAssignment(run).IntoRegVecSaveAlloc()
for j := range ct {
if ct[j].IsOne() {
isFrom = append(isFrom, field.One())
} else {
isFrom = append(isFrom, field.Zero())
}
}
run.AssignColumn(td.isFrom.GetColID(), smartvectors.NewRegular(isFrom))
// isFrom == 1 iff ct==1
td.isFrom, td.pa_IsZero = dedicated.IsZero(comp, sym.Sub(td.ct, 1))
}
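The hand-rolled assignTxnData is gone: the isFrom column is now produced by the reusable dedicated.IsZero gadget, which returns both the column and a deferred ProverAction. The pattern, as used above:

    // definition: isFrom[i] = 1 iff ct[i] == 1
    td.isFrom, td.pa_IsZero = dedicated.IsZero(comp, sym.Sub(td.ct, 1))

    // proving: the returned ProverAction assigns isFrom and its helpers
    td.pa_IsZero.Run(run)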
// txndata represents the txn_data module from the arithmetization side.
@@ -298,5 +318,6 @@ type txnData struct {
ct ifaces.Column
// helper column
isFrom ifaces.Column
isFrom ifaces.Column
pa_IsZero wizard.ProverAction
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/ecrecover"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
const (
@@ -58,6 +59,9 @@ type Antichamber struct {
// size of AntiChamber
size int
*Limits
// providers for keccak; they contain the inputs and outputs of the keccak hash.
Providers []generic.GenericByteModule
}
type GnarkData struct {
@@ -77,7 +81,7 @@ func (l *Limits) sizeAntichamber() int {
return utils.NextPowerOfTwo(l.MaxNbEcRecover*nbRowsPerEcRec + l.MaxNbTx*nbRowsPerTxSign)
}
func NewAntichamber(comp *wizard.CompiledIOP, limits *Limits, ecSource *ecDataSource, txSource *txnData, plonkOptions []plonk.Option) *Antichamber {
func NewAntichamber(comp *wizard.CompiledIOP, limits *Limits, ecSource *ecDataSource, txSource *txnData, rlpTxn generic.GenDataModule, plonkOptions []plonk.Option) *Antichamber {
if limits.MaxNbEcRecover+limits.MaxNbTx != limits.NbInputInstance*limits.NbCircuitInstances {
utils.Panic("the number of supported instances %v should be %v + %v", limits.NbInputInstance*limits.NbCircuitInstances, limits.MaxNbEcRecover, limits.MaxNbTx)
}
@@ -97,7 +101,7 @@ func NewAntichamber(comp *wizard.CompiledIOP, limits *Limits, ecSource *ecDataSo
}
// declare submodules
res.txSignature = newTxSignatures(comp, size)
res.txSignature = newTxSignatures(comp, rlpTxn, size)
res.EcRecover = newEcRecover(comp, limits, ecSource)
res.UnalignedGnarkData = newUnalignedGnarkData(comp, size, res.unalignedGnarkDataSource())
res.Addresses = newAddress(comp, size, res.EcRecover, res, txSource)
@@ -125,6 +129,9 @@ func NewAntichamber(comp *wizard.CompiledIOP, limits *Limits, ecSource *ecDataSo
// ecrecover
res.EcRecover.csConstrainAuxProjectionMaskConsistency(comp, res.Source, res.IsFetching)
// collect the keccak providers
res.Providers = append([]generic.GenericByteModule{res.Addresses.provider}, res.txSignature.provider)
return res
}
@@ -138,11 +145,11 @@ func NewAntichamber(comp *wizard.CompiledIOP, limits *Limits, ecSource *ecDataSo
//
// As the initial data is copied from the EC_DATA arithmetization module,
// it has to be provided as an input.
func (ac *Antichamber) Assign(run *wizard.ProverRuntime, ecSrc *ecDataSource, txSource *txnData, txGet TxSignatureGetter) {
func (ac *Antichamber) Assign(run *wizard.ProverRuntime, ecSrc *ecDataSource, txSource *txnData, rlpTxn generic.GenDataModule, txGet TxSignatureGetter) {
nbActualEcRecover := ecSrc.nbActualInstances(run)
ac.assignAntichamber(run, nbActualEcRecover)
ac.EcRecover.Assign(run, ecSrc)
ac.txSignature.assignTxSignature(run, nbActualEcRecover, ac.size)
ac.txSignature.assignTxSignature(run, rlpTxn, nbActualEcRecover, ac.size)
ac.UnalignedGnarkData.Assign(run, ac.unalignedGnarkDataSource(), txGet)
ac.Addresses.assignAddress(run, nbActualEcRecover, ac.size, ac, ac.EcRecover, ac.UnalignedGnarkData, txSource)
ac.AlignedGnarkData.Assign(run)
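Callers now thread the rlp_txn data module through both construction and assignment; a minimal sketch (assuming an rlpTxn generic.GenDataModule is already committed in comp):

    ac := NewAntichamber(comp, limits, ecSrc, txSrc, rlpTxn, plonkOptions)
    // ... later, at proving time:
    ac.Assign(run, ecSrc, txSrc, rlpTxn, txGet)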

View File

@@ -50,14 +50,6 @@ func (ugd *UnalignedGnarkData) cols() []ifaces.Column {
}
}
func (gd *GnarkData) cols() []ifaces.Column {
return []ifaces.Column{
gd.IdPerm,
gd.GnarkIndexPerm,
gd.DataPerm,
}
}
func (ac *Antichamber) unalignedGnarkDataSource() *unalignedGnarkDataSource {
return &unalignedGnarkDataSource{
IsActive: ac.IsActive,

View File

@@ -15,8 +15,8 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/utils/csvtraces"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/acc_module"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic/testdata"
"golang.org/x/crypto/sha3"
)
@@ -39,17 +39,17 @@ func TestAntichamber(t *testing.T) {
NbInputInstance: 5,
NbCircuitInstances: 1,
}
var rlpTxn generic.GenericByteModule
gbmSize := 256
var rlpTxn generic.GenDataModule
// to cover edge cases, leave some rows empty
witSize := gbmSize - gbmSize/15
c := testCaseAntiChamber
// random value for testing edge cases
nbRowsPerTxInTxnData := 3
cmp := wizard.Compile(
func(b *wizard.Builder) {
comp := b.CompiledIOP
// declare rlp_txn module
rlpTxn = acc_module.CommitGBM(comp, 0, generic.RLP_TXN, gbmSize)
rlpTxn = testdata.CreateGenDataModule(comp, "TXN_RLP", 32)
// declare txn_data module
txSrc = commitTxnData(comp, limits, nbRowsPerTxInTxnData)
@@ -65,20 +65,16 @@ func TestAntichamber(t *testing.T) {
IsRes: ct.GetCommit(b, "EC_DATA_IS_RES"),
}
ac = NewAntichamber(b.CompiledIOP, limits, ecSrc, txSrc, []plonk.Option{plonk.WithRangecheck(16, 6, true)})
ac = NewAntichamber(b.CompiledIOP, limits, ecSrc, txSrc, rlpTxn, []plonk.Option{plonk.WithRangecheck(16, 6, true)})
},
dummy.Compile,
)
proof := wizard.Prove(cmp,
func(run *wizard.ProverRuntime) {
// assign random but valid data to rlp_txn module
acc_module.AssignGBMfromTable(run, &rlpTxn, witSize, limits.MaxNbTx)
// get the hash result from the keccak trace for rlp_txn
gt := generic.GenTrace{}
trace := keccak.PermTraces{}
rlpTxn.AppendTraces(run, &gt, &trace)
// assign data to rlp_txn module
testdata.GenerateAndAssignGenDataModule(run, &rlpTxn, c.HashNum, c.ToHash, true)
trace := keccak.GenerateTrace(rlpTxn.ScanStreams(run))
// assign txn_data module from pk
txSrc.assignTxnDataFromPK(run, ac, trace.HashOutPut, nbRowsPerTxInTxnData)
@@ -86,7 +82,7 @@ func TestAntichamber(t *testing.T) {
ct.Assign(run,
"EC_DATA_CS_ECRECOVER", "EC_DATA_ID", "EC_DATA_LIMB", "EC_DATA_SUCCESS_BIT", "EC_DATA_INDEX", "EC_DATA_IS_DATA", "EC_DATA_IS_RES",
)
ac.Assign(run, ecSrc, txSrc, dummyTxSignatureGetter)
ac.Assign(run, ecSrc, txSrc, rlpTxn, dummyTxSignatureGetter)
})
if err := wizard.Verify(cmp, proof); err != nil {
@@ -159,3 +155,8 @@ func generateDeterministicSignature(txHash []byte) (pk *ecdsa.PublicKey, r, s, v
}
return nil, nil, nil, nil, fmt.Errorf("failed to generate a valid signature")
}
var testCaseAntiChamber = makeTestCase{
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2},
ToHash: []int{1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1},
}

View File

@@ -22,9 +22,12 @@ type txSignature struct {
txHashHi ifaces.Column
txHashLo ifaces.Column
isTxHash ifaces.Column
// provider for keccak; it contains the inputs and outputs of the keccak hash.
provider generic.GenericByteModule
}
func newTxSignatures(comp *wizard.CompiledIOP, size int) *txSignature {
func newTxSignatures(comp *wizard.CompiledIOP, rlpTxn generic.GenDataModule, size int) *txSignature {
createCol := createColFn(comp, NAME_TXSIGNATURE, size)
// declare the native columns
@@ -34,18 +37,18 @@ func newTxSignatures(comp *wizard.CompiledIOP, size int) *txSignature {
isTxHash: createCol("TX_IS_HASH_HI"),
}
res.provider = res.GetProvider(comp, rlpTxn)
return res
}
// It builds a provider from rlp-txn (as hash input) and the native columns of TxSignature (as hash output).
// The consistency check is then deferred to the keccak module.
func (txn *txSignature) GetProvider(comp *wizard.CompiledIOP) generic.GenericByteModule {
func (txn *txSignature) GetProvider(comp *wizard.CompiledIOP, rlpTxn generic.GenDataModule) generic.GenericByteModule {
provider := generic.GenericByteModule{}
// get rlp_txn from the compiler trace (the module should already be committed)
rlpTxn := generic.NewGenericByteModule(comp, generic.RLP_TXN)
// pass rlp-txn as DataModule.
provider.Data = rlpTxn.Data
provider.Data = rlpTxn
// generate infoModule from native columns
provider.Info = txn.buildInfoModule()
@@ -66,18 +69,14 @@ func (txn *txSignature) buildInfoModule() generic.GenInfoModule {
}
// it assigns the native columns
func (txn *txSignature) assignTxSignature(run *wizard.ProverRuntime, nbEcRecover, size int) {
func (txn *txSignature) assignTxSignature(run *wizard.ProverRuntime, rlpTxn generic.GenDataModule, nbEcRecover, size int) {
n := startAt(nbEcRecover)
hashHi := vector.Repeat(field.Zero(), n)
hashLo := vector.Repeat(field.Zero(), n)
isTxHash := vector.Repeat(field.Zero(), n)
comp := run.Spec
rlpTxn := generic.NewGenericByteModule(comp, generic.RLP_TXN)
permTrace := keccak.PermTraces{}
genTrace := generic.GenTrace{}
rlpTxn.AppendTraces(run, &genTrace, &permTrace)
permTrace := keccak.GenerateTrace(rlpTxn.ScanStreams(run))
var v, w field.Element
for _, digest := range permTrace.HashOutPut {

View File

@@ -5,40 +5,50 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/acc_module"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic/testdata"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak"
"github.com/stretchr/testify/assert"
)
func TestTxnSignature(t *testing.T) {
c := RLP_TXN_test
limits := &Limits{
MaxNbEcRecover: 5,
MaxNbTx: 5,
}
gbmSize := 256
size := limits.sizeAntichamber()
m := &keccak.KeccakSingleProvider{}
m := keccak.Module{}
ac := Antichamber{Limits: limits}
var txSign *txSignature
gbm := generic.GenericByteModule{}
rlpTxn := generic.GenDataModule{}
nbKeccakF := ac.nbKeccakF(3)
compiled := wizard.Compile(func(b *wizard.Builder) {
comp := b.CompiledIOP
gbm = acc_module.CommitGBM(comp, 0, generic.RLP_TXN, gbmSize)
txSign = newTxSignatures(comp, size)
provider := txSign.GetProvider(comp)
m.Define(comp, []generic.GenericByteModule{provider}, nbKeccakF)
rlpTxn = testdata.CreateGenDataModule(comp, "RLP_TXN", gbmSize)
txSign = newTxSignatures(comp, rlpTxn, size)
provider := txSign.GetProvider(comp, rlpTxn)
keccakInp := keccak.KeccakSingleProviderInput{
Provider: provider,
MaxNumKeccakF: nbKeccakF,
}
m = keccak.NewKeccakSingleProvider(comp, keccakInp)
}, dummy.Compile)
proof := wizard.Prove(compiled, func(run *wizard.ProverRuntime) {
witSize := gbmSize - gbmSize/15
acc_module.AssignGBMfromTable(run, &gbm, witSize, limits.MaxNbTx)
txSign.assignTxSignature(run, limits.MaxNbEcRecover, size)
m.AssignKeccak(run)
testdata.GenerateAndAssignGenDataModule(run, &rlpTxn, c.HashNum, c.ToHash, true)
txSign.assignTxSignature(run, rlpTxn, limits.MaxNbEcRecover, size)
m.Run(run)
})
assert.NoError(t, wizard.Verify(compiled, proof))
}
var RLP_TXN_test = makeTestCase{
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5},
ToHash: []int{1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0},
}

View File

@@ -1,7 +1,7 @@
package antichamber
import (
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
@@ -9,6 +9,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"golang.org/x/crypto/sha3"
)
@@ -29,16 +30,14 @@ func commitEcRecTxnData(comp *wizard.CompiledIOP, size1 int, size int, ac *Antic
func AssignEcRecTxnData(
run *wizard.ProverRuntime,
gbm generic.GenericByteModule,
gbm generic.GenDataModule,
nbEcRec, nbTxS int,
sizeTxnData, size int,
td *txnData, ecRec *EcRecover,
ac *Antichamber,
) {
permTrace := &permTrace.PermTraces{}
genTrace := &generic.GenTrace{}
gbm.AppendTraces(run, genTrace, permTrace)
permTrace := keccak.GenerateTrace(gbm.ScanStreams(run))
// now assign ecRecover.Limb and txn_data.From from the permutation trace.
isEcRecRes := make([]field.Element, nbEcRec*nbRowsPerEcRec)

View File

@@ -1,4 +1,4 @@
package acc_module
package antichamber
import (
"crypto/rand"
@@ -7,7 +7,9 @@ import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/sirupsen/logrus"
@@ -307,3 +309,55 @@ func belongsTo(a int, set []int) int {
}
return b
}
func InsertIsTargetValue(
comp *wizard.CompiledIOP,
round int,
queryName ifaces.QueryID,
targetVal field.Element,
colA ifaces.Column,
colB any,
) {
// declare the new column colC
colC := comp.InsertCommit(round, ifaces.ColIDf(string(queryName)), colA.Size())
// to have colB[i] = 1 iff colA[i]=targetValue
// impose the three following constraints (where t = targetValue - colA)
//
// 1. t * (t * colC -1) =0
//
// 2. t * colB = 0
//
// 3. (t * colC + colB - 1) = 0
//
// t := targetVal - colA
t := symbolic.Sub(targetVal, colA)
// if t[i] !=0 ---> colC[i] = t^(-1)
// i.e., t * (t * colC -1) =0
expr := symbolic.Mul(symbolic.Sub(symbolic.Mul(t, colC), 1), t)
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", string(queryName), 1), expr)
// t * colB = 0
expr = symbolic.Mul(t, colB)
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", string(queryName), 2), expr)
// (t * colC + colB - 1) = 0
expr = symbolic.Sub(symbolic.Add(symbolic.Mul(t, colC), colB), 1)
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", string(queryName), 3), expr)
comp.SubProvers.AppendToInner(round,
func(run *wizard.ProverRuntime) {
assignInverse(run, targetVal, colA, colC)
},
)
}
func assignInverse(run *wizard.ProverRuntime, targetVal field.Element, colA, colC ifaces.Column) {
cola := colA.GetColAssignment(run).IntoRegVecSaveAlloc()
colCWit := make([]field.Element, len(cola))
var res field.Element
for i := range cola {
res.Sub(&targetVal, &cola[i])
colCWit[i].Inverse(&res)
}
run.AssignColumn(colC.GetColID(), smartvectors.NewRegular(colCWit))
}
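A self-contained, brute-force check that the three constraints really force colB = (colA == targetVal). It works over the toy field F_7 rather than the real scalar field; the toy modulus is an assumption made purely for illustration:

    package main

    import "fmt"

    const p = 7 // toy field modulus, for illustration only

    func mod(x int) int { return ((x % p) + p) % p }

    // satisfiable reports whether some witness c makes all three
    // constraints hold for the row values (a, b): with t = target - a,
    //   (1) t*(t*c - 1) = 0   (c is t's inverse whenever t != 0)
    //   (2) t*b         = 0   (b vanishes whenever t != 0)
    //   (3) t*c + b - 1 = 0   (b is 1 whenever t == 0)
    func satisfiable(a, b, target int) bool {
        t := mod(target - a)
        for c := 0; c < p; c++ {
            if mod(t*(t*c-1)) == 0 && mod(t*b) == 0 && mod(t*c+b-1) == 0 {
                return true
            }
        }
        return false
    }

    func main() {
        const target = 1
        for a := 0; a < p; a++ {
            for b := 0; b < p; b++ {
                want := b == 0
                if a == target {
                    want = b == 1
                }
                if satisfiable(a, b, target) != want {
                    fmt.Println("unexpected at", a, b)
                    return
                }
            }
        }
        fmt.Println("colB = 1 exactly when colA = target") // check passes
    }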

View File

@@ -1,75 +0,0 @@
package acc_module
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/datatransfer"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates the Define and Assign functions of the Bridging module, for testing
func makeTestCaseBridging(numModules int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbmSize := make([]int, numModules)
def := make([]generic.GenericByteModuleDefinition, numModules)
gbms := make([]generic.GenericByteModule, numModules)
maxNumKeccakF := 128
info := InfoModule{}
d := &DataModule{}
dt := datatransfer.Module{}
def[0] = generic.RLP_ADD
def[1] = generic.SHAKIRA
gbmSize[0] = 8
gbmSize[1] = 128
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
for i := range gbms {
gbms[i] = CommitGBM(comp, round, def[i], gbmSize[i])
}
d.NewDataModule(comp, round, maxNumKeccakF, gbms[:])
dt.Provider = d.Provider
dt.NewDataTransfer(comp, round, maxNumKeccakF, 0)
info.NewInfoModule(comp, round, maxNumKeccakF, gbms, dt.HashOutput, *d)
}
prover = func(run *wizard.ProverRuntime) {
traces1 := permTrace.PermTraces{}
traces2 := permTrace.PermTraces{}
gt := make([]generic.GenTrace, numModules)
for i := range gbms {
AssignGBMfromTable(run, &gbms[i], gbmSize[i]-3, gbmSize[i]/5)
gbms[i].AppendTraces(run, &gt[i], &traces1)
}
d.AssignDataModule(run, gbms)
sGT := generic.GenTrace{}
d.Provider.AppendTraces(run, &sGT, &traces2)
if len(traces1.HashOutPut) != len(traces2.HashOutPut) {
utils.Panic("trace are not the same")
}
dt.AssignModule(run, traces2, sGT)
// dt.HashOutput.AssignHashOutPut(run, traces)
info.AssignInfoModule(run, gbms)
}
return define, prover
}
func TestBridging(t *testing.T) {
define, prover := makeTestCaseBridging(2)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,183 +0,0 @@
// The accumulator package is responsible for accumulating the data from different arithmetization modules.
// The accumulated data is then sent to the datatransfer module to be prepared for the keccak hash.
package acc_module
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
projection "github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
const (
blockSize = 136 // number of bytes in the block
)
// The sub-module DataModule filters the data from different arithmetization module,
//
// and stitch them together to build a single module.
type DataModule struct {
// stitching of modules together
Provider generic.GenericByteModule
// filter indicating where each original module is located over the stitched one
sFilters []ifaces.Column
// the active part of the stitching module
isActive ifaces.Column
// max number of rows for the stitched module
MaxNumRows int
}
// It declares the new columns and the constraints among them
func (d *DataModule) NewDataModule(
comp *wizard.CompiledIOP,
round int,
maxNumKeccakf int,
gbms []generic.GenericByteModule,
) {
d.MaxNumRows = utils.NextPowerOfTwo(maxNumKeccakf * blockSize)
// declare the new columns per gbm
d.declareColumns(comp, round, gbms)
// constraints over sFilters
//
// 1. they are binary
//
// 2. they don't overlap: isActive = \sum sFilters
//
// 3. sFilter[i] starts immediately after sFilters[i-1].
isActive := symbolic.NewConstant(0)
for i := range d.sFilters {
comp.InsertGlobal(round, ifaces.QueryIDf("sFilter_IsBinary_%v", i),
symbolic.Mul(d.sFilters[i], symbolic.Sub(1, d.sFilters[i])))
isActive = symbolic.Add(d.sFilters[i], isActive)
}
comp.InsertGlobal(round, ifaces.QueryIDf("sFilters_NoOverlap"), symbolic.Sub(d.isActive, isActive))
// for constraint 3; over (1-\sum_{j<i} sFilters[j])*isActive we need that
// sFilters[i] has the form (oneThenZeros), namely it starts with ones followed by zeroes.
s := symbolic.NewConstant(0)
for i := range d.sFilters {
// over (1-s)*isActive, sFilter[i] is oneThenZero
// sFilter[i] is oneThenZero is equivalent with b (in the following) is binary
b := symbolic.Sub(d.sFilters[i], column.Shift(d.sFilters[i], 1)) // should be binary
comp.InsertGlobal(round, ifaces.QueryIDf("IsOne_ThenZero_%v", i),
symbolic.Mul(symbolic.Sub(1, s), d.isActive, symbolic.Mul(symbolic.Sub(1, b), b)))
s = symbolic.Add(s, d.sFilters[i])
}
// projection among gbms and stitched module
for i, gbm := range gbms {
projection.InsertProjection(comp, ifaces.QueryIDf("Stitch_Modules_%v", i),
[]ifaces.Column{gbm.Data.HashNum, gbm.Data.Limb, gbm.Data.NBytes, gbm.Data.Index},
[]ifaces.Column{d.Provider.Data.HashNum, d.Provider.Data.Limb, d.Provider.Data.NBytes, d.Provider.Data.Index},
gbm.Data.TO_HASH,
d.sFilters[i],
)
}
// constraints over isActive
// 1. it is binary
// 2. it is zeroes followed by ones
comp.InsertGlobal(round, ifaces.QueryIDf("IsActive_IsBinary_DataTrace"),
symbolic.Mul(d.isActive, symbolic.Sub(1, isActive)))
col := symbolic.Sub(column.Shift(d.isActive, 1), d.isActive) // should be binary
comp.InsertGlobal(round, ifaces.QueryIDf("IsOneThenZero_DataTrace"),
symbolic.Mul(col, symbolic.Sub(1, col)))
}
// It declares the columns specific to the DataModule
func (d *DataModule) declareColumns(comp *wizard.CompiledIOP, round int, gbms []generic.GenericByteModule) {
d.sFilters = make([]ifaces.Column, len(gbms))
for i := range gbms {
d.sFilters[i] = comp.InsertCommit(round, ifaces.ColIDf("sFilter_%v", i), d.MaxNumRows)
}
d.isActive = comp.InsertCommit(round, ifaces.ColIDf("IsActive"), d.MaxNumRows)
d.Provider.Data.HashNum = comp.InsertCommit(round, ifaces.ColIDf("sHashNum"), d.MaxNumRows)
d.Provider.Data.Limb = comp.InsertCommit(round, ifaces.ColIDf("sLimb"), d.MaxNumRows)
d.Provider.Data.NBytes = comp.InsertCommit(round, ifaces.ColIDf("sNBytes"), d.MaxNumRows)
d.Provider.Data.Index = comp.InsertCommit(round, ifaces.ColIDf("sIndex"), d.MaxNumRows)
d.Provider.Data.TO_HASH = d.isActive
}
// It assigns the columns specific to the submodule.
func (d *DataModule) AssignDataModule(
run *wizard.ProverRuntime,
gbms []generic.GenericByteModule) {
// fetch the gbm witnesses
gt := make([]generic.GenTrace, len(gbms))
for i := range gbms {
gt[i].HashNum = extractColLeftPadded(run, gbms[i].Data.HashNum)
gt[i].Limb = extractColLeftPadded(run, gbms[i].Data.Limb)
gt[i].NByte = extractColLeftPadded(run, gbms[i].Data.NBytes)
gt[i].Index = extractColLeftPadded(run, gbms[i].Data.Index)
gt[i].TO_HASH = extractColLeftPadded(run, gbms[i].Data.TO_HASH)
}
sFilters := make([][]field.Element, len(gbms))
for i := range gbms {
// remember that gt is the gbms assignment removing the padded part
filter := gt[i].TO_HASH
// populate sFilters
for j := range sFilters {
for k := range filter {
if filter[k] == field.One() {
if j == i {
sFilters[j] = append(sFilters[j], field.One())
} else {
sFilters[j] = append(sFilters[j], field.Zero())
}
}
}
}
}
//assign sFilters
for i := range gbms {
run.AssignColumn(d.sFilters[i].GetColID(), smartvectors.LeftZeroPadded(sFilters[i], d.MaxNumRows))
}
// populate and assign isActive
isActive := vector.Repeat(field.One(), len(sFilters[0]))
run.AssignColumn(d.isActive.GetColID(), smartvectors.LeftZeroPadded(isActive, d.MaxNumRows))
// populate sModule
var sHashNum, sLimb, sNBytes, sIndex []field.Element
for i := range gbms {
filter := gt[i].TO_HASH
hashNum := gt[i].HashNum
limb := gt[i].Limb
nBytes := gt[i].NByte
index := gt[i].Index
for j := range filter {
if filter[j] == field.One() {
sHashNum = append(sHashNum, hashNum[j])
sLimb = append(sLimb, limb[j])
sNBytes = append(sNBytes, nBytes[j])
sIndex = append(sIndex, index[j])
}
}
}
run.AssignColumn(d.Provider.Data.HashNum.GetColID(), smartvectors.LeftZeroPadded(sHashNum, d.MaxNumRows))
run.AssignColumn(d.Provider.Data.Limb.GetColID(), smartvectors.LeftZeroPadded(sLimb, d.MaxNumRows))
run.AssignColumn(d.Provider.Data.NBytes.GetColID(), smartvectors.LeftZeroPadded(sNBytes, d.MaxNumRows))
run.AssignColumn(d.Provider.Data.Index.GetColID(), smartvectors.LeftZeroPadded(sIndex, d.MaxNumRows))
}

View File

@@ -1,95 +0,0 @@
package acc_module
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates the Define and Assign functions of the Data module, for testing
func makeTestCaseDataModule(numModules int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbmSize := make([]int, numModules)
def := make([]generic.GenericByteModuleDefinition, numModules)
gbms := make([]generic.GenericByteModule, numModules)
maxNumKeccakF := 8
d := DataModule{}
def[0] = generic.RLP_ADD
def[1] = generic.SHAKIRA
def[2] = module1
def[3] = module2
gbmSize[0] = 8
gbmSize[1] = 8
gbmSize[2] = 32
gbmSize[3] = 8
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
for i := range gbms {
gbms[i] = CommitGBM(comp, round, def[i], gbmSize[i])
}
d.NewDataModule(comp, round, maxNumKeccakF, gbms[:])
}
prover = func(run *wizard.ProverRuntime) {
traces := permTrace.PermTraces{}
gt := make([]generic.GenTrace, numModules)
for i := range gbms {
AssignGBMfromTable(run, &gbms[i], gbmSize[i]-5, gbmSize[i]/5)
gbms[i].AppendTraces(run, &gt[i], &traces)
}
d.AssignDataModule(run, gbms)
}
return define, prover
}
func TestDataModule(t *testing.T) {
define, prover := makeTestCaseDataModule(4)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
var module1 = generic.GenericByteModuleDefinition{
Data: generic.DataDef{
HashNum: "module1_HashNum",
Limb: "module1_Limb",
NBytes: "module1_NBytes",
TO_HASH: "module1_TO_Hash",
Index: "module1_Index",
},
Info: generic.InfoDef{
HashNum: "module1_HashNum_Info",
HashLo: "module1_HashLo",
HashHi: "module1_HashHi",
IsHashLo: "module1_IsHashLo",
IsHashHi: "module1_IsHashHi",
},
}
var module2 = generic.GenericByteModuleDefinition{
Data: generic.DataDef{
HashNum: "module2_HashNum",
Limb: "module2_Limb",
NBytes: "module2_NBytes",
TO_HASH: "module2_TO_Hash",
Index: "module2_Index",
},
Info: generic.InfoDef{
HashNum: "module2_HashNum_Info",
HashLo: "module2_HashLo",
HashHi: "module2_HashHi",
IsHashLo: "module2_IsHashLo",
IsHashHi: "module2_IsHashHi",
},
}

View File

@@ -1,153 +0,0 @@
package acc_module
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
projection "github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/datatransfer"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
type InfoModule struct {
// filtering outputs for the corresponding module
sFilters []ifaces.Column
// ID of the hash
hashNumOut ifaces.Column
isActive ifaces.Column
maxNumRows int
}
func (info *InfoModule) NewInfoModule(
comp *wizard.CompiledIOP,
round, maxNumKeccakF int,
gbms []generic.GenericByteModule,
hashOutput datatransfer.HashOutput,
data DataModule,
) {
info.maxNumRows = utils.NextPowerOfTwo(maxNumKeccakF)
size := utils.NextPowerOfTwo(maxNumKeccakF)
// declare columns
info.declareColumns(comp, round, size, gbms)
// declare constraints
// constraints over sFilters
//
// 1. they are binary
//
// 2. they don't overlap: isActive = \sum sFilters
//
// 3. sFilter[i] starts immediately after sFilters[i-1].
isActive := symbolic.NewConstant(0)
for i := range info.sFilters {
comp.InsertGlobal(round, ifaces.QueryIDf("sFilter_IsBinary_Info_%v", i),
symbolic.Mul(info.sFilters[i], symbolic.Sub(1, info.sFilters[i])))
isActive = symbolic.Add(info.sFilters[i], isActive)
}
comp.InsertGlobal(round, ifaces.QueryIDf("sFilters_NoOverlap_Info"), symbolic.Sub(info.isActive, isActive))
// for constraint 3; over (1-\sum_{j<i} sFilters[j])*isActive we need that
// sFilters[i] has the form (oneThenZeros), namely it starts with ones followed by zeroes.
s := symbolic.NewConstant(0)
for i := range info.sFilters {
// over (1-s)*isActive, sFilter[i] is oneThenZero
// sFilter[i] is oneThenZero is equivalent with b (in the following) is binary
b := symbolic.Sub(info.sFilters[i], column.Shift(info.sFilters[i], 1)) // should be binary
comp.InsertGlobal(round, ifaces.QueryIDf("IsOne_ThenZero_Info%v", i),
symbolic.Mul(symbolic.Sub(1, s), info.isActive, symbolic.Mul(symbolic.Sub(1, b), b)))
s = symbolic.Add(s, info.sFilters[i])
}
// constraints over isActive
// 1. It is Binary
// 2. It has ones followed by zeroes
comp.InsertGlobal(round, ifaces.QueryIDf("IsActive_IsBinary_InfoTrace"),
symbolic.Mul(info.isActive, symbolic.Sub(1, isActive)))
col := symbolic.Sub(column.Shift(info.isActive, -1), info.isActive) // should be binary
comp.InsertGlobal(round, ifaces.QueryIDf("IsOneThenZero_InfoTrace"),
symbolic.Mul(col, symbolic.Sub(1, col)))
// Projection between hashOutputs
for i := range gbms {
projection.InsertProjection(comp, ifaces.QueryIDf("Project_HashLo_%v", i),
[]ifaces.Column{hashOutput.HashLo},
[]ifaces.Column{gbms[i].Info.HashLo},
info.sFilters[i],
gbms[i].Info.IsHashLo)
projection.InsertProjection(comp, ifaces.QueryIDf("Project_HashHi_%v", i),
[]ifaces.Column{hashOutput.HashHi},
[]ifaces.Column{gbms[i].Info.HashHi},
info.sFilters[i],
gbms[i].Info.IsHashHi)
}
}
// declare columns
func (info *InfoModule) declareColumns(
comp *wizard.CompiledIOP,
round, size int,
gbms []generic.GenericByteModule,
) {
info.isActive = comp.InsertCommit(round, ifaces.ColIDf("IsActive_Info"), size)
info.sFilters = make([]ifaces.Column, len(gbms))
for i := range gbms {
info.sFilters[i] = comp.InsertCommit(round, ifaces.ColIDf("sFilterOut_%v", i), size)
}
info.hashNumOut = comp.InsertCommit(round, ifaces.ColIDf("hashNumOut"), size)
}
func (info *InfoModule) AssignInfoModule(
run *wizard.ProverRuntime,
gbms []generic.GenericByteModule,
) {
// fetch the witnesses of gbm
gt := make([]generic.GenTrace, len(gbms))
for i := range gbms {
gt[i].HashNum = extractColLeftPadded(run, gbms[i].Data.HashNum)
gt[i].Limb = extractColLeftPadded(run, gbms[i].Data.Limb)
gt[i].NByte = extractColLeftPadded(run, gbms[i].Data.NBytes)
gt[i].Index = extractColLeftPadded(run, gbms[i].Data.Index)
gt[i].TO_HASH = extractColLeftPadded(run, gbms[i].Data.TO_HASH)
}
// populate hashNumOut and sFilters
var hashNumOut []field.Element
sFilters := make([][]field.Element, len(gt))
for i := range gt {
for k := range gt[i].Index {
if gt[i].Index[k] == field.Zero() && gt[i].TO_HASH[k] == field.One() {
hashNumOut = append(hashNumOut, gt[i].HashNum[k])
for j := range gt {
if j == i {
sFilters[j] = append(sFilters[j], field.One())
} else {
sFilters[j] = append(sFilters[j], field.Zero())
}
}
}
}
}
run.AssignColumn(info.hashNumOut.GetColID(), smartvectors.RightZeroPadded(hashNumOut, info.maxNumRows))
for i := range gt {
run.AssignColumn(info.sFilters[i].GetColID(), smartvectors.RightZeroPadded(sFilters[i], info.maxNumRows))
}
// populate and assign isActive
isActive := vector.Repeat(field.One(), len(sFilters[0]))
run.AssignColumn(info.isActive.GetColID(), smartvectors.RightZeroPadded(isActive, info.maxNumRows))
}

View File

@@ -1,72 +0,0 @@
package acc_module
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/datatransfer"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates the Define and Assign functions of the Info module, for testing
func makeTestCaseInfoModule(numModules int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbmSize := make([]int, numModules)
def := make([]generic.GenericByteModuleDefinition, numModules)
gbms := make([]generic.GenericByteModule, numModules)
maxNumKeccakF := 128
info := InfoModule{}
d := &DataModule{}
dt := datatransfer.Module{}
d.MaxNumRows = 1024
dt.HashOutput.MaxNumRows = maxNumKeccakF
def[0] = generic.RLP_ADD
def[1] = generic.SHAKIRA
def[2] = module1
def[3] = module2
gbmSize[0] = 32
gbmSize[1] = 32
gbmSize[2] = 32
gbmSize[3] = 32
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
for i := range gbms {
gbms[i] = CommitGBM(comp, round, def[i], gbmSize[i])
}
d.declareColumns(comp, round, gbms)
dt.HashOutput.DeclareColumns(comp, round)
info.NewInfoModule(comp, round, maxNumKeccakF, gbms, dt.HashOutput, *d)
}
prover = func(run *wizard.ProverRuntime) {
traces := permTrace.PermTraces{}
gt := make([]generic.GenTrace, numModules)
for i := range gbms {
AssignGBMfromTable(run, &gbms[i], gbmSize[i]-7, gbmSize[i]/5)
gbms[i].AppendTraces(run, &gt[i], &traces)
}
d.AssignDataModule(run, gbms)
dt.HashOutput.AssignHashOutPut(run, traces)
info.AssignInfoModule(run, gbms)
}
return define, prover
}
func TestInfoModule(t *testing.T) {
define, prover := makeTestCaseInfoModule(4)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,62 +0,0 @@
package acc_module
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
)
// Extract a shallow copy of the active zone of a column. Meaning the unpadded
// area where the column encodes actual data.
func extractColLeftPadded(
run *wizard.ProverRuntime,
col ifaces.Column,
) []field.Element {
// Fetches the smart-vector and delimits the active zone. Here we assume
// that all the columns are zero-prepended and have the same length. That's
// why stop - density gives us the starting position for scanning the
// witness.
var (
col_ = run.Columns.MustGet(col.GetColID())
density = smartvectors.Density(col_)
stop = col_.Len()
start = stop - smartvectors.Density(col_)
)
// Calling subvector would result in an exception. Thus, we treat it as a
// special case and return an empty vector.
if density == 0 {
return []field.Element{}
}
// Extract the assignments through a shallow copy.
return col_.SubVector(start, stop).IntoRegVecSaveAlloc()
}
func extractColRightPadded(
run *wizard.ProverRuntime,
col ifaces.Column,
) []field.Element {
// Fetches the smart-vector and delimits the active zone. Here we assume
// that all the columns are zero-appended and have the same length. That's
// why start + density gives us the stopping position for scanning the
// witness.
var (
col_ = run.Columns.MustGet(col.GetColID())
density = smartvectors.Density(col_)
start = 0
stop = start + density
)
// Calling subvector would result in an exception. Thus, we treat it as a
// special case and return an empty vector.
if density == 0 {
return []field.Element{}
}
// Extract the assignments through a shallow copy.
return col_.SubVector(start, stop).IntoRegVecSaveAlloc()
}

View File

@@ -1,194 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/keccakf"
)
// The submodule baseConversion implements the base conversion over the lanes, in order to export them to the keccakf.
// The lanes from the first block of hash are in baseA and others are in baseB.
type baseConversion struct {
// It is 1 when the lane is from the first block of the hash
IsFromFirstBlock ifaces.Column
// IsFromBlockBaseB := 1-isFromFirstBlock
IsFromBlockBaseB ifaces.Column
// Decomposition of lanes into slices of 16bits
laneSlices [4]ifaces.Column
// Slices in baseX; the one from first blocks are in baseA and others are in baseB.
laneSlicesX [4]ifaces.Column
// lanes from first block in baseA, others in baseB
LaneX ifaces.Column
}
func (b *baseConversion) newBaseConversionOfLanes(
comp *wizard.CompiledIOP,
round, maxNumRows int,
lane lane,
lu lookUpTables,
) {
// declare the columns
b.insertCommit(comp, round, maxNumRows)
// declare the constraints
// 0. isFromFirstBlock is well formed
// 1. base conversion via lookups
b.csIsFromFirstBlock(comp, round, lane)
b.csBaseConversion(comp, round, lane, lu)
}
func (b *baseConversion) insertCommit(comp *wizard.CompiledIOP, round, maxNumRows int) {
for j := range b.laneSlices {
b.laneSlices[j] = comp.InsertCommit(round, ifaces.ColIDf("SlicesUint16_%v", j), maxNumRows)
b.laneSlicesX[j] = comp.InsertCommit(round, ifaces.ColIDf("SlicesX_%v", j), maxNumRows)
}
b.LaneX = comp.InsertCommit(round, ifaces.ColIDf("LaneX"), maxNumRows)
b.IsFromFirstBlock = comp.InsertCommit(round, ifaces.ColIDf("IsFromFirstBlock"), maxNumRows)
b.IsFromBlockBaseB = comp.InsertCommit(round, ifaces.ColIDf("IsFromBlockBaseB"), maxNumRows)
}
// assign the columns specific to the submodule
func (b *baseConversion) assignBaseConversion(run *wizard.ProverRuntime, l lane, maxNumRows int) {
b.assignIsFromFirstBlock(run, l, maxNumRows)
b.assignSlicesLaneX(run, l, maxNumRows)
}
func (b *baseConversion) csIsFromFirstBlock(comp *wizard.CompiledIOP, round int, l lane) {
// isFromFirstBlock = sum_j Shift(l.isFirstLaneFromNewHash,-j) for j:=0,...,16
s := symbolic.NewConstant(0)
for j := 0; j < numLanesInBlock; j++ {
s = symbolic.Add(s, column.Shift(l.isFirstLaneOfNewHash, -j))
}
comp.InsertGlobal(round, ifaces.QueryIDf("IsFromFirstBlock"),
symbolic.Sub(s, b.IsFromFirstBlock))
// isFromBlockBaseB = (1- isFromFirstBlock) * isLaneActive
comp.InsertGlobal(round, ifaces.QueryIDf("isNotFirstBlock"),
symbolic.Mul(symbolic.Sub(b.IsFromBlockBaseB,
symbolic.Sub(1, b.IsFromFirstBlock)), l.isLaneActive))
}
func (b *baseConversion) csBaseConversion(comp *wizard.CompiledIOP, round int, l lane, lu lookUpTables) {
// if isFromFirstBlock = 1 ---> convert to keccak.BaseA
// otherwise convert to keccak.BaseB
// first decompose into slices of 16 bits and then convert via lookUps
res := keccakf.BaseRecomposeHandles(b.laneSlices[:], power16)
comp.InsertGlobal(round, ifaces.QueryIDf("RecomposeLaneFromUint16"), symbolic.Sub(res, l.lane))
// base conversion slice by slice and via lookups
for j := range b.laneSlices {
comp.InsertInclusionConditionalOnIncluded(round,
ifaces.QueryIDf("BaseConversionIntoBaseA_%v", j),
[]ifaces.Column{lu.colUint16, lu.colBaseA},
[]ifaces.Column{b.laneSlices[j], b.laneSlicesX[j]},
b.IsFromFirstBlock)
comp.InsertInclusionConditionalOnIncluded(round,
ifaces.QueryIDf("BaseConversionIntoBaseB_%v", j),
[]ifaces.Column{lu.colUint16, lu.colBaseB},
[]ifaces.Column{b.laneSlices[j], b.laneSlicesX[j]},
b.IsFromBlockBaseB)
}
// recomposition of slicesX into blockX
base1Power16 := keccakf.BaseAPow4 * keccakf.BaseAPow4 * keccakf.BaseAPow4 * keccakf.BaseAPow4 // no overflow
base2Power16 := keccakf.BaseBPow4 * keccakf.BaseBPow4 * keccakf.BaseBPow4 * keccakf.BaseBPow4
base := symbolic.Add(symbolic.Mul(b.IsFromFirstBlock, base1Power16),
symbolic.Mul(b.IsFromBlockBaseB, base2Power16))
laneX := baseRecomposeHandles(b.laneSlicesX[:], base)
comp.InsertGlobal(round, ifaces.QueryIDf("RecomposeLaneFromBaseX"), symbolic.Sub(laneX, b.LaneX))
}
// assign column isFromFirstBlock
func (b *baseConversion) assignIsFromFirstBlock(run *wizard.ProverRuntime, l lane, maxNumRows int) {
ones := vector.Repeat(field.One(), numLanesInBlock)
var col []field.Element
witSize := smartvectors.Density(l.isFirstLaneOfNewHash.GetColAssignment(run))
isFirstLaneOfNewHash := l.isFirstLaneOfNewHash.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
for j := 0; j < witSize; j++ {
if isFirstLaneOfNewHash[j] == field.One() {
col = append(col, ones...)
j = j + (numLanesInBlock - 1)
} else {
col = append(col, field.Zero())
}
}
oneCol := vector.Repeat(field.One(), witSize)
isNotFirstBlock := make([]field.Element, witSize)
vector.Sub(isNotFirstBlock, oneCol, col)
run.AssignColumn(b.IsFromFirstBlock.GetColID(), smartvectors.RightZeroPadded(col, maxNumRows))
run.AssignColumn(b.IsFromBlockBaseB.GetColID(), smartvectors.RightZeroPadded(isNotFirstBlock, maxNumRows))
}
// assign slices for the base conversion; Slice, SliceX, laneX
func (b *baseConversion) assignSlicesLaneX(
run *wizard.ProverRuntime,
l lane, maxNumRows int) {
witSize := smartvectors.Density(b.IsFromFirstBlock.GetColAssignment(run))
isFirstBlock := b.IsFromFirstBlock.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
lane := l.lane.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
// decomposition
// populating slices
var laneSlices [4][]field.Element
var laneSlicesX [4][]field.Element
for i := range lane {
v := keccakf.DecomposeSmall(lane[i].Uint64(), power16, 4)
for k := 0; k < 4; k++ {
laneSlices[k] = append(laneSlices[k], v[k])
}
}
// assign the slice-columns
for j := range b.laneSlices {
run.AssignColumn(b.laneSlices[j].GetColID(), smartvectors.RightZeroPadded(laneSlices[j], maxNumRows))
}
// base conversion (slice by slice)
// populating sliceX
for i := range lane {
if isFirstBlock[i].Uint64() == 1 {
// base conversion to baseA
for k := 0; k < 4; k++ {
laneSlicesX[k] = append(laneSlicesX[k], uInt16ToBaseX(uint16(laneSlices[k][i].Uint64()), &keccakf.BaseAFr))
}
} else {
// base conversion to baseB
for k := 0; k < 4; k++ {
laneSlicesX[k] = append(laneSlicesX[k], uInt16ToBaseX(uint16(laneSlices[k][i].Uint64()), &keccakf.BaseBFr))
}
}
}
// assign the sliceX-columns
for j := range b.laneSlicesX {
run.AssignColumn(b.laneSlicesX[j].GetColID(), smartvectors.RightZeroPadded(laneSlicesX[j], maxNumRows))
}
// populate laneX
var laneX []field.Element
for j := range lane {
if isFirstBlock[j] == field.One() {
laneX = append(laneX, keccakf.U64ToBaseX(lane[j].Uint64(), &keccakf.BaseAFr))
} else {
laneX = append(laneX, keccakf.U64ToBaseX(lane[j].Uint64(), &keccakf.BaseBFr))
}
}
// assign the laneX
run.AssignColumn(b.LaneX.GetColID(), smartvectors.RightZeroPadded(laneX, maxNumRows))
}

View File

@@ -1,71 +0,0 @@
package datatransfer
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
func makeTestCaseBaseConversionModule() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbm := generic.GenericByteModule{}
iPadd := importAndPadd{}
cld := cleanLimbDecomposition{nbCld: maxLanesFromLimb, nbCldSlices: numBytesInLane}
s := spaghettizedCLD{}
l := lane{}
b := baseConversion{}
def := generic.PHONEY_RLP
cldSize := 2048
gbmSize := 512
spaghettiSize := 8 * cldSize
laneSize := 4 * cldSize
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
gbm = CommitGBM(comp, round, def, gbmSize)
lu := newLookupTables(comp)
iPadd.insertCommit(comp, round, cldSize)
cld.insertCommit(comp, round, cldSize)
s.insertCommit(comp, round, cld, spaghettiSize)
l.insertCommitForTest(comp, round, spaghettiSize, laneSize)
b.newBaseConversionOfLanes(comp, round, laneSize, l, lu)
}
prover = func(run *wizard.ProverRuntime) {
permTrace := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &permTrace)
iPadd.assignImportAndPadd(run, gt, cldSize, 0)
cld.assignCLD(run, iPadd, cldSize)
s.assignSpaghetti(run, iPadd, cld, spaghettiSize)
l.assignLane(run, iPadd, s, permTrace, spaghettiSize, laneSize)
b.assignBaseConversion(run, l, laneSize)
}
return define, prover
}
func TestBaseConversionModule(t *testing.T) {
// test keccak
define, prover := makeTestCaseBaseConversionModule()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
func (l *lane) insertCommitForTest(comp *wizard.CompiledIOP, round, maxNumRows, laneSize int) {
l.lane = comp.InsertCommit(round, ifaces.ColIDf("Lane"), laneSize)
l.coeff = comp.InsertCommit(round, ifaces.ColIDf("Coefficient"), maxNumRows)
l.isLaneActive = comp.InsertCommit(round, ifaces.ColIDf("LaneIsActive"), laneSize)
l.isFirstLaneOfNewHash = comp.InsertCommit(round, ifaces.ColIDf("IsFirstLane_Of_NewHash"), laneSize)
l.isLaneCompleteShifted = comp.InsertCommit(round, ifaces.ColIDf("IsLaneCompleteShifted"), maxNumRows)
}

View File

@@ -1,348 +0,0 @@
package datatransfer
import (
"math/big"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/dedicated"
)
// CleanLimbDecomposition (CLD) module is responsible for cleaning the limbs
// and decomposing them into slices.
type cleanLimbDecomposition struct {
/*
To reach a length of 16 bytes, limbs are padded with zeroes (as LSBs).
The column "nbZeros" stands for these redundant zeroes of the limbs.
To clean the limb we have cleanLimb := limb * 2^(-8 * nbZeros).
The column "powersNbZeros" equals 2^(8 * nbZeros) element-wise; the
equivalence of "powersNbZeros" and 2^(8 * nbZeros) is checked by lookup tables.
Putting it together, we have limb := cleanLimb * powersNbZeros.
*/
nbZeros ifaces.Column
// powersNbZeros represents the powers of nbZeros; powersNbZeros = 2^(8 * nbZeros)
powersNbZeros ifaces.Column
// decomposition of the clean limbs into slices (of different sizes)
cld []ifaces.Column
// lengths of the slices from the decomposition
cldLen []ifaces.Column
// It is the binary counterpart of cldLen.
// Namely, it is zero iff cldLen is zero, otherwise it is one.
cldLenBinary []ifaces.Column
// cldLenPowers = 2^(8*cldLen)
cldLenPowers []ifaces.Column
// decomposition of cld into cldSlices (each slice is a single byte)
cldSlices [][]ifaces.Column
// lengths of cldSlices
lenCldSlices [][]ifaces.Column
nbCld, nbCldSlices int
}
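// Illustrative sketch (added for exposition; not part of the original code):
// the cleaning relation limb = cleanLimb * 2^(8*nbZeros) on plain integers.
// E.g. a 3-byte limb 0xAABBCC stored left-aligned in 16 bytes has nbZeros = 13,
// and stripping those redundant zero bytes recovers the clean limb.
func cleanLimbSketch(limb *big.Int, nbZeros uint) *big.Int {
// divide by 2^(8*nbZeros), i.e. shift away the padded zero bytes
return new(big.Int).Rsh(limb, 8*nbZeros)
}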
/*
NewCLD creates a new CLD module, defining the columns and constraints asserting the following facts:
1. the cld columns are the decomposition of the clean limbs
2. cldLenBinary is 1 iff the corresponding cld slice is non-empty
*/
func (cld *cleanLimbDecomposition) newCLD(
comp *wizard.CompiledIOP,
round int,
lu lookUpTables,
iPadd importAndPadd,
maxRows, hashType int,
) {
switch hashType {
case Keccak:
{
cld.nbCld = maxLanesFromLimb
cld.nbCldSlices = numBytesInLane
}
case Sha2:
{
cld.nbCld = maxLanesFromLimbSha2
cld.nbCldSlices = numBytesInLaneSha2
}
}
// Declare the columns
cld.insertCommit(comp, round, maxRows)
// Declare the constraints
// Constraints over the equivalence of "powersNbZeros" with "2^(8 * NbZeros)"
cld.csNbZeros(comp, round, lu, iPadd)
// Constraints over the form of cldLenBinary and cldLen;
// - each row of cldLenBinary should be of the form (0,...,0,1,...,1),
// - cldLen over each row adds up to nByte
cld.csDecomposLen(comp, round, iPadd)
cld.csDecomposeCLDToSlices(comp, round, lu, iPadd)
}
func (cld *cleanLimbDecomposition) insertCommit(
comp *wizard.CompiledIOP, round, maxRows int) {
cld.cldSlices = make([][]ifaces.Column, cld.nbCld)
cld.lenCldSlices = make([][]ifaces.Column, cld.nbCld)
for x := 0; x < cld.nbCld; x++ {
cld.cld = append(cld.cld, comp.InsertCommit(round, ifaces.ColIDf("CLD_%v", x), maxRows))
cld.cldLen = append(cld.cldLen, comp.InsertCommit(round, ifaces.ColIDf("CLD_Len_%v", x), maxRows))
cld.cldLenBinary = append(cld.cldLenBinary, comp.InsertCommit(round, ifaces.ColIDf("CLD_Len_Binary_%v", x), maxRows))
cld.cldLenPowers = append(cld.cldLenPowers, comp.InsertCommit(round, ifaces.ColIDf("CLD_Len_Powers_%v", x), maxRows))
for k := 0; k < cld.nbCldSlices; k++ {
cld.cldSlices[x] = append(cld.cldSlices[x], comp.InsertCommit(round, ifaces.ColIDf("CLD_Slice_%v_%v", x, k), maxRows))
cld.lenCldSlices[x] = append(cld.lenCldSlices[x], comp.InsertCommit(round, ifaces.ColIDf("Len_CLD_Slice_%v_%v", x, k), maxRows))
}
}
cld.nbZeros = comp.InsertCommit(round, ifaces.ColIDf("NbZeros"), maxRows)
cld.powersNbZeros = comp.InsertCommit(round, ifaces.ColIDf("PowersNbZeros"), maxRows)
}
// csNbZeros imposes the constraints between nbZeros and powersNbZeros;
// - powersNbZeros = 2^(8 * nbZeros)
//
// - nbZeros = 16 - nByte
func (cld *cleanLimbDecomposition) csNbZeros(
comp *wizard.CompiledIOP,
round int,
lookUp lookUpTables,
iPadd importAndPadd,
) {
// Equivalence of "PowersNbZeros" with "2^(NbZeros * 8)"
comp.InsertInclusion(round,
ifaces.QueryIDf("NumToPowers"), []ifaces.Column{lookUp.colNumber, lookUp.colPowers},
[]ifaces.Column{cld.nbZeros, cld.powersNbZeros})
// The constraint for nbZeros = (16 - NByte)* isActive
maxNByte := symbolic.NewConstant(field.NewElement(uint64(maxNByte)))
nbZeros := maxNByte.Sub(ifaces.ColumnAsVariable(iPadd.nByte))
comp.InsertGlobal(round,
ifaces.QueryIDf("NbZeros"), symbolic.Mul(symbolic.Sub(nbZeros, cld.nbZeros), iPadd.isActive))
// Equivalence of "cldLenPowers" with "2^(cldLen * 8)"
for j := range cld.cld {
comp.InsertInclusion(round,
ifaces.QueryIDf("CldLenPowers_%v", j), []ifaces.Column{lookUp.colNumber, lookUp.colPowers},
[]ifaces.Column{cld.cldLen[j], cld.cldLenPowers[j]})
}
}
// csDecomposLen declares the constraints over the form of cldLenBinary and cldLen (similarly for lenCldSlices);
// - cldLenBinary is binary
// - each row should be of the form (0,...,0,1,...,1),
// - cldLen over a row adds up to nByte
// - cldLenBinary = 1 iff cldLen != 0
func (cld *cleanLimbDecomposition) csDecomposLen(
comp *wizard.CompiledIOP,
round int,
iPadd importAndPadd,
) {
// Each row of cldLen adds up to nByte; \sum_i cldLen[i] = nByte
s := symbolic.NewConstant(0)
for j := range cld.cld {
s = symbolic.Add(s, cld.cldLen[j])
// cldLenBinary is binary
comp.InsertGlobal(round, ifaces.QueryIDf("cldLenBinary_IsBinary_%v", j),
symbolic.Mul(cld.cldLenBinary[j], symbolic.Sub(1, cld.cldLenBinary[j])))
// cldLenBinary = 1 iff cldLen !=0
dedicated.InsertIsTargetValue(comp, round, ifaces.QueryIDf("IsOne_IFF_IsNonZero_%v", j),
field.Zero(), cld.cldLen[j], symbolic.Sub(1, cld.cldLenBinary[j]))
if j < len(cld.cld)-1 {
// a should be binary
a := symbolic.Sub(cld.cldLenBinary[j+1], cld.cldLenBinary[j])
comp.InsertGlobal(round, ifaces.QueryIDf("FirstZeros_ThenOnes_%v", j),
symbolic.Mul(a, symbolic.Sub(1, a)))
}
}
// \sum_i cldLen[i]=NByte
comp.InsertGlobal(round, ifaces.QueryIDf("cldLen_IsNByte"), symbolic.Sub(s, iPadd.nByte))
// constraints over lenCldSlices,
// - lenCldSlices is binary
// - each row should be of the form (0,...,0,1,...,1),
// - lenCldSlices over a row adds up to cldLen
for j := range cld.cld {
sum := symbolic.NewConstant(0)
for k := 0; k < cld.nbCldSlices; k++ {
sum = symbolic.Add(sum, cld.lenCldSlices[j][k])
// lenCldSlices is binary
comp.InsertGlobal(round, ifaces.QueryIDf("lenCldSlices_IsBinary_%v_%v", j, k),
symbolic.Mul(cld.lenCldSlices[j][k], symbolic.Sub(1, cld.lenCldSlices[j][k])))
if k < cld.nbCldSlices-1 {
// a should be binary
a := symbolic.Sub(cld.lenCldSlices[j][k+1], cld.lenCldSlices[j][k])
comp.InsertGlobal(round, ifaces.QueryIDf("FirstZeros_ThenOnes_lenCldSlices_%v_%v", j, k),
symbolic.Mul(a, symbolic.Sub(1, a)))
}
}
}
}
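// Worked example (exposition only): with nByte = 5 and three cld columns, a
// valid row has cldLenBinary = (0, 1, 1) — zeroes first, then ones — and a
// matching cldLen row such as (0, 2, 3), whose entries add up to nByte = 5.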
func (cld *cleanLimbDecomposition) csDecomposeCLDToSlices(
comp *wizard.CompiledIOP,
round int, lu lookUpTables,
iPadd importAndPadd,
) {
for j := range cld.cld {
// constraint asserting the correct decomposition of cld into slices
cldRec := baseRecomposeByLengthHandles(cld.cldSlices[j][:], power8, cld.lenCldSlices[j][:])
comp.InsertGlobal(round, ifaces.QueryIDf("Decompos_CLD_%v", j), symbolic.Sub(cldRec, cld.cld[j]))
}
// decompositions are single bytes
for j := range cld.cldSlices {
for k := range cld.cldSlices[0] {
comp.InsertInclusion(round, ifaces.QueryIDf("SingleByte-Decomposition_CLD_%v_%v", j, k),
[]ifaces.Column{lu.colSingleByte}, []ifaces.Column{cld.cldSlices[j][k]})
}
}
// recomposition of slices to limbs (equivalently, recomposition of cld to limbs)
var slices, lenSlices []ifaces.Column
for j := len(cld.cldSlices) - 1; j >= 0; j-- {
slices = append(slices, cld.cldSlices[j][:]...)
lenSlices = append(lenSlices, cld.lenCldSlices[j][:]...)
}
cleanLimb := baseRecomposeBinaryLen(slices, power8, lenSlices)
res := symbolic.Mul(cld.powersNbZeros, cleanLimb)
// the padded ones are already clean so handle them separately
limb := symbolic.Add(symbolic.Mul(res, iPadd.isInserted), symbolic.Mul(cleanLimb, iPadd.isPadded))
comp.InsertGlobal(round, ifaces.QueryIDf("LimbDecomposition"),
symbolic.Sub(limb, iPadd.limb))
}
// assignCLDSlices assigns the slice columns of cld
func (cld *cleanLimbDecomposition) assignCLDSlices(run *wizard.ProverRuntime, maxNumRows int) {
witSize := smartvectors.Density(cld.cld[0].GetColAssignment(run))
cldWit := make([][]field.Element, cld.nbCld)
cldLenWit := make([][]field.Element, cld.nbCld)
for j := range cld.cld {
cldWit[j] = make([]field.Element, witSize)
cldWit[j] = cld.cld[j].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
cldLenWit[j] = cld.cldLen[j].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
}
// populate lenCldSlices
for j := range cld.cld {
lenCldSlices := make([][]field.Element, cld.nbCldSlices)
cldSlices := make([][]field.Element, cld.nbCldSlices)
for i := range cldWit[0] {
dec := getZeroOnes(cldLenWit[j][i], cld.nbCldSlices)
a := decomposeByLengthFr(cldWit[j][i], int(cldLenWit[j][i].Uint64()), dec)
for k := range cldSlices {
lenCldSlices[k] = append(lenCldSlices[k], dec[k])
cldSlices[k] = append(cldSlices[k], a[k])
}
}
for k := range cld.cldSlices[0] {
run.AssignColumn(cld.lenCldSlices[j][k].GetColID(),
smartvectors.RightZeroPadded(lenCldSlices[k], maxNumRows))
run.AssignColumn(cld.cldSlices[j][k].GetColID(),
smartvectors.RightZeroPadded(cldSlices[k], maxNumRows))
}
}
}
// assignCLD assigns the columns specific to the CLD module (i.e., cld and cldLen, ...).
func (cld *cleanLimbDecomposition) assignCLD(run *wizard.ProverRuntime, iPadd importAndPadd,
maxRows int) {
witnessSize := smartvectors.Density(iPadd.limb.GetColAssignment(run))
// Assign nbZeros and powersNbZeros
var nbZeros []field.Element
var powersNbZeros []field.Element
fr16 := field.NewElement(16)
var res field.Element
var a big.Int
nByte := run.GetColumn(iPadd.nByte.GetColID())
for i := 0; i < witnessSize; i++ {
b := nByte.Get(i)
res.Sub(&fr16, &b)
nbZeros = append(nbZeros, res)
res.BigInt(&a)
res.Exp(field.NewElement(power8), &a)
powersNbZeros = append(powersNbZeros, res)
}
run.AssignColumn(cld.nbZeros.GetColID(), smartvectors.RightZeroPadded(nbZeros, maxRows))
run.AssignColumn(cld.powersNbZeros.GetColID(), smartvectors.RightPadded(powersNbZeros, field.One(), maxRows))
// Assign the columns cld and cldLen
cldLen := make([][]field.Element, cld.nbCld)
cldCol := make([][]field.Element, cld.nbCld)
cldLenBinary := make([][]field.Element, cld.nbCld)
for j := range cldCol {
cldCol[j] = make([]field.Element, witnessSize)
cldLen[j] = make([]field.Element, witnessSize)
cldLenBinary[j] = make([]field.Element, witnessSize)
}
// assign row-by-row
cleanLimb := run.GetColumn(iPadd.cleanLimb.GetColID()).IntoRegVecSaveAlloc()[:witnessSize]
nByteWit := nByte.IntoRegVecSaveAlloc()
cldLen = cutAndStitch(nByteWit[:witnessSize], cld.nbCld, cld.nbCldSlices)
for i := 0; i < witnessSize; i++ {
// i-th row of cldLenWit
var cldLenRow []int
for j := 0; j < cld.nbCld; j++ {
cldLenRow = append(cldLenRow, int(cldLen[j][i].Uint64()))
}
// populate cldLenBinary
for j := 0; j < cld.nbCld; j++ {
if cldLen[j][i].Uint64() != 0 {
cldLenBinary[j][i] = field.One()
}
}
// populate cldCol
cldWit := decomposeByLength(cleanLimb[i], int(nByteWit[i].Uint64()), cldLenRow)
for j := 0; j < cld.nbCld; j++ {
cldCol[j][i] = cldWit[j]
}
}
for j := 0; j < cld.nbCld; j++ {
run.AssignColumn(cld.cld[j].GetColID(), smartvectors.RightZeroPadded(cldCol[j], maxRows))
run.AssignColumn(cld.cldLen[j].GetColID(), smartvectors.RightZeroPadded(cldLen[j], maxRows))
run.AssignColumn(cld.cldLenBinary[j].GetColID(), smartvectors.RightZeroPadded(cldLenBinary[j], maxRows))
}
// assign cldLenPowers
for j := range cldLen {
cldLenPowers := make([]field.Element, witnessSize)
for i := range cldLen[0] {
cldLen[j][i].BigInt(&a)
cldLenPowers[i].Exp(field.NewElement(power8), &a)
}
run.AssignColumn(cld.cldLenPowers[j].GetColID(), smartvectors.RightPadded(cldLenPowers, field.One(), maxRows))
}
cld.assignCLDSlices(run, maxRows)
}

View File

@@ -1,56 +0,0 @@
package datatransfer
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates Define and Assign functions of the CLD module, for testing
func makeTestCaseCLD(hashType int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
size := 1024
gbmSize := 512
gbm := generic.GenericByteModule{}
iPadd := importAndPadd{}
cld := cleanLimbDecomposition{}
def := generic.PHONEY_RLP
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
lu := newLookupTables(comp)
gbm = CommitGBM(comp, round, def, gbmSize)
iPadd.insertCommit(comp, round, size)
cld.newCLD(comp, round, lu, iPadd, size, hashType)
}
prover = func(run *wizard.ProverRuntime) {
traces := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &traces)
iPadd.assignImportAndPadd(run, gt, size, hashType)
cld.assignCLD(run, iPadd, size)
}
return define, prover
}
func TestCLDModule(t *testing.T) {
// test cld for keccak
define, prover := makeTestCaseCLD(0)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
// test cld for sha2
define, prover = makeTestCaseCLD(1)
comp = wizard.Compile(define, dummy.Compile)
proof = wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,166 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/dedicated"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
const (
Keccak int = iota
Sha2
)
// The struct importAndPadd holds the columns initiated by the subModule.
// It consists of the counterpart columns for the arithmetization columns, extended via padding.
// The data is extracted from the arithmetization columns and then padded if required.
// The result is embedded (preserving the order) in the columns of the module.
type importAndPadd struct {
// counterparts for the arithmetization columns, extended via padding
hashNum, limb, nByte, index, cleanLimb ifaces.Column
// Indicates where the imported rows are inserted in the module
isInserted ifaces.Column
// Indicates where the padding values are added
isPadded ifaces.Column
// Indicates the active rows of the module
isActive ifaces.Column
// It is 1 when a new hash is launched, otherwise it is zero
isNewHash ifaces.Column
// a column of all ones
oneCol ifaces.Column
// accumulator for padded nBytes
accPaddedBytes ifaces.Column
}
/*
NewImportAndPadd builds an instance of importAndPadd.
It commits to the columns specific to the submodule and defines the constraints asserting the following facts:
- the correct extraction of the data from the arithmetization columns,
- the correct padding of the limbs,
- the correct insertion of the data into the columns of the module,
- the correct form of the columns, for instance the binary constraints.

For Keccak it uses hashType = 0, for Sha2 hashType = 1.
*/
func (iPadd *importAndPadd) newImportAndPadd(comp *wizard.CompiledIOP,
round, maxRows int,
gbm generic.GenericByteModule, // arithmetization columns
lu lookUpTables,
hashType int,
) {
// Declare the columns
iPadd.insertCommit(comp, round, maxRows)
// Declare the constraints
// padding over the arithmetization columns (gbm columns) is done correctly
switch hashType {
case Keccak:
iPadd.insertPaddingKeccak(comp, round, lu.colKeccakMaxPadding)
case Sha2:
iPadd.insertPaddingSha2(comp, round, lu.colSha2MaxPadding)
default:
utils.Panic("The hash type is not supported")
}
// projection query between the gbm columns and the module columns;
// asserting that the rows of the arithmetization columns are correctly projected onto the module columns.
data := gbm.Data
projection.InsertProjection(comp, ifaces.QueryIDf("HashNum_OrderPreserving"),
[]ifaces.Column{data.HashNum, data.Limb, data.NBytes},
[]ifaces.Column{iPadd.hashNum, iPadd.limb, iPadd.nByte}, data.TO_HASH, iPadd.isInserted)
// constraints on the flag columns isInserted, isPadded, isNewHash, isActive
/*
1. they are all binary
2. isInserted and isPadded form a partition of isActive
(see csBinaryColumns for the full list of constraints)
*/
iPadd.csBinaryColumns(comp, round)
}
// insertCommit commits to the columns initiated by the importAndPadd submodule.
func (iPadd *importAndPadd) insertCommit(comp *wizard.CompiledIOP, round, maxRows int) {
iPadd.hashNum = comp.InsertCommit(round, deriveName("HashNum"), maxRows)
iPadd.limb = comp.InsertCommit(round, deriveName("Limb"), maxRows)
iPadd.cleanLimb = comp.InsertCommit(round, deriveName("CleanLimb"), maxRows)
iPadd.nByte = comp.InsertCommit(round, deriveName("NByte"), maxRows)
iPadd.index = comp.InsertCommit(round, deriveName("Index"), maxRows)
iPadd.isInserted = comp.InsertCommit(round, deriveName("IsInserted"), maxRows)
iPadd.isPadded = comp.InsertCommit(round, deriveName("IsPadded"), maxRows)
iPadd.isNewHash = comp.InsertCommit(round, deriveName("IsNewHash"), maxRows)
iPadd.isActive = comp.InsertCommit(round, deriveName("IsActive"), maxRows)
iPadd.oneCol = comp.InsertCommit(round, deriveName("OneCol"), maxRows)
iPadd.accPaddedBytes = comp.InsertCommit(round, ifaces.ColID("AccPaddedBytes"), maxRows)
}
// csBinaryColumns imposes the constraints on the flag columns
// isInserted, isPadded, isNewHash, isActive.
/*
1. they are all binary
2. isInserted and isPadded form a partition of isActive
3. isPadded appears only right before isNewHash
4. isNewHash has the right form
5. isActive has the right form (starting with ones, followed by zeroes if required)
*/
func (iPadd importAndPadd) csBinaryColumns(comp *wizard.CompiledIOP, round int) {
one := symbolic.NewConstant(1)
isInserted := ifaces.ColumnAsVariable(iPadd.isInserted)
isPadded := ifaces.ColumnAsVariable(iPadd.isPadded)
isActive := ifaces.ColumnAsVariable(iPadd.isActive)
isNewHash := ifaces.ColumnAsVariable(iPadd.isNewHash)
// binary constraints
comp.InsertGlobal(round, ifaces.QueryIDf("IsInserted_IsBinary"), isInserted.Mul(one.Sub(isInserted)))
comp.InsertGlobal(round, ifaces.QueryIDf("IsPadded_IsBinary"), isPadded.Mul(one.Sub(isPadded)))
comp.InsertGlobal(round, ifaces.QueryIDf("IsActive_IsBinary"), isActive.Mul(one.Sub(isActive)))
comp.InsertGlobal(round, ifaces.QueryIDf("IsNewHash_IsBinary"), isNewHash.Mul(one.Sub(isNewHash)))
// isActive is of the right form: starting with ones, with all the zeroes at the end
shiftIsActive := ifaces.ColumnAsVariable(column.Shift(iPadd.isActive, -1))
comp.InsertGlobal(round, ifaces.QueryIDf("IsActive"), (shiftIsActive.Sub(isActive)).Mul(one.Sub(shiftIsActive.Sub(isActive))))
// isInserted = (1 - isPadded) * isActive
// isActive = 0 ---> isPadded = 0 , isInserted = 0
comp.InsertGlobal(round, ifaces.QueryIDf("isInserted_isPadded"),
isInserted.Sub((one.Sub(isPadded)).Mul(isActive)))
comp.InsertGlobal(round, ifaces.QueryIDf("IsPadded_IsActive"), symbolic.Mul(symbolic.Sub(1, isActive), isPadded))
comp.InsertGlobal(round, ifaces.QueryIDf("IsInserted_IsActive"), symbolic.Mul(symbolic.Sub(1, isActive), isInserted))
// a sequence of isPadded = 1 appears iff a new hash is launched.
// if isPadded[i] = 1 ---> isPadded[i+1] = 1 or isNewHash[i+1] = 1
// if isNewHash[i] = 1 ---> isPadded[i-1] = 1 and isPadded[i] = 0
isPaddedNext := ifaces.ColumnAsVariable(column.Shift(iPadd.isPadded, 1))
isPaddedPrev := ifaces.ColumnAsVariable(column.Shift(iPadd.isPadded, -1))
isNewHashNext := ifaces.ColumnAsVariable(column.Shift(iPadd.isNewHash, 1))
// to impose a bound for the global constraints
isActiveShift := ifaces.ColumnAsVariable(column.Shift(iPadd.isActive, 1))
expr1 := (isPadded.Mul(one.Sub(isPaddedNext)).Mul(one.Sub(isNewHashNext))).Mul(isActiveShift)
expr2 := (isNewHash.Mul((one.Sub(isPaddedPrev)).Add(isPadded)))
comp.InsertGlobal(round, ifaces.QueryIDf("isPadded_isNewHash1"), expr1)
comp.InsertGlobal(round, ifaces.QueryIDf("isPadded_isNewHash2"), expr2)
// constraints over isNewHash;
// if index[i] = 0 ---> isNewHash[i] = 1
// otherwise ---> isNewHash[i] = 0
isZero := symbolic.Add(iPadd.isNewHash, symbolic.Sub(1, iPadd.isActive))
dedicated.InsertIsTargetValue(comp, round, ifaces.QueryIDf("IsNewHash_IndexIsZero"), field.Zero(), iPadd.index, isZero)
// if isActive = 0 ---> isNewHash = 0
comp.InsertGlobal(round, ifaces.QueryIDf("IsNewHash_IsNotActive"),
symbolic.Mul(symbolic.Sub(1, iPadd.isActive), iPadd.isNewHash))
}
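// Illustrative sketch (added for exposition; not part of the original code):
// the "ones then zeroes" shape enforced on isActive above, checked directly on
// a witness slice. diff = col[i-1] - col[i] must be binary, which rules out any
// 0 -> 1 transition.
func isActivationShapedSketch(col []field.Element) bool {
one := field.One()
var diff field.Element
for i := 1; i < len(col); i++ {
diff.Sub(&col[i-1], &col[i])
if !diff.IsZero() && diff != one {
return false
}
}
return true
}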

View File

@@ -1,54 +0,0 @@
package datatransfer
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates Define and Assign function of ImportAndPadd module, for testing
func makeTestCaseImport(hashType int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
size := 512
gbmSize := 128
gbm := generic.GenericByteModule{}
iPadd := importAndPadd{}
def := generic.PHONEY_RLP
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
lu := newLookupTables(comp)
gbm = CommitGBM(comp, round, def, gbmSize)
iPadd.newImportAndPadd(comp, round, size, gbm, lu, hashType)
}
prover = func(run *wizard.ProverRuntime) {
traces := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &traces)
iPadd.assignImportAndPadd(run, gt, size, hashType)
}
return define, prover
}
func TestImportAndPaddModule(t *testing.T) {
// test keccak
define, prover := makeTestCaseImport(Keccak)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof for keccak")
// test sha2
define, prover = makeTestCaseImport(Sha2)
comp = wizard.Compile(define, dummy.Compile)
proof = wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof for sha2")
}

View File

@@ -1,94 +0,0 @@
/*
Package datatransfer implements the utilities and the submodules for transferring data from
the relevant arithmetization modules to the keccak module.
It includes;
1. Data Importing from the arithmetization columns
2. Data Serialization (to make well-formed blocks for use in the keccakf module)
3. Data Exporting to the keccakf module
*/
package datatransfer
import (
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
const (
maxLanesFromLimb = 3 // maximum number of lanes that fall over the same limb
maxNByte = 16 // maximum size of the limb in bytes
numBytesInLane = 8 // number of bytes in the lane
numLanesInBlock = 17 // number of lanes in the block
maxBlockSize = numBytesInLane * numLanesInBlock // size of the block
power8 = 1 << 8
power16 = 1 << 16
powerMaxNByte = 1 << maxNByte
// for sha2
maxLanesFromLimbSha2 = 5
numBytesInLaneSha2 = 4
numLanesInBlockSha2 = 16
maxBlockSizeSha2 = numBytesInLaneSha2 * numLanesInBlockSha2
)
// Module consists of all the columns and submodules used for the data transfer.
type Module struct {
// MaxNumKeccakF bounds the number of keccakf permutations;
// the size of the data transfer module is NextPowerOfTwo(maxBlockSize * MaxNumKeccakF).
MaxNumKeccakF int
// Provider Trace; the arithmetization columns relevant to keccak
Provider generic.GenericByteModule
// SubModules specific to DataTransfer Module
iPadd importAndPadd
cld cleanLimbDecomposition
sCLD spaghettizedCLD
lane lane
BaseConversion baseConversion
// Lookups specific to dataTransfer Module
LookUps lookUpTables
HashOutput HashOutput
}
// NewDataTransfer imposes the constraints per subModule.
func (mod *Module) NewDataTransfer(comp *wizard.CompiledIOP, round, maxNumKeccakF, hashType int) {
mod.MaxNumKeccakF = maxNumKeccakF
maxNumRows := utils.NextPowerOfTwo(maxBlockSize * maxNumKeccakF)
maxNumRowsForLane := utils.NextPowerOfTwo(numLanesInBlock * maxNumKeccakF)
// Declare lookup columns
mod.LookUps = newLookupTables(comp)
// Define the subModules
mod.iPadd.newImportAndPadd(comp, round, maxNumRows, mod.Provider, mod.LookUps, 0)
mod.cld.newCLD(comp, round, mod.LookUps, mod.iPadd, maxNumRows, hashType)
mod.sCLD.newSpaghetti(comp, round, mod.iPadd, mod.cld, maxNumRows)
mod.lane.newLane(comp, round, maxNumRows, maxNumRowsForLane, mod.sCLD)
mod.BaseConversion.newBaseConversionOfLanes(comp, round, maxNumRowsForLane, mod.lane, mod.LookUps)
// hashOutput
mod.HashOutput.newHashOutput(comp, round, maxNumKeccakF)
}
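// Worked example (exposition only): with maxNumKeccakF = 128, maxNumRows =
// NextPowerOfTwo(136*128) = 32768 and maxNumRowsForLane =
// NextPowerOfTwo(17*128) = 4096.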
// It assigns the columns per subModule.
func (mod *Module) AssignModule(
run *wizard.ProverRuntime,
permTrace permTrace.PermTraces,
gt generic.GenTrace) {
maxNumRows := utils.NextPowerOfTwo(maxBlockSize * mod.MaxNumKeccakF)
maxNumRowsForLane := utils.NextPowerOfTwo(numLanesInBlock * mod.MaxNumKeccakF)
mod.iPadd.assignImportAndPadd(run, gt, maxNumRows, 0)
mod.cld.assignCLD(run, mod.iPadd, maxNumRows)
mod.sCLD.assignSpaghetti(run, mod.iPadd, mod.cld, maxNumRows)
mod.lane.assignLane(run, mod.iPadd,
mod.sCLD, permTrace, maxNumRows, maxNumRowsForLane)
mod.BaseConversion.assignBaseConversion(run, mod.lane, maxNumRowsForLane)
mod.HashOutput.AssignHashOutPut(run, permTrace)
}

View File

@@ -1,97 +0,0 @@
package datatransfer
import (
"fmt"
"sync"
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/innerproduct"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/lookup"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/permutation"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/specialqueries"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
func TestDataTransferModule(t *testing.T) {
mod := &Module{}
round := 0
def := generic.PHONEY_RLP
gbmSize := 512
maxNumKeccakF := 128
define := func(build *wizard.Builder) {
comp := build.CompiledIOP
mod.Provider = CommitGBM(comp, round, def, gbmSize)
mod.NewDataTransfer(comp, round, maxNumKeccakF, 0)
}
prover := func(run *wizard.ProverRuntime) {
permTrace := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &mod.Provider)
mod.Provider.AppendTraces(run, &gt, &permTrace)
mod.AssignModule(run, permTrace, gt)
}
compiled := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(compiled, prover)
assert.NoErrorf(t, wizard.Verify(compiled, proof), "invalid proof")
}
func BenchmarkDataTransferModule(b *testing.B) {
maxNumKeccakF := []int{
1 << 13,
// 1 << 16,
// 1 << 18,
// 1 << 20,
}
once := &sync.Once{}
for _, numKeccakF := range maxNumKeccakF {
b.Run(fmt.Sprintf("%v-numKeccakF", numKeccakF), func(b *testing.B) {
define := func(build *wizard.Builder) {
comp := build.CompiledIOP
mod := &Module{}
mod.Provider = CommitGBM(comp, 0, generic.PHONEY_RLP, 2)
mod.NewDataTransfer(comp, 0, numKeccakF, 0)
}
var (
compiled = wizard.Compile(
define,
specialqueries.RangeProof,
specialqueries.CompileFixedPermutations,
permutation.CompileGrandProduct,
lookup.CompileLogDerivative,
innerproduct.Compile,
)
numCells = 0
numCols = 0
)
for _, colID := range compiled.Columns.AllKeys() {
numCells += compiled.Columns.GetSize(colID)
numCols += 1
}
b.ReportMetric(float64(numCells), "#cells")
b.ReportMetric(float64(numCols), "#columns")
once.Do(func() {
for _, colID := range compiled.Columns.AllKeys() {
fmt.Printf("%v, %v\n", colID, compiled.Columns.GetSize(colID))
}
})
})
}
}

View File

@@ -1,114 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/keccakf"
)
const (
// number of slices in keccakf for base conversion
numSlices = 16
// number of lanes in the output of the hash;
// the digest is 256 bits, that is, 4 lanes of 64 bits
numLanesInHashOutPut = 4
)
type HashOutput struct {
// hash slices in uint4
HashLoSlices, HashHiSlices [numLanesInHashOutPut / 2][numSlices]ifaces.Column
HashLo, HashHi ifaces.Column
MaxNumRows int
}
func (h *HashOutput) newHashOutput(comp *wizard.CompiledIOP, round, maxNumKeccakF int) {
h.MaxNumRows = utils.NextPowerOfTwo(maxNumKeccakF)
// declare the columns
h.DeclareColumns(comp, round)
// constraints over decomposition hashX to hashXSlices
h.csDecompose(comp, round)
}
func (h *HashOutput) DeclareColumns(comp *wizard.CompiledIOP, round int) {
h.HashLo = comp.InsertCommit(round, ifaces.ColIDf("Hash_Lo"), h.MaxNumRows)
h.HashHi = comp.InsertCommit(round, ifaces.ColIDf("Hash_Hi"), h.MaxNumRows)
for j := range h.HashLoSlices {
for k := range h.HashLoSlices[0] {
h.HashLoSlices[j][k] = comp.InsertCommit(round, ifaces.ColIDf("HashLo_Slices_%v_%v", j, k), h.MaxNumRows)
h.HashHiSlices[j][k] = comp.InsertCommit(round, ifaces.ColIDf("HashHi_Slices_%v_%v", j, k), h.MaxNumRows)
}
}
}
// constraints over the decomposition of HashHi and HashLo into uint4 slices.
func (h *HashOutput) csDecompose(comp *wizard.CompiledIOP, round int) {
a := append(h.HashLoSlices[0][:], h.HashLoSlices[1][:]...)
slicesLo := SlicesBeToLeHandle(a)
exprLo := keccakf.BaseRecomposeHandles(slicesLo, 16)
comp.InsertGlobal(round, ifaces.QueryIDf("Decomposition_HashLo"), symbolic.Sub(exprLo, h.HashLo))
b := append(h.HashHiSlices[0][:], h.HashHiSlices[1][:]...)
slicesHi := SlicesBeToLeHandle(b)
exprHi := keccakf.BaseRecomposeHandles(slicesHi, 16)
comp.InsertGlobal(round, ifaces.QueryIDf("Decomposition_HashHi"), symbolic.Sub(exprHi, h.HashHi))
}
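// Illustrative sketch (added for exposition; not part of the original code):
// the base-16 recomposition above mirrored on plain values, assuming the first
// slice is the least-significant 4-bit digit. 32 nibbles recompose into one
// 128-bit half of the digest.
func recomposeNibblesSketch(nibbles []uint64) field.Element {
var res field.Element
base := field.NewElement(16)
for i := len(nibbles) - 1; i >= 0; i-- {
res.Mul(&res, &base)
digit := field.NewElement(nibbles[i] & 0xF)
res.Add(&res, &digit)
}
return res
}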
// It assigns the columns specific to the module.
func (h *HashOutput) AssignHashOutPut(
run *wizard.ProverRuntime,
permTrace keccak.PermTraces,
) {
// assign the hash outputs from the permutation trace
var v, w field.Element
var hashLo, hashHi []field.Element
for _, digest := range permTrace.HashOutPut {
hi := digest[:maxNByte]
lo := digest[maxNByte:]
v.SetBytes(hi[:])
w.SetBytes(lo[:])
hashLo = append(hashLo, w)
hashHi = append(hashHi, v)
}
run.AssignColumn(h.HashHi.GetColID(), smartvectors.RightZeroPadded(hashHi, h.MaxNumRows))
run.AssignColumn(h.HashLo.GetColID(), smartvectors.RightZeroPadded(hashLo, h.MaxNumRows))
// slices from PermTrace
var hashHiSlices, hashLoSlices [numLanesInHashOutPut / 2][numSlices][]field.Element
for i := range hashLo {
base := 16 // slices of 4 bits, thus base is 2^4
numChunks := 32 // hashHi is 128 bits and is decomposed into 32 slices of 4 bits
decHashLo := keccakf.DecomposeFr(hashLo[i], base, numChunks)
sliceLo := SlicesBeToLeUint4(decHashLo)
decHashHi := keccakf.DecomposeFr(hashHi[i], base, numChunks)
sliceHi := SlicesBeToLeUint4(decHashHi)
for k := range hashLoSlices[0] {
// dec[:16] goes to hashSlices[0], and dec[16:] goes to hashSlices[1]
hashLoSlices[0][k] = append(hashLoSlices[0][k], sliceLo[k])
hashLoSlices[1][k] = append(hashLoSlices[1][k], sliceLo[k+numSlices])
hashHiSlices[0][k] = append(hashHiSlices[0][k], sliceHi[k])
hashHiSlices[1][k] = append(hashHiSlices[1][k], sliceHi[k+numSlices])
}
}
for j := range h.HashLoSlices {
for k := range h.HashLoSlices[0] {
run.AssignColumn(h.HashHiSlices[j][k].GetColID(), smartvectors.RightZeroPadded(hashHiSlices[j][k], h.MaxNumRows))
run.AssignColumn(h.HashLoSlices[j][k].GetColID(), smartvectors.RightZeroPadded(hashLoSlices[j][k], h.MaxNumRows))
}
}
}
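// Illustrative sketch (added for exposition; not part of the original code):
// the hi/lo split used above. A 32-byte keccak digest is cut at maxNByte = 16:
// the first 16 bytes form HashHi and the last 16 bytes form HashLo, each read
// as a big-endian field element.
func splitDigestSketch(digest [32]byte) (hi, lo field.Element) {
hi.SetBytes(digest[:16])
lo.SetBytes(digest[16:])
return hi, lo
}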

View File

@@ -1,47 +0,0 @@
package datatransfer
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates Define and Assign functions of the HashOutput module, for testing
func makeTestCaseHashOutput() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbmSize := 512
maxNumKeccakF := 128
gbm := generic.GenericByteModule{}
def := generic.PHONEY_RLP
h := HashOutput{}
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
gbm = CommitGBM(comp, round, def, gbmSize)
h.newHashOutput(comp, round, maxNumKeccakF)
}
prover = func(run *wizard.ProverRuntime) {
traces := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &traces)
h.AssignHashOutPut(run, traces)
}
return define, prover
}
func TestInfoTraceModule(t *testing.T) {
define, prover := makeTestCaseHashOutput()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,256 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/dedicated"
)
type lane struct {
// All the lanes of all the blocks
lane ifaces.Column
// A lane may be spread over several limbs.
// The lane is then obtained via a partitioned inner-product
// as lane = \sum_i coeff[i] * cldSpaghetti[i].
// Coeff represents the joint coefficients for stitching the slices of the lane together.
coeff ifaces.Column
// The ipTracker tracks the partitioned inner-product.
ipTracker ifaces.Column
// isLaneActive is 1 on the effective part of the lane column.
// It is of the form (1,...,1,0,...,0), namely ones followed by zeroes.
// The number of ones equals (the number of blocks * 17).
isLaneActive ifaces.Column
// It is 1 if the lane is the first lane of the new hash.
isFirstLaneOfNewHash ifaces.Column
// shifted version of (effective part of) isLaneComplete
isLaneCompleteShifted ifaces.Column
}
func (l *lane) newLane(comp *wizard.CompiledIOP,
round, maxRows, maxNumRowsForLane int,
sCLD spaghettizedCLD,
) {
// Declare the columns
l.insertCommit(comp, round, maxRows, maxNumRowsForLane)
// Declare the constraints
// constraints over isLaneActive
//
// 1. it is binary
// 2. isLaneActive starts with ones and ends with zeroes
l.csIsLaneActive(comp, round, sCLD)
// Constraints on coeff; it is an accumulator of cldLenPowersSpaghetti over isLaneComplete
// coeff[0] := 1
// coeff[i] := coeff[i-1] * cldLenPowersSpaghetti[i-1] * (1-isLaneComplete[i-1]) + isLaneComplete[i-1]
l.csCoeff(comp, round, sCLD)
// Constraints on the Recomposition of slices into the lanes
l.csRecomposeToLanes(comp, round, sCLD)
// constraints over isFirstLaneOfNewHash;
// Project isFirstLaneOfNewHash from isFirstSliceOfNewHash
projection.InsertProjection(comp, ifaces.QueryIDf("Project_IsFirstLaneOfHash"),
[]ifaces.Column{sCLD.isFirstSliceOfNewHash},
[]ifaces.Column{l.isFirstLaneOfNewHash},
l.isLaneCompleteShifted, l.isLaneActive)
}
// insertCommit commits to the columns specific to the submodule.
func (l *lane) insertCommit(comp *wizard.CompiledIOP, round, maxRows, maxNumRowsForLane int) {
l.lane = comp.InsertCommit(round, ifaces.ColIDf("Lane"), maxNumRowsForLane)
l.coeff = comp.InsertCommit(round, ifaces.ColIDf("Coefficient"), maxRows)
l.isLaneActive = comp.InsertCommit(round, ifaces.ColIDf("LaneIsActive"), maxNumRowsForLane)
l.ipTracker = comp.InsertCommit(round, ifaces.ColIDf("IPTracker_Lane"), maxRows)
l.isFirstLaneOfNewHash = comp.InsertCommit(round, ifaces.ColIDf("IsFirstLane_Of_NewHash"), maxNumRowsForLane)
l.isLaneCompleteShifted = comp.InsertCommit(round, ifaces.ColIDf("IsLaneCompleteShifted"), maxRows)
}
// It declares the constraints over isLaneActive
func (l *lane) csIsLaneActive(comp *wizard.CompiledIOP, round int, s spaghettizedCLD) {
// constraints over the right form of isLaneActive
//
// 1. It is binary
//
// 2. starts with ones and ends with zeroes.
//
// We don't check that it has the same number of ones as s.isLaneComplete,
// since we later do a projection query that guarantees this fact.
comp.InsertGlobal(round, ifaces.QueryIDf("IsLaneActive_IsBinary"),
symbolic.Mul(l.isLaneActive, symbolic.Sub(1, l.isLaneActive)))
a := symbolic.Sub(l.isLaneActive, column.Shift(l.isLaneActive, 1))
// a should be binary (for constraint 2)
comp.InsertGlobal(round, ifaces.QueryIDf("OnesThenZeroes"),
symbolic.Mul(a, symbolic.Sub(1, a)))
// constraints over isLaneCompleteShifted
// isLaneCompleteShifted = shift(isLaneComplete,-1)
// isLaneCompleteShifted[0] = 1
comp.InsertGlobal(round, ifaces.QueryIDf("Shift_IsLaneComplete_Glob"),
symbolic.Mul(symbolic.Sub(l.isLaneCompleteShifted,
column.Shift(s.isLaneComplete, -1)), s.isActive))
comp.InsertLocal(round, ifaces.QueryIDf("Shift_IsLaneComplete_Loc"),
symbolic.Mul(symbolic.Sub(l.isLaneCompleteShifted, 1), s.isActive))
}
// csCoeff declares the constraints over coeff
func (l *lane) csCoeff(
comp *wizard.CompiledIOP,
round int,
s spaghettizedCLD,
) {
// local constraint; coeff[0] = 1
comp.InsertLocal(round, ifaces.QueryIDf("Coeffcient_Loc"), symbolic.Mul(symbolic.Sub(l.coeff, 1), s.isActive))
// coeff[i] := coeff[i-1] * partialCoeff[i-1] * (1-isLaneComplete[i-1]) + isLaneComplete[i-1]
res := symbolic.Mul(column.Shift(l.coeff, -1), column.Shift(s.cldLenPowersSpaghetti, -1))
res = symbolic.Mul(res, symbolic.Sub(1, column.Shift(s.isLaneComplete, -1)))
res = symbolic.Add(res, column.Shift(s.isLaneComplete, -1))
expr := symbolic.Mul(symbolic.Sub(l.coeff, res), s.isActive)
comp.InsertGlobal(round, ifaces.QueryIDf("Coefficient_Glob"), expr)
}
// It declares the constraints over the lanes
// Lanes are the recomposition of slices in SpaghettizedCLD.
func (l *lane) csRecomposeToLanes(
comp *wizard.CompiledIOP,
round int,
s spaghettizedCLD,
) {
// compute the partitioned inner-product
// ipTracker[i] = (cldSpaghetti[i] * coeff[i]) + ipTracker[i-1] * isLaneComplete[i]
// Constraints on the Partitioned Inner-Products
dedicated.InsertPartitionedIP(comp, round, s.cldSpaghetti,
l.coeff,
column.Shift(s.isLaneComplete, -1),
l.ipTracker)
// Project the lanes from ipTracker over the lane column
projection.InsertProjection(comp, ifaces.QueryIDf("ProjectOverLanes"),
[]ifaces.Column{l.ipTracker},
[]ifaces.Column{l.lane},
s.isLaneComplete, l.isLaneActive)
}
// It assigns the columns specific to the submodule
func (l *lane) assignLane(
run *wizard.ProverRuntime,
iPadd importAndPadd,
sCLD spaghettizedCLD,
permTrace keccak.PermTraces,
maxRows, maxNumRowsForLane int) {
// assign l.isLaneActive
run.AssignColumn(l.isLaneActive.GetColID(),
smartvectors.RightZeroPadded(vector.Repeat(field.One(), len(permTrace.Blocks)*numLanesInBlock), maxNumRowsForLane))
// assign coeff
l.assignCoeff(run, sCLD, maxRows)
// assign the lanes
l.assignLaneColumn(run, maxNumRowsForLane, permTrace, iPadd)
// isLaneCompleteShifted
witSize := smartvectors.Density(sCLD.isLaneComplete.GetColAssignment(run))
isLaneComplete := sCLD.isLaneComplete.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
var shifted []field.Element
if witSize != 0 {
shifted = append(shifted, field.One())
shifted = append(shifted, isLaneComplete[:witSize-1]...)
}
run.AssignColumn(l.isLaneCompleteShifted.GetColID(), smartvectors.RightZeroPadded(shifted, sCLD.isLaneComplete.Size()))
}
// It assigns the column coeff
func (l *lane) assignCoeff(
run *wizard.ProverRuntime,
s spaghettizedCLD,
maxNumRows int) {
one := field.One()
witSize := smartvectors.Density(s.isLaneComplete.GetColAssignment(run))
isLaneComplete := s.isLaneComplete.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
// partialCoeff := cldLenPowersSpaghetti
// coeff[0] = 1
// coeff[i] := (coeff[i-1] * partialCoeff[i-1] * (1-isLaneComplete[i-1])) + isLaneComplete[i-1]
coeff := make([]field.Element, witSize)
partialCoeff := s.cldLenPowersSpaghetti.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
if witSize != 0 {
coeff[0] = field.One()
}
var res, notComplete field.Element
for i := 1; i < witSize; i++ {
notComplete.Sub(&one, &isLaneComplete[i-1])
res.Mul(&notComplete, &partialCoeff[i-1])
res.Mul(&res, &coeff[i-1])
coeff[i].Add(&res, &isLaneComplete[i-1])
}
// assign the columns
run.AssignColumn(l.coeff.GetColID(), smartvectors.RightZeroPadded(coeff, maxNumRows))
}
// It assigns the lanes
func (l *lane) assignLaneColumn(
run *wizard.ProverRuntime,
maxNumRows int,
trace keccak.PermTraces,
iPadd importAndPadd,
) {
// Instead of building the lane from the same formula defined in newLane(),
// we assign it via the trace of the permutation, which is already tested.
blocks := trace.Blocks
var lane []field.Element
for j := range blocks {
for i := range blocks[0] {
lane = append(lane, field.NewElement(blocks[j][i]))
}
}
run.AssignColumn(l.lane.GetColID(), smartvectors.RightZeroPadded(lane, maxNumRows))
// populate isFirstLaneOfNewHash
witSize := smartvectors.Density(iPadd.isNewHash.GetColAssignment(run))
isNewHash := iPadd.isNewHash.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
nByte := iPadd.nByte.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
sum := 0
var t []int
for j := range nByte {
if isNewHash[j] == field.One() {
// the length of the stream when we reach a newHash
t = append(t, sum)
}
sum = sum + int(nByte[j].Uint64())
}
ctr := 0
var col []field.Element
for j := range lane {
if ctr < len(t) && t[ctr] == 8*j {
col = append(col, field.One())
ctr++
} else {
col = append(col, field.Zero())
}
}
// assign the column
run.AssignColumn(l.isFirstLaneOfNewHash.GetColID(), smartvectors.RightZeroPadded(col, maxNumRows))
}
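// Worked example (exposition only): streams are padded so each hash consumes a
// multiple of 136 bytes. If the first hash occupies one block, the second hash
// starts at stream offset 136 = 8*17, so lane 17 gets isFirstLaneOfNewHash = 1.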

View File

@@ -1,60 +0,0 @@
package datatransfer
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates Define and Assign functions of the Lane module, for testing
func makeTestCaseLaneModule() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbm := generic.GenericByteModule{}
iPadd := importAndPadd{}
cld := cleanLimbDecomposition{nbCld: maxLanesFromLimb, nbCldSlices: numBytesInLane}
s := spaghettizedCLD{}
l := lane{}
def := generic.PHONEY_RLP
gbmSize := 512
cldSize := 2048
spaghettiSize := 8 * cldSize
laneSize := 4 * cldSize
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
gbm = CommitGBM(comp, round, def, gbmSize)
iPadd.insertCommit(comp, round, cldSize)
cld.insertCommit(comp, round, cldSize)
s.insertCommit(comp, round, cld, spaghettiSize)
l.newLane(comp, round, spaghettiSize, laneSize, s)
}
prover = func(run *wizard.ProverRuntime) {
permTrace := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &permTrace)
iPadd.assignImportAndPadd(run, gt, cldSize, 0)
cld.assignCLD(run, iPadd, cldSize)
s.assignSpaghetti(run, iPadd, cld, spaghettiSize)
l.assignLane(run, iPadd, s, permTrace, spaghettiSize, laneSize)
}
return define, prover
}
func TestLaneModule(t *testing.T) {
// test keccak
define, prover := makeTestCaseLaneModule()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,147 +0,0 @@
package datatransfer
import (
"math/big"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/keccakf"
)
type lookUpTables struct {
// colNumber := (0,...,15) and colPowers := (2^(8*0),...,2^(8*15))
colNumber ifaces.Column
colPowers ifaces.Column
// columns for base conversion
colUint16 ifaces.Column
colBaseA ifaces.Column
colBaseB ifaces.Column
// a column of single bytes
colSingleByte ifaces.Column
// columns for base conversion from baseBDirty to 4bit integers
ColUint4 ifaces.Column
ColBaseBDirty ifaces.Column
// columns for checking the maximum number of padded bytes
colSha2MaxPadding ifaces.Column
colKeccakMaxPadding ifaces.Column
}
// It commits to the lookup tables used by the dataTransfer module.
func newLookupTables(comp *wizard.CompiledIOP) lookUpTables {
res := lookUpTables{}
// table for powers of numbers (used for decomposition of clean limbs)
colNum, colPower2 := numToPower2(maxNByte)
res.colNumber = comp.InsertPrecomputed(deriveName("LookUp_Num"), colNum)
res.colPowers = comp.InsertPrecomputed(deriveName("LookUp_Powers"), colPower2)
// table for base conversion (used for converting blocks to what keccakf expects)
colUint16, colBaseA, colBaseB := baseConversionKeccakBaseX()
res.colUint16 = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_Uint16"), colUint16)
res.colBaseA = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_BaseA"), colBaseA)
res.colBaseB = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_BaseB"), colBaseB)
// table for decomposition to single bytes
res.colSingleByte = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_SingleByte"), singleByte())
// table for base conversion (from BaseBDirty to uint4)
colUint4, colBaseBDirty := baseConversionKeccakBaseBDirtyToUint4()
res.ColUint4 = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_Uint4"), colUint4)
res.ColBaseBDirty = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_BaseBDirty"), colBaseBDirty)
// table to check that the number of padded bytes doesn't exceed its maximum.
res.colSha2MaxPadding = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_Max_Pad_Sha2"),
colBlockSize(maxBlockSizeSha2, 8))
// table to check that the number of padded bytes doesn't exceed its maximum.
res.colKeccakMaxPadding = comp.InsertPrecomputed(ifaces.ColIDf("LOOKUP_Max_Pad_Keccak"),
colBlockSize(maxBlockSize, 1))
return res
}
func numToPower2(n int) (colNum, colPower2 smartvectors.SmartVector) {
var num, power2 []field.Element
var res field.Element
for i := 0; i < n; i++ {
num = append(num, field.NewElement(uint64(i)))
res.Exp(field.NewElement(power8), big.NewInt(int64(i)))
power2 = append(power2, res)
}
size := utils.NextPowerOfTwo(n + 1)
return smartvectors.RightZeroPadded(num, size),
smartvectors.RightPadded(power2, field.One(), size)
}
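// Worked example (exposition only): row i of the table pairs i with 2^(8*i),
// so looking up (nbZeros, powersNbZeros) against it enforces
// powersNbZeros = 256^nbZeros, e.g. nbZeros = 2 maps to 65536.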
// converts 16-bit slices to keccak BaseA/BaseB
func baseConversionKeccakBaseX() (uint16Col, baseACol, baseBCol smartvectors.SmartVector) {
var u, v, w []field.Element
for i := 0; i < power16; i++ {
u = append(u, field.NewElement(uint64(i)))
v = append(v, uInt16ToBaseX(uint16(i), &keccakf.BaseAFr))
w = append(w, uInt16ToBaseX(uint16(i), &keccakf.BaseBFr))
}
return smartvectors.NewRegular(u), smartvectors.NewRegular(v), smartvectors.NewRegular(w)
}
// it creates a column of all the single-byte values
func singleByte() smartvectors.SmartVector {
var v []field.Element
for i := 0; i < power8; i++ {
v = append(v, field.NewElement(uint64(i)))
}
return smartvectors.NewRegular(v)
}
func baseConversionKeccakBaseBDirtyToUint4() (
uint4Col, baseBDirtyCol smartvectors.SmartVector) {
var u, v []field.Element
for j := 0; j < keccakf.BaseBPow4; j++ {
x := field.NewElement(uint64(j))
uint4 := BaseBToUint4(x, keccakf.BaseB)
u = append(u, x)
v = append(v, field.NewElement(uint4))
}
n := utils.NextPowerOfTwo(keccakf.BaseBPow4)
for i := keccakf.BaseBPow4; i < n; i++ {
u = append(u, u[len(u)-1])
v = append(v, v[len(v)-1])
}
return smartvectors.NewRegular(v), smartvectors.NewRegular(u)
}
func BaseBToUint4(x field.Element, base int) (res uint64) {
res = 0
decomposedF := keccakf.DecomposeFr(x, base, 4)
bitPos := 1
for i, limb := range decomposedF {
bit := (limb.Uint64() >> bitPos) & 1
res |= bit << i
}
return res
}
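// Worked example (exposition only): each base-B digit carries its clean bit at
// position 1 (bitPos = 1), so digits 2 and 3 contribute a one-bit. Digits
// (2, 0, 3, 1), least significant first, therefore give res = 0b0101 = 5.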
// It returns a column of the form (1,2,3,...,blockSize+lenLastPad);
// this is the accumulator range for the number of padded bytes.
// For sha2 lenLastPad = 8, for keccak lenLastPad = 1.
// It is used to check that the number of padded bytes falls in this table.
func colBlockSize(blockSize, lenLastPad int) smartvectors.SmartVector {
n := blockSize
var u []field.Element
m := utils.NextPowerOfTwo(n + lenLastPad)
for i := 0; i < n+lenLastPad; i++ {
u = append(u, field.NewElement(uint64(i+1)))
}
for i := n + lenLastPad; i < m; i++ {
u = append(u, field.NewElement(uint64(n)))
}
return smartvectors.NewRegular(u)
}

View File

@@ -1,163 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
// it checks that the number of padded zeroes for a hash is not larger than the
// block size. This prevents the attack where the prover appends zero blocks.
func (iPadd importAndPadd) csZeroPadding(comp *wizard.CompiledIOP, round int) {
// accPaddedBytes[0] = nByte[0] * isPadded[0]
comp.InsertLocal(round, ifaces.QueryIDf("AccPaddedBytes_Loc"),
sym.Sub(iPadd.accPaddedBytes, sym.Mul(iPadd.nByte, iPadd.isPadded)))
// accPaddedBytes[i] = (accPaddedBytes[i-1] + nByte[i]) * isPadded[i]
// for i the index of the row
comp.InsertGlobal(round, ifaces.QueryIDf("AccPaddedBytes_Glob"),
sym.Sub(
iPadd.accPaddedBytes,
sym.Mul(
sym.Add(column.Shift(iPadd.accPaddedBytes, -1), iPadd.nByte),
iPadd.isPadded),
))
}
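// Worked example (exposition only): for padded rows with nByte = (1, 16, 3) at
// the end of a block, accPaddedBytes runs (1, 17, 20) and resets to 0 on the
// next inserted row; the conditional lookup above then bounds the total number
// of padded bytes.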
// It assigns the native columns specific to the module;
// the columns are extended by padding.
func (iPadd *importAndPadd) assignImportAndPadd(
run *wizard.ProverRuntime,
gt generic.GenTrace,
maxRows, hashType int) {
isSelected := gt.TO_HASH
one := field.One()
var hashNum, limb, nByte, index, cleanLimb []field.Element
for i := range isSelected {
if isSelected[i].Cmp(&one) == 0 {
hashNum = append(hashNum, gt.HashNum[i])
limb = append(limb, gt.Limb[i])
nByte = append(nByte, gt.NByte[i])
index = append(index, gt.Index[i])
cleanLimb = append(cleanLimb, gt.CleanLimb[i])
}
}
// extend the columns to include the padded limbs
eLimb, eNbyte, eHashNum, eIndex, eCleanLimb, isNewHash, isInserted, isPadded :=
extendWithPadding(limb, nByte, hashNum, index, cleanLimb, hashType)
// sanity check
if len(eHashNum) != len(isNewHash) {
utils.Panic("HashNum and isNewHash have different sizes %v, %v ",
len(eHashNum), len(isNewHash))
}
// assign the columns
run.AssignColumn(iPadd.isNewHash.GetColID(), smartvectors.RightZeroPadded(isNewHash, maxRows))
run.AssignColumn(iPadd.limb.GetColID(), smartvectors.RightZeroPadded(eLimb, maxRows))
run.AssignColumn(iPadd.nByte.GetColID(), smartvectors.RightZeroPadded(eNbyte, maxRows))
run.AssignColumn(iPadd.isActive.GetColID(), smartvectors.RightZeroPadded(vector.Repeat(one, len(eLimb)), maxRows))
run.AssignColumn(iPadd.isInserted.GetColID(), smartvectors.RightZeroPadded(isInserted, maxRows))
run.AssignColumn(iPadd.isPadded.GetColID(), smartvectors.RightZeroPadded(isPadded, maxRows))
run.AssignColumn(iPadd.hashNum.GetColID(), smartvectors.RightZeroPadded(eHashNum, maxRows))
run.AssignColumn(iPadd.index.GetColID(), smartvectors.RightZeroPadded(eIndex, maxRows))
run.AssignColumn(iPadd.cleanLimb.GetColID(), smartvectors.RightZeroPadded(eCleanLimb, maxRows))
run.AssignColumn(iPadd.oneCol.GetColID(), smartvectors.RightZeroPadded(vector.Repeat(field.One(), len(eLimb)), maxRows))
accPaddedBytes := make([]field.Element, len(eNbyte))
if len(eNbyte) != 0 {
accPaddedBytes[0].Mul(&eNbyte[0], &isPadded[0])
}
for i := 1; i < len(eNbyte); i++ {
accPaddedBytes[i].Add(&accPaddedBytes[i-1], &eNbyte[i])
accPaddedBytes[i].Mul(&accPaddedBytes[i], &isPadded[i])
}
run.AssignColumn(iPadd.accPaddedBytes.GetColID(), smartvectors.RightZeroPadded(accPaddedBytes, maxRows))
}
// extendWithPadding extends the columns by adding rows to include the padding limbs.
// For Keccak it uses hashType = 0, for Sha2 hashType = 1.
func extendWithPadding(limb, nByte, hashNum, index, cleanLimb []field.Element, hashType int) (
extendedLimb, extendedNbyte []field.Element,
extendedHashNum, extendedIndex []field.Element,
extendedCleanLimb []field.Element,
isNewHash, isInserted, isPadded []field.Element,
) {
one := field.One()
zero := field.Zero()
lenLimb := len(limb)
s := 0
var paddingLimb, paddingNbyte, paddingIndex, zeroes, ones, repeatHashNum []field.Element
for j := 0; j < lenLimb; j++ {
extendedLimb = append(extendedLimb, limb[j])
extendedNbyte = append(extendedNbyte, nByte[j])
extendedHashNum = append(extendedHashNum, hashNum[j])
extendedIndex = append(extendedIndex, index[j])
extendedCleanLimb = append(extendedCleanLimb, cleanLimb[j])
isInserted = append(isInserted, one)
isPadded = append(isPadded, zero)
if index[j].Uint64() == 0 {
isNewHash = append(isNewHash, field.One())
} else {
isNewHash = append(isNewHash, field.Zero())
}
s = s + int(nByte[j].Uint64())
if j != lenLimb-1 {
// if a new hash is about to be launched, pad the last block
if index[j+1].Uint64() == 0 {
// insert new rows to include the padding limbs
switch hashType {
case Keccak:
paddingLimb, paddingNbyte, paddingIndex, zeroes, ones, repeatHashNum =
insertPaddingRowsKeccak(s%maxBlockSize, maxBlockSize, hashNum[j], index[j].Uint64())
case Sha2:
paddingLimb, paddingNbyte, paddingIndex, zeroes, ones, repeatHashNum =
insertPaddingRowsSha2(s, maxBlockSizeSha2, hashNum[j], index[j].Uint64())
default:
utils.Panic("The hashType is not supported")
}
extendedLimb = append(extendedLimb, paddingLimb...)
extendedNbyte = append(extendedNbyte, paddingNbyte...)
extendedIndex = append(extendedIndex, paddingIndex...)
extendedCleanLimb = append(extendedCleanLimb, paddingLimb...)
extendedHashNum = append(extendedHashNum, repeatHashNum...)
isNewHash = append(isNewHash, zeroes...)
isInserted = append(isInserted, zeroes...)
isPadded = append(isPadded, ones...)
s = 0
}
} else {
// if it is the last limb in the column, pad the last block
switch hashType {
case Keccak:
paddingLimb, paddingNbyte, paddingIndex, zeroes, ones, repeatHashNum =
insertPaddingRowsKeccak(s%maxBlockSize, maxBlockSize, hashNum[j], index[j].Uint64())
case Sha2:
paddingLimb, paddingNbyte, paddingIndex, zeroes, ones, repeatHashNum =
insertPaddingRowsSha2(s, maxBlockSizeSha2, hashNum[j], index[j].Uint64())
default:
utils.Panic("The hashType is not supported")
}
extendedLimb = append(extendedLimb, paddingLimb...)
extendedNbyte = append(extendedNbyte, paddingNbyte...)
extendedHashNum = append(extendedHashNum, repeatHashNum...)
extendedIndex = append(extendedIndex, paddingIndex...)
extendedCleanLimb = append(extendedCleanLimb, paddingLimb...)
isNewHash = append(isNewHash, zeroes...)
isInserted = append(isInserted, zeroes...)
isPadded = append(isPadded, ones...)
}
}
return extendedLimb, extendedNbyte, extendedHashNum, extendedIndex, extendedCleanLimb, isNewHash, isInserted, isPadded
}
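// Illustrative sketch (added for exposition; not part of the original code):
// the number of padding bytes appended to a stream holding n block bytes, for
// a rate of max bytes. pad10*1 always appends at least one byte and completes
// the block.
func keccakPadLenSketch(n, max int) int {
return max - n%max // a full extra block of padding when n is a multiple of max
}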

View File

@@ -1,124 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
)
// insertPaddingKeccak asserts that the padding over the given (gbm) columns is done correctly.
/*
padding is expected to be done by inserting new limbs;
- limb 1 for the first padded limb, zero bytes for the middle limbs, and 128 for the last limb
- limb 129 if only a single byte of padding is required
*/
func (iPadd *importAndPadd) insertPaddingKeccak(comp *wizard.CompiledIOP, round int, lookUpKeccakPad ifaces.Column) {
/* if isPadded[i-1] = 0, isPadded[i] = 1, isPadded[i+1] =1 ----> limb = 1, nByte = 1
if isPadded[i-1] = 1, isPadded[i] = 1, isPadded[i+1] =0 ----> limb = 128, nByte = 1
if isPadded[i-1] = 0, isPadded[i] = 1, isPadded[i+1] =0 ----> limb = 129 , nByte = 1
if isPadded[i-1] = 1, isPadded[i] = 1, isPadded[i+1] =1 ----> limb = 0
the constraints over NBytes also guarantees the correct number of padded zeroes.*/
dsv := symbolic.NewConstant(1) // domain separator value, for padding
fpv := symbolic.NewConstant(128) // final padding value
fpvPlus := symbolic.NewConstant(129) // padding value for a single byte padding
isPaddedMinus := column.Shift(iPadd.isPadded, -1)
isPaddedPlus := column.Shift(iPadd.isPadded, 1)
firstPad := sym.Mul(sym.Sub(1, isPaddedMinus), iPadd.isPadded, isPaddedPlus,
sym.Sub(iPadd.limb, dsv))
firstPadLen := sym.Mul(sym.Sub(1, isPaddedMinus), iPadd.isPadded, isPaddedPlus,
sym.Sub(iPadd.nByte, 1))
lastPad := sym.Mul(isPaddedMinus, iPadd.isPadded, sym.Sub(1, isPaddedPlus),
sym.Sub(iPadd.limb, fpv))
lastPaddLen := sym.Mul(isPaddedMinus, iPadd.isPadded, sym.Sub(1, isPaddedPlus),
sym.Sub(iPadd.nByte, 1))
middlePads := sym.Mul(isPaddedMinus, iPadd.isPadded, isPaddedPlus, iPadd.limb)
singlePad := sym.Mul(sym.Sub(1, isPaddedMinus), iPadd.isPadded, sym.Sub(1, isPaddedPlus),
sym.Sub(iPadd.limb, fpvPlus))
comp.InsertGlobal(round, ifaces.QueryIDf("FirstPad"), firstPad)
comp.InsertGlobal(round, ifaces.QueryIDf("FirstPadLen"), firstPadLen)
comp.InsertGlobal(round, ifaces.QueryIDf("MiddlePads"), middlePads)
comp.InsertGlobal(round, ifaces.QueryIDf("LastPad"), lastPad)
comp.InsertGlobal(round, ifaces.QueryIDf("LastPadLen"), lastPaddLen)
comp.InsertGlobal(round, ifaces.QueryIDf("SinglePad"), singlePad)
comp.InsertInclusionConditionalOnIncluded(round, ifaces.QueryIDf("LOOKUP_NB_ZeroPadded"),
[]ifaces.Column{lookUpKeccakPad}, []ifaces.Column{iPadd.accPaddedBytes}, iPadd.isPadded)
}
// insertPaddingRowsKeccak receives the number of existing bytes in the block and completes the block by padding.
func insertPaddingRowsKeccak(n, max int, hashNum field.Element, lastIndex uint64) (
limb, nbyte, index, zeroes, ones, repeatHashNum []field.Element,
) {
zero := field.Zero()
one := field.One()
maxNByteFr := field.NewElement(maxNByte)
remain := max - n
if remain >= 2 {
// applies the domain separator
limb = append(limb, one)
nbyte = append(nbyte, field.One())
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
a := (remain - 2) % maxNByte
b := (remain - 2) / maxNByte
// zero pad on the right
for i := 0; i < b; i++ {
limb = append(limb, zero)
nbyte = append(nbyte, maxNByteFr)
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
}
if a != 0 {
limb = append(limb, zero)
nbyte = append(nbyte, field.NewElement(uint64(a)))
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
}
// padding on the right with 0x80
limb = append(limb, field.NewElement(128))
nbyte = append(nbyte, one)
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
}
// padding with 0x81, for padding with a single byte
if remain == 1 {
limb = append(limb, field.NewElement(129))
nbyte = append(nbyte, one)
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
}
// sanity check
if len(zeroes) != len(repeatHashNum) {
utils.Panic(" they should have the same length")
}
return limb, nbyte, index, zeroes, ones, repeatHashNum
}
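// A minimal byte-level sketch (not part of the original file) of the pad10*1
// convention that insertPaddingRowsKeccak encodes as limb rows: 0x01 for the
// first padding byte, zeroes in the middle, 0x80 for the last byte, and the
// fused byte 0x81 when exactly one byte of padding is needed.
func keccakPadBytesSketch(n, blockSize int) []byte {
	remain := blockSize - n%blockSize
	if remain == 1 {
		return []byte{0x81} // single-byte padding fuses 0x01 and 0x80
	}
	pad := make([]byte, remain)
	pad[0] = 0x01
	pad[remain-1] = 0x80
	return pad
}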

View File

@@ -1,170 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
)
// insertPaddingSha2 asserts that the padding over the given (gbm) columns is done correctly.
/*
padding is expected to be done by inserting new limbs;
- limb 0x80 for the first padded limb, zero bytes for the middle limbs, and (len << 3) for the last limb
- the last limb should be 8 bytes
- the number of zeroes should not be more than the block size
(this prevents attacks where a full block of zeroes is padded;
the case where fewer zeroes than required are padded is prevented later, by the constraint that blocks should be full)
*/
func (iPadd importAndPadd) insertPaddingSha2(comp *wizard.CompiledIOP, round int, lookUpSha2Pad ifaces.Column) {
// commit to the column streamLen;
// it keeps (lenStream << 3) in front of the last limb.
streamLen := comp.InsertCommit(round, ifaces.ColIDf("StreamLen"), iPadd.isActive.Size())
comp.SubProvers.AppendToInner(round, func(run *wizard.ProverRuntime) {
witSize := smartvectors.Density(iPadd.isActive.GetColAssignment(run))
nByte := iPadd.nByte.GetColAssignment(run).IntoRegVecSaveAlloc()
isInserted := iPadd.isInserted.GetColAssignment(run).IntoRegVecSaveAlloc()
isPadded := iPadd.isPadded.GetColAssignment(run).IntoRegVecSaveAlloc()
streamLenWit := make([]field.Element, witSize)
s := uint64(0)
for i := 0; i < witSize; i++ {
if isPadded[i].IsZero() {
s = s + nByte[i].Uint64()
streamLenWit[i] = field.NewElement(s)
} else {
streamLenWit[i] = field.NewElement(s)
if i == witSize-1 || isInserted[i+1].IsOne() {
s <<= 3
streamLenWit[i] = field.NewElement(s)
s = 0
}
}
}
run.AssignColumn(streamLen.GetColID(),
smartvectors.RightZeroPadded(streamLenWit, iPadd.isActive.Size()))
})
// streamLen[0] = nByte[0]
comp.InsertLocal(round, ifaces.QueryIDf("streamLen_Local"), sym.Sub(streamLen, iPadd.nByte))
// accumulate nBytes of the original stream (thus ignoring the padded ones)
// for each new stream restart the accumulator
isNotRestarted := sym.Sub(1, sym.Mul(column.Shift(iPadd.isPadded, -1), iPadd.isInserted))
a := sym.Add(sym.Mul(column.Shift(streamLen, -1), isNotRestarted),
sym.Mul(iPadd.nByte, iPadd.isInserted)) // ignore the padded ones
// shift the streamLen by 3 over the last limb
isLastLimb := sym.Add(sym.Mul(iPadd.isPadded, column.Shift(iPadd.isInserted, 1)),
sym.Mul(iPadd.isActive, sym.Sub(1, column.Shift(iPadd.isActive, 1))))
b := sym.Add(sym.Mul(a, sym.Sub(1, isLastLimb)), sym.Mul(a, 8, isLastLimb))
comp.InsertGlobal(round, ifaces.QueryIDf("StreamLen_Glob"), sym.Mul(sym.Sub(b, streamLen), iPadd.isActive))
iPadd.csZeroPadding(comp, round)
/* if isPadded[i-1] = 0, isPadded[i] = 1, isPadded[i+1] =1 ----> limb = 0x80 and nByte = 1
if isPadded[i-1] = 1, isPadded[i] = 1, isPadded[i+1] =0 ----> limb = streamLen and nByte = 8
if isPadded[i-1] = 1, isPadded[i] = 1, isPadded[i+1] =1 ----> limb = 0
the constraints over NBytes also guarantees the correct number of padded zeroes.*/
dsv := symbolic.NewConstant(128) // domain separator value, for padding
isPaddedMinus := column.Shift(iPadd.isPadded, -1)
isPaddedPlus := column.Shift(iPadd.isPadded, 1)
firstPad := sym.Mul(sym.Sub(1, isPaddedMinus), iPadd.isPadded,
isPaddedPlus, sym.Sub(iPadd.limb, dsv))
firstPadLen := sym.Mul(sym.Sub(1, isPaddedMinus), iPadd.isPadded,
isPaddedPlus, sym.Sub(iPadd.nByte, 1))
lastPad := sym.Mul(isPaddedMinus, iPadd.isPadded, sym.Sub(1, isPaddedPlus), sym.Sub(iPadd.limb, streamLen))
lastPadLen := sym.Mul(isPaddedMinus, iPadd.isPadded, sym.Sub(1, isPaddedPlus), sym.Sub(iPadd.nByte, 8))
middlePad := sym.Mul(isPaddedMinus, iPadd.isPadded, isPaddedPlus, iPadd.limb)
comp.InsertGlobal(round, ifaces.QueryIDf("FirstPad"), firstPad)
comp.InsertGlobal(round, ifaces.QueryIDf("FirstPadLen"), firstPadLen)
comp.InsertGlobal(round, ifaces.QueryIDf("MiddlePads"), middlePad)
comp.InsertGlobal(round, ifaces.QueryIDf("lastPad"), lastPad)
comp.InsertGlobal(round, ifaces.QueryIDf("lastPadLen"), lastPadLen)
comp.InsertInclusionConditionalOnIncluded(round, ifaces.QueryIDf("LOOKUP_NB_ZeroPadded"),
[]ifaces.Column{lookUpSha2Pad}, []ifaces.Column{iPadd.accPaddedBytes}, iPadd.isPadded)
}
// insertPaddingRowsSha2 receives the number of existing bytes in the block and completes the block by padding.
func insertPaddingRowsSha2(streamLen, max int, hashNum field.Element, lastIndex uint64) (
limb, nbyte, index, zeroes, ones, repeatHashNum []field.Element,
) {
maxNByteFr := field.NewElement(16)
dsv := field.NewElement(128)
zero := field.Zero()
one := field.One()
n := streamLen % max
// applies the domain separator
limb = append(limb, dsv)
nbyte = append(nbyte, field.One())
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
var a, b int
if n < 56 {
remain := max - n - 9 // one byte for 0x80 and eight bytes for the encoded length
a = remain % maxNByte
b = remain / maxNByte
} else {
// (max - 1 - n) bytes remain in the current block
// and (max - 8) more come from the next block
remain := (max - 1 - n) + (max - 8)
a = remain % maxNByte
b = remain / maxNByte
}
// zero pad on the right
for i := 0; i < b; i++ {
limb = append(limb, zero)
nbyte = append(nbyte, maxNByteFr)
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
}
if a != 0 {
limb = append(limb, zero)
nbyte = append(nbyte, field.NewElement(uint64(a)))
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
}
// padding on the right with len << 3
streamLen <<= 3
limb = append(limb, field.NewElement(uint64(streamLen)))
nbyte = append(nbyte, field.NewElement(8))
zeroes = append(zeroes, zero)
ones = append(ones, one)
repeatHashNum = append(repeatHashNum, hashNum)
lastIndex++
index = append(index, field.NewElement(lastIndex))
// sanity check
if len(zeroes) != len(repeatHashNum) {
utils.Panic(" they should have the same length")
}
return limb, nbyte, index, zeroes, ones, repeatHashNum
}
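// A minimal byte-level sketch (not part of the original file) of the Sha2
// padding that insertPaddingRowsSha2 encodes as limb rows, assuming the
// standard encoding/binary import: one 0x80 byte, zeroes, then the stream
// length in bits as an 8-byte big-endian integer, so that the padded stream
// is a multiple of the 64-byte block size.
func sha2PadBytesSketch(streamLen int) []byte {
	n := streamLen % 64
	zeroes := (64 - (n+1+8)%64) % 64
	pad := make([]byte, 1+zeroes+8)
	pad[0] = 0x80
	binary.BigEndian.PutUint64(pad[len(pad)-8:], uint64(streamLen)<<3)
	return pad
}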

View File

@@ -1,80 +0,0 @@
package datatransfer
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/crypto/sha2"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// GetPaddingForTest generates the Define and Assign functions of the padding submodule, for testing.
func GetPaddingForTest(hashType int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbmSize := 512
size := 2048
iPadd := importAndPadd{}
gbm := generic.GenericByteModule{}
lu := lookUpTables{}
def := generic.PHONEY_RLP
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
iPadd.insertCommit(comp, round, size)
gbm = CommitGBM(comp, round, def, gbmSize)
lu = newLookupTables(comp)
if hashType == Keccak {
iPadd.insertPaddingKeccak(comp, round, lu.colKeccakMaxPadding)
} else {
iPadd.insertPaddingSha2(comp, round, lu.colSha2MaxPadding)
}
}
prover = func(run *wizard.ProverRuntime) {
gt := generic.GenTrace{}
keccakTrace := keccak.PermTraces{}
sha2Trace := sha2.HashTraces{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &keccakTrace, &sha2Trace)
iPadd.assignImportAndPadd(run, gt, size, hashType)
// check that the blocks generated by iPadd are consistent with the trace
if hashType != Sha2 {
blocks := blocksFromIPadd(run, iPadd, maxBlockSize)
for j := range blocks {
if keccakTrace.Blocks[j] != *bytesAsBlockPtrUnsafe(blocks[j]) {
utils.Panic("%v-th block is not correct", j)
}
}
}
}
return define, prover
}
// test Keccak Padding
func TestPaddingKeccak(t *testing.T) {
define, prover := GetPaddingForTest(0)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof for keccak")
}
// test Sha2 Padding
func TestPaddingSha2(t *testing.T) {
define, prover := GetPaddingForTest(1)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof for sha2")
}

View File

@@ -1,255 +0,0 @@
package datatransfer
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/dedicated"
)
// The spaghettizedCLD module implements the utility to spaghettize the CLD columns.
type spaghettizedCLD struct {
// the size of spaghettized columns
spaghettiSize int
// the spaghetti versions of cld, cldLen,cldPower
cldSpaghetti ifaces.Column
cldLenSpaghetti ifaces.Column
cldLenPowersSpaghetti ifaces.Column
// It accumulates the lengths of all the slices pushed into the same lane (expected to total 8 bytes).
accCldLenSpaghetti ifaces.Column
// It indicates the first slice from a new hash over the spaghetti
isFirstSliceOfNewHash ifaces.Column
// The matrix form of isFirstSliceOfNewHash
isNewHashMatrix []ifaces.Column
// It is 1 when a lane is complete.
isLaneComplete ifaces.Column
// The spaghetti version of cldLenBinary, used to indicate the effective part of the spaghetti columns.
isActive ifaces.Column
}
// newSpaghetti declares the new columns specific to the module,
// and also the constraints asserting the correct form of the spaghetti.
// It facilitates the task of pushing limbs to the lanes.
// It ensures that exactly 8 bytes are committed to each lane.
func (s *spaghettizedCLD) newSpaghetti(
comp *wizard.CompiledIOP,
round int,
iPadd importAndPadd,
cld cleanLimbDecomposition,
maxNumRows int,
) {
s.spaghettiSize = maxNumRows
// Declare the columns
s.insertCommit(comp, round, cld, maxNumRows)
// Declare the constraints
// Constraints over the spaghetti forms
dedicated.InsertIsSpaghetti(comp, round, ifaces.QueryIDf("Spaghetti-CLD"),
[][]ifaces.Column{cld.cld[:], cld.cldLen[:],
cld.cldLenPowers[:], cld.cldLenBinary[:], s.isNewHashMatrix[:]},
cld.cldLenBinary[:],
[]ifaces.Column{s.cldSpaghetti, s.cldLenSpaghetti,
s.cldLenPowersSpaghetti, s.isActive, s.isFirstSliceOfNewHash},
maxNumRows,
)
// Constraint over isLaneComplete
// The number of bytes to be pushed into the same lane is 8.
// accCldLenSpaghetti = 8 iff isLaneComplete = 1
dedicated.InsertIsTargetValue(comp, round, ifaces.QueryIDf("LanesAreComplete"),
field.NewElement(numBytesInLane),
s.accCldLenSpaghetti,
s.isLaneComplete,
)
// isLaneComplete is binary
comp.InsertGlobal(round, ifaces.QueryIDf("IsLaneComplete_IsBinary"),
symbolic.Mul(s.isLaneComplete, symbolic.Sub(1, s.isLaneComplete)))
// Constraints over the accumulator of cldLenSpaghetti
// accCldLenSpaghetti[0] = cldLenSpaghetti[0]
comp.InsertLocal(round, ifaces.QueryIDf("AccCLDLenSpaghetti_Loc"),
symbolic.Sub(s.accCldLenSpaghetti, s.cldLenSpaghetti))
// accCldLenSpaghetti[i] = accCldLenSpaghetti[i-1]*(1-isLaneComplete[i-1]) + cldLenSpaghetti[i]
res := symbolic.Sub(1, column.Shift(s.isLaneComplete, -1)) // 1-isLaneComplete[i-1]
expr := symbolic.Sub(symbolic.Add(symbolic.Mul(column.Shift(s.accCldLenSpaghetti, -1), res),
s.cldLenSpaghetti), s.accCldLenSpaghetti)
comp.InsertGlobal(round, ifaces.QueryIDf("AccCLDLenSpaghetti_Glob"),
expr)
// constraints over the form of isNewHashOverSpaghetti.
// Considering cldSpaghetti,
// isNewHashOverSpaghetti = 1 at the first byte from each new hash.
// define matrices a, b as follows, where cldLenBinary is from the cld module
// a[0] = cldLenBinary[0]
// a[j+1] = cldLenBinary[j+1]-cldLenBinary[j] for j=0,...,14
// b[j] = a[j] * iPadd.isNewHash for any j.
// Thus the constraint is equivalent with;
// spaghetti(b) == isNewHashOverSpaghetti
s.csIsNewHash(comp, round, iPadd, cld)
}
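// A worked example (not part of the original file) of the two accumulator
// constraints above, over plain ints: with cldLenSpaghetti = (3, 5, 2, 6, 8)
// the accumulator runs (3, 8, 2, 8, 8) and isLaneComplete = (0, 1, 0, 1, 1),
// i.e. the accumulation restarts after every completed 8-byte lane.
func accCldLenSketch(cldLen []int) (acc, complete []int) {
	acc = make([]int, len(cldLen))
	complete = make([]int, len(cldLen))
	for i, l := range cldLen {
		if i == 0 || complete[i-1] == 1 {
			acc[i] = l // restart after a completed lane
		} else {
			acc[i] = acc[i-1] + l
		}
		if acc[i] == 8 { // 8 = numBytesInLane
			complete[i] = 1
		}
	}
	return acc, complete
}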
// insertCommit declares the columns specific to the module.
func (s *spaghettizedCLD) insertCommit(comp *wizard.CompiledIOP, round int, cld cleanLimbDecomposition, maxNumRows int) {
s.isNewHashMatrix = make([]ifaces.Column, len(cld.cld))
// declare the columns
s.cldSpaghetti = comp.InsertCommit(round, ifaces.ColIDf("CLD_Spaghetti"), maxNumRows)
s.cldLenSpaghetti = comp.InsertCommit(round, ifaces.ColIDf("CLD_Len_Spaghetti"), maxNumRows)
s.accCldLenSpaghetti = comp.InsertCommit(round, ifaces.ColIDf("AccCLDLen_Spaghetti"), maxNumRows)
s.cldLenPowersSpaghetti = comp.InsertCommit(round, ifaces.ColIDf("CLD_LenPowers_Spaghetti"), maxNumRows)
s.isLaneComplete = comp.InsertCommit(round, ifaces.ColIDf("IsLaneComplete"), maxNumRows)
s.isActive = comp.InsertCommit(round, ifaces.ColIDf("IsActive_Spaghetti"), maxNumRows)
s.isFirstSliceOfNewHash = comp.InsertCommit(round, ifaces.ColIDf("IsNewHash_Spaghetti"), maxNumRows)
for j := range s.isNewHashMatrix {
s.isNewHashMatrix[j] = comp.InsertCommit(round, ifaces.ColIDf("IsNewHash_Matrix_%v", j), cld.cld[0].Size())
}
}
// csIsNewHash asserts that, over cldSpaghetti,
// isFirstSliceOfNewHash = 1 at the first byte from each new hash.
func (s *spaghettizedCLD) csIsNewHash(comp *wizard.CompiledIOP,
round int,
iPadd importAndPadd,
cld cleanLimbDecomposition,
) {
// a[0] = cldLenBinary[0]
// a[j+1] = cldLenBinary[j+1]-cldLenBinary[j] for j=0,...,14
// b[j] = a[j] * isNewHash for any j.
// Thus the constraint is equivalent with;
// spaghetti(b) == isNewHashOverSpaghetti
var a, b [maxLanesFromLimb]*symbolic.Expression
a[0] = ifaces.ColumnAsVariable(cld.cldLenBinary[0])
for j := 0; j < maxLanesFromLimb-1; j++ {
a[j+1] = symbolic.Sub(cld.cldLenBinary[j+1], cld.cldLenBinary[j])
}
for j := range cld.cld {
b[j] = symbolic.Mul(a[j], iPadd.isNewHash)
}
// the matrix form of b
for j := range cld.cld {
comp.InsertGlobal(round, ifaces.QueryIDf("Matrix_IsNewHash_%v", j),
symbolic.Sub(b[j], s.isNewHashMatrix[j]))
}
// note: by the way that b is built, we don't need to check that isFirstSliceOfNewHash is binary
}
// AssignSpaghetti assigns the columns specific to the module.
func (s *spaghettizedCLD) assignSpaghetti(
run *wizard.ProverRuntime,
iPadd importAndPadd,
cld cleanLimbDecomposition,
maxNumRows int) {
// populate filter
filter := cld.cldLenBinary
witSize := smartvectors.Density(filter[0].GetColAssignment(run))
// fetch the columns
filterWit := make([][]field.Element, len(filter))
cldLenWit := make([][]field.Element, len(filter))
cldLenPowersWit := make([][]field.Element, len(filter))
cldWit := make([][]field.Element, len(filter))
cldLenBinaryWit := make([][]field.Element, len(filter))
for i := range filter {
filterWit[i] = make([]field.Element, witSize)
cldWit[i] = cld.cld[i].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
cldLenWit[i] = cld.cldLen[i].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
cldLenBinaryWit[i] = cld.cldLenBinary[i].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
cldLenPowersWit[i] = cld.cldLenPowers[i].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
filterWit[i] = cldLenBinaryWit[i]
}
// make the spaghetti version of the fetched columns.
spaghetti := makeSpaghetti(filterWit, cldWit, cldLenWit, cldLenBinaryWit, cldLenPowersWit)
// assign the columns
run.AssignColumn(s.cldSpaghetti.GetColID(), smartvectors.RightZeroPadded(spaghetti[0], maxNumRows))
run.AssignColumn(s.cldLenSpaghetti.GetColID(), smartvectors.RightZeroPadded(spaghetti[1], maxNumRows))
run.AssignColumn(s.isActive.GetColID(), smartvectors.RightZeroPadded(spaghetti[2], maxNumRows))
run.AssignColumn(s.cldLenPowersSpaghetti.GetColID(), smartvectors.RightZeroPadded(spaghetti[3], maxNumRows))
// populate isLaneComplete
cldLenSpaghetti := spaghetti[1]
isLaneComplete := AccReachedTargetValue(cldLenSpaghetti, numBytesInLane)
// populate accumulator
accCldLenWit := make([]field.Element, len(spaghetti[0]))
if len(cldLenSpaghetti) != 0 {
accCldLenWit[0] = cldLenSpaghetti[0]
}
var res field.Element
one := field.One()
for i := 1; i < len(spaghetti[0]); i++ {
res.Sub(&one, &isLaneComplete[i-1])
res.Mul(&accCldLenWit[i-1], &res)
accCldLenWit[i].Add(&res, &cldLenSpaghetti[i])
}
// assign the accumulator of cldLen and isLaneComplete.
run.AssignColumn(s.accCldLenSpaghetti.GetColID(), smartvectors.RightZeroPadded(accCldLenWit, maxNumRows))
run.AssignColumn(s.isLaneComplete.GetColID(), smartvectors.RightZeroPadded(isLaneComplete, maxNumRows))
// assign isNewHashMatrix and isFirstByteOfNewHash.
s.assignIsNewHash(run, iPadd, cld, maxNumRows)
}
func (s *spaghettizedCLD) assignIsNewHash(
run *wizard.ProverRuntime,
iPadd importAndPadd,
cld cleanLimbDecomposition,
maxNumRows int) {
witSize := smartvectors.Density(iPadd.isNewHash.GetColAssignment(run))
isNewHash := iPadd.isNewHash.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
nByte := iPadd.nByte.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
sum := 0
var t []int
for j := range nByte {
if isNewHash[j] == field.One() {
// the length of the stream when we reach a newHash
t = append(t, sum)
}
sum = sum + int(nByte[j].Uint64())
}
// isActive is equivalent to the spaghetti of cldLenBinary
witSizeSpaghetti := smartvectors.Density(s.isActive.GetColAssignment(run))
cldLenSpaghetti := s.cldLenSpaghetti.GetColAssignment(run).IntoRegVecSaveAlloc()[:witSizeSpaghetti]
ctr := 0
sumS := 0
var col []field.Element
for j := range cldLenSpaghetti {
if ctr < len(t) && t[ctr] == sumS {
col = append(col, field.One())
ctr++
} else {
col = append(col, field.Zero())
}
sumS = sumS + int(cldLenSpaghetti[j].Uint64())
}
//assign the columns
run.AssignColumn(s.isFirstSliceOfNewHash.GetColID(), smartvectors.RightZeroPadded(col, maxNumRows))
// populate the isNewHashMatrix
cldLenBinary := make([][]field.Element, maxLanesFromLimb)
for j := range cld.cld {
cldLenBinary[j] = make([]field.Element, witSize)
cldLenBinary[j] = cld.cldLenBinary[j].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
}
matrix := makeMatrix(cldLenBinary[:], col)
for j := range matrix {
run.AssignColumn(s.isNewHashMatrix[j].GetColID(), smartvectors.RightZeroPadded(matrix[j], cld.cld[0].Size()))
}
}

View File

@@ -1,53 +0,0 @@
package datatransfer
import (
"testing"
permTrace "github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// makeTestCaseSpaghettiModule generates the Define and Assign functions of the spaghettizedCLD module, for testing.
func makeTestCaseSpaghettiModule() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
gbm := generic.GenericByteModule{}
iPadd := importAndPadd{}
cld := cleanLimbDecomposition{nbCld: maxLanesFromLimb, nbCldSlices: numBytesInLane}
s := &spaghettizedCLD{}
def := generic.PHONEY_RLP
// spaghetti Size
size := 64
spaghettiSize := 4 * size
gbmSize := 32
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
gbm = CommitGBM(comp, round, def, gbmSize)
iPadd.insertCommit(comp, round, size)
cld.insertCommit(comp, round, size)
s.newSpaghetti(comp, round, iPadd, cld, spaghettiSize)
}
prover = func(run *wizard.ProverRuntime) {
traces := permTrace.PermTraces{}
gt := generic.GenTrace{}
AssignGBMfromTable(run, &gbm)
gbm.AppendTraces(run, &gt, &traces)
iPadd.assignImportAndPadd(run, gt, size, 0)
cld.assignCLD(run, iPadd, size)
s.assignSpaghetti(run, iPadd, cld, spaghettiSize)
}
return define, prover
}
func TestSpaghettiModule(t *testing.T) {
// test keccak
define, prover := makeTestCaseSpaghettiModule()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,292 +0,0 @@
package datatransfer
import (
"strconv"
"strings"
"unsafe"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
)
// deriveName derives column names.
func deriveName(mainName string, ids ...int) ifaces.ColID {
idStr := []string{}
for i := range ids {
idStr = append(idStr, strconv.Itoa(ids[i]))
}
return ifaces.ColIDf("%v_%v_%v", "DT", mainName, strings.Join(idStr, "_"))
}
// getZeroOnes receives n and outputs the pattern (0,...,0,1,...,1) of length max, ending with n ones.
func getZeroOnes(n field.Element, max int) (a []field.Element) {
if n.Uint64() > uint64(max) {
utils.Panic("%v should be smaller than %v", n.Uint64(), max)
}
for j := 0; j < max-int(n.Uint64()); j++ {
a = append(a, field.Zero())
}
for i := max - int(n.Uint64()); i < max; i++ {
a = append(a, field.One())
}
return a
}
// bytesAsBlockPtrUnsafe reinterprets a byte slice as a keccak.Block without copying.
func bytesAsBlockPtrUnsafe(s []byte) *keccak.Block {
return (*keccak.Block)(unsafe.Pointer(&s[0]))
}
// baseRecomposeHandles recomposes the columns over the given base, taking a[0] as the least-significant digit.
func baseRecomposeHandles(a []ifaces.Column, base any) *symbolic.Expression {
res := symbolic.NewConstant(0)
for k := len(a) - 1; k >= 0; k-- {
res = symbolic.Add(symbolic.Mul(res, base), a[k])
}
return res
}
// It recomposes the slices with the given lengths in little-endian order
func baseRecomposeByLengthHandles(slices []ifaces.Column, base any, lenSlices []ifaces.Column) *symbolic.Expression {
res := symbolic.NewConstant(0)
for k := 0; k < len(slices); k++ {
res = symbolic.Add(symbolic.Mul(res, base), symbolic.Mul(slices[k], lenSlices[k]))
}
return res
}
// It recomposes the slices with the given lengths in big-endian order, when the lengths of the slices are binary.
func baseRecomposeBinaryLen(slices []ifaces.Column, base any, lenSlices []ifaces.Column) *symbolic.Expression {
res := symbolic.NewConstant(0)
for k := len(slices) - 1; k >= 0; k-- {
currBase := symbolic.Add(symbolic.Mul(base, lenSlices[k]), symbolic.Sub(1, lenSlices[k]))
res = symbolic.Add(symbolic.Mul(res, currBase), slices[k])
}
return res
}
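// A small illustration (not part of the original file) of the binary-length
// recomposition above, over plain ints: empty slices (length bit 0) get a
// multiplier of 1 instead of the base, so they do not shift the digits.
func baseRecomposeBinaryLenSketch(slices, lens []int, base int) int {
	res := 0
	for k := len(slices) - 1; k >= 0; k-- {
		currBase := base*lens[k] + (1 - lens[k]) // base if the slice is non-empty, else 1
		res = res*currBase + slices[k]
	}
	return res // e.g. slices (7, 0, 9), lens (1, 0, 1), base 10 ---> 97
}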
// uInt16ToBaseX converts a 16-bit unsigned integer to a given base; the base is
// given as a field element to save on expensive conversions.
func uInt16ToBaseX(x uint16, base *field.Element) field.Element {
res := field.Zero()
one := field.One()
resIsZero := true
for k := 15; k >= 0; k-- {
// The test below allows skipping useless field muls while the
// accumulated result is still zero.
if !resIsZero {
res.Mul(&res, base)
}
// Skips the field addition if the bit is zero
bit := (x >> k) & 1
if bit > 0 {
res.Add(&res, &one)
resIsZero = false
}
}
return res
}
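// A small illustration (not part of the original file), assuming the
// package's field import: uInt16ToBaseX spreads the bits of x over the
// powers of the base, e.g. x = 0b101 with base 13 gives 1*13^2 + 0*13 + 1 = 170.
func uInt16ToBaseXSketch() field.Element {
	base := field.NewElement(13)
	return uInt16ToBaseX(0b101, &base) // 170
}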
// cutAndStitch receives a set of numbers and cuts/stitches them into chunks of the given length.
// Example; if the chunk length is 8 and the number of chunks is 3,
// then (15,3,6) is organized as (8,7,1,2,6);
// 15 --> 8,7 and 3 --> 1,2 (because stitching 7 and 1 completes a chunk of 8)
func cutAndStitch(nByte []field.Element, nbChunk, lenChunk int) (b [][]field.Element) {
missing := uint64(lenChunk)
b = make([][]field.Element, nbChunk)
for i := range nByte {
var a []field.Element
curr := nByte[i].Uint64()
for curr != 0 {
if curr >= missing {
a = append(a, field.NewElement(missing))
curr = curr - missing
missing = uint64(lenChunk)
} else {
a = append(a, field.NewElement(curr))
missing = missing - curr
curr = 0
}
}
// the message to hash is big-endian, so the CLD should be in big-endian order.
// Thus add the zeroes at the beginning, if we have fewer than nbChunk.
for len(a) < nbChunk {
a = append([]field.Element{field.Zero()}, a...)
}
for j := 0; j < nbChunk; j++ {
b[j] = append(b[j], a[j])
}
}
return b
}
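// A plain-int rerun (not part of the original file) of the example in the
// comment above, flattening the chunks instead of grouping them per input:
// cutAndStitchSketch([]int{15, 3, 6}, 8) returns (8, 7, 1, 2, 6).
func cutAndStitchSketch(nByte []int, lenChunk int) (flat []int) {
	missing := lenChunk
	for _, curr := range nByte {
		for curr != 0 {
			if curr >= missing {
				flat = append(flat, missing)
				curr -= missing
				missing = lenChunk // a full chunk was completed
			} else {
				flat = append(flat, curr)
				missing -= curr
				curr = 0
			}
		}
	}
	return flat
}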
// decomposeByLength receives the lengths of the slices and decomposes the element into slices with the given lengths.
// The decomposition is little-endian. Slices with a given length of zero are set to zero (these appear at the beginning).
func decomposeByLength(a field.Element, lenA int, givenLen []int) (slices []field.Element) {
//sanity check
s := 0
for i := range givenLen {
s = s + givenLen[i]
}
if s != lenA {
utils.Panic("input can not be decomposed to the given lengths")
}
b := a.Bytes()
bytes := b[32-lenA:]
slices = make([]field.Element, len(givenLen))
for i := range givenLen {
if givenLen[i] == 0 {
slices[i] = field.Zero()
} else {
b := bytes[:givenLen[i]]
x := 0
s := uint64(0)
for j := 0; j < givenLen[i]; j++ {
s = s | uint64(b[j])<<x
x = x + 8
}
slices[i] = field.NewElement(s)
bytes = bytes[givenLen[i]:]
}
}
return slices
}
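// A small illustration (not part of the original file), assuming the
// package's field import: for a = 0x0102030405 (lenA = 5) and
// givenLen = (2, 3), the big-endian byte string (01, 02, 03, 04, 05) is
// chunked as (01, 02) and (03, 04, 05), and each chunk is read
// little-endian, giving the slices 0x0201 = 513 and 0x050403 = 328707.
func decomposeByLengthSketch() []field.Element {
	a := field.NewElement(0x0102030405)
	return decomposeByLength(a, 5, []int{2, 3})
}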
// AccReachedTargetValue receives a column and indicates where the accumulation reaches the target value.
// It panics if at any point the accumulation goes beyond the target value.
func AccReachedTargetValue(column []field.Element, targetVal int) (reachedTarget []field.Element) {
s := 0
for j := range column {
s = s + int(column[j].Uint64())
if s > targetVal {
utils.Panic("Should not reach a value larger than target value")
}
if s == targetVal {
reachedTarget = append(reachedTarget, field.One())
s = 0
} else {
reachedTarget = append(reachedTarget, field.Zero())
}
}
return reachedTarget
}
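// A quick sanity sketch (not part of the original file): the running sum
// over (2, 2, 4, 8) hits the target 8 at rows 2 and 3 and restarts after
// each hit, so the returned indicator is (0, 0, 1, 1).
func accReachedTargetSketch() []field.Element {
	col := []field.Element{
		field.NewElement(2), field.NewElement(2),
		field.NewElement(4), field.NewElement(8),
	}
	return AccReachedTargetValue(col, 8)
}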
// makeSpaghetti receives multiple matrices and a filter; it returns the spaghetti form of the matrices.
func makeSpaghetti(filter [][]field.Element, matrix ...[][]field.Element) (spaghetti [][]field.Element) {
spaghetti = make([][]field.Element, len(matrix))
// populate spaghetties
for i := range filter[0] {
for j := range filter {
if filter[j][i].Uint64() == 1 {
for k := range matrix {
spaghetti[k] = append(spaghetti[k], matrix[k][j][i])
}
}
}
}
return spaghetti
}
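// A small illustration (not part of the original file): with one matrix of
// two columns (10, 11) and (20, 21), and filter columns (1, 0) and (1, 1),
// the spaghetti walks row by row across the columns and keeps the filtered
// entries, giving (10, 20, 21).
func makeSpaghettiSketch() [][]field.Element {
	one, zero := field.One(), field.Zero()
	filter := [][]field.Element{{one, zero}, {one, one}}
	matrix := [][]field.Element{
		{field.NewElement(10), field.NewElement(11)},
		{field.NewElement(20), field.NewElement(21)},
	}
	return makeSpaghetti(filter, matrix) // [[10, 20, 21]]
}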
// makeMatrix receives a vector and a filter and folds the vector into a matrix.
func makeMatrix(filter [][]field.Element, myVector []field.Element) (matrix [][]field.Element) {
matrix = make([][]field.Element, len(filter))
for j := range matrix {
matrix[j] = make([]field.Element, len(filter[0]))
}
// populate matrix
k := 0
for i := range filter[0] {
for j := range filter {
if filter[j][i].Uint64() == 1 && k < len(myVector) {
matrix[j][i] = myVector[k]
k++
}
}
}
return matrix
}
// decomposeByLengthFr receives the lengths of the slices (as field elements) and decomposes the element into slices with the given lengths.
// The decomposition is little-endian. Slices with a given length of zero are set to zero (these appear at the beginning).
func decomposeByLengthFr(a field.Element, lenA int, givenLen []field.Element) (slices []field.Element) {
//sanity check
s := 0
for i := range givenLen {
s = s + int(givenLen[i].Uint64())
}
if s != lenA {
utils.Panic("input can not be decomposed to the given lengths")
}
b := a.Bytes()
bytes := b[32-lenA:]
slices = make([]field.Element, len(givenLen))
for i := range givenLen {
if givenLen[i] == field.Zero() {
slices[i] = field.Zero()
} else {
b := bytes[:givenLen[i].Uint64()]
x := 0
s := uint64(0)
for j := 0; j < int(givenLen[i].Uint64()); j++ {
s = s | uint64(b[j])<<x
x = x + 8
}
slices[i] = field.NewElement(s)
bytes = bytes[givenLen[i].Uint64():]
}
}
return slices
}
// SlicesBeToLeUint4 converts a slice of uint4 from big-endian to little-endian.
// Since the BE-to-LE conversion operates over bytes,
// two adjacent uint4 elements keep their order,
// e.g., (a0,a1,a2,a3,....,a14,a15)
// is converted to (a14,a15, ....,a2,a3,a0,a1)
func SlicesBeToLeUint4(s []field.Element) []field.Element {
i := 0
var a []field.Element
for i < len(s) {
a = append(a, s[len(s)-1-i-1])
a = append(a, s[len(s)-1-i])
i = i + 2
}
return a
}
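// A quick check (not part of the original file) on four nibbles: the pair
// order reverses while each pair keeps its internal order, so
// (a0, a1, a2, a3) becomes (a2, a3, a0, a1).
func slicesBeToLeSketch() []field.Element {
	s := []field.Element{
		field.NewElement(0), field.NewElement(1),
		field.NewElement(2), field.NewElement(3),
	}
	return SlicesBeToLeUint4(s) // (2, 3, 0, 1)
}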
// SlicesBeToLeHandle converts a slice of uint4 columns from big-endian to little-endian.
func SlicesBeToLeHandle(s []ifaces.Column) []ifaces.Column {
i := 0
var a []ifaces.Column
for i < len(s) {
a = append(a, s[len(s)-1-i-1])
a = append(a, s[len(s)-1-i])
i = i + 2
}
return a
}

View File

@@ -1,326 +0,0 @@
package datatransfer
import (
"crypto/rand"
"math/big"
mrand "math/rand"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/sha3"
)
type table struct {
data data
info info
hasInfoTrace bool
}
type data struct {
hashNum, index, nByte, toHash []int
limb [][16]byte
}
type info struct {
hashNum []int
hashLo, hashHi [][16]byte
isHashLo, isHashHi []int
}
// AssignGBMfromTable is used for testing.
// It assigns the gbm (the arithmetization columns relevant to keccak) from a random table;
// the effective size of the module (with no padding) and the number of extracted hashes are derived from that table.
// It is exported since we use it for testing in different packages.
func AssignGBMfromTable(run *wizard.ProverRuntime, gbm *generic.GenericByteModule) {
targetSize := gbm.Data.Limb.Size()
// To support the edge cases, the assignment may not complete the column
size := targetSize - targetSize/15
table := &table{}
*table = tableForTest(size)
limb := table.data.limb
u := make([]field.Element, size)
for i := range limb {
u[i].SetBytes(limb[i][:])
}
a := smartvectors.ForTest(table.data.hashNum...)
aa := smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(a), targetSize)
run.AssignColumn(gbm.Data.HashNum.GetColID(), aa)
b := smartvectors.ForTest(table.data.index...)
bb := smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(b), targetSize)
run.AssignColumn(gbm.Data.Index.GetColID(), bb)
c := smartvectors.ForTest(table.data.nByte...)
cc := smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(c), targetSize)
run.AssignColumn(gbm.Data.NBytes.GetColID(), cc)
run.AssignColumn(gbm.Data.Limb.GetColID(), smartvectors.LeftZeroPadded(u, targetSize))
d := smartvectors.ForTest(table.data.toHash...)
dd := smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(d), targetSize)
run.AssignColumn(gbm.Data.TO_HASH.GetColID(), dd)
// assign Info trace
if gbm.Info != (generic.GenInfoModule{}) {
hashLo := table.info.hashLo
hashHi := table.info.hashHi
v := make([]field.Element, len(hashLo))
w := make([]field.Element, len(hashHi))
// reverse the order of the bytes, so that SetBytes() (which is big-endian) can be used
var s, t [maxNByte]byte
var hashLoLE, hashHiLE [][maxNByte]byte
for i := range hashLo {
for j := range hashLo[0] {
s[j] = hashLo[i][maxNByte-1-j]
t[j] = hashHi[i][maxNByte-1-j]
}
hashLoLE = append(hashLoLE, s)
hashHiLE = append(hashHiLE, t)
}
for i := range hashLo {
v[i].SetBytes(hashLoLE[i][:])
w[i].SetBytes(hashHiLE[i][:])
}
run.AssignColumn(gbm.Info.HashLo.GetColID(), smartvectors.LeftZeroPadded(v, targetSize))
run.AssignColumn(gbm.Info.HashHi.GetColID(), smartvectors.LeftZeroPadded(w, targetSize))
if len(gbm.Info.HashNum.GetColID()) != 0 {
t := smartvectors.ForTest(table.info.hashNum...)
tt := smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(t), targetSize)
run.AssignColumn(gbm.Info.HashNum.GetColID(), tt)
}
z := smartvectors.ForTest(table.info.isHashLo...)
run.AssignColumn(gbm.Info.IsHashLo.GetColID(), smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(z), targetSize))
run.AssignColumn(gbm.Info.IsHashHi.GetColID(), smartvectors.LeftZeroPadded(smartvectors.IntoRegVec(z), targetSize))
}
}
// CommitGBM is used for testing, it commits to the gbm columns,
// i.e., the set of arithmetization columns relevant to keccak.
// It is exported since we are using it for testing in different packages.
func CommitGBM(
comp *wizard.CompiledIOP,
round int,
gbmDef generic.GenericByteModuleDefinition,
size int,
) (gbm generic.GenericByteModule) {
gbm.Data.HashNum = comp.InsertCommit(round, gbmDef.Data.HashNum, size)
gbm.Data.Index = comp.InsertCommit(round, gbmDef.Data.Index, size)
gbm.Data.Limb = comp.InsertCommit(round, gbmDef.Data.Limb, size)
gbm.Data.NBytes = comp.InsertCommit(round, gbmDef.Data.NBytes, size)
if gbmDef.Data.HashNum != generic.PHONEY_RLP.Data.HashNum {
gbm.Data.TO_HASH = comp.InsertCommit(round, gbmDef.Data.TO_HASH, size)
} else {
gbm.Data.TO_HASH = comp.InsertCommit(round, ifaces.ColIDf("To-Hash"), size)
}
if gbmDef.Info != (generic.InfoDef{}) {
if len(gbmDef.Info.HashNum) != 0 {
gbm.Info.HashNum = comp.InsertCommit(round, gbmDef.Info.HashNum, size)
}
gbm.Info.HashLo = comp.InsertCommit(round, gbmDef.Info.HashLo, size)
gbm.Info.HashHi = comp.InsertCommit(round, gbmDef.Info.HashHi, size)
gbm.Info.IsHashLo = comp.InsertCommit(round, gbmDef.Info.IsHashLo, size)
gbm.Info.IsHashHi = comp.InsertCommit(round, gbmDef.Info.IsHashHi, size)
}
return gbm
}
// tableForTest generates random gbm tables for the test
func tableForTest(size int) (t table) {
numHash := size / 7
// it fills up DataTrace and outputs the inputs for hashes
msg, chosens := dataTrace(&t, numHash, size)
// it fills up the InfoTrace
infoTrace(&t, numHash, msg, chosens)
// set hasInfoTrace to true
t.hasInfoTrace = true
return t
}
// It fills up the data trace of the table.
func dataTrace(t *table, numHash, size int) ([][]byte, []int) {
inLen := 0 // the total size of 'DataTrace'
// choose the limbs for each hash
// we set the limbs to fewer than LENGTH bytes and then pad them to get LENGTH bytes (exactly like the zkEVM)
limbs := make([][][]byte, numHash)
//at the same time build the hash inputs
msg := make([][]byte, numHash)
s := make([]int, numHash)
for i := 0; i < numHash; i++ {
// added +1 to prevent edge-cases
nlimb := mrand.Intn(size-(numHash-i-1)*5-inLen) + 1 //nolint
if i == numHash-1 {
nlimb = size - inLen
}
limbs[i] = make([][]byte, nlimb)
s[i] = 0
for j := range limbs[i] {
// for big tests
limbs[i][j] = make([]byte, mrand.Intn(maxNByte)+1) //nolint
// for small tests
//limbs[i][j] = make([]byte, 1) //nolint
_, err := rand.Read(limbs[i][j])
if err != nil {
logrus.Fatalf("error while generating random bytes: %s", err)
}
s[i] += len(limbs[i][j])
}
inLen += nlimb
}
if inLen != size {
utils.Panic("size of the table expected to be %v but it is %v ", size, inLen)
}
// fill up the table 'DataTrace'
t.data.hashNum = make([]int, inLen)
t.data.index = make([]int, inLen)
t.data.limb = make([][16]byte, inLen)
t.data.nByte = make([]int, inLen)
t.data.toHash = make([]int, inLen)
ctr := 0
var chosen []int
for k := 0; k < numHash; k++ {
nBig, _ := rand.Int(rand.Reader, big.NewInt(2))
bit := nBig.Uint64()
if bit == 1 {
chosen = append(chosen, k+1)
}
for j := range limbs[k] {
t.data.hashNum[ctr+j] = k + 1
t.data.index[ctr+j] = j
t.data.toHash[ctr+j] = int(bit)
t.data.limb[ctr+j] = toByte16(limbs[k][j])
t.data.nByte[ctr+j] = len(limbs[k][j])
}
ctr += len(limbs[k])
}
if ctr != inLen {
panic("the length of the table is not consistent with HASH_NUM and LIMB")
}
if len(msg) != numHash {
panic("needs one message per hash")
}
// get the inputs for the hashes
for i := range msg {
for j := range limbs[i] {
msg[i] = append(msg[i], limbs[i][j]...)
}
if len(msg[i]) != s[i] {
utils.Panic("message is not set to the right length, message length %v, what it should be %v", len(msg[i]), s[i])
}
}
return msg, chosen
}
// It fills up the info trace of the table.
func infoTrace(t *table, numHash int, msg [][]byte, chosen []int) {
out := t.info
out.hashNum = make([]int, numHash)
out.hashLo = make([][16]byte, numHash)
out.hashHi = make([][16]byte, numHash)
out.isHashLo = make([]int, numHash)
out.isHashHi = make([]int, numHash)
// sanity check
if len(msg) != numHash {
panic(" needs one message per hash")
}
for i := range out.hashNum {
out.hashNum[i] = i + 1
// compute the hash for each msg
h := sha3.NewLegacyKeccak256()
h.Write(msg[i])
outHash := h.Sum(nil)
//assign Hash_HI and Hash_LOW
if len(outHash) != 2*maxNByte {
panic("can not cut the hash-output into Two Byte16")
}
copy(out.hashHi[i][:], outHash[:maxNByte])
copy(out.hashLo[i][:], outHash[maxNByte:])
for _, choose := range chosen {
if out.hashNum[i] == choose {
out.isHashLo[i] = 1
out.isHashHi[i] = 1
}
}
}
t.info = out
}
// toByte16 extends a short slice to a [16]byte by right-padding with zeroes.
func toByte16(b []byte) [16]byte {
if len(b) > maxNByte {
utils.Panic("the length of input should not be greater than %v", maxNByte)
}
n := maxNByte - len(b)
a := make([]byte, n)
var c [maxNByte]byte
b = append(b, a...)
copy(c[:], b)
return c
}
// blocksFromIPadd extracts the blocks from the importAndPadd module, so that they can be checked against the permutation trace.
func blocksFromIPadd(
run *wizard.ProverRuntime,
iPadd importAndPadd,
blockSize uint64,
) [][]byte {
witSize := smartvectors.Density(iPadd.nByte.GetColAssignment(run))
eNByte := iPadd.nByte.GetColAssignment(run).IntoRegVecSaveAlloc()
eCleanLimb := iPadd.cleanLimb.GetColAssignment(run).IntoRegVecSaveAlloc()
isNewHash := iPadd.isNewHash.GetColAssignment(run).IntoRegVecSaveAlloc()
isEndOfBlock := make([]field.Element, witSize)
ctr := 0
s := eNByte[0].Uint64()
var block [][]byte
var stream []byte
nbyte := eNByte[0].Uint64()
limbBytes := eCleanLimb[0].Bytes()
usefulBytes := limbBytes[32-nbyte:]
stream = append(stream, usefulBytes[:nbyte]...)
for j := 1; j < witSize; j++ {
// sanity check
if isNewHash[j] == field.One() && s != 0 {
utils.Panic(" the last block should be complete before launching a new hash")
}
nbyte := eNByte[j].Uint64()
s = s + nbyte
limbBytes := eCleanLimb[j].Bytes()
usefulBytes := limbBytes[32-nbyte:]
if s > blockSize || s == blockSize {
s = s - blockSize
res := usefulBytes[:(nbyte - s)]
newBlock := append(stream, res...)
block = append(block, newBlock)
stream = usefulBytes[(nbyte - s):nbyte]
isEndOfBlock[j] = field.One()
ctr++
} else {
stream = append(stream, usefulBytes[:nbyte]...)
}
}
return block
}

View File

@@ -1,77 +0,0 @@
package dedicated
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
)
/*
InsertIsTargetValue is a query indicating where a target value appears in a column.
It receives three main inputs;
- the TargetValue
- columnA, the column subject to the search
- columnB, a binary column that is 1 where columnA equals TargetValue, and zero everywhere else
The query asserts that columnB indeed has the right form, namely;
for every row-index i, columnB[i] = 1 iff columnA[i] = TargetValue.
Note: the query does not check that colB is binary.
*/
func InsertIsTargetValue(
comp *wizard.CompiledIOP,
round int,
queryName ifaces.QueryID,
targetVal field.Element,
colA ifaces.Column,
colB any,
) {
// declare the new column colC
colC := comp.InsertCommit(round, ifaces.ColIDf(string(queryName)), colA.Size())
// to have colB[i] = 1 iff colA[i] = targetValue,
// impose the three following constraints (where t = targetValue - colA)
//
// 1. t * (t * colC -1) =0
//
// 2. t * colB = 0
//
// 3. (t * colC + colB - 1) = 0
//
// t := targetVal - colA
t := symbolic.Sub(targetVal, colA)
// if t[i] !=0 ---> colC[i] = t^(-1)
// i.e., t * (t * colC -1) =0
expr := symbolic.Mul(symbolic.Sub(symbolic.Mul(t, colC), 1), t)
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", string(queryName), 1), expr)
// t * colB = 0
expr = symbolic.Mul(t, colB)
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", string(queryName), 2), expr)
// (t * colC + colB - 1) = 0
expr = symbolic.Sub(symbolic.Add(symbolic.Mul(t, colC), colB), 1)
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", string(queryName), 3), expr)
comp.SubProvers.AppendToInner(round,
func(run *wizard.ProverRuntime) {
assignInverse(run, targetVal, colA, colC)
},
)
}
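// A prover-side sketch (not part of the original file) of a witness
// satisfying the three constraints above, row by row: with t = targetVal - colA,
// a non-zero t forces colC = 1/t and colB = 0 (so t*colC + colB - 1 = 0),
// while t = 0 frees colC and forces colB = 1 via the third constraint.
func isTargetValueWitnessSketch(colA, targetVal field.Element) (colB, colC field.Element) {
	var t field.Element
	t.Sub(&targetVal, &colA)
	if t.IsZero() {
		return field.One(), field.Zero() // constraint 3 reads colB - 1 = 0
	}
	colC.Inverse(&t) // constraint 1: t*(t*colC - 1) = 0
	return field.Zero(), colC
}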
func assignInverse(run *wizard.ProverRuntime, targetVal field.Element, colA, colC ifaces.Column) {
cola := colA.GetColAssignment(run).IntoRegVecSaveAlloc()
colCWit := make([]field.Element, len(cola))
var res field.Element
for i := range cola {
res.Sub(&targetVal, &cola[i])
colCWit[i].Inverse(&res)
}
run.AssignColumn(colC.GetColID(), smartvectors.NewRegular(colCWit))
}

View File

@@ -1,42 +0,0 @@
package dedicated
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/stretchr/testify/assert"
)
func makeTestCaseIsTargetValue() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
size := 8
var colA, colB ifaces.Column
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
colA = comp.InsertCommit(round, ifaces.ColIDf("ColA"), size)
colB = comp.InsertCommit(round, ifaces.ColIDf("ColB"), size)
InsertIsTargetValue(comp, round, ifaces.QueryIDf("IsTarget"), field.NewElement(3), colA, colB)
}
prover = func(run *wizard.ProverRuntime) {
cola := vector.ForTest(3, 4, 5, 9, 1, 9, 7, 3)
colb := vector.ForTest(1, 0, 0, 0, 0, 0, 0, 1)
run.AssignColumn(colA.GetColID(), smartvectors.NewRegular(cola))
run.AssignColumn(colB.GetColID(), smartvectors.NewRegular(colb))
}
return define, prover
}
func TestIsZeroIff(t *testing.T) {
define, prover := makeTestCaseIsTargetValue()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,96 +0,0 @@
package dedicated
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
)
// InsertPartitionedIP registers a partitioned inner-product (PIP) query.
/*
A PIP query is an inner-product query of two vectors over a given partition.
Example:
partition := (0,..,1,0,...,1,...,0,..1),
where it is 1 on positions k+1,l+1,n.
Note that a partition ends with 1.
colA := (a0,..,ak,b(k+1),...,bl,c(l+1),..cn)
colB := (a'0,..,a'k,b'(k+1),...,b'l,c'(l+1),..c'n)
Then ipTracker is computed as follows;
- ipTracker[0] = colA[0]*colB[0]
- ipTracker[i]= colA[i]*colB[i] + ipTracker[i-1]*(1-partition[i])
Where i stands for the row-index.
The result of PIP is stored in i-th row of ipTracker where partition[i+1] == 1.
Thus at position (k,l,n-1) we respectively have (\sum_i a_i*a'_i,\sum_i b_i*b'_i,\sum_i c_i*c'_i)
*/
func InsertPartitionedIP(
comp *wizard.CompiledIOP,
round int,
colA, colB ifaces.Column,
partition, ipTracker ifaces.Column,
) {
one := symbolic.NewConstant(1)
// Compute the partitioned inner-product
cola := ifaces.ColumnAsVariable(colA)
colb := ifaces.ColumnAsVariable(colB)
shiftTracker := ifaces.ColumnAsVariable(column.Shift(ipTracker, -1))
// ipTracker[i] = (colA[i] * colB[i]) + ipTracker[i-1]*(1-partition[i]).
expr1 := ifaces.ColumnAsVariable(ipTracker).
Sub((cola.Mul(colb)).
Add(shiftTracker.Mul(one.Sub(ifaces.ColumnAsVariable(partition)))),
)
comp.InsertGlobal(round, ifaces.QueryIDf("PIP_%v", ipTracker.GetColID()), expr1)
// ipTracker[0] =colA[0]*colB[0]
comp.InsertLocal(round,
ifaces.QueryIDf("PIP_Local_%v", ipTracker.GetColID()),
ifaces.ColumnAsVariable(ipTracker).
Sub(cola.Mul(colb)),
)
comp.SubProvers.AppendToInner(round,
func(run *wizard.ProverRuntime) {
assignPIP(run, colA, colB, partition, ipTracker)
},
)
}
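// A worked example (not part of the original file) of the tracker recurrence
// above, over plain ints: colA = (1, 2, 3, 4), colB = (2, 2, 2, 2) and
// partition = (0, 0, 1, 1) split the rows into the groups {0,1}, {2}, {3};
// the tracker runs (2, 6, 6, 8) and the group results 6, 6, 8 sit on the
// last row of each group.
func pipTrackerSketch() []int {
	colA := []int{1, 2, 3, 4}
	colB := []int{2, 2, 2, 2}
	partition := []int{0, 0, 1, 1}
	tracker := make([]int, len(colA))
	tracker[0] = colA[0] * colB[0]
	for i := 1; i < len(colA); i++ {
		tracker[i] = colA[i]*colB[i] + tracker[i-1]*(1-partition[i])
	}
	return tracker
}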
// It assigns IPTracker for PIP.
func assignPIP(run *wizard.ProverRuntime, colA, colB, partition, ipTracker ifaces.Column) {
cola := colA.GetColAssignment(run).IntoRegVecSaveAlloc()
colb := colB.GetColAssignment(run).IntoRegVecSaveAlloc()
partitionWit := partition.GetColAssignment(run).IntoRegVecSaveAlloc()
one := field.One()
var notPartition field.Element
witSize := smartvectors.Density(run.GetColumn(colA.GetColID()))
var u, v field.Element
ipTrackerWit := make([]field.Element, witSize)
if witSize != 0 {
ipTrackerWit[0].Mul(&cola[0], &colb[0])
for i := 1; i < witSize; i++ {
u.Mul(&cola[i], &colb[i])
notPartition.Sub(&one, &partitionWit[i])
v.Mul(&ipTrackerWit[i-1], &notPartition)
ipTrackerWit[i].Add(&u, &v)
}
}
run.AssignColumn(ipTracker.GetColID(), smartvectors.RightZeroPadded(ipTrackerWit, colA.Size()))
}

View File

@@ -1,46 +0,0 @@
package dedicated
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/stretchr/testify/assert"
)
// makeTestCasePIP generates the Define and Assign functions of the partitioned inner-product query, for testing.
func makeTestCasePIP() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
size := 8
var colA, colB, partition, ipTracker ifaces.Column
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
// commit to the colA,colB,partition,ipTracker
colA = comp.InsertCommit(round, ifaces.ColIDf("ColA"), size)
colB = comp.InsertCommit(round, ifaces.ColIDf("ColB"), size)
partition = comp.InsertCommit(round, ifaces.ColIDf("Partition"), size)
ipTracker = comp.InsertCommit(round, ifaces.ColIDf("IPTracker"), size)
InsertPartitionedIP(comp, round, colA, colB, partition, ipTracker)
}
prover = func(run *wizard.ProverRuntime) {
// assign the columns
run.AssignColumn(colA.GetColID(), smartvectors.ForTest(1, 0, 2, 3, 1, 4, 3, 2))
run.AssignColumn(colB.GetColID(), smartvectors.ForTest(0, 0, 0, 1, 1, 1, 2, 1))
run.AssignColumn(partition.GetColID(), smartvectors.ForTest(1, 0, 0, 1, 0, 0, 1, 1))
}
return define, prover
}
func TestPIPModule(t *testing.T) {
define, prover := makeTestCasePIP()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}

View File

@@ -1,155 +0,0 @@
package dedicated
import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
)
// InsertIsSpaghetti checks that spaghettiOfMatrix is the filtered spaghetti of matrix.
// By spaghetti form we mean:
// passing through the matrix row by row, appending the elements indicated by the filters to spaghettiOfMatrix.
//
// The filter should have a special form over the rows; starting with zeroes and ending with ones.
// The query can handle multiple matrices over the same filter.
// Note: the query does not check the form of the filter. Thus, it should be checked before calling the query.
func InsertIsSpaghetti(comp *wizard.CompiledIOP, round int, queryName ifaces.QueryID,
matrix [][]ifaces.Column, filter []ifaces.Column, spaghettiOfMatrix []ifaces.Column, spaghettiSize int) {
size := filter[0].Size()
// tags for the filtered element of the matrix
tags := make([]ifaces.Column, len(filter))
// projection of the filter over the spaghettiOfMatrix
projectedFilter := make([]ifaces.Column, len(filter))
// Declare the new columns
spaghettiOfTags := comp.InsertCommit(round,
ifaces.ColIDf("%v_%v", queryName, "TagSpaghetti"), spaghettiSize)
for j := range filter {
projectedFilter[j] = comp.InsertCommit(round,
ifaces.ColIDf("%v_%v_%v", queryName, "FilterSpaghetti", j), spaghettiSize)
tags[j] = comp.InsertCommit(round, ifaces.ColIDf("%v_%v_%v", queryName, "Tags", j), size)
}
// Constraints over the tags; tag increases by one over the filtered elements
for j := 1; j < len(filter); j++ {
// tags[j]-tags[j-1] is 1 if filter[j-1]=1
a := symbolic.Sub(tags[j], tags[j-1])
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v-%v", queryName, 1, j),
symbolic.Mul(symbolic.Mul(symbolic.Sub(1, a)),
filter[j-1]))
// We have to go to the previous row if filter[j] = 1 and filter[j-1]=0.
// In this case, tags[j]- shift(tags[len(matrix)-1],-1) should be 1.
b := symbolic.Sub(tags[j], column.Shift(tags[len(filter)-1], -1))
expr2 := symbolic.Mul(symbolic.Sub(b, 1), symbolic.Mul(filter[j],
symbolic.Mul(symbolic.Sub(1, filter[j-1]))))
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v_%v", queryName, 2, j), expr2)
}
// Constraints over spaghettiTags; it increases by 1.
// spaghettiTag - shift(spaghettiTag , -1) = 1
dif := symbolic.Sub(spaghettiOfTags, column.Shift(spaghettiOfTags, -1))
isActive := symbolic.NewConstant(0)
for i := range filter {
isActive = symbolic.Add(isActive, projectedFilter[i])
}
comp.InsertGlobal(round, ifaces.QueryIDf("%v_%v", queryName, "SpaghettiOfTag_Increases"),
symbolic.Mul(symbolic.Sub(dif, 1), isActive))
comp.SubProvers.AppendToInner(round,
func(run *wizard.ProverRuntime) {
assignSpaghetti(run, tags, filter,
spaghettiOfTags, projectedFilter)
},
)
matrices := make([][]ifaces.Column, len(matrix[0]))
for i := range matrix[0] {
for j := range matrix {
matrices[i] = append(matrices[i], matrix[j][i])
}
matrices[i] = append(matrices[i], tags[i])
}
colB := append(spaghettiOfMatrix, spaghettiOfTags)
// project matrix and tags over the spaghetti version
for i := range matrix[0] {
comp.InsertInclusionDoubleConditional(round, ifaces.QueryIDf("%v_LookUp_Matrix-In-Vector_%v", queryName, i),
colB, matrices[i], projectedFilter[i], filter[i])
comp.InsertInclusionDoubleConditional(round, ifaces.QueryIDf("%v_LookUp_Vector-In-Matrix_%v", queryName, i),
matrices[i], colB, filter[i], projectedFilter[i])
}
}
func assignSpaghetti(run *wizard.ProverRuntime, tags, filter []ifaces.Column,
spaghettiOfTags ifaces.Column, spaghettiOfFilters []ifaces.Column) {
witSize := smartvectors.Density(filter[0].GetColAssignment(run))
tagsWit := make([][]field.Element, len(filter))
filtersWit := make([][]field.Element, len(filter))
// populate filter
for i := range filter {
filtersWit[i] = make([]field.Element, witSize)
filtersWit[i] = filter[i].GetColAssignment(run).IntoRegVecSaveAlloc()[:witSize]
}
tag := uint64(1)
// populate tags
for i := range filtersWit[0] {
for j := range filtersWit {
if filtersWit[j][i].Uint64() != 0 {
tagsWit[j] = append(tagsWit[j], field.NewElement(tag))
tag++
} else {
tagsWit[j] = append(tagsWit[j], field.Zero())
}
}
}
for j := range tagsWit {
run.AssignColumn(tags[j].GetColID(), smartvectors.RightZeroPadded(tagsWit[j], filter[0].Size()))
}
var spaghettiOfTagWit []field.Element
spaghettiOfFiltersWit := make([][]field.Element, len(spaghettiOfFilters))
// populate spaghetties
for i := range filtersWit[0] {
for j := range filtersWit {
if filtersWit[j][i].Uint64() == 1 {
spaghettiOfTagWit = append(spaghettiOfTagWit, tagsWit[j][i])
}
for k := range filtersWit {
if k != j && filtersWit[k][i].Uint64() != 0 {
spaghettiOfFiltersWit[j] = append(spaghettiOfFiltersWit[j], field.Zero())
} else if k == j && filtersWit[j][i].Uint64() != 0 {
spaghettiOfFiltersWit[j] = append(spaghettiOfFiltersWit[j], filtersWit[j][i])
}
}
}
}
// assign the columns
run.AssignColumn(spaghettiOfTags.GetColID(), smartvectors.RightZeroPadded(spaghettiOfTagWit, spaghettiOfTags.Size()))
for j := range filter {
run.AssignColumn(spaghettiOfFilters[j].GetColID(), smartvectors.RightZeroPadded(spaghettiOfFiltersWit[j], spaghettiOfTags.Size()))
}
}

View File

@@ -1,123 +0,0 @@
package dedicated
import (
"crypto/rand"
"math/big"
"testing"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/stretchr/testify/assert"
)
func makeTestCaseIsSpaghetti() (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
round := 0
size := 64
spaghettiSize := 128
ncol := 6
nMatrix := 3
matrix := make([][]ifaces.Column, nMatrix)
spaghettiOfMatrix := make([]ifaces.Column, nMatrix)
filter := make([]ifaces.Column, ncol)
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
// declare the columns
for k := range matrix {
matrix[k] = make([]ifaces.Column, ncol)
spaghettiOfMatrix[k] = comp.InsertCommit(round, ifaces.ColIDf("SpaghettiOfMatrix_%v", k), spaghettiSize)
}
for i := range filter {
for k := range matrix {
matrix[k][i] = comp.InsertCommit(round, ifaces.ColIDf("Matrix_%v_%v", k, i), size)
}
filter[i] = comp.InsertCommit(round, ifaces.ColIDf("Filters_%v", i), size)
}
// insert the query
InsertIsSpaghetti(comp, round, ifaces.QueryIDf("IsSpaghetti"),
matrix, filter, spaghettiOfMatrix, spaghettiSize)
}
// assign matrix and filter (spaghettiOfMatrix is assigned by the query itself).
prover = func(run *wizard.ProverRuntime) {
matrixWit := make([][][]field.Element, nMatrix)
filtersWit := make([][]field.Element, ncol)
witSize := 7
for k := range matrix {
matrixWit[k] = make([][]field.Element, ncol)
for j := range filter {
matrixWit[k][j] = make([]field.Element, witSize)
for i := 0; i < witSize; i++ {
nBig, _ := rand.Int(rand.Reader, big.NewInt(int64(1024)))
a := nBig.Uint64()
matrixWit[k][j][i] = field.NewElement(a)
}
}
}
for j := range filter {
filtersWit[j] = make([]field.Element, witSize)
for i := 0; i < witSize; i++ {
// Such a filter has the right form for the query;
// starting with ones followed by zeroes
if i%7 == 0 {
filtersWit[j][i] = field.One()
}
}
}
for j := range filter {
run.AssignColumn(filter[j].GetColID(), smartvectors.RightZeroPadded(filtersWit[j], size))
for k := range matrix {
run.AssignColumn(matrix[k][j].GetColID(), smartvectors.RightZeroPadded(matrixWit[k][j], size))
}
}
for j := range matrix {
spaghetti := makeSpaghetti(filtersWit, matrixWit[j])
run.AssignColumn(spaghettiOfMatrix[j].GetColID(), smartvectors.RightZeroPadded(spaghetti[0], spaghettiSize))
}
}
return define, prover
}
func TestIsSpaghetti(t *testing.T) {
define, prover := makeTestCaseIsSpaghetti()
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
// It receives multiple matrices and a filter, and returns the spaghetti form of the matrices
func makeSpaghetti(filter [][]field.Element, matrix ...[][]field.Element) (spaghetti [][]field.Element) {
spaghetti = make([][]field.Element, len(matrix))
// populate the spaghetti columns
for i := range filter[0] {
for j := range filter {
if filter[j][i].Uint64() == 1 {
for k := range matrix {
spaghetti[k] = append(spaghetti[k], matrix[k][j][i])
}
}
}
}
return spaghetti
}
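A hypothetical in-package usage of makeSpaghetti, under the [column][row] indexing used above (the field values are illustrative):

// hypothetical in-package example
filter := [][]field.Element{
	{field.One(), field.Zero()}, // column 0: active at row 0
	{field.Zero(), field.One()}, // column 1: active at row 1
}
m := [][]field.Element{
	{field.NewElement(10), field.NewElement(11)}, // column 0
	{field.NewElement(20), field.NewElement(21)}, // column 1
}
out := makeSpaghetti(filter, m)
_ = out // out[0] == [10, 21]: row 0 selects column 0, row 1 selects column 1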

View File

@@ -3,13 +3,9 @@ package generic
import (
"bytes"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/crypto/sha2"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
)
// GenericByteModule encodes the limbs with a left alignment approach
@@ -92,141 +88,6 @@ func NewGenericByteModule(
return res
}
// Implements the trace-providing mechanism for the generic byte module.
// Optionally, it generates traces for the different hashes that might be applied over the limbs.
func (gen *GenericByteModule) AppendTraces(
run *wizard.ProverRuntime,
genTrace *GenTrace,
trace ...interface{},
) {
data := gen.Data
info := gen.Info
// Extract the assignments through a shallow copy.
hashNum := gen.extractCol(run, gen.Data.HashNum)
refCol := gen.Data.HashNum
index := gen.extractCol(run, gen.Data.Index, refCol)
limbs := gen.extractCol(run, gen.Data.Limb, refCol)
nBytes := gen.extractCol(run, gen.Data.NBytes, refCol)
toHash := gen.extractCol(run, data.TO_HASH, refCol)
// if info is not empty, extract it
if info != (GenInfoModule{}) {
hashLo := gen.extractCol(run, info.HashLo)
genTrace.HashLo = hashLo
hashHi := gen.extractCol(run, info.HashHi, info.HashLo)
genTrace.HashHi = hashHi
isHashLo := gen.extractCol(run, info.IsHashLo, info.HashLo)
genTrace.IsHashLo = isHashLo
isHashHi := gen.extractCol(run, info.IsHashHi, info.HashLo)
genTrace.IsHashHi = isHashHi
hashNum_Info := gen.extractCol(run, info.HashNum, info.HashLo)
genTrace.HashNum_Info = hashNum_Info
}
stream := bytes.Buffer{}
streamSha2 := bytes.Buffer{}
limbSerialized := [32]byte{}
one := field.One()
cleanLimb := make([]field.Element, len(hashNum))
for pos := range hashNum {
// Check if the current limb can be appended
if toHash != nil && (toHash[pos] != one) {
continue
}
// Sanity-check: the index being zero must be equivalent to an
// empty stream
if index[pos].IsZero() != (stream.Len() == 0) && index[pos].IsZero() != (streamSha2.Len() == 0) {
utils.Panic(
"the index==0 should mean an empty stream, index %v, stream.Len() %v\n",
index[pos], stream.Len())
}
// Extract the limb, which is left aligned to the 16-th byte
limbSerialized = limbs[pos].Bytes()
byteSize := nBytes[pos].Uint64()
res := limbSerialized[GEN_LEFT_ALIGNMENT : GEN_LEFT_ALIGNMENT+byteSize]
cleanLimb[pos].SetBytes(res)
stream.Write(
res,
)
streamSha2.Write(
res,
)
// If we are on the last limb or if the hashNum increases, it means
// that we need to close the hash to start the next one
if pos+1 == len(hashNum) || index[pos+1].Uint64() == 0 {
for i := range trace {
switch v := trace[i].(type) {
case *keccak.PermTraces:
{
keccak.Hash(stream.Bytes(), v)
stream.Reset()
}
case *sha2.HashTraces:
{
sha2.Hash(streamSha2.Bytes(), v)
streamSha2.Reset()
}
default:
utils.Panic("other hashes are not supported")
}
}
}
}
genTrace.HashNum = hashNum
genTrace.Index = index
genTrace.Limb = limbs
genTrace.NByte = nBytes
genTrace.CleanLimb = cleanLimb
genTrace.TO_HASH = toHash
}
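The extraction above relies on the left-alignment convention for limbs. A hedged standalone sketch, assuming GEN_LEFT_ALIGNMENT = 16 (consistent with "left aligned to the 16-th byte" above):

package main

import (
	"fmt"

	"github.com/consensys/zkevm-monorepo/prover/maths/field"
)

// assumed value of GEN_LEFT_ALIGNMENT; the limb occupies the high half of
// the 32-byte big-endian serialization of the field element
const genLeftAlignment = 16

func main() {
	// a 3-byte limb 0xAABBCC, stored left-aligned within 16 bytes
	var limb field.Element
	limb.SetBytes(append([]byte{0xAA, 0xBB, 0xCC}, make([]byte, 13)...))
	ser := limb.Bytes() // [32]byte, big-endian
	clean := ser[genLeftAlignment : genLeftAlignment+3]
	fmt.Printf("% x\n", clean) // expected: aa bb cc
}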
// Extract a shallow copy of the active zone of a column, i.e., the unpadded
// area where the column encodes actual data.
// The optional refCol is used for a sanity check on the length of the effective window.
func (gen *GenericByteModule) extractCol(
run *wizard.ProverRuntime,
col ifaces.Column, refCol ...ifaces.Column,
) []field.Element {
// Fetches the smart-vector and delimits the active zone. Here we assume
// that all the columns are zero-prepended and have the same length.
col_ := run.Columns.MustGet(col.GetColID())
a := smartvectors.Window(col_)
m := smartvectors.Density(col_)
// sanity check on the effective window; the effective part of the column is the part without zero-padding.
if len(refCol) != 0 {
id := refCol[0].GetColID()
refColSV := run.Columns.MustGet(id)
lenRefCol := smartvectors.Density(refColSV)
if m != lenRefCol {
utils.Panic(
"column %v has different effective length from the reference column %v, length %v and %v",
col.GetColID(), id, m, lenRefCol)
}
}
// return the effective part of the column
return a
}
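A hedged sketch of the smartvectors semantics extractCol relies on, assuming Density returns the length of the explicitly assigned prefix and Window returns exactly that prefix for a zero-padded vector (these functions are used with that reading throughout this diff):

package main

import (
	"fmt"

	"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
	"github.com/consensys/zkevm-monorepo/prover/maths/field"
)

func main() {
	// an explicit prefix of length 3, zero-padded on the right up to size 8
	v := smartvectors.RightZeroPadded(
		[]field.Element{field.NewElement(5), field.NewElement(6), field.NewElement(7)},
		8,
	)
	fmt.Println(smartvectors.Density(v))     // expected: 3
	fmt.Println(len(smartvectors.Window(v))) // expected: 3
}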
// ScanStreams scans the receiver GenDataModule's assignment and returns the list
// of byte streams encoded in the assignment.
func (gdm *GenDataModule) ScanStreams(run *wizard.ProverRuntime) [][]byte {

View File

@@ -5,6 +5,7 @@ import (
"math/rand"
"github.com/consensys/zkevm-monorepo/prover/backend/files"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
@@ -13,7 +14,7 @@ import (
// It receives the columns hashNum and toHash and generates a GenDataModule.
func GenerateAndAssignGenDataModule(run *wizard.ProverRuntime, gdm *generic.GenDataModule,
hashNumInt, toHashInt []int, path ...string) {
hashNumInt, toHashInt []int, flag bool, path ...string) {
var (
size = gdm.Limb.Size()
@@ -45,8 +46,16 @@ func GenerateAndAssignGenDataModule(run *wizard.ProverRuntime, gdm *generic.GenD
toHash[i] = field.NewElement(uint64(toHashInt[i]))
hashNum[i] = field.NewElement(uint64(hashNumInt[i]))
numBytesInt, numBytesF := randNBytes(rng)
nBytes[i] = numBytesF
var numBytesInt int
var numBytesF field.Element
if flag {
numBytesInt, numBytesF = randNBytes(rng)
nBytes[i] = numBytesF
} else {
nBytes[i] = field.NewElement(16)
numBytesInt = 16
}
limbs[i] = randLimbs(rng, numBytesInt)
}
@@ -118,3 +127,65 @@ func CreateGenDataModule(
gbm.TO_HASH = createCol("TO_HASH")
return gbm
}
// CreateGenInfoModule is used for testing; it commits to the [generic.GenInfoModule] columns.
func CreateGenInfoModule(
comp *wizard.CompiledIOP,
name string,
size int,
) (gim generic.GenInfoModule) {
createCol := common.CreateColFn(comp, name, size)
gim.HashHi = createCol("HASH_HI")
gim.HashLo = createCol("HASH_LO")
gim.IsHashHi = createCol("IS_HASH_HI")
gim.IsHashLo = createCol("IS_HASH_LO")
return gim
}
// It embeds the expected hash (for the stream encoded inside gdm) inside the gim columns.
func GenerateAndAssignGenInfoModule(
run *wizard.ProverRuntime,
gim *generic.GenInfoModule,
gdm generic.GenDataModule,
isHashHi, isHashLo []int,
) {
var (
hashHi = common.NewVectorBuilder(gim.HashHi)
hashLo = common.NewVectorBuilder(gim.HashLo)
isHashHiCol = common.NewVectorBuilder(gim.IsHashHi)
isHashLoCol = common.NewVectorBuilder(gim.IsHashLo)
)
streams := gdm.ScanStreams(run)
var res [][32]byte
for _, stream := range streams {
res = append(res, keccak.Hash(stream))
}
ctrHi := 0
ctrLo := 0
for i := range isHashHi {
if isHashHi[i] == 1 {
hashHi.PushHi(res[ctrHi])
isHashHiCol.PushInt(1)
ctrHi++
} else {
hashHi.PushInt(0)
isHashHiCol.PushInt(0)
}
if isHashLo[i] == 1 {
hashLo.PushLo(res[ctrLo])
isHashLoCol.PushInt(1)
ctrLo++
} else {
hashLo.PushInt(0)
isHashLoCol.PushInt(0)
}
}
hashHi.PadAndAssign(run)
hashLo.PadAndAssign(run)
isHashHiCol.PadAndAssign(run)
isHashLoCol.PadAndAssign(run)
}
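A small sketch of the Hi/Lo convention assumed by the GenInfoModule columns: a 32-byte keccak digest splits into a high half (first 16 bytes) and a low half (last 16 bytes), each fitting in one field element. This mirrors the PushHi/PushLo usage above; the input value is illustrative.

package main

import (
	"fmt"

	"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
	"github.com/consensys/zkevm-monorepo/prover/maths/field"
)

func main() {
	digest := keccak.Hash([]byte("illustrative input"))
	var hi, lo field.Element
	hi.SetBytes(digest[:16]) // high half of the digest
	lo.SetBytes(digest[16:]) // low half of the digest
	fmt.Println(hi.String(), lo.String())
}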

View File

@@ -5,13 +5,13 @@ import (
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
projection "github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
commonconstraints "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common/common_constraints"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
@@ -22,13 +22,14 @@ const (
type GenericAccumulatorInputs struct {
MaxNumKeccakF int
Providers []generic.GenDataModule
ProvidersData []generic.GenDataModule
ProvidersInfo []generic.GenInfoModule
}
// The sub-module GenericAccumulator filters the data from different [generic.GenDataModule],
// The sub-module GenericDataAccumulator filters the data from different [generic.GenDataModule],
//
// and stitches them together to build a single module.
type GenericAccumulator struct {
type GenericDataAccumulator struct {
Inputs *GenericAccumulatorInputs
// stitching of modules together
Provider generic.GenDataModule
@@ -44,44 +45,30 @@ type GenericAccumulator struct {
}
// It declares the new columns and the constraints among them
func NewGenericAccumulator(comp *wizard.CompiledIOP, inp GenericAccumulatorInputs) *GenericAccumulator {
func NewGenericDataAccumulator(comp *wizard.CompiledIOP, inp GenericAccumulatorInputs) *GenericDataAccumulator {
d := &GenericAccumulator{
d := &GenericDataAccumulator{
size: utils.NextPowerOfTwo(inp.MaxNumKeccakF * blockSize),
Inputs: &inp,
}
// declare the new columns per gbm
d.declareColumns(comp, len(inp.Providers))
d.declareColumns(comp, len(inp.ProvidersData))
// constraints over sFilters
//
// 1. they are binary
//
// 2. they don't overlap: isActive = \sum sFilters
//
// 3. sFilter[i] starts immediately after sFilters[i-1].
isActive := symbolic.NewConstant(0)
for i := range d.sFilters {
comp.InsertGlobal(0, ifaces.QueryIDf("sFilter_IsBinary_%v", i),
symbolic.Mul(d.sFilters[i], symbolic.Sub(1, d.sFilters[i])))
isActive = symbolic.Add(d.sFilters[i], isActive)
// sFilter[i] starts immediately after sFilters[i-1].
s := sym.NewConstant(0)
for i := 0; i < len(d.sFilters); i++ {
commonconstraints.MustBeActivationColumns(comp, d.sFilters[i], sym.Sub(1, s))
s = sym.Add(s, d.sFilters[i])
}
comp.InsertGlobal(0, ifaces.QueryIDf("sFilters_NoOverlap"), symbolic.Sub(d.IsActive, isActive))
// for constraint 3: over (1-\sum_{j<i} sFilters[j])*isActive we need that
// sFilters[i] has the form (onesThenZeros), namely, it starts with ones followed by zeroes.
s := symbolic.NewConstant(0)
for i := range d.sFilters {
// over (1-s)*isActive, sFilter[i] is onesThenZeros
// sFilter[i] being onesThenZeros is equivalent to b (below) being binary
b := symbolic.Sub(d.sFilters[i], column.Shift(d.sFilters[i], 1)) // should be binary
comp.InsertGlobal(0, ifaces.QueryIDf("IsOne_ThenZero_%v", i),
symbolic.Mul(symbolic.Sub(1, s), d.IsActive, symbolic.Mul(symbolic.Sub(1, b), b)))
s = symbolic.Add(s, d.sFilters[i])
}
comp.InsertGlobal(0, ifaces.QueryIDf("ADDs_UP_TO_IS_ACTIVE_DATA"),
sym.Sub(s, d.IsActive))
// By the constraints over the sFilters, together with the following, isActive is an activation column.
commonconstraints.MustBeBinary(comp, d.IsActive)
// projection among providers and stitched module
for i, gbm := range d.Inputs.Providers {
for i, gbm := range d.Inputs.ProvidersData {
projection.InsertProjection(comp, ifaces.QueryIDf("Stitch_Modules_%v", i),
[]ifaces.Column{gbm.HashNum, gbm.Limb, gbm.NBytes, gbm.Index},
@@ -91,20 +78,11 @@ func NewGenericAccumulator(comp *wizard.CompiledIOP, inp GenericAccumulatorInput
)
}
// constraints over isActive
// 1. it is binary
// 2. it is zeroes followed by ones
comp.InsertGlobal(0, ifaces.QueryIDf("IsActive_IsBinary_DataTrace"),
symbolic.Mul(d.IsActive, symbolic.Sub(1, isActive)))
col := symbolic.Sub(column.Shift(d.IsActive, 1), d.IsActive) // should be binary
comp.InsertGlobal(0, ifaces.QueryIDf("IsOneThenZero_DataTrace"),
symbolic.Mul(col, symbolic.Sub(1, col)))
return d
}
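In plain terms, MustBeActivationColumns (possibly weighted by the option expression) constrains a column to be binary with no transition from 0 back to 1 over the relevant zone. A plain-Go restatement of that shape, for intuition only:

package main

import "fmt"

// isActivation reports whether col is binary and never transitions from 0
// back to 1, i.e. a (possibly empty) block of ones followed by zeros.
func isActivation(col []uint64) bool {
	seenZero := false
	for _, v := range col {
		if v != 0 && v != 1 {
			return false
		}
		if v == 0 {
			seenZero = true
		} else if seenZero {
			return false // 0 -> 1 transition: not an activation column
		}
	}
	return true
}

func main() {
	fmt.Println(isActivation([]uint64{1, 1, 1, 0, 0})) // true
	fmt.Println(isActivation([]uint64{1, 0, 1}))       // false
}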
// It declares the columns specific to the DataModule
func (d *GenericAccumulator) declareColumns(comp *wizard.CompiledIOP, nbProviders int) {
func (d *GenericDataAccumulator) declareColumns(comp *wizard.CompiledIOP, nbProviders int) {
createCol := common.CreateColFn(comp, GENERIC_ACCUMULATOR, d.size)
d.sFilters = make([]ifaces.Column, nbProviders)
@@ -121,9 +99,9 @@ func (d *GenericAccumulator) declareColumns(comp *wizard.CompiledIOP, nbProvider
}
// It assigns the columns specific to the submodule.
func (d *GenericAccumulator) Run(run *wizard.ProverRuntime) {
func (d *GenericDataAccumulator) Run(run *wizard.ProverRuntime) {
// fetch the gbm witnesses
providers := d.Inputs.Providers
providers := d.Inputs.ProvidersData
asb := make([]assignmentBuilder, len(providers))
for i := range providers {
asb[i].HashNum = providers[i].HashNum.GetColAssignment(run).IntoRegVecSaveAlloc()
@@ -135,7 +113,6 @@ func (d *GenericAccumulator) Run(run *wizard.ProverRuntime) {
sFilters := make([][]field.Element, len(providers))
for i := range providers {
// remember that asb holds the providers' assignments with the padded part removed
filter := asb[i].TO_HASH
// populate sFilters
@@ -156,14 +133,14 @@ func (d *GenericAccumulator) Run(run *wizard.ProverRuntime) {
//assign sFilters
for i := range providers {
run.AssignColumn(d.sFilters[i].GetColID(), smartvectors.LeftZeroPadded(sFilters[i], d.size))
run.AssignColumn(d.sFilters[i].GetColID(), smartvectors.RightZeroPadded(sFilters[i], d.size))
}
// populate and assign isActive
isActive := vector.Repeat(field.One(), len(sFilters[0]))
run.AssignColumn(d.IsActive.GetColID(), smartvectors.LeftZeroPadded(isActive, d.size))
run.AssignColumn(d.IsActive.GetColID(), smartvectors.RightZeroPadded(isActive, d.size))
// populate sModule
// populate Provider
var sHashNum, sLimb, sNBytes, sIndex []field.Element
for i := range providers {
filter := asb[i].TO_HASH
@@ -182,10 +159,10 @@ func (d *GenericAccumulator) Run(run *wizard.ProverRuntime) {
}
}
run.AssignColumn(d.Provider.HashNum.GetColID(), smartvectors.LeftZeroPadded(sHashNum, d.size))
run.AssignColumn(d.Provider.Limb.GetColID(), smartvectors.LeftZeroPadded(sLimb, d.size))
run.AssignColumn(d.Provider.NBytes.GetColID(), smartvectors.LeftZeroPadded(sNBytes, d.size))
run.AssignColumn(d.Provider.Index.GetColID(), smartvectors.LeftZeroPadded(sIndex, d.size))
run.AssignColumn(d.Provider.HashNum.GetColID(), smartvectors.RightZeroPadded(sHashNum, d.size))
run.AssignColumn(d.Provider.Limb.GetColID(), smartvectors.RightZeroPadded(sLimb, d.size))
run.AssignColumn(d.Provider.NBytes.GetColID(), smartvectors.RightZeroPadded(sNBytes, d.size))
run.AssignColumn(d.Provider.Index.GetColID(), smartvectors.RightZeroPadded(sIndex, d.size))
}

View File

@@ -17,7 +17,7 @@ func makeTestCaseDataModule(c []makeTestCase) (
) {
maxNumKeccakF := 8
d := &GenericAccumulator{}
d := &GenericDataAccumulator{}
gdms := make([]generic.GenDataModule, len(c))
define = func(build *wizard.Builder) {
@@ -27,14 +27,14 @@ func makeTestCaseDataModule(c []makeTestCase) (
}
inp := GenericAccumulatorInputs{
Providers: gdms,
ProvidersData: gdms,
MaxNumKeccakF: maxNumKeccakF,
}
d = NewGenericAccumulator(comp, inp)
d = NewGenericDataAccumulator(comp, inp)
}
prover = func(run *wizard.ProverRuntime) {
for i := range gdms {
testdata.GenerateAndAssignGenDataModule(run, &gdms[i], c[i].HashNum, c[i].ToHash)
testdata.GenerateAndAssignGenDataModule(run, &gdms[i], c[i].HashNum, c[i].ToHash, true)
}
d.Run(run)
}

View File

@@ -0,0 +1,169 @@
package gen_acc
import (
"fmt"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
projection "github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
commonconstraints "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common/common_constraints"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)
// The sub-module GenericInfoAccumulator filters the data from different [generic.GenInfoModule],
//
// and stitches them together to build a single module.
type GenericInfoAccumulator struct {
Inputs *GenericAccumulatorInputs
// stitching of modules together
// HashHi and HashLo are over the same row
// isHashHi = isHashLo = IsActive
Provider generic.GenInfoModule
// filter indicating where each original module is located over the stitched one
sFilters []ifaces.Column
// the active part of the stitching module
IsActive ifaces.Column
// max number of rows for the stitched module
size int
}
func NewGenericInfoAccumulator(comp *wizard.CompiledIOP, inp GenericAccumulatorInputs) *GenericInfoAccumulator {
info := &GenericInfoAccumulator{
size: utils.NextPowerOfTwo(inp.MaxNumKeccakF),
Inputs: &inp,
}
// declare columns
info.declareColumns(comp, len(inp.ProvidersInfo))
// sFilter[i] starts immediately after sFilters[i-1].
s := sym.NewConstant(0)
for i := 0; i < len(info.sFilters); i++ {
commonconstraints.MustBeActivationColumns(comp, info.sFilters[i], sym.Sub(1, s))
s = sym.Add(s, info.sFilters[i])
}
comp.InsertGlobal(0, ifaces.QueryIDf("ADDs_UP_TO_IS_ACTIVE_Info"),
sym.Sub(s, info.IsActive))
// By the constraints over the sFilters, together with the following, isActive is an activation column.
commonconstraints.MustBeBinary(comp, info.IsActive)
// projection among providers and stitched module
for i, gbm := range info.Inputs.ProvidersInfo {
projection.InsertProjection(comp, ifaces.QueryIDf("Stitch_Modules_Hi_%v", i),
[]ifaces.Column{gbm.HashHi},
[]ifaces.Column{info.Provider.HashHi},
gbm.IsHashHi,
info.sFilters[i],
)
projection.InsertProjection(comp, ifaces.QueryIDf("Stitch_Modules_Lo_%v", i),
[]ifaces.Column{gbm.HashLo},
[]ifaces.Column{info.Provider.HashLo},
gbm.IsHashLo,
info.sFilters[i],
)
}
return info
}
// declare columns
func (info *GenericInfoAccumulator) declareColumns(comp *wizard.CompiledIOP, nbProviders int) {
createCol := common.CreateColFn(comp, GENERIC_ACCUMULATOR, info.size)
info.IsActive = createCol("IsActive_Info")
info.sFilters = make([]ifaces.Column, nbProviders)
for i := 0; i < nbProviders; i++ {
info.sFilters[i] = createCol("sFilterOut_%v", i)
}
info.Provider.HashHi = createCol("Hash_Hi")
info.Provider.HashLo = createCol("Hash_Lo")
info.Provider.IsHashHi = info.IsActive
info.Provider.IsHashLo = info.IsActive
}
func (info *GenericInfoAccumulator) Run(run *wizard.ProverRuntime) {
// fetch the witnesses of gbm
providers := info.Inputs.ProvidersInfo
asb := make([]infoAssignmentBuilder, len(providers))
for i := range providers {
asb[i].hashHi = providers[i].HashHi.GetColAssignment(run).IntoRegVecSaveAlloc()
asb[i].hashLo = providers[i].HashLo.GetColAssignment(run).IntoRegVecSaveAlloc()
asb[i].isHashHi = providers[i].IsHashHi.GetColAssignment(run).IntoRegVecSaveAlloc()
asb[i].isHashLo = providers[i].IsHashLo.GetColAssignment(run).IntoRegVecSaveAlloc()
}
sFilters := make([][]field.Element, len(providers))
for i := range providers {
filter := asb[i].isHashHi
// populate sFilters
for j := range sFilters {
for k := range filter {
if filter[k] == field.One() {
if j == i {
sFilters[j] = append(sFilters[j], field.One())
} else {
sFilters[j] = append(sFilters[j], field.Zero())
}
}
}
}
}
//assign sFilters
for i := range providers {
run.AssignColumn(info.sFilters[i].GetColID(), smartvectors.RightZeroPadded(sFilters[i], info.size))
}
// populate and assign isActive
isActive := vector.Repeat(field.One(), len(sFilters[0]))
run.AssignColumn(info.IsActive.GetColID(), smartvectors.RightZeroPadded(isActive, info.size))
// populate Provider
var sHashHi, sHashLo []field.Element
for i := range providers {
filterHi := asb[i].isHashHi
filterLo := asb[i].isHashLo
hashHi := asb[i].hashHi
hashLo := asb[i].hashLo
for j := range filterHi {
if filterHi[j] == field.One() {
sHashHi = append(sHashHi, hashHi[j])
}
if filterLo[j] == field.One() {
sHashLo = append(sHashLo, hashLo[j])
}
}
}
run.AssignColumn(info.Provider.HashHi.GetColID(), smartvectors.RightZeroPadded(sHashHi, info.size))
run.AssignColumn(info.Provider.HashLo.GetColID(), smartvectors.RightZeroPadded(sHashLo, info.size))
}
type infoAssignmentBuilder struct {
hashHi, hashLo []field.Element
isHashHi, isHashLo []field.Element
}
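The selection logic in Run can be restated in plain Go: each filter picks out the rows where it equals one, and the Hi and Lo filters may be shifted relative to one another while selecting the same number of rows (the values below are taken from the first test case in the next file):

package main

import "fmt"

// filterSelect returns the entries of vals at the positions where sel is one,
// mirroring how Run extracts sHashHi/sHashLo from the providers.
func filterSelect(vals, sel []uint64) []uint64 {
	var out []uint64
	for i, s := range sel {
		if s == 1 {
			out = append(out, vals[i])
		}
	}
	return out
}

func main() {
	hashHi := []uint64{17, 19, 1, 3, 2}
	isHashHi := []uint64{1, 0, 1, 1, 0}
	hashLo := []uint64{14, 1, 1, 0, 7}
	isHashLo := []uint64{1, 0, 1, 1, 0}
	fmt.Println(filterSelect(hashHi, isHashHi)) // [17 1 3]
	fmt.Println(filterSelect(hashLo, isHashLo)) // [14 1 0]
}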

View File

@@ -0,0 +1,112 @@
package gen_acc
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
// It generates the Define and Assign functions of the Info module, for testing
func makeTestCaseInfoModule(c []makeInfoTestCase) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
var (
maxNumKeccakF = 16
d = &GenericInfoAccumulator{}
gdms = make([]generic.GenInfoModule, len(c))
)
define = func(build *wizard.Builder) {
comp := build.CompiledIOP
for i := range gdms {
createCol := common.CreateColFn(comp, "TESTING_INFO_ACCUMULATOR", c[i].Size)
gdms[i] = generic.GenInfoModule{
HashHi: createCol("Hash_Hi_%v", i),
HashLo: createCol("Hash_Lo_%v", i),
IsHashLo: createCol("Is_Hash_Lo_%v", i),
IsHashHi: createCol("Is_Hash_Hi_%v", i),
}
}
inp := GenericAccumulatorInputs{
ProvidersInfo: gdms,
MaxNumKeccakF: maxNumKeccakF,
}
d = NewGenericInfoAccumulator(comp, inp)
}
prover = func(run *wizard.ProverRuntime) {
for i := range gdms {
generateAndAssignGenInfoModule(run, &gdms[i], c[i])
}
d.Run(run)
}
return define, prover
}
func TestInfoModule(t *testing.T) {
define, prover := makeTestCaseInfoModule(infoTestCases)
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
type makeInfoTestCase struct {
Name string
Size int
HashHi []int
HashLo []int
IsHashHi []int
IsHashLo []int
}
var infoTestCases = []makeInfoTestCase{
{
Name: "GenDataModule1",
Size: 8,
HashHi: []int{17, 19, 1, 3, 2},
HashLo: []int{14, 1, 1, 0, 7},
IsHashHi: []int{1, 0, 1, 1, 0},
IsHashLo: []int{1, 0, 1, 1, 0},
},
{
Name: "GenDataModule2",
Size: 16,
HashHi: []int{1, 89, 1, 1, 6, 1, 2, 3, 90, 3},
HashLo: []int{17, 34, 1, 1, 9, 21, 2, 3, 44, 11},
IsHashHi: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0},
IsHashLo: []int{0, 1, 0, 1, 0, 1, 1, 1, 1, 0}, // shift
},
{
Name: "GenDataModule3",
Size: 16,
HashHi: []int{1, 89, 1, 1, 6, 1, 2, 3, 90, 3, 4, 0},
HashLo: []int{1, 89, 1, 1, 6, 1, 2, 3, 90, 3, 4, 0}, // same
IsHashHi: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0},
IsHashLo: []int{0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1}, // shift
},
}
func generateAndAssignGenInfoModule(run *wizard.ProverRuntime, gbm *generic.GenInfoModule, c makeInfoTestCase) {
hashHi := common.NewVectorBuilder(gbm.HashHi)
hashLo := common.NewVectorBuilder(gbm.HashLo)
isHashHi := common.NewVectorBuilder(gbm.IsHashHi)
isHashLo := common.NewVectorBuilder(gbm.IsHashLo)
for i := range c.HashHi {
hashHi.PushInt(c.HashHi[i])
hashLo.PushInt(c.HashLo[i])
isHashHi.PushInt(c.IsHashHi[i])
isHashLo.PushInt(c.IsHashLo[i])
}
hashHi.PadAndAssign(run)
hashLo.PadAndAssign(run)
isHashHi.PadAndAssign(run)
isHashLo.PadAndAssign(run)
}

View File

@@ -17,6 +17,7 @@ type HashBaseConversionInput struct {
// they are in baseB-LE.
LimbsHiB []ifaces.Column
LimbsLoB []ifaces.Column
IsActive ifaces.Column
MaxNumKeccakF int
Lookup lookUpTables
}
@@ -25,6 +26,8 @@ type hashBaseConversion struct {
Inputs *HashBaseConversionInput
// hash limbs in uint-BE
limbsHi, limbsLo []ifaces.Column
// it indicates the active part of HashHi/HashLo
IsActive ifaces.Column
// the hash result in BE
HashLo, HashHi ifaces.Column
size int
@@ -35,8 +38,9 @@ type hashBaseConversion struct {
func NewHashBaseConversion(comp *wizard.CompiledIOP, inp HashBaseConversionInput) *hashBaseConversion {
h := &hashBaseConversion{
Inputs: &inp,
size: utils.NextPowerOfTwo(inp.MaxNumKeccakF),
Inputs: &inp,
size: utils.NextPowerOfTwo(inp.MaxNumKeccakF),
IsActive: inp.IsActive,
}
// declare the columns
h.DeclareColumns(comp)

View File

@@ -1,125 +0,0 @@
// The keccak package specifies all the mechanism through which the zkevm
// keccaks are proven and extracted from the arithmetization of the zkEVM.
package keccak
import (
"runtime"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/acc_module"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/datatransfer"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/dedicated"
g "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/keccakf"
)
// A trace provider represents a module capable of sending data to the keccak
// module. It does so by appending keccak block traces to a given list. The
// trace provider may access the current Prover runtime to extract information.
type TraceProvider interface {
AppendTraces(run *wizard.ProverRuntime, traces *keccak.PermTraces, genTrace *g.GenTrace)
}
// Module provides the Keccak component of the zkEVM
type Module struct {
Settings *Settings
Keccakf keccakf.Module
DataTransfer datatransfer.Module
dataTrace acc_module.DataModule
infoTrace acc_module.InfoModule
providers []g.GenericByteModule
// for inputs that are slices of bytes
SliceProviders [][]byte
MaxNumKeccakF int
}
// Registers the keccak module within the zkevm arithmetization
func (m *Module) Define(comp *wizard.CompiledIOP, providers []g.GenericByteModule, nbKeccakF int) {
m.providers = providers
m.MaxNumKeccakF = nbKeccakF
// Pass 0 as the definition round as the present context implies that the
// keccakf module is here for the zkevm.
// unify the data from different providers in a single provider, in a provable way
m.dataTrace.NewDataModule(comp, 0, m.MaxNumKeccakF, m.providers)
// assign the provider to DataTransfer module
m.DataTransfer.Provider = m.dataTrace.Provider
// check the correct dataSerialization of limbs to blocks via dataTransfer module
m.DataTransfer.NewDataTransfer(comp, 0, m.MaxNumKeccakF, 0)
// run keccakF over the blocks, in a provable way
m.Keccakf = keccakf.NewModule(comp, 0, m.MaxNumKeccakF)
// assign the blocks from DataTransfer to keccakF,
// also take the output from keccakF and give it back to DataTransfer
m.CsConnectDataTransferToKeccakF(comp, 0)
// project back the hash results to the providers
m.infoTrace.NewInfoModule(comp, 0, m.MaxNumKeccakF, m.providers, m.DataTransfer.HashOutput, m.dataTrace)
}
// Assigns the keccak module. This module does not require external
// input as everything is readily available from the arithmetization traces.
func (m *Module) AssignKeccak(run *wizard.ProverRuntime) {
// assign the aggregated Provider
m.dataTrace.AssignDataModule(run, m.providers)
// Construct the traces for the aggregated Provider
permTrace := keccak.PermTraces{}
genTrace := g.GenTrace{}
m.DataTransfer.Provider.AppendTraces(run, &genTrace, &permTrace)
// If we have too many permutations, abort
limit := m.Keccakf.MaxNumKeccakf
if len(permTrace.Blocks) > limit {
utils.Panic("got too many keccakf. Limit is %v, but received %v", limit, len(permTrace.Blocks))
}
// And manually assign the module from the content of genTrace and permTrace.
m.DataTransfer.AssignModule(run, permTrace, genTrace)
m.Keccakf.Assign(run, permTrace)
m.infoTrace.AssignInfoModule(run, m.providers)
// We empirically found that a forced GC here was improving the runtime.
runtime.GC()
}
// It connects the data-transfer module to the keccakf module via a projection query over the blocks.
func (mod *Module) CsConnectDataTransferToKeccakF(comp *wizard.CompiledIOP, round int) {
dt := mod.DataTransfer
keccakf := mod.Keccakf
// constraints over Data-module (inputs)
var filterIsBlock []ifaces.Column
for j := 0; j < 17; j++ {
filterIsBlock = append(filterIsBlock, keccakf.IO.IsBlcok)
}
spaghettiSize := dt.BaseConversion.LaneX.Size()
dedicated.InsertIsSpaghetti(comp, round, ifaces.QueryIDf("ExportBlocks"),
[][]ifaces.Column{keccakf.Blocks[:]}, filterIsBlock, []ifaces.Column{dt.BaseConversion.LaneX}, spaghettiSize)
// constraints over Info-module (outputs)
// (i.e., from hashOutputSlicesBaseB to hashHiSlices/hashLowSlices)
offSet := 2
for j := range dt.HashOutput.HashLoSlices {
for k := range dt.HashOutput.HashLoSlices[0] {
comp.InsertInclusion(round, ifaces.QueryIDf("BaseConversion_HashOutput_%v_%v", j, k),
[]ifaces.Column{dt.LookUps.ColBaseBDirty, dt.LookUps.ColUint4},
[]ifaces.Column{keccakf.IO.HashOutputSlicesBaseB[j][k], dt.HashOutput.HashHiSlices[j][k]})
comp.InsertInclusion(round, ifaces.QueryIDf("BaseConversion_HashOutput_%v_%v", j+offSet, k),
[]ifaces.Column{dt.LookUps.ColBaseBDirty, dt.LookUps.ColUint4},
[]ifaces.Column{keccakf.IO.HashOutputSlicesBaseB[j+offSet][k], dt.HashOutput.HashLoSlices[j][k]})
}
}
}

View File

@@ -1,6 +1,7 @@
package keccak
import (
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
@@ -8,7 +9,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/base_conversion.go"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/keccakf"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/packing/dedicated/spaghettifier"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing/dedicated/spaghettifier"
)
type LaneInfo struct {
@@ -17,8 +18,8 @@ type LaneInfo struct {
IsLaneActive ifaces.Column
}
// CustomizedKeccakInputs stores the inputs required for [NewCustomizedKeccak]
type CustomizedKeccakInputs struct {
// KeccakOverBlockInputs stores the inputs required for [NewKeccakOverBlocks]
type KeccakOverBlockInputs struct {
LaneInfo LaneInfo
MaxNumKeccakF int
@@ -26,11 +27,13 @@ type CustomizedKeccakInputs struct {
Provider [][]byte
}
// CustomizedkeccakHash stores the result of the hash and the [wizard.ProverAction] of all the submodules.
type CustomizedkeccakHash struct {
Inputs *CustomizedKeccakInputs
// KeccakOverBlocks stores the result of the hash and the [wizard.ProverAction] of all the submodules.
type KeccakOverBlocks struct {
Inputs *KeccakOverBlockInputs
HashHi, HashLo ifaces.Column
MaxNumKeccakF int
// it indicates the active part of HashHi/HashLo
IsActive ifaces.Column
MaxNumKeccakF int
// prover actions for internal modules
pa_blockBaseConversion wizard.ProverAction
@@ -39,9 +42,9 @@ type CustomizedkeccakHash struct {
keccakF keccakf.Module
}
// NewCustomizedKeccak implements the utilities for proving keccak hash over the given blocks.
// NewKeccakOverBlocks implements the utilities for proving keccak hash over the given blocks.
// It assumes that the padding and packing of the stream into blocks are done correctly, and thus directly uses the blocks.
func NewCustomizedKeccak(comp *wizard.CompiledIOP, inp CustomizedKeccakInputs) *CustomizedkeccakHash {
func NewKeccakOverBlocks(comp *wizard.CompiledIOP, inp KeccakOverBlockInputs) *KeccakOverBlocks {
var (
maxNumKeccakF = inp.MaxNumKeccakF
lookupBaseConversion = base_conversion.NewLookupTables(comp)
@@ -70,6 +73,7 @@ func NewCustomizedKeccak(comp *wizard.CompiledIOP, inp CustomizedKeccakInputs) *
keccakf.IO.HashOutputSlicesBaseB[2][:],
keccakf.IO.HashOutputSlicesBaseB[3][:]...,
),
IsActive: keccakf.IO.IsActive,
MaxNumKeccakF: maxNumKeccakF,
Lookup: lookupBaseConversion,
}
@@ -93,11 +97,12 @@ func NewCustomizedKeccak(comp *wizard.CompiledIOP, inp CustomizedKeccakInputs) *
)
// set the module
m := &CustomizedkeccakHash{
m := &KeccakOverBlocks{
Inputs: &inp,
MaxNumKeccakF: maxNumKeccakF,
HashHi: bcForHash.HashHi,
HashLo: bcForHash.HashLo,
IsActive: bcForHash.IsActive,
pa_blockBaseConversion: bcForBlock,
keccakF: keccakf,
pa_hashBaseConversion: bcForHash,
@@ -108,12 +113,12 @@ func NewCustomizedKeccak(comp *wizard.CompiledIOP, inp CustomizedKeccakInputs) *
}
// It implements [wizard.ProverAction] for [KeccakOverBlocks].
func (m *CustomizedkeccakHash) Run(run *wizard.ProverRuntime) {
func (m *KeccakOverBlocks) Run(run *wizard.ProverRuntime) {
// assign blockBaseConversion
m.pa_blockBaseConversion.Run(run)
// assign keccakF
// first, construct the traces for the accumulated Provider
permTrace := GenerateTrace(m.Inputs.Provider)
permTrace := keccak.GenerateTrace(m.Inputs.Provider)
m.keccakF.Assign(run, permTrace)
// assign HashBaseConversion
m.pa_hashBaseConversion.Run(run)

View File

@@ -4,6 +4,7 @@ import (
"crypto/rand"
"testing"
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
@@ -13,12 +14,12 @@ import (
"github.com/stretchr/testify/assert"
)
// makes Define and Prove function for testing [NewCustomizedKeccak]
// makes the Define and Prove functions for testing [NewKeccakOverBlocks]
func MakeTestCaseCustomizedKeccak(t *testing.T, providers [][]byte) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
mod := &CustomizedkeccakHash{}
mod := &KeccakOverBlocks{}
maxNumKeccakF := 16
size := utils.NextPowerOfTwo(maxNumKeccakF * generic.KeccakUsecase.NbOfLanesPerBlock())
@@ -26,7 +27,7 @@ func MakeTestCaseCustomizedKeccak(t *testing.T, providers [][]byte) (
comp := builder.CompiledIOP
createCol := common.CreateColFn(comp, "Test_Customized_Keccak", size)
inp := CustomizedKeccakInputs{
inp := KeccakOverBlockInputs{
LaneInfo: LaneInfo{
Lanes: createCol("Lanes"),
IsFirstLaneOfNewHash: createCol("IsFirstLaneOfNewHash"),
@@ -36,7 +37,7 @@ func MakeTestCaseCustomizedKeccak(t *testing.T, providers [][]byte) (
MaxNumKeccakF: maxNumKeccakF,
Provider: providers,
}
mod = NewCustomizedKeccak(comp, inp)
mod = NewKeccakOverBlocks(comp, inp)
}
prover = func(run *wizard.ProverRuntime) {
@@ -45,7 +46,7 @@ func MakeTestCaseCustomizedKeccak(t *testing.T, providers [][]byte) (
mod.Run(run)
// check the hash result
permTrace := GenerateTrace(mod.Inputs.Provider)
permTrace := keccak.GenerateTrace(mod.Inputs.Provider)
hi := mod.HashHi.GetColAssignment(run).IntoRegVecSaveAlloc()
lo := mod.HashLo.GetColAssignment(run).IntoRegVecSaveAlloc()
for i, expectedHash := range permTrace.HashOutPut {

View File

@@ -1,41 +1,39 @@
// The keccak package specifies all the mechanism through which the zkevm
// keccaks are proven and extracted from the arithmetization of the zkEVM.
// The keccak package implements the utilities for proving the hash over a single provider.
// The provider of type [generic.GenericByteModule] encodes the inputs and outputs of hash (related to the same module).
// The inputs and outputs are respectively embedded inside [generic.GenDataModule], and [generic.GenInfoModule].
package keccak
import (
"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/importpad"
gen_acc "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/acc_module"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/packing"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing"
)
const (
numLanesPerBlock = 17
)
// KeccakInput stores the inputs for [NewKeccak]
type KeccakInput struct {
Settings *Settings
Providers []generic.GenDataModule
// KeccakSingleProviderInput stores the inputs for [NewKeccakSingleProvider]
type KeccakSingleProviderInput struct {
MaxNumKeccakF int
Provider generic.GenericByteModule
}
// keccakHash stores the hash result and [wizard.ProverAction] of the submodules.
type keccakHash struct {
Inputs *KeccakInput
// KeccakSingleProvider stores the hash result and [wizard.ProverAction] of the submodules.
type KeccakSingleProvider struct {
Inputs *KeccakSingleProviderInput
HashHi, HashLo ifaces.Column
MaxNumKeccakF int
// indicates the active part of HashHi/HashLo
IsActive ifaces.Column
MaxNumKeccakF int
// prover actions for internal modules
pa_acc wizard.ProverAction
pa_importPad, pa_packing wizard.ProverAction
pa_cKeccak *CustomizedkeccakHash
// the result of genericAccumulator
Provider generic.GenDataModule
pa_cKeccak *KeccakOverBlocks
}
// NewKeccakSingleProvider implements the utilities for proving keccak hash
@@ -45,24 +43,16 @@ type keccakHash struct {
// - Padding module to ensure the correct padding of the streams.
// - packing module to ensure the correct packing of the padded stream into blocks.
// - customizedKeccak to ensure the correct hash computation over the given blocks.
func NewKeccak(comp *wizard.CompiledIOP, inp KeccakInput) *keccakHash {
func NewKeccakSingleProvider(comp *wizard.CompiledIOP, inp KeccakSingleProviderInput) *KeccakSingleProvider {
var (
maxNumKeccakF = inp.Settings.MaxNumKeccakf
maxNumKeccakF = inp.MaxNumKeccakF
size = utils.NextPowerOfTwo(maxNumKeccakF * generic.KeccakUsecase.BlockSizeBytes())
inpAcc = gen_acc.GenericAccumulatorInputs{
MaxNumKeccakF: maxNumKeccakF,
Providers: inp.Providers,
}
// unify the data from different providers in a single provider
acc = gen_acc.NewGenericAccumulator(comp, inpAcc)
// apply import and pad
inpImportPadd = importpad.ImportAndPadInputs{
Name: "KECCAK",
Src: generic.GenericByteModule{
Data: acc.Provider,
Data: inp.Provider.Data,
},
PaddingStrategy: generic.KeccakUsecase,
}
@@ -84,7 +74,7 @@ func NewKeccak(comp *wizard.CompiledIOP, inp KeccakInput) *keccakHash {
packing = packing.NewPack(comp, inpPck)
// apply customized keccak over the blocks
cKeccakInp = CustomizedKeccakInputs{
cKeccakInp = KeccakOverBlockInputs{
LaneInfo: LaneInfo{
Lanes: packing.Repacked.Lanes,
IsFirstLaneOfNewHash: packing.Repacked.IsFirstLaneOfNewHash,
@@ -93,36 +83,45 @@ func NewKeccak(comp *wizard.CompiledIOP, inp KeccakInput) *keccakHash {
MaxNumKeccakF: maxNumKeccakF,
}
cKeccak = NewCustomizedKeccak(comp, cKeccakInp)
cKeccak = NewKeccakOverBlocks(comp, cKeccakInp)
)
projection.InsertProjection(comp, "KECCAK_RES_HI",
[]ifaces.Column{cKeccak.HashHi},
[]ifaces.Column{inp.Provider.Info.HashHi},
cKeccak.IsActive,
inp.Provider.Info.IsHashHi,
)
projection.InsertProjection(comp, "KECCAK_RES_LO",
[]ifaces.Column{cKeccak.HashLo},
[]ifaces.Column{inp.Provider.Info.HashLo},
cKeccak.IsActive,
inp.Provider.Info.IsHashLo,
)
// set the module
m := &keccakHash{
m := &KeccakSingleProvider{
Inputs: &inp,
MaxNumKeccakF: maxNumKeccakF,
HashHi: cKeccak.HashHi,
HashLo: cKeccak.HashLo,
pa_acc: acc,
IsActive: cKeccak.IsActive,
pa_importPad: imported,
pa_packing: packing,
pa_cKeccak: cKeccak,
Provider: acc.Provider,
}
return m
}
// It implements [wizard.ProverAction] for keccak.
func (m *keccakHash) Run(run *wizard.ProverRuntime) {
func (m *KeccakSingleProvider) Run(run *wizard.ProverRuntime) {
// assign the genericAccumulator module
m.pa_acc.Run(run)
// assign ImportAndPad module
m.pa_importPad.Run(run)
// assign packing module
m.pa_packing.Run(run)
providerBytes := m.Provider.ScanStreams(run)
providerBytes := m.Inputs.Provider.Data.ScanStreams(run)
m.pa_cKeccak.Inputs.Provider = providerBytes
m.pa_cKeccak.Run(run)
}
@@ -134,11 +133,3 @@ func isBlock(col ifaces.Column) []ifaces.Column {
}
return isBlock
}
// It generates [keccak.PermTraces] from the given streams.
func GenerateTrace(streams [][]byte) (t keccak.PermTraces) {
for _, stream := range streams {
keccak.Hash(stream, &t)
}
return t
}
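Since GenerateTrace now lives in the crypto/keccak package, a hedged usage sketch (assuming PermTraces.HashOutPut collects one digest per input stream, as the tests in this diff rely on):

package main

import (
	"fmt"

	"github.com/consensys/zkevm-monorepo/prover/crypto/keccak"
)

func main() {
	streams := [][]byte{[]byte("abc"), []byte("def")}
	// hashes each stream and accumulates the keccakf permutation traces
	trace := keccak.GenerateTrace(streams)
	fmt.Println(len(trace.HashOutPut)) // one digest per stream: 2
}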

View File

@@ -0,0 +1,67 @@
package keccak
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic/testdata"
"github.com/stretchr/testify/assert"
)
func MakeTestCaseKeccak(t *testing.T, c makeTestCaseGBM) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
mod := &KeccakSingleProvider{}
maxNumKeccakF := 12
gdm := generic.GenDataModule{}
gim := generic.GenInfoModule{}
define = func(builder *wizard.Builder) {
comp := builder.CompiledIOP
gdm = testdata.CreateGenDataModule(comp, c.Name, c.SizeData)
gim = testdata.CreateGenInfoModule(comp, c.Name, c.SizeInfo)
inp := KeccakSingleProviderInput{
MaxNumKeccakF: maxNumKeccakF,
Provider: generic.GenericByteModule{
Data: gdm,
Info: gim},
}
mod = NewKeccakSingleProvider(comp, inp)
}
prover = func(run *wizard.ProverRuntime) {
testdata.GenerateAndAssignGenDataModule(run, &gdm, c.HashNum, c.ToHash, true)
// expected hash is embedded inside gim columns.
testdata.GenerateAndAssignGenInfoModule(run, &gim, gdm, c.IsHashHi, c.IsHashLo)
mod.Run(run)
}
return define, prover
}
func TestKeccak(t *testing.T) {
definer, prover := MakeTestCaseKeccak(t, testCasesGBMSingleProvider)
comp := wizard.Compile(definer, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
var testCasesGBMSingleProvider = makeTestCaseGBM{
Name: "GenDataModule3",
SizeData: 32,
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 4, 4},
ToHash: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1},
SizeInfo: 8,
IsHashHi: []int{1, 0, 0, 1, 1, 1, 0},
IsHashLo: []int{0, 1, 0, 0, 1, 1, 1}, // shift
}

View File

@@ -1,120 +0,0 @@
package keccak
import (
"fmt"
"sync"
"testing"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/innerproduct"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/lookup"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/permutation"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/specialqueries"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/acc_module"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/datatransfer/datatransfer"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/stretchr/testify/assert"
)
func MakeTestCaseKeccakModule(numProviders int) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
mod := Module{}
round := 0
nbKeccakF := 128
gbmSize := make([]int, numProviders)
def := make([]generic.GenericByteModuleDefinition, numProviders)
gbms := make([]generic.GenericByteModule, numProviders)
def[0] = generic.RLP_ADD
def[1] = generic.SHAKIRA
gbmSize[0] = 512
gbmSize[1] = 128
define = func(builder *wizard.Builder) {
comp := builder.CompiledIOP
for i := range gbms {
gbms[i] = acc_module.CommitGBM(comp, round, def[i], gbmSize[i])
}
mod.Define(comp, gbms, nbKeccakF)
}
prover = func(run *wizard.ProverRuntime) {
for i := range gbms {
acc_module.AssignGBMfromTable(run, &gbms[i], gbmSize[i]-4, gbmSize[i]/7)
}
mod.AssignKeccak(run)
}
return define, prover
}
func TestKeccakModule(t *testing.T) {
definer, prover := MakeTestCaseKeccakModule(2)
comp := wizard.Compile(definer, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
func BenchmarkKeccakModule(b *testing.B) {
nbKeccakF := []int{
1 << 13,
// 1 << 16,
// 1 << 18,
// 1 << 20,
}
once := &sync.Once{}
for _, numKeccakF := range nbKeccakF {
b.Run(fmt.Sprintf("%v-numKeccakF", numKeccakF), func(b *testing.B) {
define := func(build *wizard.Builder) {
comp := build.CompiledIOP
mod := Module{}
gbm0 := datatransfer.CommitGBM(comp, 0, generic.SHAKIRA, 2)
gbm1 := datatransfer.CommitGBM(comp, 0, generic.RLP_ADD, 2)
gbms := []generic.GenericByteModule{gbm0, gbm1}
mod.Define(comp, gbms, numKeccakF)
}
var (
compiled = wizard.Compile(
define,
specialqueries.RangeProof,
specialqueries.CompileFixedPermutations,
permutation.CompileGrandProduct,
lookup.CompileLogDerivative,
innerproduct.Compile,
)
numCells = 0
numCols = 0
)
for _, colID := range compiled.Columns.AllKeys() {
numCells += compiled.Columns.GetSize(colID)
numCols += 1
}
b.ReportMetric(float64(numCells), "#cells")
b.ReportMetric(float64(numCols), "#columns")
once.Do(func() {
for _, colID := range compiled.Columns.AllKeys() {
fmt.Printf("%v, %v\n", colID, compiled.Columns.GetSize(colID))
}
})
})
}
}

View File

@@ -0,0 +1,103 @@
package keccak
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic/testdata"
"github.com/stretchr/testify/assert"
)
func MakeTestCaseKeccakZkEVM(t *testing.T, c []makeTestCaseGBM) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
mod := &KeccakZkEVM{}
maxNumKeccakF := 12
gdms := make([]generic.GenDataModule, len(c))
gims := make([]generic.GenInfoModule, len(c))
gbm := make([]generic.GenericByteModule, len(c))
define = func(builder *wizard.Builder) {
comp := builder.CompiledIOP
for i := range gdms {
gdms[i] = testdata.CreateGenDataModule(comp, c[i].Name, c[i].SizeData)
gims[i] = testdata.CreateGenInfoModule(comp, c[i].Name, c[i].SizeInfo)
gbm[i] = generic.GenericByteModule{
Data: gdms[i],
Info: gims[i],
}
}
inp := KeccakZkEVMInput{
Settings: &Settings{
MaxNumKeccakf: maxNumKeccakF,
},
Providers: gbm,
}
mod = NewKeccakZkEVM(comp, inp)
}
prover = func(run *wizard.ProverRuntime) {
for i := range gdms {
testdata.GenerateAndAssignGenDataModule(run, &gdms[i], c[i].HashNum, c[i].ToHash, true)
// expected hash is embedded inside gim columns.
testdata.GenerateAndAssignGenInfoModule(run, &gims[i], gdms[i], c[i].IsHashHi, c[i].IsHashLo)
}
mod.Run(run)
}
return define, prover
}
func TestKeccakZkEVM(t *testing.T) {
definer, prover := MakeTestCaseKeccakZkEVM(t, testCasesGBMMultiProvider)
comp := wizard.Compile(definer, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
type makeTestCaseGBM struct {
Name string
SizeData int
HashNum []int
ToHash []int
SizeInfo int
IsHashHi []int
IsHashLo []int
}
var testCasesGBMMultiProvider = []makeTestCaseGBM{
{
Name: "GenDataModule1",
SizeData: 8,
HashNum: []int{1, 1, 1, 1, 2},
ToHash: []int{1, 0, 1, 0, 1},
SizeInfo: 4,
IsHashHi: []int{0, 1, 0, 1}, // # ones = number of hash from above
IsHashLo: []int{1, 0, 0, 1},
},
{
Name: "GenDataModule2",
SizeData: 16,
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 3, 3, 3},
ToHash: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0},
SizeInfo: 8,
IsHashHi: []int{0, 1, 1, 0, 0, 1},
IsHashLo: []int{0, 1, 1, 0, 0, 1}, // same
},
{
Name: "GenDataModule3",
SizeData: 32,
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 4, 4},
ToHash: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1},
SizeInfo: 8,
IsHashHi: []int{1, 0, 0, 1, 1, 1, 0},
IsHashLo: []int{0, 1, 0, 0, 1, 1, 1}, // shift
},
}

View File

@@ -0,0 +1,73 @@
// The keccak package accumulates the providers from different zkEVM modules,
//
// and proves the hash consistency over the unified provider.
//
// The provider encodes the inputs and outputs of the hash from different modules.
package keccak
import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
gen_acc "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/acc_module"
)
type KeccakZkEVMInput struct {
Settings *Settings
// the list of providers, encoding inputs and outputs of hash
Providers []generic.GenericByteModule
}
type KeccakZkEVM struct {
Settings *Settings
// The [wizard.ProverAction] for submodules.
pa_accData wizard.ProverAction
pa_accInfo wizard.ProverAction
pa_keccak wizard.ProverAction
}
func NewKeccakZkEVM(comp *wizard.CompiledIOP, inp KeccakZkEVMInput) *KeccakZkEVM {
// create the list of [generic.GenDataModule] and [generic.GenInfoModule]
var gdm []generic.GenDataModule
var gim []generic.GenInfoModule
for i := range inp.Providers {
gdm = append(gdm, inp.Providers[i].Data)
gim = append(gim, inp.Providers[i].Info)
}
var (
inpAcc = gen_acc.GenericAccumulatorInputs{
MaxNumKeccakF: inp.Settings.MaxNumKeccakf,
ProvidersData: gdm,
ProvidersInfo: gim,
}
// unify the data from different providers in a single provider
accData = gen_acc.NewGenericDataAccumulator(comp, inpAcc)
// unify the info from different providers in a single provider
accInfo = gen_acc.NewGenericInfoAccumulator(comp, inpAcc)
keccakInp = KeccakSingleProviderInput{
Provider: generic.GenericByteModule{
Data: accData.Provider,
Info: accInfo.Provider,
},
MaxNumKeccakF: inp.Settings.MaxNumKeccakf,
}
keccak = NewKeccakSingleProvider(comp, keccakInp)
)
res := &KeccakZkEVM{
pa_accData: accData,
pa_accInfo: accInfo,
pa_keccak: keccak}
return res
}
func (k *KeccakZkEVM) Run(run *wizard.ProverRuntime) {
k.pa_accData.Run(run)
k.pa_accInfo.Run(run)
k.pa_keccak.Run(run)
}
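A hedged sketch of instantiating KeccakZkEVM inside a wizard definition, following the pattern the zkevm package adopts later in this diff; the providers argument is assumed to come from the arithmetization, and exampleDefine is an illustrative helper, not part of the package:

package keccak

import (
	"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
	"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
)

// exampleDefine sketches how a caller would wire KeccakZkEVM into a wizard
// definition; Settings carries MaxNumKeccakf as used above.
func exampleDefine(providers []generic.GenericByteModule, maxNumKeccakF int) wizard.DefineFunc {
	return func(b *wizard.Builder) {
		inp := KeccakZkEVMInput{
			Settings:  &Settings{MaxNumKeccakf: maxNumKeccakF},
			Providers: providers,
		}
		// the returned module's Run is called from the prover step
		_ = NewKeccakZkEVM(b.CompiledIOP, inp)
	}
}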

View File

@@ -10,6 +10,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
)
const (
@@ -32,12 +33,13 @@ type InputOutput struct {
// Sum of IsBlockBaseB and IsFirstBlock
IsBlcok ifaces.Column
// shifted version of (the effective part of) isFirstBlock.
IsHashOutPut ifaces.Column
PiChiIota piChiIota
HashOutputSlicesBaseB [numLanesInHashOutPut][numSlices]ifaces.Column
// active part of HashOutputSlicesBaseB
IsActive ifaces.Column
}
/*
@@ -108,7 +110,7 @@ func (io *InputOutput) newOutput(comp *wizard.CompiledIOP, round, maxNumKeccakF
// It declares the columns specific to the submodule.
func (io *InputOutput) declareColumnsInput(comp *wizard.CompiledIOP, round, maxNumKeccakF int) {
size := numRows(maxNumKeccakF)
io.IsHashOutPut = comp.InsertCommit(round, deriveName("IS_FIRST_BLOCK_SHIFTED"), size)
io.IsHashOutPut = comp.InsertCommit(round, deriveName("IS_HASH_OUTPUT"), size)
io.IsFirstBlock = comp.InsertCommit(round, deriveName("IS_FIRST_BLOCK"), size)
io.IsBlockBaseB = comp.InsertCommit(round, deriveName("IS_BLOCK_BaseB"), size)
@@ -124,6 +126,8 @@ func (io *InputOutput) declareColumnsOutput(comp *wizard.CompiledIOP, round, max
ifaces.ColIDf("HashOutPut_SlicesBaseB_%v_%v", j, k), hashOutputSize)
}
}
io.IsActive = comp.InsertCommit(round, ifaces.ColIDf("HASH_IS_ACTIVE"), hashOutputSize)
}
// Constraints over the blocks of the message;
@@ -304,4 +308,14 @@ func (io *InputOutput) assignHashOutPut(run *wizard.ProverRuntime) {
run.AssignColumn(io.HashOutputSlicesBaseB[j][k].GetColID(), smartvectors.RightZeroPadded(hashOutput, sizeHashOutput))
}
}
isActive := common.NewVectorBuilder(io.IsActive)
// populate isActive
for i := range isHashOutput {
if isHashOutput[i].IsOne() {
isActive.PushInt(1)
}
}
isActive.PadAndAssign(run)
}

View File

@@ -1,98 +0,0 @@
package keccak
import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic/testdata"
"github.com/stretchr/testify/assert"
)
func MakeTestCaseKeccak(t *testing.T, c []makeTestCase) (
define wizard.DefineFunc,
prover wizard.ProverStep,
) {
mod := &keccakHash{}
maxNumKeccakF := 12
gdms := make([]generic.GenDataModule, len(c))
define = func(builder *wizard.Builder) {
comp := builder.CompiledIOP
for i := range gdms {
gdms[i] = testdata.CreateGenDataModule(comp, c[i].Name, c[i].Size)
}
inp := KeccakInput{
Settings: &Settings{
MaxNumKeccakf: maxNumKeccakF,
},
Providers: gdms,
}
mod = NewKeccak(comp, inp)
}
prover = func(run *wizard.ProverRuntime) {
for i := range gdms {
testdata.GenerateAndAssignGenDataModule(run, &gdms[i], c[i].HashNum, c[i].ToHash)
}
mod.Run(run)
// check the hash result
permTrace := GenerateTrace(mod.Provider.ScanStreams(run))
hi := mod.HashHi.GetColAssignment(run).IntoRegVecSaveAlloc()
lo := mod.HashLo.GetColAssignment(run).IntoRegVecSaveAlloc()
for i, expectedHash := range permTrace.HashOutPut {
// hashHi := hash[:16], hashLo := hash[16:]
gotHashHi := hi[i].Bytes()
gotHashLo := lo[i].Bytes()
assert.Equal(t, expectedHash[:16], gotHashHi[16:])
assert.Equal(t, expectedHash[16:], gotHashLo[16:])
}
for i := len(permTrace.HashOutPut); i < len(hi); i++ {
assert.Equal(t, field.Zero(), hi[i])
assert.Equal(t, field.Zero(), lo[i])
}
}
return define, prover
}
func TestKeccak(t *testing.T) {
definer, prover := MakeTestCaseKeccak(t, testCases)
comp := wizard.Compile(definer, dummy.Compile)
proof := wizard.Prove(comp, prover)
assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof")
}
type makeTestCase struct {
Name string
Size int
HashNum []int
ToHash []int
}
var testCases = []makeTestCase{
{
Name: "GenDataModule1",
Size: 8,
HashNum: []int{1, 1, 1, 1, 2},
ToHash: []int{1, 0, 1, 0, 1},
},
{
Name: "GenDataModule2",
Size: 16,
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 3, 3, 3},
ToHash: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0},
},
{
Name: "GenDataModule3",
Size: 32,
HashNum: []int{1, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 4, 4},
ToHash: []int{1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1},
},
}

View File

@@ -13,7 +13,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
commonconstraints "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common/common_constraints"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/packing/dedicated"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing/dedicated"
)
// It stores the inputs for [newDecomposition] function.

View File

@@ -11,7 +11,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
commonconstraints "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common/common_constraints"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/packing/dedicated"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing/dedicated"
)
// laneRepackingInputs collects the inputs of the [newLane] function.

View File

@@ -8,7 +8,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/packing/dedicated/spaghettifier"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing/dedicated/spaghettifier"
)
const (

View File

@@ -4,7 +4,6 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/serialization"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/arithmetization"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/statemanager"
@@ -16,7 +15,7 @@ type ZkEvm struct {
// process.
arithmetization arithmetization.Arithmetization
// Keccak module in use. Generated during the compilation process.
keccak keccak.Module
keccak keccak.KeccakZkEVM
// State manager module in use. Generated during the compilation process.
stateManager statemanager.StateManagerLegacy
@@ -42,7 +41,7 @@ func NewZkEVM(
stateManager: statemanager.StateManagerLegacy{
Settings: &settings.Statemanager,
},
keccak: keccak.Module{
keccak: keccak.KeccakZkEVM{
Settings: &settings.Keccak,
},
}
@@ -87,9 +86,11 @@ func (z *ZkEvm) define(b *wizard.Builder) {
// If the keccak module is enabled, set the module.
if z.keccak.Settings.Enabled {
var providers []generic.GenericByteModule
nbKeccakF := z.keccak.Settings.MaxNumKeccakf
z.keccak.Define(b.CompiledIOP, providers, nbKeccakF)
keccakInp := keccak.KeccakZkEVMInput{
Settings: z.keccak.Settings,
// list of modules for integration
}
z.keccak = *keccak.NewKeccakZkEVM(b.CompiledIOP, keccakInp)
}
}
@@ -110,7 +111,7 @@ func (z *ZkEvm) prove(input *Witness) (prover wizard.ProverStep) {
// Assign the Keccak module
if z.keccak.Settings.Enabled {
z.keccak.AssignKeccak(run)
z.keccak.Run(run)
}
}
}