Prover/beta v1 integration (#3813)

* disconnect the hub from the actual arithmetization
* adds the missing columns from the arithmetization
* fix invalid bound constraint for ecdsa
* adds the settings for all the modules
* deduplicating the column names between sha2.packing and keccak.packing
* adjust the number of inputs for ecadd and ecmul
* sanity-checks against creating giant columns and giant range-checks
* more informative message about failing to split a global constraint
* fixing a mistake in the multi-limb cmp
* splitter: adding support for periodic sample with larger period
* ecpair: adjust the number of G2 checks per circuit
* fix: generate missing twiddles for specific sis instances
* optimize the FromAccessor verifier col to make it lighter
* fix: make all fields of ConstCol public
* serialization: add checks to panic when entering a private field
* factorization: remove false edge-case
* added logs for the artefact writing
* lookup: ensure that the zCatalog entries are processed in a deterministic order
* corset.go: remove deadcode
* artefacts: fix the file existence check in wizard.Artefact
* codehash: fix the state-manager code-hash assignment
* adds sha2 circuit
* ecdsa: fix a few bugs in the assignment part
* implements the tx-signature getter
* config: fix maximal value for the log level
* remove-me: making debugging simpler
* fixup + add some more information
* batch-interpolate: fix the batch interpolation algorithm
* add more logs in the vortex prover
* fixup: remove the test acceleration
* vector builder: friendlier message when re-pushing the last element on top of an empty builder
* public input: set the name in the settings automatically
* sha2: address the case where sha2 is never called
* keccak: remove trailing printf
* fix: sha2 failing test

* lookup: fix error message on lookup prover

* const-col: simplify the return of GetColAssignmentAt

* lane: address case where sha2 is never called

* mimccodehash: address the case where the codehash is empty

* multi-limbs: fix incorrect range query

* storage-peek: adjust the storage key decomposition to use fewer limbs

* fixup: remove trailing printf

* fixup: (from accessors) fix invalid getterAt

* more informative message for the projection queries

* log (dummy verifier): add a log when a query fails

* fix(nbZeroes): address the case where the number of inputs is zero

* fix(sha2): address the case where sha2 is not called

* fix(state-summary): address the fact that read|readzero follow a different pattern of account traces

* fix(state-summary): change the ordering in the state-summary

* fix(mimcCodeHash): reverse the direction of the lookup

* chores(inclusion): make the error more comprehensive by listing all the missing rows

* feat(state-manager): adds constraints for ensuring writes happen after reads

* feat(compressor): revert to the old way of hashing

* fix: comment out the checks pertaining to assessed issues in the arithmetization

* chores(projection): sends more insightful logs in the projection query

* style(ecdsa): remove nbTxs from assignTxSignature

* fix(vortex): add a missing column-ignore

* adding new SIS parameters to the test

* feat(vortex): change the default SIS parameters

* chores(fs): skip the fiat-shamir update when running in dummy.Compile

* fix(vortex): bug when handling shadow columns

* fix(gnark-interpolation): better handling of constant terms

* fix(public-input): fix wrong query addressing

* fix(execution): change the structure of the circuit builder

* fixup

* feat(setup): change the ordering of the circuit generation

* fix(execution): allocate explicitly the L2 message hashes

* fixup

* fixup: remove debugging line

* fix(s3): change the line in the makefile to use include/exclude

* fix(circuit): fix incorrect assignment function

* fix(execution): correct the FinalBlockNumber assignment

* fix(execution): adjust the hashing of the L2 message hashes

* fix the assignment of modexp

* rm gitignore
Author: AlexandreBelling
Date:   2024-08-20 22:07:36 +02:00
Committed by: GitHub
Parent: f67f850cbc
Commit: d261807d61
88 changed files with 1231 additions and 424 deletions

View File

@@ -65,7 +65,7 @@ copy-prover-assets:
aws s3 sync --exclude "*prover/dev*" --exclude "*05b9ef1*" --exclude "*05b9ef1*" --exclude "*96e3a19*" --exclude "*a9e4681*" prover-assets s3://zk-uat-prover/prover-assets/ --profile=zk-uat-s3-access
download-srs:
aws s3 sync s3://gnark-ignition/with_lines/*.memdump ./prover-assets/kzgsrs/
aws s3 sync s3://gnark-ignition/with_lines/ ./prover-assets/kzgsrs --exclude "*" --include "*.memdump"
###
### Controller

View File

@@ -2,6 +2,7 @@ package execution
import (
"bytes"
"path"
"github.com/consensys/zkevm-monorepo/prover/backend/ethereum"
"github.com/consensys/zkevm-monorepo/prover/backend/execution/bridge"
@@ -144,18 +145,22 @@ func inspectStateManagerTraces(
func (req *Request) collectSignatures() map[[32]byte]ethereum.Signature {
res := map[[32]byte]ethereum.Signature{}
blocks := req.Blocks()
var (
res = map[[32]byte]ethereum.Signature{}
blocks = req.Blocks()
currTx = 0
)
for i := range blocks {
for _, tx := range blocks[i].Transactions() {
var (
txHash = [32]byte(tx.Hash())
txHash = ethereum.GetTxHash(tx)
txSignature = ethereum.GetJsonSignature(tx)
)
res[txHash] = txSignature
currTx++
}
}
@@ -176,7 +181,7 @@ func (rsp *Response) FuncInput() *execution.FunctionalPublicInput {
MaxNbL2MessageHashes: rsp.MaxNbL2MessageHashes,
ChainID: uint64(rsp.ChainID),
FinalBlockTimestamp: lastBlock.TimeStamp,
FinalBlockNumber: uint64(rsp.FirstBlockNumber + len(rsp.BlocksData)),
FinalBlockNumber: uint64(rsp.FirstBlockNumber + len(rsp.BlocksData) - 1),
InitialBlockTimestamp: firstBlock.TimeStamp,
InitialBlockNumber: uint64(rsp.FirstBlockNumber),
DataChecksum: rsp.ExecDataChecksum,
@@ -204,7 +209,7 @@ func (rsp *Response) FuncInput() *execution.FunctionalPublicInput {
func NewWitness(cfg *config.Config, req *Request, rsp *Response) *Witness {
return &Witness{
ZkEVM: &zkevm.Witness{
ExecTracesFPath: req.ConflatedExecutionTracesFile,
ExecTracesFPath: path.Join(cfg.Execution.ConflatedTracesDir, req.ConflatedExecutionTracesFile),
SMTraces: req.StateManagerTraces(),
TxSignatures: req.collectSignatures(),
L2BridgeAddress: cfg.Layer2.MsgSvcContract,

View File

@@ -235,9 +235,11 @@ func CheckBatchesSums(api frontend.API, hasher snarkHash.FieldHasher, nbBatches
api.AssertIsEqual(batchI, nbBatches) // check that we're actually done
// Ensure the computed partial sums match
// hash along the lengths and compare with expected values
for i := range batchEnds {
batchesRange.AssertEqualI(i, expectedChecksums[i], partialSums[i])
hasher.Reset()
hasher.Write(batchLengths[i], partialSums[i])
batchesRange.AssertEqualI(i, expectedChecksums[i], hasher.Sum())
}
return nil
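For reference, a hedged out-of-circuit sketch of the corrected check, not part of the diff: each expected checksum now commits to the batch length together with the running partial sum. The BLS12-377 MiMC from gnark-crypto is an assumption of this sketch; the circuit above only relies on the snarkHash.FieldHasher abstraction.

// Hedged sketch only: recomputes one expected batch checksum natively,
// mirroring hasher.Reset(); hasher.Write(batchLengths[i], partialSums[i]).
package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr/mimc"
)

func batchChecksum(batchLength, partialSum fr.Element) fr.Element {
	h := mimc.NewMiMC()
	lenBytes := batchLength.Bytes()
	sumBytes := partialSum.Bytes()
	h.Write(lenBytes[:]) // MiMC absorbs one field element per Write
	h.Write(sumBytes[:])
	var res fr.Element
	res.SetBytes(h.Sum(nil))
	return res
}

func main() {
	var l, s fr.Element
	l.SetUint64(31) // hypothetical batch length
	s.SetUint64(42) // hypothetical partial sum
	fmt.Println(batchChecksum(l, s).String())
}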

View File

@@ -5,27 +5,28 @@ import (
"github.com/consensys/gnark/constraint"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/frontend/cs/scs"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/gnark/profile"
"github.com/consensys/zkevm-monorepo/prover/zkevm"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput"
)
type builder struct {
comp *wizard.CompiledIOP
extractor *publicInput.FunctionalInputExtractor
zkevm *zkevm.ZkEvm
}
func NewBuilder(z *zkevm.ZkEvm) *builder {
return &builder{comp: z.WizardIOP, extractor: &z.PublicInput.Extractor}
return &builder{zkevm: z}
}
func (b *builder) Compile() (constraint.ConstraintSystem, error) {
return makeCS(b.comp, b.extractor), nil
return makeCS(b.zkevm), nil
}
// builds the circuit
func makeCS(comp *wizard.CompiledIOP, ext *publicInput.FunctionalInputExtractor) constraint.ConstraintSystem {
circuit := Allocate(comp, ext)
func makeCS(z *zkevm.ZkEvm) constraint.ConstraintSystem {
circuit := Allocate(z)
pro := profile.Start(profile.WithPath("./profiling-execution.pprof"))
defer pro.Stop()
scs, err := frontend.Compile(fr.Modulus(), scs.NewBuilder, &circuit, frontend.WithCapacity(1<<24))
if err != nil {

View File

@@ -10,6 +10,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/circuits"
"github.com/consensys/zkevm-monorepo/prover/crypto/mimc/gkrmimc"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput"
"github.com/sirupsen/logrus"
@@ -20,29 +21,38 @@ import (
// CircuitExecution for the outer-proof
type CircuitExecution struct {
// The wizard verifier circuit
WizardVerifier wizard.WizardVerifierCircuit
WizardVerifier wizard.WizardVerifierCircuit `gnark:",secret"`
// The extractor is not part of the circuit per se, but holds information
// that is used to extract the public inputs from the WizardVerifier.
// The extractor only needs to be provided during the definition of the
// circuit and is omitted during the assignment of the circuit.
extractor publicInput.FunctionalInputExtractor
extractor publicInput.FunctionalInputExtractor `gnark:"-"`
// The functional public inputs are the "actual" statement made by the
// circuit. They are not part of the public input of the circuit for
// a number of reasons involving efficiency and simplicity in the aggregation
// process. What is exposed as the public input is their hash.
FuncInputs FunctionalPublicInputSnark
FuncInputs FunctionalPublicInputSnark `gnark:",secret"`
// The public input of the proof
PublicInput frontend.Variable `gnark:",public"`
}
// Allocates the outer-proof circuit
func Allocate(comp *wizard.CompiledIOP, piExtractor *publicInput.FunctionalInputExtractor) CircuitExecution {
wverifier, err := wizard.AllocateWizardCircuit(comp)
func Allocate(zkevm *zkevm.ZkEvm) CircuitExecution {
wverifier, err := wizard.AllocateWizardCircuit(zkevm.WizardIOP)
if err != nil {
panic(err)
}
return CircuitExecution{
WizardVerifier: *wverifier,
extractor: zkevm.PublicInput.Extractor,
FuncInputs: FunctionalPublicInputSnark{
FunctionalPublicInputQSnark: FunctionalPublicInputQSnark{
L2MessageHashes: NewL2MessageHashes(
[][32]frontend.Variable{},
zkevm.Limits().BlockL2L1Logs,
),
},
},
}
}
@@ -52,7 +62,6 @@ func assign(
proof wizard.Proof,
funcInputs FunctionalPublicInput,
) CircuitExecution {
wizardVerifier := wizard.GetWizardVerifierCircuitAssignment(comp, proof)
return CircuitExecution{
WizardVerifier: *wizardVerifier,

View File

@@ -15,10 +15,11 @@ import (
"github.com/consensys/zkevm-monorepo/prover/utils/types"
)
// FunctionalPublicInputQSnark the information on this execution that cannot be extracted from other input in the same aggregation batch
// FunctionalPublicInputQSnark is the information on this execution that cannot
// be extracted from other inputs in the same aggregation batch
type FunctionalPublicInputQSnark struct {
DataChecksum frontend.Variable
L2MessageHashes internal.Var32Slice
L2MessageHashes L2MessageHashes
FinalStateRootHash frontend.Variable
FinalBlockNumber frontend.Variable
FinalBlockTimestamp frontend.Variable
@@ -26,6 +27,66 @@ type FunctionalPublicInputQSnark struct {
FinalRollingHashNumber frontend.Variable
}
// L2MessageHashes is a wrapper around [Var32Slice]. It is used to instantiate
// the sequence of L2MessageHash values that we extract from the arithmetization.
// The reason we need a wrapper here is that we hash the L2MessageHashes in a
// specific way.
type L2MessageHashes internal.Var32Slice
// NewL2MessageHashes constructs a new var slice
func NewL2MessageHashes(v [][32]frontend.Variable, max int) L2MessageHashes {
return L2MessageHashes(internal.NewSliceOf32Array(v, max))
}
// CheckSumMiMC returns the hash of the [L2MessageHashes]. The encoding is done
// as follows:
//
// - each L2 hash is decomposed into a hi and a lo part, each over 16 bytes
// - they are sequentially hashed in the following order: (hi_0, lo_0, hi_1, lo_1 ...)
//
// The function also performs a consistency check to ensure that the length of
// the slice is consistent with the number of non-zero elements, and it also
// ensures that the non-zero elements are all packed at the beginning of the
// slice. The function returns zero if the slice encodes zero message hashes
// (this is what happens if no L2 message events are emitted during the present
// execution frame).
//
// @alex: it would be nice to make that function compatible with the GKR hasher
// factory, though in practice this function only creates 32 calls to the
// MiMC permutation, which makes it a non-issue.
func (l *L2MessageHashes) CheckSumMiMC(api frontend.API) frontend.Variable {
var (
// sumIsUsed is used to count the number of non-zero hashes that we
// found in l. It is to be tested against l.Length.
sumIsUsed = frontend.Variable(0)
res = frontend.Variable(0)
)
for i := range l.Values {
var (
hi = internal.Pack(api, l.Values[i][:16], 128, 8)[0]
lo = internal.Pack(api, l.Values[i][16:], 128, 8)[0]
isUsed = api.Sub(
1,
api.Mul(
api.IsZero(hi),
api.IsZero(lo),
),
)
)
tmpRes := mimc.GnarkBlockCompression(api, res, hi)
tmpRes = mimc.GnarkBlockCompression(api, tmpRes, lo)
res = api.Select(isUsed, tmpRes, res)
sumIsUsed = api.Add(sumIsUsed, isUsed)
}
api.AssertIsEqual(sumIsUsed, l.Length)
return res
}
type FunctionalPublicInputSnark struct {
FunctionalPublicInputQSnark
InitialStateRootHash frontend.Variable
@@ -70,15 +131,12 @@ func (pi *FunctionalPublicInputQSnark) RangeCheck(api frontend.API) {
}
func (pi *FunctionalPublicInputSnark) Sum(api frontend.API, hsh gnarkHash.FieldHasher) frontend.Variable {
finalRollingHash := internal.CombineBytesIntoElements(api, pi.FinalRollingHash)
initialRollingHash := internal.CombineBytesIntoElements(api, pi.InitialRollingHash)
hsh.Reset()
for _, v := range pi.L2MessageHashes.Values { // it has to be zero padded
vc := internal.CombineBytesIntoElements(api, v)
hsh.Write(vc[0], vc[1])
}
l2MessagesSum := hsh.Sum()
var (
finalRollingHash = internal.CombineBytesIntoElements(api, pi.FinalRollingHash)
initialRollingHash = internal.CombineBytesIntoElements(api, pi.InitialRollingHash)
l2MessagesSum = pi.L2MessageHashes.CheckSumMiMC(api)
)
hsh.Reset()
hsh.Write(pi.DataChecksum, l2MessagesSum,
@@ -93,7 +151,7 @@ func (pi *FunctionalPublicInput) ToSnarkType() FunctionalPublicInputSnark {
res := FunctionalPublicInputSnark{
FunctionalPublicInputQSnark: FunctionalPublicInputQSnark{
DataChecksum: slices.Clone(pi.DataChecksum[:]),
L2MessageHashes: internal.NewSliceOf32Array(pi.L2MessageHashes, pi.MaxNbL2MessageHashes),
L2MessageHashes: L2MessageHashes(internal.NewSliceOf32Array(pi.L2MessageHashes, pi.MaxNbL2MessageHashes)),
FinalStateRootHash: slices.Clone(pi.FinalStateRootHash[:]),
FinalBlockNumber: pi.FinalBlockNumber,
FinalBlockTimestamp: pi.FinalBlockTimestamp,
@@ -115,19 +173,10 @@ func (pi *FunctionalPublicInput) ToSnarkType() FunctionalPublicInputSnark {
func (pi *FunctionalPublicInput) Sum() []byte { // all mimc; no need to provide a keccak hasher
hsh := mimc.NewMiMC()
var zero [1]byte
for i := range pi.L2MessageHashes {
hsh.Write(pi.L2MessageHashes[i][:16])
hsh.Write(pi.L2MessageHashes[i][16:])
}
nbZeros := pi.MaxNbL2MessageHashes - len(pi.L2MessageHashes)
if nbZeros < 0 {
panic("too many L2 messages")
}
for i := 0; i < nbZeros; i++ {
hsh.Write(zero[:])
hsh.Write(zero[:])
}
l2MessagesSum := hsh.Sum(nil)
hsh.Reset()
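Both the circuit-side CheckSumMiMC and the native Sum above consume each 32-byte message hash as a (hi, lo) pair of 16-byte halves. A minimal hedged sketch of that split, with the gnark-crypto fr import as an assumption:

package main

import (
	"fmt"

	"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
)

// splitHiLo splits one 32-byte L2 message hash into the (hi, lo) field
// elements that are absorbed in the order (hi_0, lo_0, hi_1, lo_1, ...).
func splitHiLo(msgHash [32]byte) (hi, lo fr.Element) {
	hi.SetBytes(msgHash[:16]) // first 16 bytes, big-endian
	lo.SetBytes(msgHash[16:]) // last 16 bytes, big-endian
	return hi, lo
}

func main() {
	var h [32]byte
	h[31] = 7 // hypothetical message hash
	hi, lo := splitHiLo(h)
	fmt.Println(hi.String(), lo.String())
}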

View File

@@ -22,22 +22,23 @@ func checkPublicInputs(
var (
finalRollingHash = internal.CombineBytesIntoElements(api, gnarkFuncInp.FinalRollingHash)
initialRollingHash = internal.CombineBytesIntoElements(api, gnarkFuncInp.InitialRollingHash)
execDataHash = execDataHash(api, wvc, wizardFuncInp)
)
hsh, err := mimc.NewMiMC(api)
if err != nil {
panic(err)
}
hsh.Write(wvc.GetLocalPointEvalParams(wizardFuncInp.DataNbBytes.ID).Y, wvc.GetLocalPointEvalParams(wizardFuncInp.DataChecksum.ID).Y)
api.AssertIsEqual(hsh.Sum(), gnarkFuncInp.DataChecksum)
// Due to the issue below, the execDataHash will not match what we have in the
// functional input (the txnrlp is incorrect). The check should be converted into
// an [api.AssertIsEqual] once the issue is resolved.
//
// https://github.com/Consensys/zkevm-monorepo/issues/3801
//
shouldBeEqual(api, execDataHash, gnarkFuncInp.DataChecksum)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.L2MessageHash.ID).Y,
// TODO: this operation is done a second time when computing the final
// public input, which is wasteful although not dramatic (~8000 unused
// constraints)
gnarkFuncInp.L2MessageHashes.Checksum(api),
gnarkFuncInp.L2MessageHashes.CheckSumMiMC(api),
)
api.AssertIsEqual(
@@ -100,14 +101,11 @@ func checkPublicInputs(
gnarkFuncInp.FinalRollingHashNumber,
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.ChainID.ID).Y,
gnarkFuncInp.ChainID,
)
var (
twoPow128 = new(big.Int).SetInt64(1)
twoPow112 = new(big.Int).SetInt64(1)
_ = twoPow128.Lsh(twoPow128, 128)
_ = twoPow112.Lsh(twoPow112, 112)
bridgeAddress = api.Add(
api.Mul(
twoPow128,
@@ -117,6 +115,43 @@ func checkPublicInputs(
)
)
api.AssertIsEqual(
api.Div(
wvc.GetLocalPointEvalParams(wizardFuncInp.ChainID.ID).Y,
twoPow112,
),
gnarkFuncInp.ChainID,
)
api.AssertIsEqual(bridgeAddress, gnarkFuncInp.L2MessageServiceAddr)
}
// execDataHash hashes the execution-data with its length so that we can guard
// against padding attacks (although padding attacks are not possible to
// begin with, due to the encoding of the plaintext)
func execDataHash(
api frontend.API,
wvc *wizard.WizardVerifierCircuit,
wFuncInp publicInput.FunctionalInputExtractor,
) frontend.Variable {
hsh, err := mimc.NewMiMC(api)
if err != nil {
panic(err)
}
hsh.Write(
wvc.GetLocalPointEvalParams(wFuncInp.DataNbBytes.ID).Y,
wvc.GetLocalPointEvalParams(wFuncInp.DataChecksum.ID).Y,
)
return hsh.Sum()
}
// shouldBeEqual is a placeholder dummy function that generates fake constraints
// as a replacement for what should be an api.AssertIsEqual. If we simply commented
// out the api.AssertIsEqual, we might end up with an unconstrained variable.
func shouldBeEqual(api frontend.API, a, b frontend.Variable) {
_ = api.Sub(a, b)
}

View File

@@ -4,12 +4,13 @@ import (
"context"
"crypto/sha256"
"fmt"
pi_interconnection "github.com/consensys/zkevm-monorepo/prover/circuits/pi-interconnection"
"io"
"os"
"path/filepath"
"strings"
pi_interconnection "github.com/consensys/zkevm-monorepo/prover/circuits/pi-interconnection"
blob_v0 "github.com/consensys/zkevm-monorepo/prover/lib/compressor/blob/v0"
blob_v1 "github.com/consensys/zkevm-monorepo/prover/lib/compressor/blob/v1"
"github.com/sirupsen/logrus"
@@ -50,10 +51,10 @@ var allCircuits = []string{
string(circuits.ExecutionLargeCircuitID),
string(circuits.BlobDecompressionV0CircuitID),
string(circuits.BlobDecompressionV1CircuitID),
string(circuits.PublicInputInterconnectionCircuitID),
string(circuits.AggregationCircuitID),
string(circuits.EmulationCircuitID),
string(circuits.EmulationDummyCircuitID), // we want to generate Verifier.sol for this one
string(circuits.PublicInputInterconnectionCircuitID),
}
func init() {

View File

@@ -8,6 +8,7 @@ retry_delays = [0, 1]
[execution]
prover_mode = "full"
conflated_traces_dir = "./prover-v2/conflated"
[blob_decompression]
prover_mode = "dev"

View File

@@ -94,7 +94,7 @@ type Config struct {
Version string `validate:"required,semver"`
// LogLevel sets the log level for the logger.
LogLevel logLevel `mapstructure:"log_level" validate:"required,gte=0,lte=5"`
LogLevel logLevel `mapstructure:"log_level" validate:"required,gte=0,lte=6"`
// AssetsDir stores the root of the directory where the assets are stored (setup) or
// accessed (prover). The file structure is described in TODO @gbotrel.

View File

@@ -7,7 +7,7 @@ import (
// Standard parameters that we use for ring-SIS; they are benchmarked to achieve
// more than 128 bits of security.
var StdParams = Params{LogTwoBound: 8, LogTwoDegree: 6}
var StdParams = Params{LogTwoBound: 16, LogTwoDegree: 6}
// Params encapsulates the parameters of a ring SIS instance
type Params struct {

View File

@@ -67,8 +67,22 @@ func GenerateKey(params Params, maxNumFieldToHash int) Key {
}
// Optimization for these specific parameters
if params.LogTwoBound == 8 && params.LogTwoDegree == 6 {
res.twiddleCosets = sis.PrecomputeTwiddlesCoset(
if params.LogTwoBound == 8 && 1<<params.LogTwoDegree == 64 {
res.twiddleCosets = ringsis_64_8.PrecomputeTwiddlesCoset(
rsis.Domain.Generator,
rsis.Domain.FrMultiplicativeGen,
)
}
if params.LogTwoBound == 16 && 1<<params.LogTwoDegree == 64 {
res.twiddleCosets = ringsis_64_16.PrecomputeTwiddlesCoset(
rsis.Domain.Generator,
rsis.Domain.FrMultiplicativeGen,
)
}
if params.LogTwoBound == 8 && 1<<params.LogTwoDegree == 32 {
res.twiddleCosets = ringsis_32_8.PrecomputeTwiddlesCoset(
rsis.Domain.Generator,
rsis.Domain.FrMultiplicativeGen,
)
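A hedged back-of-the-envelope for the new default instance (LogTwoBound=16, LogTwoDegree=6), assuming limbs are counted as fieldBits/LogTwoBound, consistently with the numFieldPerPoly = deg / numLimbs arithmetic appearing later in this diff:

package main

import "fmt"

// Hedged sketch: rough accounting for StdParams{LogTwoBound: 16,
// LogTwoDegree: 6}. The 256-bit serialized field size is an assumption.
func main() {
	const (
		fieldBits    = 256
		logTwoBound  = 16
		logTwoDegree = 6
	)
	numLimbs := fieldBits / logTwoBound // 16 limbs per field element
	degree := 1 << logTwoDegree         // ring polynomials of degree 64
	fmt.Println(degree / numLimbs)      // 4 field elements fill one polynomial
}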

View File

@@ -11,6 +11,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/utils/parallel"
"github.com/consensys/zkevm-monorepo/prover/utils/types"
"github.com/sirupsen/logrus"
)
// MerkleCommitment represents a (merkle-mode) Vortex commitment
@@ -34,9 +35,14 @@ func (p *Params) CommitMerkle(ps []smartvectors.SmartVector) (encodedMatrix Enco
utils.Panic("too many rows: %v, capacity is %v\n", len(ps), p.MaxNbRows)
}
logrus.Infof("Vortex compiler: RS encoding nrows=%v of ncol=%v to codeword-size=%v", len(ps), p.NbColumns, p.NbColumns*p.BlowUpFactor)
encodedMatrix = p.encodeRows(ps)
logrus.Infof("Vortex compiler: RS encoding DONE")
logrus.Infof("Vortex compiler: SIS hashing nrows=%v of ncol=%v to codeword-size=%v", len(ps), p.NbColumns, p.NbColumns*p.BlowUpFactor)
colHashes = p.hashColumns(encodedMatrix)
logrus.Infof("Vortex compiler: SIS hashing DONE")
logrus.Infof("Vortex compiler: SIS merkle hashing START")
// Hash the digest by chunk and build the tree using the chunk hashes as leaves.
var leaves []types.Bytes32
@@ -55,6 +61,7 @@ func (p *Params) CommitMerkle(ps []smartvectors.SmartVector) (encodedMatrix Enco
return hashtypes.Hasher{Hash: p.HashFunc()}
},
)
logrus.Infof("Vortex compiler: SIS merkle hashing DONE")
return encodedMatrix, tree, colHashes
}

View File

@@ -106,9 +106,13 @@ func Interpolate(v SmartVector, x field.Element, oncoset ...bool) field.Element
// Batch-evaluate polynomials in Lagrange basis
func BatchInterpolate(vs []SmartVector, x field.Element, oncoset ...bool) []field.Element {
polys := make([][]field.Element, len(vs))
results := make([]field.Element, len(vs))
computed := make([]bool, len(vs))
var (
polys = make([][]field.Element, len(vs))
results = make([]field.Element, len(vs))
computed = make([]bool, len(vs))
totalConstant = 0
)
// smartvector to []fr.element
parallel.Execute(len(vs), func(start, stop int) {
@@ -118,29 +122,44 @@ func BatchInterpolate(vs []SmartVector, x field.Element, oncoset ...bool) []fiel
// constant vectors
results[i] = con.val
computed[i] = true
totalConstant++
continue
}
// non-constant vectors
polys[i] = vs[i].IntoRegVecSaveAlloc()
}
})
return BatchInterpolateSV(results, computed, polys, x, oncoset...)
if totalConstant == len(vs) {
return results
}
return batchInterpolateSV(results, computed, polys, x, oncoset...)
}
// batchInterpolateSV is an optimized batch interpolation for smart-vectors.
// It reduces the number of computations by pre-processing
// constant vectors in advance in BatchInterpolate()
func BatchInterpolateSV(results []field.Element, computed []bool, polys [][]field.Element, x field.Element, oncoset ...bool) []field.Element {
func batchInterpolateSV(results []field.Element, computed []bool, polys [][]field.Element, x field.Element, oncoset ...bool) []field.Element {
poly := polys[0]
if !utils.IsPowerOfTwo(len(poly)) {
utils.Panic("only support powers of two but poly has length %v", len(poly))
n := 0
for i := range polys {
if len(polys[i]) > 0 {
n = len(polys[i])
}
}
n := len(poly)
if n == 0 {
// that's a possible edge-case and it can happen if all the input polys
// are constant smart-vectors. This should be prevented by the
// caller.
return results
}
if !utils.IsPowerOfTwo(n) {
utils.Panic("only support powers of two but poly has length %v", len(polys))
}
domain := fft.NewDomain(n)
denominator := make([]field.Element, n)

View File

@@ -15,8 +15,13 @@ func InterpolateGnark(api frontend.API, poly []frontend.Variable, x frontend.Var
utils.Panic("only support powers of two but poly has length %v", len(poly))
}
n := len(poly)
// When the poly is of length 1 it means it is a constant polynomial and its
// evaluation is trivial.
if len(poly) == 1 {
return poly[0]
}
n := len(poly)
domain := fft.NewDomain(n)
one := field.One()
@@ -43,7 +48,8 @@ func InterpolateGnark(api frontend.API, poly []frontend.Variable, x frontend.Var
}
// If the current term is the constant zero, we continue without generating
// constraints.
// constraints. As a result, 'terms' may contain nil elements. Therefore,
// we will need to remove them later.
if c, isC := api.Compiler().ConstantValue(poly[i]); isC && c.IsInt64() && c.Int64() == 0 {
continue
}
@@ -55,8 +61,28 @@ func InterpolateGnark(api frontend.API, poly []frontend.Variable, x frontend.Var
terms[i] = api.Mul(terms[i], poly[i])
}
nonNilTerms := make([]frontend.Variable, 0, len(terms))
for i := range terms {
if terms[i] == nil {
continue
}
nonNilTerms = append(nonNilTerms, terms[i])
}
// Then sum all the terms
res := api.Add(terms[0], terms[1], terms[2:]...)
var res frontend.Variable
switch {
case len(nonNilTerms) == 0:
res = 0
case len(nonNilTerms) == 1:
res = nonNilTerms[0]
case len(nonNilTerms) == 2:
res = api.Add(nonNilTerms[0], nonNilTerms[1])
default:
res = api.Add(nonNilTerms[0], nonNilTerms[1], nonNilTerms[2:]...)
}
/*
Then multiply the res by a factor \frac{g^{1 - n}X^n -g}{n}

View File

@@ -20,6 +20,8 @@ var _ VerifierCol = FromAccessors{}
type FromAccessors struct {
// Accessors stores the list of accessors building the column.
Accessors []ifaces.Accessor
// Padding is the value used to right-pad the column up to Size_.
Padding field.Element
// Size_ is the total size of the column, including the padding.
Size_ int
// Round_ caches the round value of the column.
Round_ int
}
@@ -30,15 +32,15 @@ type FromAccessors struct {
// You should not pass accessors of type [expressionAsAccessor] as their
// evaluation within a gnark circuit requires using the frontend.API which we
// can't access in the context currently.
func NewFromAccessors(accessors []ifaces.Accessor) ifaces.Column {
if !utils.IsPowerOfTwo(len(accessors)) {
panic("the column must be a power of two")
func NewFromAccessors(accessors []ifaces.Accessor, padding field.Element, size int) ifaces.Column {
if !utils.IsPowerOfTwo(size) {
utils.Panic("the column must be a power of two (size=%v)", size)
}
round := 0
for i := range accessors {
round = max(round, accessors[i].Round())
}
return FromAccessors{Accessors: accessors, Round_: round}
return FromAccessors{Accessors: accessors, Round_: round, Padding: padding, Size_: size}
}
// Round returns the round ID of the column and implements the [ifaces.Column]
@@ -53,7 +55,7 @@ func (f FromAccessors) GetColID() ifaces.ColID {
for i := range f.Accessors {
accessorNames[i] = f.Accessors[i].Name()
}
return ifaces.ColIDf("FROM_ACCESSORS_%v", strings.Join(accessorNames, "_"))
return ifaces.ColIDf("FROM_ACCESSORS_%v_PADDING=%v_SIZE=%v", strings.Join(accessorNames, "_"), f.Padding.String(), f.Size_)
}
// MustExists implements the [ifaces.Column] interface and always returns true.
@@ -62,7 +64,7 @@ func (f FromAccessors) MustExists() {}
// Size returns the size of the column and implements the [ifaces.Column]
// interface.
func (f FromAccessors) Size() int {
return len(f.Accessors)
return f.Size_
}
// GetColAssignment returns the assignment of the current column
@@ -71,25 +73,48 @@ func (f FromAccessors) GetColAssignment(run ifaces.Runtime) ifaces.ColAssignment
for i := range res {
res[i] = f.Accessors[i].GetVal(run)
}
return smartvectors.NewRegular(res)
return smartvectors.RightPadded(res, f.Padding, f.Size_)
}
// GetColAssignment returns a gnark assignment of the current column
func (f FromAccessors) GetColAssignmentGnark(run ifaces.GnarkRuntime) []frontend.Variable {
res := make([]frontend.Variable, len(f.Accessors))
for i := range res {
res := make([]frontend.Variable, f.Size_)
for i := range f.Accessors {
res[i] = f.Accessors[i].GetFrontendVariable(nil, run)
}
for i := len(f.Accessors); i < f.Size_; i++ {
res[i] = f.Padding
}
return res
}
// GetColAssignmentAt returns a particular position of the column
func (f FromAccessors) GetColAssignmentAt(run ifaces.Runtime, pos int) field.Element {
if pos >= f.Size_ {
utils.Panic("out of bound: size=%v pos=%v", f.Size_, pos)
}
if pos >= len(f.Accessors) {
return f.Padding
}
return f.Accessors[pos].GetVal(run)
}
// GetColAssignmentGnarkAt returns a particular position of the column in a gnark circuit
func (f FromAccessors) GetColAssignmentGnarkAt(run ifaces.GnarkRuntime, pos int) frontend.Variable {
if pos >= f.Size_ {
utils.Panic("out of bound: size=%v pos=%v", f.Size_, pos)
}
if pos >= len(f.Accessors) {
return f.Padding
}
return f.Accessors[pos].GetFrontendVariable(nil, run)
}
@@ -105,10 +130,23 @@ func (f FromAccessors) String() string {
// Split implements the [VerifierCol] interface
func (f FromAccessors) Split(_ *wizard.CompiledIOP, from, to int) ifaces.Column {
if from >= len(f.Accessors) {
return NewConstantCol(f.Padding, to-from)
}
var subAccessors = f.Accessors[from:]
if to < len(f.Accessors) {
subAccessors = f.Accessors[from:to]
}
// We don't go through the constructor, to ensure that the segment has the
// same round definition as the original column.
return FromAccessors{
Accessors: f.Accessors[from:to],
Accessors: subAccessors,
Round_: f.Round_,
Padding: f.Padding,
Size_: to - from,
}
}

View File

@@ -41,13 +41,10 @@ func NewFromIntVecCoin(comp *wizard.CompiledIOP, info coin.Info, ops ...FivcOp)
}
if settings.padding.IsPadded {
fullLen := utils.NextPowerOfTwo(len(access))
for len(access) < fullLen {
access = append(access, accessors.NewConstant(field.Zero()))
}
return NewFromAccessors(access, field.Zero(), utils.NextPowerOfTwo(len(access)))
}
return NewFromAccessors(access)
return NewFromAccessors(access, field.Zero(), len(access))
}
// Passes a padding value to the Fivc

View File

@@ -11,13 +11,13 @@ import (
// Represents a constant column
type ConstCol struct {
F field.Element
size int
F field.Element
Size_ int
}
// NewConstCol creates a new ConstCol column
func NewConstantCol(f field.Element, size int) ifaces.Column {
return ConstCol{F: f, size: size}
return ConstCol{F: f, Size_: size}
}
// Returns the round of definition of the column (always zero)
@@ -29,7 +29,7 @@ func (cc ConstCol) Round() int {
// Returns a generic name for the column, defined from its value and size.
func (cc ConstCol) GetColID() ifaces.ColID {
return ifaces.ColIDf("CONSTCOL_%v_%v", cc.F.String(), cc.size)
return ifaces.ColIDf("CONSTCOL_%v_%v", cc.F.String(), cc.Size_)
}
// Always return true
@@ -37,17 +37,17 @@ func (cc ConstCol) MustExists() {}
// Returns the size of the column
func (cc ConstCol) Size() int {
return cc.size
return cc.Size_
}
// Returns a constant smart-vector
func (cc ConstCol) GetColAssignment(_ ifaces.Runtime) ifaces.ColAssignment {
return smartvectors.NewConstant(cc.F, cc.size)
return smartvectors.NewConstant(cc.F, cc.Size_)
}
// Returns the column as a list of gnark constants
func (cc ConstCol) GetColAssignmentGnark(_ ifaces.GnarkRuntime) []frontend.Variable {
res := make([]frontend.Variable, cc.size)
res := make([]frontend.Variable, cc.Size_)
for i := range res {
res[i] = cc.F
}
@@ -56,12 +56,12 @@ func (cc ConstCol) GetColAssignmentGnark(_ ifaces.GnarkRuntime) []frontend.Varia
// Returns the column value at a particular position
func (cc ConstCol) GetColAssignmentAt(run ifaces.Runtime, pos int) field.Element {
return cc.GetColAssignment(run).Get(pos)
return cc.F
}
// Returns the column value at a particular position
func (cc ConstCol) GetColAssignmentGnarkAt(run ifaces.GnarkRuntime, pos int) frontend.Variable {
return cc.GetColAssignmentGnark(run)[pos]
return cc.F
}
// Since the column is directly defined from the

View File

@@ -76,9 +76,5 @@ func NewConcatTinyColumns(
utils.Panic("the target length (=%v) is smaller than the given columns (=%v)", paddedSize, len(cols))
}
for len(access) < paddedSize {
access = append(access, accessors.NewConstant(paddingVal))
}
return NewFromAccessors(access)
return NewFromAccessors(access, paddingVal, paddedSize)
}

View File

@@ -86,6 +86,8 @@ func Compile(comp *wizard.CompiledIOP) {
*/
verifier := func(run *wizard.VerifierRuntime) error {
logrus.Infof("started to run the dummy verifier")
var finalErr error
lock := sync.Mutex{}
@@ -102,6 +104,7 @@ func Compile(comp *wizard.CompiledIOP) {
lock.Lock()
finalErr = fmt.Errorf("%v\nfailed %v - %v", finalErr, name, err)
lock.Unlock()
logrus.Debugf("query %v failed\n", name)
} else {
logrus.Debugf("query %v passed\n", name)
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/sirupsen/logrus"
)
const (
@@ -21,6 +22,9 @@ const (
// expression optimization and runtime memory optimizations for the prover.
func Compile(comp *wizard.CompiledIOP) {
logrus.Trace("started global constraint compiler")
defer logrus.Trace("finished global constraint compiler")
merging, anyCs := accumulateConstraints(comp)
if !anyCs {
return
@@ -38,6 +42,7 @@ func Compile(comp *wizard.CompiledIOP) {
comp.RegisterProverAction(quotientRound, &quotientCtx)
comp.RegisterProverAction(evaluationRound, evaluationProver(evaluationCtx))
comp.RegisterVerifierAction(evaluationRound, evaluationVerifier(evaluationCtx))
}
func deriveName(comp *wizard.CompiledIOP, s string, args ...any) string {

View File

@@ -106,6 +106,9 @@ func (pa evaluationProver) Run(run *wizard.ProverRuntime) {
witness := handle.GetColAssignment(run)
witnesses[i] = witness
if witness.Len() == 0 {
logrus.Errorf("found a witness of size zero: %v", handle.GetColID())
}
}
})

View File

@@ -43,7 +43,7 @@ func factorExpression(comp *wizard.CompiledIOP, expr *symbolic.Expression) *symb
if !found {
wrapper.Expr = simplify.AutoSimplify(flattenedExpr)
if err := comp.Artefacts.Store(cacheKey, wrapper); err != nil {
utils.Panic("could not cache the factored expression: %v", err)
utils.Panic("could not cache the factored expression: %v", err.Error())
}
}

View File

@@ -1,6 +1,8 @@
package lookup
import (
"slices"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/coin"
"github.com/consensys/zkevm-monorepo/prover/protocol/column/verifiercol"
@@ -32,11 +34,13 @@ func CompileLogDerivative(comp *wizard.CompiledIOP) {
// which Z context should be used to handle a part of a given permutation
// query.
zCatalog = map[[2]int]*zCtx{}
zEntries = [][2]int{}
// verifier actions
va = finalEvaluationCheck{}
)
// Skip the compilation phase if no lookup constraint is being used
// Skip the compilation phase if no lookup constraint is being used. Otherwise,
// it would register a verifier action that is not required and would be buggy.
if len(mainLookupCtx.lookupTables) == 0 {
return
}
@@ -67,8 +71,33 @@ func CompileLogDerivative(comp *wizard.CompiledIOP) {
)
}
// This loop is necessary to build a sorted list of the entries of zCatalog.
// Without it, if we tried to loop over zCatalog directly, the entries would
// be processed in a non-deterministic order. The sorting order itself is
// unimportant; what matters is that zEntries is in a deterministic
// order.
for entry := range zCatalog {
zEntries = append(zEntries, entry)
}
slices.SortFunc(zEntries, func(a, b [2]int) int {
switch {
case a[0] < b[0]:
return -1
case a[0] > b[0]:
return 1
case a[1] < b[1]:
return -1
case a[1] > b[1]:
return 1
default:
return 0
}
})
// compile zCatalog
for entry, zC := range zCatalog {
for _, entry := range zEntries {
zC := zCatalog[entry]
// z-packing compile
zC.compile(comp)
// entry[0]:round, entry[1]: size
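The same comparator can be written more compactly with cmp.Compare (Go >= 1.21); a hedged standalone sketch of the collect-then-sort pattern, not part of the diff:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

// sortedEntries: Go map iteration order is randomized, so a reproducible
// compiler must collect and sort the keys before processing them.
func sortedEntries(catalog map[[2]int]struct{}) [][2]int {
	entries := make([][2]int, 0, len(catalog))
	for e := range catalog {
		entries = append(entries, e)
	}
	slices.SortFunc(entries, func(a, b [2]int) int {
		if c := cmp.Compare(a[0], b[0]); c != 0 { // compare rounds first
			return c
		}
		return cmp.Compare(a[1], b[1]) // then sizes
	})
	return entries
}

func main() {
	catalog := map[[2]int]struct{}{{2, 8}: {}, {1, 16}: {}, {1, 4}: {}}
	fmt.Println(sortedEntries(catalog)) // [[1 4] [1 16] [2 8]]
}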

View File

@@ -276,7 +276,7 @@ func (a mAssignmentTask) run(run *wizard.ProverRuntime) {
if !ok {
tableRow := make([]field.Element, len(a.S[i]))
for j := range tableRow {
tableRow[i] = a.S[i][j].GetColAssignmentAt(run, k)
tableRow[j] = a.S[i][j].GetColAssignmentAt(run, k)
}
utils.Panic(
"entry %v of the table %v is not included in the table. tableRow=%v",

View File

@@ -54,14 +54,25 @@ func (ctx *SelfRecursionCtx) registersAh() {
// round
maxSize := utils.NextPowerOfTwo(ctx.VortexCtx.CommittedRowsCount)
roundStartAt := 0
// Consider the precomputed columns
if ctx.VortexCtx.IsCommitToPrecomputed() {
numPrecomputeds := len(ctx.VortexCtx.Items.Precomputeds.PrecomputedColums)
// Sanity-check : if coms in precomputeds have length zero then the
// associated Dh should be nil
if (numPrecomputeds == 0) != (ctx.Columns.precompRoot == nil) {
panic("nilness mismatch for precomputeds")
}
// The Vortex compiler is supposed to add "shadow columns" ensuring that
// every round (counting the precomputations as a round) uses ring-SIS
// polynomials fully. Otherwise, the compilation cannot succeed.
if (numPrecomputeds*ctx.SisKey().NumLimbs())%(1<<ctx.SisKey().LogTwoDegree) > 0 {
panic("the ring-SIS polynomials are not fully used")
}
// Registers the commitment key (if this matches an existing key
// then the preexisting precomputed key is reused).
ah[0] = ctx.comp.InsertPrecomputed(
@@ -77,10 +88,12 @@ func (ctx *SelfRecursionCtx) registersAh() {
if ctx.VortexCtx.IsCommitToPrecomputed() {
precompOffset += 1
}
for i, comsInRoundsI := range ctx.VortexCtx.CommitmentsByRounds.Inner() {
// Sanity-check : if coms in rounds has length zero then the
// associated Dh should be nil
// associated Dh should be nil. That happens when the examined round
// is a "dry" round or when it has been self-recursed already.
if (len(comsInRoundsI) == 0) != (ctx.Columns.Rooth[i] == nil) {
panic("nilness mismatch")
}
@@ -91,6 +104,14 @@ func (ctx *SelfRecursionCtx) registersAh() {
continue
}
// The Vortex compiler is supposed to add "shadow columns" ensuring that
// every round (counting the precomputations as a round) uses ring-SIS
// polynomials fully. Otherwise, the compilation cannot succeed.
if (len(comsInRoundsI)*ctx.SisKey().NumLimbs())%(1<<ctx.SisKey().LogTwoDegree) > 0 {
panic("the ring-SIS polynomials are not fully used")
}
// Registers the commitment key (if this matches an existing key
// then the preexisting precomputed key is reused).
ah[i+precompOffset] = ctx.comp.InsertPrecomputed(

View File

@@ -164,6 +164,8 @@ func (ctx SelfRecursionCtx) GluePositions() {
positionVec.(verifiercol.FromAccessors).Accessors,
merklePos.Size()/sizePositionVec,
),
field.Zero(),
merklePos.Size(),
)
// If MerkleRoots is correct, then there is a permutation we can

View File

@@ -2,10 +2,15 @@ package selfrecursion
import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/sirupsen/logrus"
)
// Apply the self-recursion transformation over a vortex compiled
func SelfRecurse(comp *wizard.CompiledIOP) {
logrus.Trace("started self-recursion compiler")
defer logrus.Trace("finished self-recursion compiler")
ctx := NewSelfRecursionCxt(comp)
ctx.Precomputations()
// the round-by-round commitment phase is implicit here

View File

@@ -45,6 +45,8 @@ var sisInstances = []ringsis.Params{
{LogTwoBound: 4, LogTwoDegree: 2},
{LogTwoBound: 8, LogTwoDegree: 3},
{LogTwoBound: 8, LogTwoDegree: 6},
{LogTwoBound: 8, LogTwoDegree: 5},
{LogTwoBound: 16, LogTwoDegree: 6},
}
// testcase type

View File

@@ -288,14 +288,35 @@ func (ctx splitterCtx) compileGlobal(comp *wizard.CompiledIOP, q query.GlobalCon
}
translationMap.InsertNew(m.String(), ifaces.ColumnAsVariable(subHandle))
case variables.X:
panic("unsupported, the value of `x` in the unsplit query and the split would be different")
utils.Panic("unsupported, the value of `x` in the unsplit query and the split would be different. query=%v", q.Name())
case variables.PeriodicSample:
// Check that the period is not larger than the domain size
// Check that the period is not larger than the domain size. If
// the period is smaller, this is a no-op because the period does
// not change.
translated := symbolic.NewVariable(metadata)
if m.T > ctx.size {
panic("unsupported case : the period is larger than the split")
// Here, there are two possibilities: (1) the current slot is
// on a portion of the periodic sample where everything is
// zero, or (2) the current slot matches a portion of the
// periodic sampling containing a 1. To determine which
// situation we are in, we need to find out where the slot
// is located relative to the period.
var (
slotStartAt = (slot * ctx.size) % m.T
slotStopAt = slotStartAt + ctx.size
)
if m.Offset >= slotStartAt && m.Offset < slotStopAt {
translated = variables.NewPeriodicSample(ctx.size, m.Offset%ctx.size)
} else {
translated = symbolic.NewConstant(0)
}
}
// And we can just pass it over because the period does not change
translationMap.InsertNew(m.String(), symbolic.NewVariable(metadata))
translationMap.InsertNew(m.String(), translated)
default:
// Repass the same variable (for coins or other types of single-valued variable)
translationMap.InsertNew(m.String(), symbolic.NewVariable(metadata))
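A hedged worked example of the slot arithmetic above; splitPeriodic and its string output are illustrative only, not names from the repository:

package main

import "fmt"

// splitPeriodic mirrors the branch above: a PeriodicSample of period T
// with a pulse at `offset` survives only in the sub-column whose window
// [slotStartAt, slotStopAt) contains the offset.
func splitPeriodic(T, size, offset, slot int) string {
	slotStartAt := (slot * size) % T
	slotStopAt := slotStartAt + size
	if offset >= slotStartAt && offset < slotStopAt {
		return fmt.Sprintf("PeriodicSample(period=%d, offset=%d)", size, offset%size)
	}
	return "0" // this slot only sees the zero portion of the sample
}

func main() {
	// Period 16 split into slots of size 4, pulse at offset 6:
	fmt.Println(splitPeriodic(16, 4, 6, 0)) // "0"
	fmt.Println(splitPeriodic(16, 4, 6, 1)) // "PeriodicSample(period=4, offset=2)"
}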

View File

@@ -15,6 +15,9 @@ import (
func CompileLocalOpening(comp *wizard.CompiledIOP) {
logrus.Trace("started local opening compiler")
defer logrus.Trace("finished local opening compiler")
// The main idea is that we want to group the fixed point queries
// that are on the same points. That way, we maintain the invariant
// that all univariate queries are on different points.

View File

@@ -28,6 +28,10 @@ Reduce all the univariate queries into a unique single point evaluation
See : https://eprint.iacr.org/2020/081.pdf (Section 3)
*/
func MultiPointToSinglePoint(targetSize int) func(comp *wizard.CompiledIOP) {
logrus.Trace("started multi-point to single-point compiler")
defer logrus.Trace("finished multi-point to single-point compiler")
return func(comp *wizard.CompiledIOP) {
ctx := createMptsCtx(comp, targetSize)

View File

@@ -43,6 +43,9 @@ Interleaving:
*/
func Naturalize(comp *wizard.CompiledIOP) {
logrus.Trace("started naturalization compiler")
defer logrus.Trace("finished naturalization compiler")
// The compilation process is applied separately for each query
for roundID := 0; roundID < comp.NumRounds(); roundID++ {
for _, qName := range comp.QueriesParams.AllKeysAt(roundID) {

View File

@@ -34,6 +34,9 @@ There are the following requirements:
*/
func Compile(blowUpFactor int, options ...VortexOp) func(*wizard.CompiledIOP) {
logrus.Trace("started vortex compiler")
defer logrus.Trace("finished vortex compiler")
if !utils.IsPowerOfTwo(blowUpFactor) {
utils.Panic("expected a power of two but rho was %v", blowUpFactor)
}
@@ -302,7 +305,7 @@ func (ctx *Ctx) compileRoundWithVortex(round int, coms []ifaces.ColID) {
if deg%numLimbs != 0 {
utils.Panic("the number of limbs should at least divide the degree")
}
numFieldPerPoly := deg / numLimbs
numFieldPerPoly := utils.Max(1, deg/numLimbs)
numShadow := (numFieldPerPoly - (len(coms) % numFieldPerPoly)) % numFieldPerPoly
targetSize := ctx.comp.Columns.GetSize(coms[0])
@@ -579,6 +582,14 @@ func (ctx *Ctx) processStatusPrecomputed() {
}
for _, name := range precomputedColNames {
_, ok := ctx.PolynomialsTouchedByTheQuery[name]
if !ok {
logrus.Warnf("got an unconstrained column: %v -> marking as ignored", name)
comp.Columns.MarkAsIgnored(name)
continue
}
pCol := comp.Columns.GetHandle(name)
// Marking all these columns as "Ignored" to mean that the compiler
// should ignore these columns.
@@ -606,7 +617,7 @@ func (ctx *Ctx) processStatusPrecomputed() {
sisDegree = ctx.SisParams.OutputSize()
sisNumLimbs = ctx.SisParams.NumLimbs()
sisNumFieldPerPoly = utils.Max(1, sisDegree/sisNumLimbs)
numShadowRows = len(precomputedCols) % sisNumFieldPerPoly
numShadowRows = sisNumFieldPerPoly - (len(precomputedCols) % sisNumFieldPerPoly)
)
if sisDegree > sisNumLimbs && numShadowRows > 0 {
@@ -619,8 +630,10 @@ func (ctx *Ctx) processStatusPrecomputed() {
precomputedCols = append(precomputedCols, shadowCol)
}
}
}
logrus.Infof("Processed %v precomputed columns", len(precomputedCols))
ctx.Items.Precomputeds.PrecomputedColums = precomputedCols
}
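A hedged numeric check of the corrected shadow-row count. Note that the round-commitment path (compileRoundWithVortex, earlier in this diff) takes a further % numFieldPerPoly so that an already-aligned count maps to zero; that extra modulo is not taken in this hunk:

package main

import "fmt"

// Hedged sketch: shadow rows pad the precomputed columns up to a
// multiple of sisNumFieldPerPoly so that ring-SIS polynomials are
// fully used.
func main() {
	const sisNumFieldPerPoly = 4
	for _, nCols := range []int{5, 6, 7} {
		before := nCols % sisNumFieldPerPoly                  // count before the fix
		after := sisNumFieldPerPoly - nCols%sisNumFieldPerPoly // corrected count
		fmt.Printf("nCols=%d before=%d after=%d total=%d\n", nCols, before, after, nCols+after)
	}
	// nCols=5 before=1 after=3 total=8; nCols=7 before=3 after=1 total=8
}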

View File

@@ -17,6 +17,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/variables"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/sirupsen/logrus"
)
// PlonkCheck adds a PLONK circuit in the wizard. Namely, the function takes a
@@ -48,6 +49,8 @@ func PlonkCheck(
options ...Option,
) compilationCtx {
logrus.Infof("building circuit for name=%v, nbInstance=%v", name, maxNbInstance)
// Create the ctx
ctx := createCtx(comp, name, round, circuit, maxNbInstance, options...)

View File

@@ -27,6 +27,7 @@ package projection
import (
"fmt"
"strings"
"github.com/consensys/gnark/frontend"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
@@ -39,6 +40,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizardutils"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/sirupsen/logrus"
)
// projectionProverAction is a compilation artefact generated during the
@@ -46,7 +48,9 @@ import (
// [wizard.ProverAction]. It is meant to compute and assign the "Horner" columns
// and their respective local opening queries.
type projectionProverAction struct {
Name ifaces.QueryID
FilterA, FilterB ifaces.Column
ColA, ColB []ifaces.Column
ABoard, BBoard sym.ExpressionBoard
EvalCoin coin.Info
HornerA, HornerB ifaces.Column
@@ -135,9 +139,12 @@ func InsertProjection(
aExpr, _, _ = wizardutils.AsExpr(a)
bExpr, _, _ = wizardutils.AsExpr(b)
pa = projectionProverAction{
Name: queryName,
EvalCoin: comp.InsertCoin(round, coin.Namef("%v_EVAL_COIN", queryName), coin.Field),
FilterA: filterA,
FilterB: filterB,
ColA: columnsA,
ColB: columnsB,
ABoard: aExpr.Board(),
BBoard: bExpr.Board(),
HornerA: comp.InsertCommit(round, ifaces.ColIDf("%v_HORNER_A", queryName), sizeA),
@@ -225,6 +232,69 @@ func (pa projectionProverAction) Run(run *wizard.ProverRuntime) {
run.AssignColumn(pa.HornerB.GetColID(), smartvectors.NewRegular(hornerB))
run.AssignLocalPoint(pa.HornerA0.ID, hornerA[0])
run.AssignLocalPoint(pa.HornerB0.ID, hornerB[0])
if hornerA[0] != hornerB[0] {
var (
colA = make([][]field.Element, len(pa.ColA))
colB = make([][]field.Element, len(pa.ColB))
cntA = 0
cntB = 0
rowsA = [][]string{}
rowsB = [][]string{}
)
for c := range pa.ColA {
colA[c] = pa.ColA[c].GetColAssignment(run).IntoRegVecSaveAlloc()
colB[c] = pa.ColB[c].GetColAssignment(run).IntoRegVecSaveAlloc()
}
for i := range fA {
if fA[i].IsZero() {
continue
}
row := make([]string, len(pa.ColA))
for c := range pa.ColA {
fString := colA[c][i].Text(16)
if colA[c][i].IsUint64() && colA[c][i].Uint64() < 1000000 {
fString = colA[c][i].String()
}
row[c] = fmt.Sprintf("%v=%v", pa.ColA[c].GetColID(), fString)
}
rowsA = append(rowsA, row)
cntA++
}
for i := range fB {
if fB[i].IsZero() {
continue
}
row := make([]string, len(pa.ColB))
for c := range pa.ColB {
fString := colB[c][i].Text(16)
if colB[c][i].IsUint64() && colB[c][i].Uint64() < 1000000 {
fString = colB[c][i].String()
}
row[c] = fmt.Sprintf("%v=%v", pa.ColB[c].GetColID(), fString)
}
rowsB = append(rowsB, row)
cntB++
}
for i := range rowsA {
fmt.Printf("row=%v %v %v\n", i, strings.Join(rowsA[i], " "), strings.Join(rowsB[i], " "))
}
logrus.Errorf("projection query %v failed", pa.Name)
}
}
// Run implements the [wizard.VerifierAction] interface.

View File

@@ -1,6 +1,7 @@
package query
import (
"errors"
"fmt"
"github.com/consensys/gnark/frontend"
@@ -196,6 +197,8 @@ func (r Inclusion) Check(run ifaces.Runtime) error {
}
}
var errLU error
// Effectively run the check on the included table
for row := 0; row < r.Included[0].Size(); row++ {
if r.IsFilteredOnIncluded() && filterIncluded.Get(row) == field.Zero() {
@@ -207,13 +210,14 @@ func (r Inclusion) Check(run ifaces.Runtime) error {
notFoundRow := []string{}
for c := range included {
x := included[c].Get(row)
notFoundRow = append(notFoundRow, fmt.Sprintf("%v=%v", r.Included[c].GetColID(), x.String()))
notFoundRow = append(notFoundRow, fmt.Sprintf("%v=%v", r.Included[c].GetColID(), x.Text(16)))
}
return fmt.Errorf("row %v was not found in the `including` table : %v", row, notFoundRow)
errLU = errors.Join(errLU, fmt.Errorf("row %v was not found in the `including` table : %v", row, notFoundRow))
}
}
return nil
return errLU
}
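With errors.Join (Go >= 1.20), the check now reports every missing row instead of stopping at the first one, and joining onto a nil error is a no-op, so returning errLU still signals success when all rows are found. A hedged illustration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var errLU error // stays nil if no row fails
	for _, row := range []int{3, 7} {
		errLU = errors.Join(errLU, fmt.Errorf("row %v was not found in the `including` table", row))
	}
	fmt.Println(errLU) // both failures, newline-separated
}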
// GnarkCheck implements the [ifaces.Query] interface. It will panic in this

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"reflect"
"unicode"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
@@ -169,6 +170,10 @@ func SerializeValue(v reflect.Value, mode mode) (json.RawMessage, error) {
fieldValue = v.Field(i)
)
if unicode.IsLower(rune(fieldName[0])) {
utils.Panic("unexported field: struct=%v name=%v type=%v", typeOfV.String(), fieldName, fieldValue.Type().String())
}
r, err := SerializeValue(fieldValue, mode)
if err != nil {
return nil, fmt.Errorf("could not serialize struct field (%v).%v: %w", typeOfV.String(), rawFieldName, err)

View File

@@ -8,6 +8,7 @@ import (
"path"
"github.com/consensys/zkevm-monorepo/prover/backend/files"
"github.com/sirupsen/logrus"
)
// artefactDir is the directory used to store the artefacts. The directory is
@@ -43,26 +44,32 @@ func (a artefactCache) TryLoad(key string, obj Artefact) (found bool, parseErr e
)
if errors.Is(fCheckErr, os.ErrNotExist) {
logrus.Infof("attempted to open the cache-key=%v, was missing", fpath)
return false, nil
}
if fCheckErr != nil {
// This can happen if the directory does not exist
logrus.Infof("attempted to open the cache-key=%v err=%v", fpath, fCheckErr.Error())
return false, fmt.Errorf("CheckFilePath failed: %w", fCheckErr)
}
f, readErr := os.Open(fpath)
if readErr != nil {
logrus.Infof("attempted to open the cache-key=%v err=read-file-failed:%v", fpath, readErr.Error())
return false, fmt.Errorf("ReadFile failed: %w", readErr)
}
_, parseErr = obj.ReadFrom(f)
if parseErr != nil {
logrus.Infof("attempted to open the cache-key=%v err=read-from-failed:%v", fpath, parseErr.Error())
return false, fmt.Errorf("ReadFrom failed: %w", parseErr)
}
logrus.Infof("cache-key found cache-key=%v", fpath)
return true, nil
}
@@ -73,17 +80,15 @@ func (a artefactCache) Store(key string, obj Artefact) error {
var (
fpath = path.Join(artefactDir, key)
writingPath = fpath + ".tmp"
_, statErr = os.Stat(fpath)
_, wstatErr = os.Stat(writingPath)
statErr = files.CheckFilePath(writingPath)
)
if statErr == nil {
return fmt.Errorf("the file %q already exists", fpath)
}
if wstatErr == nil {
return fmt.Errorf("the file %q already exists", wstatErr)
}
logrus.Infof("Started writing the global constraint in the cache")
defer logrus.Infof("Done writing the global constraint in the cache")
f := files.MustOverwrite(writingPath)
if _, writeErr := obj.WriteTo(f); writeErr != nil {

View File

@@ -164,6 +164,14 @@ func (c *CompiledIOP) InsertCommit(round int, name ifaces.ColID, size int) iface
// - if the size of the column is not a power of 2
// - if a column using the same name has already been registered
func (c *CompiledIOP) InsertColumn(round int, name ifaces.ColID, size int, status column.Status) ifaces.Column {
// @alex: this has actually caught a few typos. When wrongly setting an
// incorrect but very large size here, it will generate a disproportionately
// large wizard.
if size > 1<<27 {
utils.Panic("column %v has size %v", name, size)
}
c.assertConsistentRound(round)
if len(name) == 0 {
@@ -486,6 +494,13 @@ func (c *CompiledIOP) InsertVerifier(round int, ver VerifierStep, gnarkVer Gnark
// - a query with the same name has already been registered in the Wizard.
func (c *CompiledIOP) InsertRange(round int, name ifaces.QueryID, h ifaces.Column, max int) {
// @alex: this has actually caught a few typos. When wrongly setting an
// incorrect but very large value here, the query will tend to always pass
// and thus the tests will tend to miss it.
if max > 1<<27 {
utils.Panic("the range check query %v has an overly large boundary (max=%v)", name, max)
}
// sanity-check the bound should be larger than 0
if max == 0 {
panic("max is zero : perhaps an overflow")

View File

@@ -3,8 +3,6 @@ package wizard
import (
"crypto/sha256"
"io"
"github.com/consensys/zkevm-monorepo/prover/utils"
)
// CompiledIOPSerializer is a function capable of serializing a Compiled-IOP
@@ -28,12 +26,12 @@ func (comp *CompiledIOP) BootstrapFiatShamir(vm VersionMetadata, ser CompiledIOP
io.WriteString(hasher, vm.Title)
io.WriteString(hasher, vm.Version)
compBlob, err := ser(comp)
if err != nil {
utils.Panic("Could not serialize the compiled IOP to bootstrap the FS state: %v", err)
}
// compBlob, err := ser(comp)
// if err != nil {
// utils.Panic("Could not serialize the compiled IOP to bootstrap the FS state: %v", err)
// }
hasher.Write(compBlob)
// hasher.Write(compBlob)
digest := hasher.Sum(nil)
digest[0] = 0 // This is to prevent potential errors due to overflowing the field
comp.fiatShamirSetup.SetBytes(digest)

View File

@@ -390,10 +390,8 @@ func GetWizardVerifierCircuitAssignment(comp *CompiledIOP, proof Proof) *WizardV
// Perform the conversion to frontend.Variable, element by element
assignedMsg := smartvectors.IntoGnarkAssignment(msgData)
res.columnsIDs.InsertNew(colName, len(res.Columns))
res.Columns = append(res.Columns, assignedMsg)
// Also add the index
res.columnsIDs.InsertNew(colName, i)
}
/*

View File

@@ -478,46 +478,49 @@ func (run *ProverRuntime) goNextRound() {
initialTranscriptSize := run.FS.TranscriptSize
initialNumCoinsGenerated := run.FS.NumCoinGenerated
/*
Make sure that all messages have been written and use them
to update the FS state. Note that we do not need to update
FS using the last round of the prover because he is always
the last one to "talk" in the protocol.
*/
start := run.FS.TranscriptSize
msgsToFS := run.Spec.Columns.AllKeysProofAt(run.currRound)
for _, msgName := range msgsToFS {
instance := run.GetMessage(msgName)
run.FS.UpdateSV(instance)
}
logrus.Debugf("Fiat-shamir round %v - %v proof elements in the transcript", run.currRound, run.FS.TranscriptSize-start)
if !run.Spec.DummyCompiled {
/*
Make sure that all messages have been written and use them
to update the FS state. Note that we do not need to update
FS using the last round of the prover because he is always
the last one to "talk" in the protocol.
*/
start = run.FS.TranscriptSize
msgsToFS = run.Spec.Columns.AllKeysPublicInputAt(run.currRound)
for _, msgName := range msgsToFS {
instance := run.GetMessage(msgName)
run.FS.UpdateSV(instance)
}
logrus.Debugf("Fiat-shamir round %v - %v public inputs in the transcript", run.currRound, run.FS.TranscriptSize-start)
/*
Make sure that all messages have been written and use them
to update the FS state. Note that we do not need to update
FS using the last round of the prover because he is always
the last one to "talk" in the protocol.
*/
start := run.FS.TranscriptSize
msgsToFS := run.Spec.Columns.AllKeysProofAt(run.currRound)
for _, msgName := range msgsToFS {
instance := run.GetMessage(msgName)
run.FS.UpdateSV(instance)
}
logrus.Debugf("Fiat-shamir round %v - %v proof elements in the transcript", run.currRound, run.FS.TranscriptSize-start)
/*
Also include the prover's allegations for all evaluations
*/
start = run.FS.TranscriptSize
paramsToFS := run.Spec.QueriesParams.AllKeysAt(run.currRound)
for _, qName := range paramsToFS {
// Implicitly, this will panic whenever we start supporting
// a new type of query params
params := run.QueriesParams.MustGet(qName)
params.UpdateFS(run.FS)
/*
Make sure that all messages have been written and use them
to update the FS state. Note that we do not need to update
FS using the last round of the prover because he is always
the last one to "talk" in the protocol.
*/
start = run.FS.TranscriptSize
msgsToFS = run.Spec.Columns.AllKeysPublicInputAt(run.currRound)
for _, msgName := range msgsToFS {
instance := run.GetMessage(msgName)
run.FS.UpdateSV(instance)
}
logrus.Debugf("Fiat-shamir round %v - %v public inputs in the transcript", run.currRound, run.FS.TranscriptSize-start)
/*
Also include the prover's allegations for all evaluations
*/
start = run.FS.TranscriptSize
paramsToFS := run.Spec.QueriesParams.AllKeysAt(run.currRound)
for _, qName := range paramsToFS {
// Implicitly, this will panic whenever we start supporting
// a new type of query params
params := run.QueriesParams.MustGet(qName)
params.UpdateFS(run.FS)
}
logrus.Debugf("Fiat-shamir round %v - %v query params in the transcript", run.currRound, run.FS.TranscriptSize-start)
}
logrus.Debugf("Fiat-shamir round %v - %v query params in the transcript", run.currRound, run.FS.TranscriptSize-start)
// Increment the number of rounds
run.currRound++

View File

@@ -153,38 +153,41 @@ func (run *VerifierRuntime) generateAllRandomCoins() {
"closing"
*/
toBeConsumed := run.Spec.Coins.AllKeysAt(currRound - 1)
run.Coins.Exists(toBeConsumed...)
run.Coins.MustExists(toBeConsumed...)
-/*
-Make sure that all messages have been written and use them
-to update the FS state. Note that we do not need to update
-FS using the last round of the prover because he is always
-the last one to "talk" in the protocol.
-*/
-msgsToFS := run.Spec.Columns.AllKeysProofAt(currRound - 1)
-for _, msgName := range msgsToFS {
-instance := run.GetColumn(msgName)
-logrus.Tracef("VERIFIER : Update fiat-shamir with proof message %v", msgName)
-run.FS.UpdateSV(instance)
-}
-msgsToFS = run.Spec.Columns.AllKeysPublicInputAt(currRound - 1)
-for _, msgName := range msgsToFS {
-instance := run.GetColumn(msgName)
-logrus.Tracef("VERIFIER : Update fiat-shamir with public input %v", msgName)
-run.FS.UpdateSV(instance)
-}
-/*
-Also include the prover's allegations for all evaluations
-*/
-queries := run.Spec.QueriesParams.AllKeysAt(currRound - 1)
-for _, qName := range queries {
-// Implicitly, this will panic whenever we start supporting
-// a new type of query params
-logrus.Tracef("VERIFIER : Update fiat-shamir with query parameters %v", qName)
-params := run.QueriesParams.MustGet(qName)
-params.UpdateFS(run.FS)
-}
+if !run.Spec.DummyCompiled {
+/*
+Make sure that all messages have been written and use them
+to update the FS state. Note that we do not need to update
+FS using the last round of the prover because he is always
+the last one to "talk" in the protocol.
+*/
+msgsToFS := run.Spec.Columns.AllKeysProofAt(currRound - 1)
+for _, msgName := range msgsToFS {
+instance := run.GetColumn(msgName)
+logrus.Tracef("VERIFIER : Update fiat-shamir with proof message %v", msgName)
+run.FS.UpdateSV(instance)
+}
+msgsToFS = run.Spec.Columns.AllKeysPublicInputAt(currRound - 1)
+for _, msgName := range msgsToFS {
+instance := run.GetColumn(msgName)
+logrus.Tracef("VERIFIER : Update fiat-shamir with public input %v", msgName)
+run.FS.UpdateSV(instance)
+}
+/*
+Also include the prover's allegations for all evaluations
+*/
+queries := run.Spec.QueriesParams.AllKeysAt(currRound - 1)
+for _, qName := range queries {
+// Implicitly, this will panic whenever we start supporting
+// a new type of query params
+logrus.Tracef("VERIFIER : Update fiat-shamir with query parameters %v", qName)
+params := run.QueriesParams.MustGet(qName)
+params.UpdateFS(run.FS)
+}
+}
}

View File

@@ -99,6 +99,9 @@ func factorizeExpression(expr *sym.Expression, iteration int) *sym.Expression {
// rankChildren ranks the child nodes of a list of parents based on which
// node has the highest number of parents in the list.
//
// The childrenSet is used as an exclusion set: the function shall not return
// children that are already in the children set.
func rankChildren(
parents []*sym.Expression,
childrenSet map[field.Element]*sym.Expression,
@@ -161,14 +164,9 @@ func findGdChildrenGroup(expr *sym.Expression) map[field.Element]*sym.Expression
for {
ranked := rankChildren(curParents, childrenSet)
-// Happens when we have a lincomb of lincomb. Ideally they should be
+// Can happen when we have a lincomb of lincomb. Ideally they should be
// merged during canonization.
if len(ranked) == 0 {
-// Still if that happens when the children set is non-empty, it
-// means an invariant was broken.
-if len(childrenSet) > 0 {
-panic("empty rank but the children set is non-empty")
-}
return childrenSet
}

View File

@@ -1,6 +1,7 @@
package gnarkutil
import (
"encoding/binary"
"errors"
hashinterface "hash"
"math/big"
@@ -66,4 +67,19 @@ func partialChecksumLooselyPackedBytes(b []byte, buf []byte, h hashinterface.Has
// if b consists of only one "element", the result is not hashed
func ChecksumLooselyPackedBytes(b []byte, buf []byte, h hashinterface.Hash) {
partialChecksumLooselyPackedBytes(b, buf, h)
// hash the length along with the partial sum
var numBuf [8]byte
binary.BigEndian.PutUint64(numBuf[:], uint64(len(b)))
h.Reset()
h.Write(numBuf[:])
h.Write(buf)
res := h.Sum(nil)
for i := 0; i < len(buf)-len(res); i++ { // one final "packing"
buf[i] = 0
}
copy(buf[len(buf)-len(res):], res)
}
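The added lines fold the input's byte-length into the final digest, so two inputs differing only by zero-padding can no longer collide. A standalone, hedged illustration of that length-suffix pattern (plain SHA-256 here; the project's MiMC-based buffer packing is not reproduced):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// checksumWithLength hashes data together with its length, so inputs
// that only differ by trailing padding produce different digests.
func checksumWithLength(data []byte) [32]byte {
	var lenBuf [8]byte
	binary.BigEndian.PutUint64(lenBuf[:], uint64(len(data)))
	h := sha256.New()
	h.Write(lenBuf[:]) // plays the role of numBuf in the hunk above
	h.Write(data)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	a := checksumWithLength([]byte{1, 2, 3})
	b := checksumWithLength([]byte{1, 2, 3, 0}) // same bytes plus one zero of padding
	fmt.Println(a == b)                         // false: the length breaks the collision
}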

View File

@@ -26,6 +26,8 @@ func NewArithmetization(builder *wizard.Builder, settings Settings) *Arithmetiza
Settings: &settings,
}
wrapped.Define(builder)
registerMissingColumns(builder, &settings)
return &Arithmetization{
Settings: &settings,
}
@@ -41,3 +43,11 @@ func Assign(run *wizard.ProverRuntime, traceFile string) {
run,
)
}
// registerMissingColumns registers columns that exist in the arithmetization
// but are omitted from define.go, as they are unconstrained while the hub
// is missing.
func registerMissingColumns(b *wizard.Builder, limits *Settings) {
b.RegisterCommit("shakiradata.LIMB", limits.Traces.Shakiradata)
b.RegisterCommit("blake2fmodexpdata.LIMB", limits.Traces.Blake2Fmodexpdata)
}

View File

@@ -151,12 +151,6 @@ func AssignFromCorset(traceFile string, run *wizard.ProverRuntime) {
}
logrus.Info("Done.")
logrus.Info("Parsing JSON...")
-if err != nil {
-utils.Panic("Could not read trace file, Corset says: `%v`", corsetErrToString(err))
-}
-logrus.Info("Done.")
numberOfThreads := runtime.NumCPU() / 2
cTraceFile := C.CString(traceFile)
@@ -170,7 +164,7 @@ func AssignFromCorset(traceFile string, run *wizard.ProverRuntime) {
false, // fail on missing columns in the trace
)
if trace == nil {
utils.Panic("Error while computing trace, Corset says: `%v`", corsetErrToString(err))
utils.Panic("Error while computing trace from file %v, Corset says: `%v`", traceFile, corsetErrToString(err))
}
logrus.Info("Done.")

View File

@@ -11,8 +11,14 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/selfrecursion"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/vortex"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/arithmetization"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/ecarith"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/ecdsa"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/ecpair"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/sha2"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/modexp"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/statemanager"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/statemanager/accumulator"
)
@@ -37,13 +43,13 @@ var (
// of the self-recursion currently relies on the number of limbs to be a
// power of two, we go with this one although it overshoots our security
// level target.
-sisInstance = ringsis.Params{LogTwoBound: 8, LogTwoDegree: 6}
+sisInstance = ringsis.Params{LogTwoBound: 16, LogTwoDegree: 6}
// This is the compilation suite in use for the full prover
fullCompilationSuite = compilationSuite{
// logdata.Log("initial-wizard"),
mimc.CompileMiMC,
compiler.Arcane(1<<10, 1<<19, true),
compiler.Arcane(1<<10, 1<<19, false),
vortex.Compile(
2,
vortex.ForceNumOpenedColumns(256),
@@ -56,7 +62,7 @@ var (
// logdata.Log("post-selfrecursion-1"),
cleanup.CleanUp,
mimc.CompileMiMC,
compiler.Arcane(1<<10, 1<<18, true),
compiler.Arcane(1<<10, 1<<18, false),
vortex.Compile(
2,
vortex.ForceNumOpenedColumns(256),
@@ -69,7 +75,7 @@ var (
// logdata.Log("post-selfrecursion-2"),
cleanup.CleanUp,
mimc.CompileMiMC,
compiler.Arcane(1<<10, 1<<16, true),
compiler.Arcane(1<<10, 1<<16, false),
vortex.Compile(
8,
vortex.ForceNumOpenedColumns(64),
@@ -82,7 +88,7 @@ var (
// logdata.Log("post-selfrecursion-3"),
cleanup.CleanUp,
mimc.CompileMiMC,
compiler.Arcane(1<<10, 1<<13, true),
compiler.Arcane(1<<10, 1<<13, false),
vortex.Compile(
8,
vortex.ForceNumOpenedColumns(64),
@@ -111,8 +117,11 @@ func FullZkEvm(tl *config.TracesLimits) *ZkEvm {
},
Statemanager: statemanager.Settings{
AccSettings: accumulator.Settings{
-MaxNumProofs: merkleProofLimit,
+MaxNumProofs: merkleProofLimit,
+Name: "SM_ACCUMULATOR",
+MerkleTreeDepth: 40,
},
MiMCCodeHashSize: tl.Rom,
},
// The compilation suite itself is hard-coded and reflects the
// actual full proof system.
@@ -124,6 +133,37 @@ func FullZkEvm(tl *config.TracesLimits) *ZkEvm {
Keccak: keccak.Settings{
MaxNumKeccakf: keccakLimit,
},
Ecdsa: ecdsa.Settings{
MaxNbEcRecover: tl.PrecompileEcrecoverEffectiveCalls,
MaxNbTx: tl.BlockTransactions,
NbInputInstance: 4,
NbCircuitInstances: utils.DivCeil(tl.PrecompileEcrecoverEffectiveCalls+tl.BlockTransactions, 4),
},
Modexp: modexp.Settings{
MaxNbInstance256: tl.PrecompileModexpEffectiveCalls,
MaxNbInstance4096: 1,
},
Ecadd: ecarith.Limits{
// 14 was found to be the right number to stay just under 2^19 constraints
// per circuit.
NbInputInstances: utils.DivCeil(tl.PrecompileEcaddEffectiveCalls, 28),
NbCircuitInstances: 28,
},
Ecmul: ecarith.Limits{
NbCircuitInstances: utils.DivCeil(tl.PrecompileEcmulEffectiveCalls, 6),
NbInputInstances: 6,
},
Ecpair: ecpair.Limits{
NbMillerLoopInputInstances: 1,
NbMillerLoopCircuits: tl.PrecompileEcpairingMillerLoops,
NbFinalExpInputInstances: 1,
NbFinalExpCircuits: tl.PrecompileEcpairingEffectiveCalls,
NbG2MembershipInputInstances: 6,
NbG2MembershipCircuits: utils.DivCeil(tl.PrecompileEcpairingG2MembershipCalls, 6),
},
Sha2: sha2.Settings{
MaxNumSha2F: tl.PrecompileSha2Blocks,
},
}
// Initialize the Full zkEVM arithmetization
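Most of the per-precompile limits above follow one recipe: fix how many calls a single circuit absorbs, then round the configured trace limit up to a whole number of circuits. A hedged sketch of that arithmetic (the numbers are made up, and divCeil only mirrors what utils.DivCeil is assumed to compute):

package main

import "fmt"

// divCeil is integer division rounded towards +infinity.
func divCeil(a, b int) int {
	return (a + b - 1) / b
}

func main() {
	// Hypothetical limits, not the real config values.
	ecrecoverCalls, blockTxs := 128, 200
	perCircuit := 4 // NbInputInstance in the ecdsa settings above

	// Total signature checks = ecrecover calls + tx signatures,
	// packed 4 per circuit, so the circuit count rounds up.
	fmt.Println(divCeil(ecrecoverCalls+blockTxs, perCircuit)) // 82
}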

View File

@@ -109,6 +109,9 @@ func (vb *VectorBuilder) Pop() {
// RepushLast pushes a value equal to the last pushed value of `vb`
func (vb *VectorBuilder) RepushLast() {
if len(vb.slice) == 0 {
panic("attempted to repush the last item of an empty builder")
}
last := vb.slice[len(vb.slice)-1]
vb.PushField(last)
}

View File

@@ -105,16 +105,21 @@ func newAddress(comp *wizard.CompiledIOP, size int, ecRec *EcRecover, ac *antich
)
td.csTxnData(comp)
-// projection from txn-data to address columns
-projection.InsertProjection(comp, ifaces.QueryIDf("Project_AddressHi_TxnData"),
-[]ifaces.Column{td.fromHi}, []ifaces.Column{addr.addressHi},
-td.isFrom, addr.isAddressFromTxnData,
-)
-projection.InsertProjection(comp, ifaces.QueryIDf("Project_AddressLO_TxnData"),
-[]ifaces.Column{td.fromLo}, []ifaces.Column{addr.addressLo},
-td.isFrom, addr.isAddressFromTxnData,
-)
+// Waiting for the resolution of:
+//
+// https://github.com/Consensys/zkevm-monorepo/issues/3801
+//
+// // projection from txn-data to address columns
+// projection.InsertProjection(comp, ifaces.QueryIDf("Project_AddressHi_TxnData"),
+// []ifaces.Column{td.fromHi}, []ifaces.Column{addr.addressHi},
+// td.isFrom, addr.isAddressFromTxnData,
+// )
+//
+// projection.InsertProjection(comp, ifaces.QueryIDf("Project_AddressLO_TxnData"),
+// []ifaces.Column{td.fromLo}, []ifaces.Column{addr.addressLo},
+// td.isFrom, addr.isAddressFromTxnData,
+// )
// impose that hashNum = ac.ID + 1
comp.InsertGlobal(0, ifaces.QueryIDf("Hash_NUM_IS_ID"),
@@ -218,6 +223,7 @@ func (addr *Addresses) assignAddress(
hashNum.PushInt(0)
}
}
hashNum.PadAndAssign(run)
addr.assignMainColumns(run, nbEcRecover, size, uaGnark)
addr.assignHelperColumns(run, ecRec)

View File

@@ -85,7 +85,7 @@ func (l *Settings) sizeAntichamber() int {
func newAntichamber(comp *wizard.CompiledIOP, inputs *antichamberInput) *antichamber {
settings := inputs.settings
-if settings.MaxNbEcRecover+settings.MaxNbTx != settings.NbInputInstance*settings.NbCircuitInstances {
+if settings.MaxNbEcRecover+settings.MaxNbTx > settings.NbInputInstance*settings.NbCircuitInstances {
utils.Panic("the number of supported instances %v should be at least %v + %v", settings.NbInputInstance*settings.NbCircuitInstances, settings.MaxNbEcRecover, settings.MaxNbTx)
}
size := inputs.settings.sizeAntichamber()
@@ -153,13 +153,13 @@ func newAntichamber(comp *wizard.CompiledIOP, inputs *antichamberInput) *anticha
//
// As the initial data is copied from the EC_DATA arithmetization module, it
// has to be provided as an input.
-func (ac *antichamber) assign(run *wizard.ProverRuntime, txGet TxSignatureGetter) {
+func (ac *antichamber) assign(run *wizard.ProverRuntime, txGet TxSignatureGetter, nbTx int) {
var (
ecSrc = ac.Inputs.ecSource
txSource = ac.Inputs.txSource
nbActualEcRecover = ecSrc.nbActualInstances(run)
)
-ac.assignAntichamber(run, nbActualEcRecover)
+ac.assignAntichamber(run, nbActualEcRecover, nbTx)
ac.EcRecover.Assign(run, ecSrc)
ac.txSignature.assignTxSignature(run, nbActualEcRecover)
ac.UnalignedGnarkData.Assign(run, ac.unalignedGnarkDataSource(), txGet)
@@ -175,7 +175,7 @@ func (ac *antichamber) assign(run *wizard.ProverRuntime, txGet TxSignatureGetter
// - Source
//
// The assignment depends on the number of defined EcRecover and TxSignature instances.
-func (ac *antichamber) assignAntichamber(run *wizard.ProverRuntime, nbEcRecInstances int) {
+func (ac *antichamber) assignAntichamber(run *wizard.ProverRuntime, nbEcRecInstances, nbTxInstances int) {
var (
maxNbEcRecover = ac.Inputs.settings.MaxNbEcRecover
@@ -211,7 +211,7 @@ func (ac *antichamber) assignAntichamber(run *wizard.ProverRuntime, nbEcRecInsta
idxInstance++
}
for i := 0; i < ac.Inputs.settings.MaxNbTx; i++ {
for i := 0; i < nbTxInstances; i++ {
for j := 0; j < nbRowsPerTxSign; j++ {
resIsActive[nbRowsPerEcRec*nbEcRecInstances+i*nbRowsPerTxSign+j] = field.NewElement(1)
resID[nbRowsPerEcRec*nbEcRecInstances+i*nbRowsPerTxSign+j] = field.NewElement(idxInstance)

View File

@@ -91,7 +91,7 @@ func TestAntichamber(t *testing.T) {
ct.Assign(run,
"EC_DATA_CS_ECRECOVER", "EC_DATA_ID", "EC_DATA_LIMB", "EC_DATA_SUCCESS_BIT", "EC_DATA_INDEX", "EC_DATA_IS_DATA", "EC_DATA_IS_RES",
)
-ac.assign(run, dummyTxSignatureGetter)
+ac.assign(run, dummyTxSignatureGetter, limits.MaxNbTx)
})
if err := wizard.Verify(cmp, proof); err != nil {

View File

@@ -7,7 +7,6 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
)
var (
@@ -45,22 +44,25 @@ type ecDataSource struct {
}
func (ecSrc *ecDataSource) nbActualInstances(run *wizard.ProverRuntime) int {
-var maxId uint64
-csCol := ecSrc.CsEcrecover.GetColAssignment(run)
-idCol := ecSrc.ID.GetColAssignment(run)
+var (
+count int = 0
+csCol = ecSrc.CsEcrecover.GetColAssignment(run)
+indexCol = ecSrc.Index.GetColAssignment(run)
+isDataCol = ecSrc.IsData.GetColAssignment(run)
+)
for i := 0; i < csCol.Len(); i++ {
-sel := csCol.Get(i)
-if sel.IsOne() {
-id := idCol.Get(i)
-if !id.IsUint64() {
-utils.Panic("source ecrecover id must be uint64")
-}
-if id.Uint64() > maxId {
-maxId = id.Uint64()
-}
-}
+var (
+cs = csCol.Get(i)
+index = indexCol.Get(i)
+isData = isDataCol.Get(i)
+)
+if cs.IsOne() && index.IsZero() && isData.IsOne() {
+count++
+}
}
-return int(maxId) + 1
+return count
}
func newEcRecover(comp *wizard.CompiledIOP, limits *Settings, src *ecDataSource) *EcRecover {
@@ -87,13 +89,21 @@ func (ec *EcRecover) Assign(run *wizard.ProverRuntime, src *ecDataSource) {
}
func (ec *EcRecover) assignFromEcDataSource(run *wizard.ProverRuntime, src *ecDataSource) {
-sourceCsEcRecover := run.GetColumn(src.CsEcrecover.GetColID())
-sourceID := run.GetColumn(src.ID.GetColID())
-sourceLimb := run.GetColumn(src.Limb.GetColID())
-sourceSuccessBit := run.GetColumn(src.SuccessBit.GetColID())
-sourceIndex := run.GetColumn(src.Index.GetColID())
-sourceIsData := run.GetColumn(src.IsData.GetColID())
-sourceIsRes := run.GetColumn(src.IsRes.GetColID())
+var (
+nbInstances = src.nbActualInstances(run)
+currRow = int(0)
+sourceCsEcRecover = run.GetColumn(src.CsEcrecover.GetColID())
+sourceID = run.GetColumn(src.ID.GetColID())
+sourceLimb = run.GetColumn(src.Limb.GetColID())
+sourceSuccessBit = run.GetColumn(src.SuccessBit.GetColID())
+sourceIndex = run.GetColumn(src.Index.GetColID())
+sourceIsData = run.GetColumn(src.IsData.GetColID())
+sourceIsRes = run.GetColumn(src.IsRes.GetColID())
+//
+resEcRecoverID, resLimb, resSuccessBit, resEcRecoverIndex []field.Element
+resEcRecoverIsData, resEcRecoverIsRes, resAuxProjectionMask []field.Element
+)
if sourceCsEcRecover.Len() != sourceID.Len() ||
sourceID.Len() != sourceLimb.Len() ||
@@ -103,19 +113,24 @@ func (ec *EcRecover) assignFromEcDataSource(run *wizard.ProverRuntime, src *ecDa
sourceIsData.Len() != sourceIsRes.Len() {
panic("all source columns must have the same length")
}
-var resEcRecoverID, resLimb, resSuccessBit, resEcRecoverIndex, resEcRecoverIsData, resEcRecoverIsRes, resAuxProjectionMask []field.Element
-var rowEcRecoverID, rowLimb, rowSuccessBit, rowEcRecoverIndex, rowEcRecoverIsData, rowEcRecoverIsRes, rowAuxProjectionMask [nbRowsPerEcRec]field.Element
-nbInstances := src.nbActualInstances(run)
for i := 0; i < nbInstances; i++ {
-if i*nbRowsPerEcRecFetching >= sourceCsEcRecover.Len() {
-break
-}
-selected := sourceCsEcRecover.Get(i * nbRowsPerEcRecFetching)
-if selected.IsZero() {
-continue
-}
+var (
+rowEcRecoverID, rowLimb, rowSuccessBit, rowEcRecoverIndex [nbRowsPerEcRec]field.Element
+rowEcRecoverIsData, rowEcRecoverIsRes, rowAuxProjectionMask [nbRowsPerEcRec]field.Element
+)
+// This loop advances currRow to the first remaining row where the
+// ECRECOVER selector is set.
+for _ = 0; currRow < sourceCsEcRecover.Len(); currRow++ {
+selected := sourceCsEcRecover.Get(currRow)
+if selected.IsOne() {
+break
+}
+}
for j := 0; j < nbRowsPerEcRecFetching; j++ {
-sourceIdx := i*nbRowsPerEcRecFetching + j
+sourceIdx := currRow + j
rowEcRecoverID[j] = sourceID.Get(sourceIdx)
rowLimb[j] = sourceLimb.Get(sourceIdx)
rowSuccessBit[j] = sourceSuccessBit.Get(sourceIdx)
@@ -124,6 +139,11 @@ func (ec *EcRecover) assignFromEcDataSource(run *wizard.ProverRuntime, src *ecDa
rowEcRecoverIsRes[j] = sourceIsRes.Get(sourceIdx)
rowAuxProjectionMask[j] = sourceCsEcRecover.Get(sourceIdx)
}
// This ensures that the next iteration starts from the first position
// after the ECRECOVER segment we just imported.
currRow += nbRowsPerEcRecFetching
resEcRecoverID = append(resEcRecoverID, rowEcRecoverID[:]...)
resLimb = append(resLimb, rowLimb[:]...)
resSuccessBit = append(resSuccessBit, rowSuccessBit[:]...)
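The rewritten nbActualInstances no longer trusts the ID column; it counts segment starts directly. A hedged, plain-Go toy model of that counting rule (0/1 ints stand in for field elements):

package main

import "fmt"

// row mirrors the three columns the new code inspects.
type row struct {
	cs, index, isData int
}

// nbActualInstances counts rows that open an ECRECOVER data segment:
// selector on, index back at zero, and the row flagged as data.
func nbActualInstances(rows []row) int {
	count := 0
	for _, r := range rows {
		if r.cs == 1 && r.index == 0 && r.isData == 1 {
			count++
		}
	}
	return count
}

func main() {
	rows := []row{
		{1, 0, 1}, {1, 1, 1}, {1, 2, 1}, // instance 1 (3 data rows)
		{0, 0, 0},                       // inactive gap
		{1, 0, 1}, {1, 1, 1},            // instance 2
	}
	fmt.Println(nbActualInstances(rows)) // 2
}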

View File

@@ -28,8 +28,8 @@ func NewEcdsaZkEvm(
}
}
-func (e *EcdsaZkEvm) Assign(run *wizard.ProverRuntime, txSig TxSignatureGetter) {
-e.ant.assign(run, txSig)
+func (e *EcdsaZkEvm) Assign(run *wizard.ProverRuntime, txSig TxSignatureGetter, nbTx int) {
+e.ant.assign(run, txSig, nbTx)
}
func (e *EcdsaZkEvm) GetProviders() []generic.GenericByteModule {

View File

@@ -96,29 +96,38 @@ func (d *UnalignedGnarkData) Assign(run *wizard.ProverRuntime, src *unalignedGna
}
func (d *UnalignedGnarkData) assignUnalignedGnarkData(run *wizard.ProverRuntime, src *unalignedGnarkDataSource, txSigs TxSignatureGetter) {
// copies data from the ecrecover part and txn part. Then it also computes
// the public key values and stores them in the corresponding rows.
-sourceSource := run.GetColumn(src.Source.GetColID())
-sourceIsActive := run.GetColumn(src.IsActive.GetColID())
-sourceLimb := run.GetColumn(src.Limb.GetColID())
-sourceSuccessBit := run.GetColumn(src.SuccessBit.GetColID())
-sourceTxHashHi := run.GetColumn(src.TxHashHi.GetColID())
-sourceTxHashLo := run.GetColumn(src.TxHashLo.GetColID())
+var (
+sourceSource = run.GetColumn(src.Source.GetColID())
+sourceIsActive = run.GetColumn(src.IsActive.GetColID())
+sourceLimb = run.GetColumn(src.Limb.GetColID())
+sourceSuccessBit = run.GetColumn(src.SuccessBit.GetColID())
+sourceTxHashHi = run.GetColumn(src.TxHashHi.GetColID())
+sourceTxHashLo = run.GetColumn(src.TxHashLo.GetColID())
+)
if sourceSource.Len() != d.size || sourceIsActive.Len() != d.size || sourceLimb.Len() != d.size || sourceSuccessBit.Len() != d.size || sourceTxHashHi.Len() != d.size || sourceTxHashLo.Len() != d.size {
panic("unexpected source length")
}
var resIsPublicKey, resGnarkIndex, resGnarkPkIndex, resGnarkData []field.Element
txCount := 0
for i := 0; i < d.size; {
-isActive := sourceIsActive.Get(i)
-source := sourceSource.Get(i)
-rows := make([]field.Element, nbRowsPerGnarkPushing)
-var buf [32]byte
-var prehashedMsg [32]byte
-r, s, v := new(big.Int), new(big.Int), new(big.Int)
-var err error
-var prependZeroCount uint
+var (
+isActive = sourceIsActive.Get(i)
+source = sourceSource.Get(i)
+rows = make([]field.Element, nbRowsPerGnarkPushing)
+buf [32]byte
+prehashedMsg [32]byte
+r, s, v = new(big.Int), new(big.Int), new(big.Int)
+err error
+prependZeroCount uint
+)
if isActive.IsOne() && source.Cmp(&SOURCE_ECRECOVER) == 0 {
prependZeroCount = nbRowsPerEcRecFetching
// we copy the data from ecrecover
@@ -169,7 +178,7 @@ func (d *UnalignedGnarkData) assignUnalignedGnarkData(run *wizard.ProverRuntime,
copy(prehashedMsg[16:], txLowBts[16:])
r, s, v, err = txSigs(prehashedMsg[:])
if err != nil {
utils.Panic("error getting tx signature: %v", err)
utils.Panic("error getting tx-signature err=%v, txNum=%v", err, txCount)
}
v.FillBytes(buf[:])
rows[6].SetBytes(buf[:16])
@@ -182,6 +191,7 @@ func (d *UnalignedGnarkData) assignUnalignedGnarkData(run *wizard.ProverRuntime,
rows[11].SetBytes(buf[16:])
i += NB_TX_INPUTS
txCount++
} else {
// we have run out of inputs.
break
@@ -193,7 +203,7 @@ func (d *UnalignedGnarkData) assignUnalignedGnarkData(run *wizard.ProverRuntime,
}
err = pk.RecoverFrom(prehashedMsg[:], uint(v.Uint64()-27), r, s)
if err != nil {
utils.Panic("error recovering public key: %v", err)
utils.Panic("error recovering public: err=%v v=%v r=%v s=%v", err.Error(), v.Uint64()-27, r.String(), s.String())
}
pkx := pk.A.X.Bytes()
rows[0].SetBytes(pkx[:16])

View File

@@ -198,12 +198,13 @@ func ImportAndPad(comp *wizard.CompiledIOP, inp ImportAndPadInputs, numRows int)
func (imp *importation) Run(run *wizard.ProverRuntime) {
var (
-srcData = imp.Inputs.Src.Data
-hashNum = srcData.HashNum.GetColAssignment(run).IntoRegVecSaveAlloc()
-limbs = srcData.Limb.GetColAssignment(run).IntoRegVecSaveAlloc()
-nBytes = srcData.NBytes.GetColAssignment(run).IntoRegVecSaveAlloc()
-index = srcData.Index.GetColAssignment(run).IntoRegVecSaveAlloc()
-toHash = srcData.ToHash.GetColAssignment(run).IntoRegVecSaveAlloc()
+sha2Count = 0
+srcData = imp.Inputs.Src.Data
+hashNum = srcData.HashNum.GetColAssignment(run).IntoRegVecSaveAlloc()
+limbs = srcData.Limb.GetColAssignment(run).IntoRegVecSaveAlloc()
+nBytes = srcData.NBytes.GetColAssignment(run).IntoRegVecSaveAlloc()
+index = srcData.Index.GetColAssignment(run).IntoRegVecSaveAlloc()
+toHash = srcData.ToHash.GetColAssignment(run).IntoRegVecSaveAlloc()
iab = importationAssignmentBuilder{
HashNum: common.NewVectorBuilder(imp.HashNum),
@@ -237,13 +238,17 @@ func (imp *importation) Run(run *wizard.ProverRuntime) {
for i := range hashNum {
if toHash[i].IsZero() {
-if i == len(hashNum)-1 {
+// The sha2Count condition addresses the case where sha2 is never
+// called.
+if sha2Count > 0 && i == len(hashNum)-1 {
imp.padder.pushPaddingRows(currByteSize, &iab)
}
continue
}
sha2Count++
if i > 0 && currHashNum != hashNum[i] && !currHashNum.IsZero() {
imp.padder.pushPaddingRows(currByteSize, &iab)
}

View File

@@ -1,8 +1,6 @@
package gen_acc
import (
"fmt"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/common/vector"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
@@ -154,10 +152,6 @@ func (info *GenericInfoAccumulator) Run(run *wizard.ProverRuntime) {
}
}
-for i := range sFilters {
-fmt.Printf("sFilter[%v] %v\n", i, vector.Prettify(sFilters[i]))
-}
run.AssignColumn(info.Provider.HashHi.GetColID(), smartvectors.RightZeroPadded(sHashHi, info.size))
run.AssignColumn(info.Provider.HashLo.GetColID(), smartvectors.RightZeroPadded(sHashLo, info.size))

View File

@@ -85,7 +85,7 @@ func NewKeccakOverBlocks(comp *wizard.CompiledIOP, inp KeccakOverBlockInputs) *K
// thus, we need to check that the blocks in keccakf match the ones from base conversion.
// blocks in keccakf are the spaghetti form of LaneX.
inpSpaghetti := spaghettifier.SpaghettificationInput{
Name: "KECCAK",
Name: "KECCAK_OVER_BLOCKS",
ContentMatrix: [][]ifaces.Column{keccakf.Blocks[:]},
Filter: isBlock(keccakf.IO.IsBlock),
SpaghettiSize: bcForBlock.LaneX.Size(),

View File

@@ -64,6 +64,7 @@ func NewKeccakSingleProvider(comp *wizard.CompiledIOP, inp KeccakSingleProviderI
IsNewHash: imported.IsNewHash,
IsActive: imported.IsActive,
},
Name: "KECCAK",
}
packing = packing.NewPack(comp, inpPck)

View File

@@ -6,6 +6,7 @@
package keccak
import (
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/generic"
gen_acc "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/acc_module"
@@ -93,9 +94,9 @@ func getShakiraArithmetization(comp *wizard.CompiledIOP) generic.GenericByteModu
},
Info: generic.GenInfoModule{
HashNum: comp.Columns.GetHandle("shakiradata.ID"),
HashLo: comp.Columns.GetHandle("shakira.LIMB"),
HashHi: comp.Columns.GetHandle("shakira.LIMB"),
IsHashLo: comp.Columns.GetHandle("shakiradata.SELECTOR_KECCAK_RES_LO"),
HashLo: comp.Columns.GetHandle("shakiradata.LIMB"),
HashHi: comp.Columns.GetHandle("shakiradata.LIMB"),
IsHashLo: column.Shift(comp.Columns.GetHandle("shakiradata.SELECTOR_KECCAK_RES_HI"), -1),
IsHashHi: comp.Columns.GetHandle("shakiradata.SELECTOR_KECCAK_RES_HI"),
},
}

View File

@@ -35,8 +35,9 @@ type block struct {
func newBlock(comp *wizard.CompiledIOP, inp blockInput) block {
var (
name = inp.lanes.Inputs.pckInp.Name
size = inp.lanes.Size
-createCol = common.CreateColFn(comp, BLOCK, size)
+createCol = common.CreateColFn(comp, BLOCK+"_"+name, size)
isLaneActive = inp.lanes.IsLaneActive
nbLanesPerBlock = inp.param.NbOfLanesPerBlock()
)
@@ -52,7 +53,7 @@ func newBlock(comp *wizard.CompiledIOP, inp blockInput) block {
// constraints over accNumLanes (accumulate backward)
// accNumLane[last] =isLaneActive[last]
comp.InsertLocal(0, ifaces.QueryIDf("AccNumLane_Last"),
comp.InsertLocal(0, ifaces.QueryIDf(name+"_AccNumLane_Last"),
sym.Sub(column.Shift(b.accNumLane, -1),
column.Shift(isLaneActive, -1)),
)
@@ -69,20 +70,22 @@ func newBlock(comp *wizard.CompiledIOP, inp blockInput) block {
b.accNumLane,
)
comp.InsertGlobal(0, ifaces.QueryIDf("AccNumLane_Glob"),
expr)
comp.InsertGlobal(0, ifaces.QueryIDf(name+"_AccNumLane_Glob"), expr)
// isBlockComplete[0] = 1
// NB: this guarantees that the total sum of nbytes, given via imported.Nbytes,
// is indeed divisible by the blockSize.
// This fact can be used to guarantee that enough zeroes were padded during padding.
comp.InsertLocal(
0, ifaces.QueryIDf("IsBlockComplete"),
sym.Sub(1, b.IsBlockComplete),
0, ifaces.QueryIDf(name+"_IsBlockComplete"),
sym.Mul(
isLaneActive,
sym.Sub(1, b.IsBlockComplete),
),
)
// if isFirstLaneOfNewHash = 1 then isBlockComplete = 1.
comp.InsertGlobal(0, ifaces.QueryIDf("EACH_HASH_HAS_COMPLETE_BLOCKS"),
comp.InsertGlobal(0, ifaces.QueryIDf(name+"_EACH_HASH_HAS_COMPLETE_BLOCKS"),
sym.Mul(
inp.lanes.IsFirstLaneOfNewHash,
sym.Sub(1, b.IsBlockComplete),

View File

@@ -45,6 +45,9 @@ func makeTestCaseBlockModule(uc generic.HashingUsecase) (
IsLaneActive: isActive,
Size: size,
IsFirstLaneOfNewHash: isFirstLaneOfHash,
Inputs: &laneRepackingInputs{
pckInp: PackingInput{Name: "TEST"},
},
},
param: uc,
}

View File

@@ -22,6 +22,7 @@ type decompositionInputs struct {
// they are used to determine the number of slices and their lengths.
param generic.HashingUsecase
cleaningCtx cleaningCtx
Name string
}
// Decomposition struct stores all the intermediate columns required to constrain correct decomposition.
@@ -109,7 +110,7 @@ func newDecomposition(comp *wizard.CompiledIOP, inp decompositionInputs) decompo
// declare the native columns
func (decomposed *decomposition) insertCommit(comp *wizard.CompiledIOP) {
-createCol := common.CreateColFn(comp, DECOMPOSITION, decomposed.size)
+createCol := common.CreateColFn(comp, DECOMPOSITION+"_"+decomposed.Inputs.Name, decomposed.size)
for x := 0; x < decomposed.nbSlices; x++ {
decomposed.decomposedLimbs = append(decomposed.decomposedLimbs, createCol("Decomposed_Limbs", x))
decomposed.decomposedLen = append(decomposed.decomposedLen, createCol("Decomposed_Len", x))
@@ -141,11 +142,11 @@ func (decomposed *decomposition) csDecomposLen(
// Equivalence of "decomposedLenPowers" with "2^(decomposedLen * 8)"
comp.InsertInclusion(0,
ifaces.QueryIDf("Decomposed_Len_Powers_%v", j), []ifaces.Column{lu.colNumber, lu.colPowers},
ifaces.QueryIDf("%v_Decomposed_Len_Powers_%v", decomposed.Inputs.Name, j), []ifaces.Column{lu.colNumber, lu.colPowers},
[]ifaces.Column{decomposed.decomposedLen[j], decomposed.decomposedLenPowers[j]})
}
// \sum_i decomposedLen[i]=NByte
comp.InsertGlobal(0, ifaces.QueryIDf("DecomposedLen_IsNByte"), sym.Sub(s, imported.NByte))
comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLen_IsNByte", decomposed.Inputs.Name), sym.Sub(s, imported.NByte))
}
@@ -159,7 +160,7 @@ func (decomposed *decomposition) csDecomposition(
cleanLimb = sym.Add(sym.Mul(cleanLimb, decomposed.decomposedLenPowers[k]), decomposed.decomposedLimbs[k])
}
comp.InsertGlobal(0, ifaces.QueryIDf("Decompose_CleanLimbs"), sym.Sub(cleanLimb, cleanLimbs))
comp.InsertGlobal(0, ifaces.QueryIDf("Decompose_CleanLimbs_%v", decomposed.Inputs.Name), sym.Sub(cleanLimb, cleanLimbs))
}
// Constraints over the form of filter and decomposedLen;
@@ -170,7 +171,7 @@ func (decomposed decomposition) csFilter(comp *wizard.CompiledIOP) {
// s.resIsZero = 1 iff decomposedLen = 0
decomposed.resIsZero[j], decomposed.paIsZero[j] = iszero.IsZero(comp, decomposed.decomposedLen[j])
// s.filter = (1 - s.resIsZero), this enforces filters to be binary.
comp.InsertGlobal(0, ifaces.QueryIDf("%v_%v", "IS_NON_ZERO", j),
comp.InsertGlobal(0, ifaces.QueryIDf("%v_%v_%v", decomposed.Inputs.Name, "IS_NON_ZERO", j),
sym.Sub(decomposed.filter[j],
sym.Sub(1, decomposed.resIsZero[j])),
)
@@ -178,7 +179,7 @@ func (decomposed decomposition) csFilter(comp *wizard.CompiledIOP) {
// filter[0] = 1 over isActive.
// this ensures that the first slice of the limb falls in the first column.
-comp.InsertGlobal(0, "FIRST_SLICE_IN_FIRST_COLUMN",
+comp.InsertGlobal(0, ifaces.QueryIDf("%v_FIRST_SLICE_IN_FIRST_COLUMN", decomposed.Inputs.Name),
sym.Sub(
decomposed.filter[0], decomposed.isActive),
)
@@ -222,6 +223,7 @@ func getDecompositionInputs(cleaning cleaningCtx, pckParam PackingInput) decompo
decInp := decompositionInputs{
cleaningCtx: cleaning,
param: pckParam.PackingParam,
Name: pckParam.Name,
}
return decInp
}

View File

@@ -20,6 +20,8 @@ type cleaningInputs struct {
// lookup table used for storing powers of 2^8,
// removing the redundant zeroes from Limbs.
lookup lookUpTables
// Name gives additional context for the input name
Name string
}
// cleaningCtx stores all the intermediate columns required for imposing the constraints.
@@ -38,7 +40,7 @@ type cleaningCtx struct {
// NewClean imposes the constraint for cleaning the limbs.
func NewClean(comp *wizard.CompiledIOP, inp cleaningInputs) cleaningCtx {
-createCol := common.CreateColFn(comp, CLEANING, inp.imported.Limb.Size())
+createCol := common.CreateColFn(comp, CLEANING+"_"+inp.Name, inp.imported.Limb.Size())
ctx := cleaningCtx{
CleanLimb: createCol("CleanLimb"),
nbZeros: createCol("NbZeroes"),
@@ -54,7 +56,7 @@ func NewClean(comp *wizard.CompiledIOP, inp cleaningInputs) cleaningCtx {
// impose the cleaning of limbs
limb := sym.Mul(ctx.powersNbZeros, ctx.CleanLimb)
comp.InsertGlobal(0, ifaces.QueryIDf("LimbCleaning"),
comp.InsertGlobal(0, ifaces.QueryIDf("LimbCleaning_%v", inp.Name),
sym.Sub(limb, inp.imported.Limb),
)
@@ -72,7 +74,7 @@ func (ctx cleaningCtx) csNbZeros(comp *wizard.CompiledIOP) {
)
// Equivalence of "PowersNbZeros" with "2^(NbZeros * 8)"
comp.InsertInclusion(0, ifaces.QueryIDf("NumToPowers"),
comp.InsertInclusion(0, ifaces.QueryIDf("NumToPowers_%v", ctx.Inputs.Name),
[]ifaces.Column{ctx.Inputs.lookup.colNumber, ctx.Inputs.lookup.colPowers},
[]ifaces.Column{ctx.nbZeros, ctx.powersNbZeros},
)
@@ -80,7 +82,7 @@ func (ctx cleaningCtx) csNbZeros(comp *wizard.CompiledIOP) {
// The constraint for nbZeros = (MaxBytes - NByte)* isActive
nbZeros := sym.Sub(MAXNBYTE, nByte)
comp.InsertGlobal(0, ifaces.QueryIDf("NbZeros"),
comp.InsertGlobal(0, ifaces.QueryIDf("NB_ZEROES_%v", ctx.Inputs.Name),
sym.Mul(
sym.Sub(
nbZeros, ctx.nbZeros),
@@ -129,6 +131,15 @@ func (ctx *cleaningCtx) assignNbZeros(run *wizard.ProverRuntime) {
var a big.Int
for row := 0; row < len(nByte); row++ {
b := nByte[row]
// @alex: it is possible that the "imported" is returning "inactive"
// zones when using Sha2.
if b.IsZero() {
nbZeros.PushZero()
powersNbZeros.PushOne()
continue
}
res.Sub(&fr16, &b)
nbZeros.PushField(res)
res.BigInt(&a)
@@ -140,10 +151,11 @@ func (ctx *cleaningCtx) assignNbZeros(run *wizard.ProverRuntime) {
powersNbZeros.PadAndAssign(run, field.One())
}
-// it generates CleaningInputs
-func getCleaningInputs(imported Importation, lookup lookUpTables) cleaningInputs {
+// newCleaningInputs constructs CleaningInputs
+func newCleaningInputs(imported Importation, lookup lookUpTables, name string) cleaningInputs {
return cleaningInputs{
imported: imported,
lookup: lookup,
Name: name,
}
}
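Tying the cleaning constraints together: Limb = CleanLimb * PowersNbZeros, with PowersNbZeros = 2^(8*NbZeros) and NbZeros = MAXNBYTE - NByte on active rows. A hedged numeric check of that relation (the 16-byte slot size is an assumption about MAXNBYTE):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	const maxNByte = 16 // assumed MAXNBYTE: limbs live in 16-byte slots

	cleanLimb := big.NewInt(0xAABBCC) // a 3-byte payload
	nByte := 3
	nbZeros := maxNByte - nByte // 13 trailing zero bytes

	// PowersNbZeros = 2^(8*NbZeros)
	powersNbZeros := new(big.Int).Lsh(big.NewInt(1), uint(8*nbZeros))
	limb := new(big.Int).Mul(cleanLimb, powersNbZeros)

	fmt.Printf("%x\n", limb) // aabbcc followed by 26 zero hex digits
}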

View File

@@ -35,7 +35,7 @@ func makeTestCaseCleaningModule(uc generic.HashingUsecase) (
comp := build.CompiledIOP
imported = createImportationColumns(comp, size)
lookup := NewLookupTables(comp)
-cleaning = NewClean(comp, getCleaningInputs(imported, lookup))
+cleaning = NewClean(comp, newCleaningInputs(imported, lookup, "TEST"))
}
prover = func(run *wizard.ProverRuntime) {
var (

View File

@@ -1,13 +1,14 @@
package dedicated
import (
"strconv"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
sym "github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
@@ -37,7 +38,8 @@ The column IsMax indicate where the accumulator reaches the max,
type accumulateUpToMax struct {
Inputs AccumulatorInputs
// It is 1 when the accumulator reaches the max value.
-IsMax ifaces.Column
+IsMax ifaces.Column
+IsActive ifaces.Column
// the ProverAction for IsZero()
pa wizard.ProverAction
// It accumulates the elements from ColA.
@@ -46,16 +48,18 @@ type accumulateUpToMax struct {
Size int
}
-func AccumulateUpToMax(comp *wizard.CompiledIOP, maxValue int, colA ifaces.Column) *accumulateUpToMax {
+func AccumulateUpToMax(comp *wizard.CompiledIOP, maxValue int, colA, isActive ifaces.Column) *accumulateUpToMax {
var (
uniqueID = strconv.Itoa(len(comp.ListCommitments()))
size = colA.Size()
createCol = common.CreateColFn(comp, "ACCUMULATE_UP_TO_MAX", size)
createCol = common.CreateColFn(comp, "ACCUMULATE_UP_TO_MAX_"+uniqueID, size)
)
acc := &accumulateUpToMax{
Inputs: AccumulatorInputs{MaxValue: maxValue,
ColA: colA},
Accumulator: createCol("Accumulator"),
IsActive: isActive,
Size: size,
}
@@ -63,19 +67,19 @@ func AccumulateUpToMax(comp *wizard.CompiledIOP, maxValue int, colA ifaces.Colum
// Constraints over the accumulator
// Accumulator[last] =ColA[last]
comp.InsertLocal(0, ifaces.QueryIDf("AccCLDLenSpaghetti_Loc"),
symbolic.Sub(
comp.InsertLocal(0, ifaces.QueryIDf("AccCLDLenSpaghetti_Loc_"+uniqueID),
sym.Sub(
column.Shift(acc.Accumulator, -1), column.Shift(acc.Inputs.ColA, -1),
),
)
// Accumulator[i] = Accumulator[i+1]*(1-acc.IsMax[i+1]) +ColA[i]; i standing for row-index.
-res := symbolic.Sub(1, column.Shift(acc.IsMax, 1)) // 1-acc.IsMax[i+1]
+res := sym.Sub(1, column.Shift(acc.IsMax, 1)) // 1-acc.IsMax[i+1]
comp.InsertGlobal(0, ifaces.QueryIDf("AccCLDLenSpaghetti_Glob"),
symbolic.Sub(
symbolic.Add(
symbolic.Mul(
comp.InsertGlobal(0, ifaces.QueryIDf("AccCLDLenSpaghetti_Glob_"+uniqueID),
sym.Sub(
sym.Add(
sym.Mul(
column.Shift(acc.Accumulator, 1), res),
acc.Inputs.ColA),
acc.Accumulator,
@@ -83,8 +87,8 @@ func AccumulateUpToMax(comp *wizard.CompiledIOP, maxValue int, colA ifaces.Colum
)
// IsMax[0] = IsActive[0]
-comp.InsertLocal(0, "IS_1_AT_POS_0",
-sym.Sub(acc.IsMax, 1),
+comp.InsertLocal(0, ifaces.QueryID("IS_1_AT_POS_0_"+uniqueID),
+sym.Sub(acc.IsMax, acc.IsActive),
)
return acc
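Reading the constraints above back-to-front: the accumulator sums ColA downwards from the last row and resets after each row where IsMax is set, and the local constraint now ties IsMax[0] to IsActive[0]. A hedged plain-Go simulation of that column pair (toy integers in place of field elements):

package main

import "fmt"

// accumulateUpToMax models the column pair built by AccumulateUpToMax:
// acc[i] = colA[i] + acc[i+1]*(1-isMax[i+1]), accumulated backwards,
// with isMax flagging rows where the running sum reaches maxValue
// (a lane boundary in the packing use-case).
func accumulateUpToMax(colA []int, maxValue int) (acc []int, isMax []bool) {
	n := len(colA)
	acc = make([]int, n)
	isMax = make([]bool, n)
	for i := n - 1; i >= 0; i-- {
		acc[i] = colA[i]
		if i+1 < n && !isMax[i+1] {
			acc[i] += acc[i+1]
		}
		isMax[i] = acc[i] == maxValue
	}
	return acc, isMax
}

func main() {
	// Toy lengths summing to full "lanes" of 8 bytes each.
	acc, isMax := accumulateUpToMax([]int{8, 2, 6, 4, 4}, 8)
	fmt.Println(acc)   // [8 8 6 8 4]
	fmt.Println(isMax) // [true true false true false]: row 0 completes a lane
}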

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column/verifiercol"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
@@ -32,7 +33,7 @@ func makeTestCaseLaneAlloc() (
colA := comp.InsertCommit(0, ifaces.ColIDf("COL_A"), size)
-acc = AccumulateUpToMax(comp, maxValue, colA)
+acc = AccumulateUpToMax(comp, maxValue, colA, verifiercol.NewConstantCol(field.One(), size))
}
prover = func(run *wizard.ProverRuntime) {

View File

@@ -2,6 +2,7 @@ package dedicated
import (
"slices"
"strconv"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
@@ -43,10 +44,11 @@ type lengthConsistency struct {
func LengthConsistency(comp *wizard.CompiledIOP, inp LcInputs) *lengthConsistency {
var (
name = strconv.Itoa(len(comp.ListCommitments()))
numCol = len(inp.Table)
size = inp.Table[0].Size()
numBytes = inp.MaxLen
createCol = common.CreateColFn(comp, "LENGTH_CONSISTENCY", size)
createCol = common.CreateColFn(comp, "LENGTH_CONSISTENCY_"+name, size)
)
res := &lengthConsistency{
@@ -82,7 +84,7 @@ func LengthConsistency(comp *wizard.CompiledIOP, inp LcInputs) *lengthConsistenc
// bytesLen is binary
commonconstraints.MustBeBinary(comp, res.bytesLen[j][k])
}
comp.InsertGlobal(0, ifaces.QueryIDf("CLDLen_%v", j), sym.Sub(sum, inp.TableLen[j]))
comp.InsertGlobal(0, ifaces.QueryIDf("%v_CLDLen_%v", name, j), sym.Sub(sum, inp.TableLen[j]))
}
return res
}

View File

@@ -44,11 +44,11 @@ type laneRepacking struct {
func newLane(comp *wizard.CompiledIOP, spaghetti spaghettiCtx, pckInp PackingInput) laneRepacking {
var (
size = utils.NextPowerOfTwo(pckInp.PackingParam.NbOfLanesPerBlock() * pckInp.MaxNumBlocks)
-createCol = common.CreateColFn(comp, LANE, size)
+createCol = common.CreateColFn(comp, LANE+"_"+pckInp.Name, size)
isFirstSliceOfNewHash = spaghetti.newHashSp
maxValue = pckInp.PackingParam.LaneSizeBytes()
decomposedLenSp = spaghetti.decLenSp
-pa = dedicated.AccumulateUpToMax(comp, maxValue, decomposedLenSp)
+pa = dedicated.AccumulateUpToMax(comp, maxValue, decomposedLenSp, spaghetti.filterSpaghetti)
spaghettiSize = spaghetti.spaghettiSize
)
@@ -61,7 +61,7 @@ func newLane(comp *wizard.CompiledIOP, spaghetti spaghettiCtx, pckInp PackingInp
Lanes: createCol("Lane"),
IsFirstLaneOfNewHash: createCol("IsFirstLaneOfNewHash"),
IsLaneActive: createCol("IsLaneActive"),
coeff: comp.InsertCommit(0, ifaces.ColIDf("Coefficient"), spaghettiSize),
coeff: comp.InsertCommit(0, ifaces.ColIDf("Coefficient_"+pckInp.Name), spaghettiSize),
paAccUpToMax: pa,
isLaneComplete: pa.IsMax,
@@ -81,7 +81,7 @@ func newLane(comp *wizard.CompiledIOP, spaghetti spaghettiCtx, pckInp PackingInp
// constraints over isFirstLaneOfNewHash
// Project the isFirstLaneOfNewHash from isFirstSliceOfNewHash
projection.InsertProjection(comp, ifaces.QueryIDf("Project_IsFirstLaneOfHash"),
projection.InsertProjection(comp, ifaces.QueryIDf("Project_IsFirstLaneOfHash_"+pckInp.Name),
[]ifaces.Column{isFirstSliceOfNewHash},
[]ifaces.Column{l.IsFirstLaneOfNewHash},
l.isLaneComplete, l.IsLaneActive)
@@ -98,7 +98,7 @@ func (l *laneRepacking) csCoeff(comp *wizard.CompiledIOP, s spaghettiCtx) {
// coeff[last-active-row] = 1
comp.InsertGlobal(
0, ifaces.QueryIDf("Coeff_In_Last_Active_Row"),
0, ifaces.QueryIDf("%v_Coeff_In_Last_Active_Row", l.Inputs.pckInp.Name),
sym.Mul(isActive,
sym.Sub(column.Shift(isActive, 1), 1),
sym.Sub(l.coeff,
@@ -107,7 +107,7 @@ func (l *laneRepacking) csCoeff(comp *wizard.CompiledIOP, s spaghettiCtx) {
// coeff[last] = 1, to cover the case where last-active-row == last-row
comp.InsertLocal(
-0, ifaces.QueryIDf("Coeff-In_Last_Row"),
+0, ifaces.QueryIDf("%v_Coeff-In_Last_Row", l.Inputs.pckInp.Name),
sym.Sub(column.Shift(l.coeff, -1), 1),
)
@@ -119,7 +119,7 @@ func (l *laneRepacking) csCoeff(comp *wizard.CompiledIOP, s spaghettiCtx) {
)
res = sym.Add(res, column.Shift(l.isLaneComplete, 1))
expr := sym.Mul(sym.Sub(l.coeff, res), column.Shift(isActive, 1))
comp.InsertGlobal(0, ifaces.QueryIDf("Coefficient_Glob"), expr)
comp.InsertGlobal(0, ifaces.QueryIDf("%v_Coefficient_Glob", l.Inputs.pckInp.Name), expr)
}
// It declares the constraints over the lanes
@@ -128,14 +128,14 @@ func (l *laneRepacking) csRecomposeToLanes(comp *wizard.CompiledIOP, s spaghetti
// compute the partitioned inner product
// ipTracker[i] = (decomposedLimbs[i] * coeff[i]) + ipTracker[i+1]* (1- isLaneComplete[i+1])
// Constraints on the Partitioned Inner-Products
ipTracker := dedicated.InsertPartitionedIP(comp, "PIP_For_LaneRePacking",
ipTracker := dedicated.InsertPartitionedIP(comp, l.Inputs.pckInp.Name+"_PIP_For_LaneRePacking",
s.decLimbSp,
l.coeff,
l.isLaneComplete,
)
// Project the lanes from ipTracker over the Lane column.
projection.InsertProjection(comp, ifaces.QueryIDf("ProjectOverLanes"),
projection.InsertProjection(comp, ifaces.QueryIDf("%v_ProjectOverLanes", l.Inputs.pckInp.Name),
[]ifaces.Column{ipTracker},
[]ifaces.Column{l.Lanes},
l.isLaneComplete, l.IsLaneActive,
@@ -202,8 +202,9 @@ func (l *laneRepacking) assignLane(run *wizard.ProverRuntime) {
blocks, flag = l.getBlocks(run, l.Inputs.pckInp)
)
var f field.Element
if len(flag) != len(blocks) {
utils.Panic("should have one flag per block")
utils.Panic("should have one flag per block numFlags=%v numBlocks=%v", len(flag), len(blocks))
}
for k, block := range blocks {
@@ -290,5 +291,14 @@ func (l *laneRepacking) getBlocks(run *wizard.ProverRuntime, inp PackingInput) (
utils.Panic("the number of the blocks %v passes the limit %v", ctr, inp.MaxNumBlocks)
}
}
// This corresponds to the edge-case where no blocks are processed. In
// that situation, we can simply return empty lists. This handling is
// necessary because, by default, [isFirstBlockOfHash] is initialized with
// one value while the blocks are not.
if len(block) == 0 {
return [][]byte{}, []int{}
}
return block, isFirstBlockOfHash
}

View File

@@ -52,6 +52,7 @@ type PackingInput struct {
// The columns in Imported should be of size;
// size := utils.NextPowerOfTwo(packingParam.blockSize * maxNumBlocks)
Imported Importation
Name string
}
// Packing implements the [wizard.ProverAction] receiving the limbs and relevant parameters,
@@ -85,7 +86,7 @@ func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *Packing {
var (
isNewHash = inp.Imported.IsNewHash
lookup = NewLookupTables(comp)
-cleaning = NewClean(comp, getCleaningInputs(inp.Imported, lookup))
+cleaning = NewClean(comp, newCleaningInputs(inp.Imported, lookup, inp.Name))
decomposed = newDecomposition(comp, getDecompositionInputs(cleaning, inp))
spaghetti = spaghettiMaker(comp, decomposed, isNewHash)
lanes = newLane(comp, spaghetti, inp)
@@ -138,7 +139,7 @@ func spaghettiMaker(comp *wizard.CompiledIOP, decomposed decomposition, isNewHas
// Constraints over the spaghetti forms
inp := spaghettifier.SpaghettificationInput{
-Name: SPAGHETTI,
+Name: decomposed.Inputs.Name,
ContentMatrix: [][]ifaces.Column{
decomposed.decomposedLimbs,
decomposed.decomposedLen,

View File

@@ -38,6 +38,7 @@ func makeTestCasePackingModule(uc generic.HashingUsecase) (
MaxNumBlocks: maxNumBlock,
PackingParam: uc,
Imported: imported,
Name: "TESTING",
}
pck = NewPack(comp, inp)

View File

@@ -3,6 +3,7 @@
package sha2
import (
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/projection"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
@@ -53,16 +54,16 @@ func NewSha2ZkEvm(comp *wizard.CompiledIOP, s Settings) *Sha2SingleProvider {
},
Info: generic.GenInfoModule{
HashNum: comp.Columns.GetHandle("shakiradata.ID"),
HashLo: comp.Columns.GetHandle("shakira.LIMB"),
HashHi: comp.Columns.GetHandle("shakira.LIMB"),
IsHashLo: comp.Columns.GetHandle("shakiradata.SELECTOR_SHA2_RES_LO"),
HashLo: comp.Columns.GetHandle("shakiradata.LIMB"),
HashHi: comp.Columns.GetHandle("shakiradata.LIMB"),
IsHashLo: column.Shift(comp.Columns.GetHandle("shakiradata.SELECTOR_SHA2_RES_HI"), -1),
IsHashHi: comp.Columns.GetHandle("shakiradata.SELECTOR_SHA2_RES_HI"),
},
},
})
}
-// newSha2SingleProvider implements the utilities for proving keccak hash
+// newSha2SingleProvider implements the utilities for proving sha2 hash
// over the streams which are encoded inside a set of structs [generic.GenDataModule].
// It calls;
// - Padding module to ensure the correct padding of the streams.
@@ -94,6 +95,7 @@ func newSha2SingleProvider(comp *wizard.CompiledIOP, inp Sha2SingleProviderInput
IsNewHash: imported.IsNewHash,
IsActive: imported.IsActive,
},
Name: "SHA2",
}
packing = packing.NewPack(comp, inpPck)
@@ -107,7 +109,7 @@ func newSha2SingleProvider(comp *wizard.CompiledIOP, inp Sha2SingleProviderInput
Selector: packing.Repacked.IsLaneActive,
IsFirstLaneOfNewHash: packing.Repacked.IsFirstLaneOfNewHash,
}
-cSha2 = newSha2BlockModule(comp, cSha2Inp)
+cSha2 = newSha2BlockModule(comp, cSha2Inp).WithCircuit(comp)
)
projection.InsertProjection(comp, "SHA2_RES_HI",
@@ -138,7 +140,7 @@ func newSha2SingleProvider(comp *wizard.CompiledIOP, inp Sha2SingleProviderInput
return m
}
-// It implements [wizard.ProverAction] for keccak.
+// It implements [wizard.ProverAction] for sha2.
func (m *Sha2SingleProvider) Run(run *wizard.ProverRuntime) {
// assign ImportAndPad module

View File

@@ -22,7 +22,7 @@ const (
// nbInstancePerCircuit256 and nbInstancePerCircuit4096 state how many
// instances of modexp are taken care of by a single gnark circuit in the
// "small" variant (256 bits) or the "large" variant (4096 bits)
-nbInstancePerCircuit256, nbInstancePerCircuit4096 = 20, 1
+nbInstancePerCircuit256, nbInstancePerCircuit4096 = 10, 1
)
// Module implements the wizard part responsible for checking the MODEXP

View File

@@ -3,6 +3,7 @@ package modexp
import (
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
)
@@ -35,35 +36,43 @@ func (mod *Module) Assign(run *wizard.ProverRuntime) {
for currPosition := 0; currPosition < len(limbs); {
-if isModexp[currPosition].IsOne() {
-isLarge := false
-for k := 0; k < modexpNumRowsPerInstance; k++ {
-if k%32 < 30 && !limbs[currPosition+k].IsZero() {
-isLarge = true
-break
-}
-}
-for k := 0; k < modexpNumRowsPerInstance; k++ {
-builder.isActive.PushOne()
-builder.isSmall.PushBoolean(!isLarge)
-builder.isLarge.PushBoolean(isLarge)
-builder.limbs.PushField(limbs[currPosition+k])
-if !isLarge && k%32 >= 30 {
-builder.toSmallCirc.PushOne()
-} else {
-builder.toSmallCirc.PushZero()
-}
-}
-currPosition += modexpNumRowsPerInstance
-}
+if isModexp[currPosition].IsZero() {
+currPosition++
+continue
+}
+currPosition++
+// This sanity-check is purely defensive and will indicate that we
+// missed the start of a Modexp instance
+if len(limbs)-currPosition < modexpNumRowsPerInstance {
+utils.Panic("A new modexp is starting but there are not enough rows (currPosition=%v len(ecdata.Limb)=%v)", currPosition, len(limbs))
+}
+isLarge := false
+// An instance is considered large if any of the operands has more than
+// 2 16-bytes limbs.
+for k := 0; k < modexpNumRowsPerInstance; k++ {
+if k%32 < 30 && !limbs[currPosition+k].IsZero() {
+isLarge = true
+break
+}
+}
+for k := 0; k < modexpNumRowsPerInstance; k++ {
+builder.isActive.PushOne()
+builder.isSmall.PushBoolean(!isLarge)
+builder.isLarge.PushBoolean(isLarge)
+builder.limbs.PushField(limbs[currPosition+k])
+if !isLarge && k%32 >= 30 {
+builder.toSmallCirc.PushOne()
+} else {
+builder.toSmallCirc.PushZero()
+}
+}
+currPosition += modexpNumRowsPerInstance
}
builder.isActive.PadAndAssign(run, field.Zero())
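The large/small split rests on a fixed row layout: each operand occupies 32 limb rows of 16 bytes, and a 256-bit operand only touches the last two rows of its window. A hedged toy model of the detection predicate (the 32-row layout is inferred from the k%32 < 30 test above, not from a spec):

package main

import "fmt"

const rowsPerOperand = 32 // assumption: one operand = 32 limb rows

// isLargeInstance reports whether any operand uses a limb outside the
// low two rows of its window, i.e. exceeds 2 x 16 bytes = 256 bits.
func isLargeInstance(limbs [][16]byte) bool {
	var zero [16]byte
	for k := range limbs {
		if k%rowsPerOperand < rowsPerOperand-2 && limbs[k] != zero {
			return true
		}
	}
	return false
}

func main() {
	small := make([][16]byte, 128) // 4 operands, zero except the low rows
	small[30][15] = 0xff           // fits in the last two limbs of the window
	fmt.Println(isLargeInstance(small)) // false

	large := make([][16]byte, 128)
	large[0][0] = 0x01 // a high limb is set: 4096-bit territory
	fmt.Println(isLargeInstance(large)) // true
}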

View File

@@ -2,10 +2,11 @@ package execution_data_collector
import (
"fmt"
arith "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput/arith_struct"
"strings"
"testing"
arith "github.com/consensys/zkevm-monorepo/prover/zkevm/prover/publicInput/arith_struct"
"github.com/consensys/zkevm-monorepo/prover/crypto/mimc"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
@@ -181,6 +182,7 @@ func TestExecutionDataCollectorAndHash(t *testing.T) {
IsNewHash: padding.IsNewHash,
IsActive: padding.IsActive,
},
Name: "TESTING",
}
// create a new packing module
packingMod = pack.NewPack(b.CompiledIOP, packingInp)

View File

@@ -67,6 +67,8 @@ func NewPublicInputZkEVM(comp *wizard.CompiledIOP, settings *Settings, ss *state
return comp.Columns.GetHandle(ifaces.ColID(s))
}
settings.Name = "PUBLIC_INPUT"
return newPublicInput(
comp,
&InputModules{
@@ -91,7 +93,7 @@ func NewPublicInputZkEVM(comp *wizard.CompiledIOP, settings *Settings, ss *state
},
RlpTxn: &arith.RlpTxn{
AbsTxNum: getCol("rlptxn.ABS_TX_NUM"),
AbsTxNumMax: getCol("rlptxn.ABS_TX_NUM_MAX"),
AbsTxNumMax: getCol("rlptxn.ABS_TX_NUM_INFINY"),
ToHashByProver: getCol("rlptxn.TO_HASH_BY_PROVER"),
Limb: getCol("rlptxn.LIMB"),
NBytes: getCol("rlptxn.nBYTES"),
@@ -107,8 +109,8 @@ func NewPublicInputZkEVM(comp *wizard.CompiledIOP, settings *Settings, ss *state
AbsLogNum: getCol("loginfo.ABS_LOG_NUM"),
AbsLogNumMax: getCol("loginfo.ABS_LOG_NUM_MAX"),
Ct: getCol("loginfo.CT"),
OutgoingHi: getCol("loginfo.ADDRESS_HI"),
OutgoingLo: getCol("loginfo.ADDRESS_LO"),
OutgoingHi: getCol("loginfo.ADDR_HI"),
OutgoingLo: getCol("loginfo.ADDR_LO"),
TxEmitsLogs: getCol("loginfo.TXN_EMITS_LOGS"),
},
StateSummary: ss,
@@ -194,6 +196,7 @@ func newPublicInput(
IsNewHash: padding.IsNewHash,
IsActive: padding.IsActive,
},
Name: "EXECUTION_DATA_MIMC",
}
packingMod := pack.NewPack(comp, packingInp)

View File

@@ -19,16 +19,18 @@ func lookupStateSummaryCodeHash(comp *wizard.CompiledIOP,
// Lookup of the code hashes (Keccak and MiMC) between the state-summary initial account and the MiMC code hash module
comp.InsertInclusionDoubleConditional(round,
ifaces.QueryID("LOOKUP_MIMC_CODEHASH_INITIAL_ACCOUNT_INTO_STATE_SUMMARY"),
-[]ifaces.Column{codeHash.CodeHashHi, codeHash.CodeHashLo, codeHash.NewState, codeHash.CodeSize},
[]ifaces.Column{accountPeek.Initial.KeccakCodeHash.Hi, accountPeek.Initial.KeccakCodeHash.Lo, accountPeek.Initial.MiMCCodeHash, accountPeek.Initial.CodeSize},
+[]ifaces.Column{codeHash.CodeHashHi, codeHash.CodeHashLo, codeHash.NewState, codeHash.CodeSize},
+accountPeek.Initial.ExistsAndHasNonEmptyCodeHash,
codeHash.IsHashEnd,
-accountPeek.Initial.Exists)
+)
// Lookup of the code hashes (Keccak and MiMC) between the state-summary final account and the MiMC code hash module
comp.InsertInclusionDoubleConditional(round,
ifaces.QueryIDf("LOOKUP_MIMC_CODEHASH_FINAL_ACCOUNT_INTO_STATE_SUMMARY"),
-[]ifaces.Column{codeHash.CodeHashHi, codeHash.CodeHashLo, codeHash.NewState, codeHash.CodeSize},
[]ifaces.Column{accountPeek.Final.KeccakCodeHash.Hi, accountPeek.Final.KeccakCodeHash.Lo, accountPeek.Final.MiMCCodeHash, accountPeek.Final.CodeSize},
+[]ifaces.Column{codeHash.CodeHashHi, codeHash.CodeHashLo, codeHash.NewState, codeHash.CodeSize},
+accountPeek.Final.ExistsAndHasNonEmptyCodeHash,
codeHash.IsHashEnd,
-accountPeek.Final.Exists)
+)
}

View File

@@ -66,7 +66,7 @@ func (mh *Module) Assign(run *wizard.ProverRuntime) {
for i := 0; i < length; i++ {
-if cfi[i+1].IsZero() && !cfi[i].IsZero() {
+if !cfi[i].IsZero() && ((i+1 == length) || cfi[i+1].IsZero()) {
// This is the last row in the active area of the rom input.
// We assign one more row to make the assignment of the last row
// for other columns below work correctly, we exclude codeHash and

View File

@@ -56,6 +56,39 @@ func NewStateManager(comp *wizard.CompiledIOP, settings Settings) *StateManager
return sm
}
// NewStateManagerNoHub instantiates the [StateManager] module but ignores the
// connection with the Hub columns.
func NewStateManagerNoHub(comp *wizard.CompiledIOP, settings Settings) *StateManager {
sm := &StateManager{
StateSummary: statesummary.NewModule(comp, settings.stateSummarySize()),
accumulator: accumulator.NewModule(comp, settings.AccSettings),
mimcCodeHash: mimccodehash.NewModule(comp, mimccodehash.Inputs{
Name: "MiMCCodeHash",
Size: settings.MiMCCodeHashSize,
}),
}
sm.accumulatorSummaryConnector = *accumulatorsummary.NewModule(
comp,
accumulatorsummary.Inputs{
Name: "ACCUMULATOR_SUMMARY",
Accumulator: sm.accumulator,
},
)
sm.accumulatorSummaryConnector.ConnectToStateSummary(comp, &sm.StateSummary)
sm.mimcCodeHash.ConnectToRom(comp, rom(comp), romLex(comp))
// Waiting for the resolution of:
//
// https://github.com/Consensys/zkevm-monorepo/issues/3798
//
// lookupStateSummaryCodeHash(comp, &sm.StateSummary.Account, &sm.mimcCodeHash)
return sm
}
// Assign assigns the submodules of the state-manager. It requires the
// arithmetization columns to be assigned first.
func (sm *StateManager) Assign(run *wizard.ProverRuntime, shomeiTraces [][]statemanager.DecodedTrace) {

View File

@@ -2,6 +2,7 @@ package statesummary
import (
"github.com/consensys/zkevm-monorepo/prover/backend/execution/statemanager"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated/byte32cmp"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
@@ -11,6 +12,10 @@ import (
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
)
var (
emptyCodeHash = statemanager.EmptyCodeHash(statemanager.MIMC_CONFIG)
)
// AccountPeek contains the view of the State-summary module regarding accounts.
// Namely, it stores all the account-related columns: the peeked address, the
// initial account and the final account.
@@ -45,8 +50,8 @@ type AccountPeek struct {
// ComputeAddressHash is responsible for computing the AddressHash
ComputeAddressHash wizard.ProverAction
-// AddressLimbs stores the limbs of the address
-AddressLimbs byte32cmp.LimbColumns
+// AddressHashLimbs stores the limbs of the address hash
+AddressHashLimbs byte32cmp.LimbColumns
// ComputeAddressLimbs computes the [AddressHashLimbs] columns.
ComputeAddressLimbs wizard.ProverAction
@@ -123,17 +128,17 @@ func newAccountPeek(comp *wizard.CompiledIOP, size int) AccountPeek {
},
)
-accPeek.AddressLimbs, accPeek.ComputeAddressLimbs = byte32cmp.Decompose(
+accPeek.AddressHashLimbs, accPeek.ComputeAddressLimbs = byte32cmp.Decompose(
comp,
-accPeek.Address,
-10, // numLimbs so that we have 20 bytes
+accPeek.AddressHash,
+16, // numLimbs so that we cover the full 32-byte hash
16, // number of bits per limb (= 2 bytes)
)
accPeek.HasGreaterAddressAsPrev, accPeek.HasSameAddressAsPrev, _, accPeek.ComputeAddressComparison = byte32cmp.CmpMultiLimbs(
comp,
-accPeek.AddressLimbs,
-accPeek.AddressLimbs.Shift(-1),
+accPeek.AddressHashLimbs,
+accPeek.AddressHashLimbs.Shift(-1),
)
return accPeek
@@ -147,6 +152,11 @@ type Account struct {
Exists, Nonce, Balance, MiMCCodeHash, CodeSize, StorageRoot ifaces.Column
// KeccakCodeHash stores the keccak code hash of the account.
KeccakCodeHash HiLoColumns
// HasEmptyCodeHash is an indicator column indicating whether the current
// account has an empty codehash
HasEmptyCodeHash ifaces.Column
CptHasEmptyCodeHash wizard.ProverAction
ExistsAndHasNonEmptyCodeHash ifaces.Column
}
// newAccount returns a new AccountPeek with initialized and unconstrained
@@ -161,15 +171,44 @@ func newAccount(comp *wizard.CompiledIOP, size int, name string) Account {
)
}
-return Account{
-Exists: createCol("EXISTS"),
-Nonce: createCol("NONCE"),
-Balance: createCol("BALANCE"),
-MiMCCodeHash: createCol("MIMC_CODEHASH"),
-CodeSize: createCol("CODESIZE"),
-StorageRoot: createCol("STORAGE_ROOT"),
-KeccakCodeHash: newHiLoColumns(comp, size, name+"_KECCAK_CODE_HASH"),
-}
+acc := Account{
+Exists: createCol("EXISTS"),
+Nonce: createCol("NONCE"),
+Balance: createCol("BALANCE"),
+MiMCCodeHash: createCol("MIMC_CODEHASH"),
+CodeSize: createCol("CODESIZE"),
+StorageRoot: createCol("STORAGE_ROOT"),
+KeccakCodeHash: newHiLoColumns(comp, size, name+"_KECCAK_CODE_HASH"),
+ExistsAndHasNonEmptyCodeHash: createCol("EXISTS_AND_NON_EMPTY_CODEHASH"),
}
// There is no need for an IsActive mask here because the column will be
// multiplied by Exists which is already zero when inactive.
acc.HasEmptyCodeHash, acc.CptHasEmptyCodeHash = dedicated.IsZero(comp, acc.CodeSize)
comp.InsertGlobal(
0,
ifaces.QueryIDf("STATE_SUMMARY_%v_CPT_EXIST_AND_NONEMPTY_CODE", name),
sym.Sub(
acc.ExistsAndHasNonEmptyCodeHash,
sym.Mul(
sym.Sub(1, acc.HasEmptyCodeHash),
acc.Exists,
),
),
)
comp.InsertGlobal(
0,
ifaces.QueryIDf("STATE_SUMMARY_%v_MIMC_CODEHASH_FOR_EXISTING_BUT_EMPTY_CODE", name),
sym.Mul(
acc.Exists,
acc.HasEmptyCodeHash,
sym.Sub(acc.MiMCCodeHash, *new(field.Element).SetBytes(emptyCodeHash[:])),
),
)
return acc
}
// accountPeekAssignmentBuilder is a convenience structure storing column
@@ -193,19 +232,21 @@ func newAccountPeekAssignmentBuilder(ap *AccountPeek) accountPeekAssignmentBuild
type accountAssignmentBuilder struct {
exists, nonce, balance, miMCCodeHash, codeSize, storageRoot *common.VectorBuilder
keccakCodeHash hiLoAssignmentBuilder
existsAndHasNonEmptyCodeHash *common.VectorBuilder
}
// newAccountAssignmentBuilder returns a new [accountAssignmentBuilder] bound
// to an [Account].
func newAccountAssignmentBuilder(ap *Account) accountAssignmentBuilder {
return accountAssignmentBuilder{
- exists: common.NewVectorBuilder(ap.Exists),
- nonce: common.NewVectorBuilder(ap.Nonce),
- balance: common.NewVectorBuilder(ap.Balance),
- miMCCodeHash: common.NewVectorBuilder(ap.MiMCCodeHash),
- codeSize: common.NewVectorBuilder(ap.CodeSize),
- storageRoot: common.NewVectorBuilder(ap.StorageRoot),
- keccakCodeHash: newHiLoAssignmentBuilder(ap.KeccakCodeHash),
+ exists: common.NewVectorBuilder(ap.Exists),
+ nonce: common.NewVectorBuilder(ap.Nonce),
+ balance: common.NewVectorBuilder(ap.Balance),
+ miMCCodeHash: common.NewVectorBuilder(ap.MiMCCodeHash),
+ codeSize: common.NewVectorBuilder(ap.CodeSize),
+ storageRoot: common.NewVectorBuilder(ap.StorageRoot),
+ existsAndHasNonEmptyCodeHash: common.NewVectorBuilder(ap.ExistsAndHasNonEmptyCodeHash),
+ keccakCodeHash: newHiLoAssignmentBuilder(ap.KeccakCodeHash),
}
}
@@ -230,6 +271,7 @@ func (ss *accountAssignmentBuilder) pushAll(acc types.Account) {
ss.codeSize.PushInt(int(acc.CodeSize))
ss.miMCCodeHash.PushBytes32(acc.MimcCodeHash)
ss.storageRoot.PushBytes32(acc.StorageRoot)
ss.existsAndHasNonEmptyCodeHash.PushBoolean(accountExists && acc.CodeSize > 0)
}
// pushOverrideStorageRoot is as [accountAssignmentBuilder.pushAll] but allows
@@ -257,6 +299,7 @@ func (ss *accountAssignmentBuilder) pushOverrideStorageRoot(
ss.codeSize.PushInt(int(acc.CodeSize))
ss.miMCCodeHash.PushBytes32(acc.MimcCodeHash)
ss.storageRoot.PushBytes32(storageRoot)
ss.existsAndHasNonEmptyCodeHash.PushBoolean(accountExists && acc.CodeSize > 0)
}
// PadAndAssign terminates the receiver by padding all the columns representing
@@ -270,4 +313,5 @@ func (ss *accountAssignmentBuilder) PadAndAssign(run *wizard.ProverRuntime) {
ss.miMCCodeHash.PadAndAssign(run)
ss.storageRoot.PadAndAssign(run)
ss.codeSize.PadAndAssign(run)
ss.existsAndHasNonEmptyCodeHash.PadAndAssign(run)
}
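These assignment hunks follow the builder's push-then-pad lifecycle: one push per account trace while the segments are walked, then a single PadAndAssign once the witness is exhausted. A minimal usage sketch, where account, traces and run stand in for the module's column set, the decoded account values and the prover runtime:

builder := newAccountAssignmentBuilder(&account)
for _, acc := range traces {
	builder.pushAll(acc) // one row per account trace
}
builder.PadAndAssign(run) // pad every column to full size, then assign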

View File

@@ -2,6 +2,7 @@ package statesummary
import (
"github.com/consensys/zkevm-monorepo/prover/maths/field"
"github.com/consensys/zkevm-monorepo/prover/protocol/column"
"github.com/consensys/zkevm-monorepo/prover/protocol/dedicated"
"github.com/consensys/zkevm-monorepo/prover/protocol/ifaces"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
@@ -22,6 +23,10 @@ type AccumulatorStatement struct {
// flags indicating the type of traces being deferred to the accumulator
// module.
IsReadZero, IsReadNonZero, IsInsert, IsUpdate, IsDelete ifaces.Column
// SameTypeAsBefore is an indicator column equal to 1 when the current
// row performs the same class of shomei operation (read or write) as
// the previous one.
SameTypeAsBefore ifaces.Column
CptSameTypeAsBefore wizard.ProverAction
// StateDiff contains the relevant accumulator values
smCommon.StateDiff
@@ -66,6 +71,22 @@ func newAccumulatorStatement(comp *wizard.CompiledIOP, size int, name string) Ac
sym.Sub(res.StateDiff.FinalHVal, field.Zero()),
)
res.SameTypeAsBefore, res.CptSameTypeAsBefore = dedicated.IsZero(
comp,
sym.Sub(
sym.Add(
res.IsInsert,
res.IsUpdate,
res.IsDelete,
),
sym.Add(
column.Shift(res.IsInsert, -1),
column.Shift(res.IsUpdate, -1),
column.Shift(res.IsDelete, -1),
),
),
)
return res
}
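Because IsInsert, IsUpdate and IsDelete are mutually exclusive boolean flags, their sum acts as an is-write indicator, so the IsZero above fires exactly when two consecutive rows fall in the same read/write class. A stand-alone restatement over plain integers (hypothetical helper, not part of the module):

func sameClassAsBefore(curIns, curUpd, curDel, prevIns, prevUpd, prevDel int) bool {
	// Each argument is 0 or 1 and at most one flag per row is set, so the
	// sums below are 1 on write rows (INSERT/UPDATE/DELETE) and 0 on reads.
	return (curIns+curUpd+curDel)-(prevIns+prevUpd+prevDel) == 0
}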
@@ -149,3 +170,6 @@ func (as *AccumulatorStatementAssignmentBuilder) PadAndAssign(run *wizard.Prover
as.IsDelete.PadAndAssign(run)
as.SummaryBuilder.PadAndAssign(run)
}
// Type returns a code to identify the type of trace as a symbolic expression
//

View File

@@ -119,6 +119,15 @@ func (ss *stateSummaryAssignmentBuilder) pushBlockTraces(batchNumber int, traces
}
if newAddress != curAddress {
// This addresses the case where the segment is Read|ReadZero. In that
// situation, the account trace comes at the beginning of the segment,
// so the storage rows buffered so far belong to the sub-segment that
// was just completed and must be reattached to it before moving on.
if len(subSegment.storageTraces) > 0 {
curSegment[len(curSegment)-1].storageTraces = subSegment.storageTraces
subSegment = accountSubSegmentWitness{}
}
ss.pushAccountSegment(batchNumber, curSegment)
curSegment = accountSegmentWitness{}
curAddress = newAddress
@@ -135,6 +144,10 @@ func (ss *stateSummaryAssignmentBuilder) pushBlockTraces(batchNumber int, traces
subSegment.storageTraces = append(subSegment.storageTraces, trace)
}
if len(subSegment.storageTraces) > 0 {
curSegment[len(curSegment)-1].storageTraces = subSegment.storageTraces
}
ss.pushAccountSegment(batchNumber, curSegment)
}
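The same reattachment also runs after the loop (second hunk) for the final segment. A compact model of the move, with hypothetical stand-in types for the witness structures:

type storageTrace struct{ key, value string }

type subSegmentW struct{ storageTraces []storageTrace }

// reattachPending mirrors the Read|ReadZero handling above: the account
// trace opens the segment, so storage traces buffered after it belong
// to the sub-segment that was just completed.
func reattachPending(segment []subSegmentW, pending []storageTrace) {
	if len(pending) > 0 && len(segment) > 0 {
		segment[len(segment)-1].storageTraces = pending
	}
}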
@@ -301,15 +314,20 @@ func (ss *stateSummaryAssignmentBuilder) finalize(run *wizard.ProverRuntime) {
}
runConcurrent([]wizard.ProverAction{
+ ss.StateSummary.Account.Initial.CptHasEmptyCodeHash,
+ ss.StateSummary.Account.Final.CptHasEmptyCodeHash,
ss.StateSummary.Account.ComputeAddressHash,
- ss.StateSummary.Account.ComputeAddressLimbs,
ss.StateSummary.Account.ComputeHashFinal,
ss.StateSummary.Account.ComputeHashInitial,
- ss.StateSummary.Storage.ComputeKeyLimbsHi,
- ss.StateSummary.Storage.ComputeKeyLimbsLo,
ss.StateSummary.Storage.ComputeKeyHash,
ss.StateSummary.Storage.ComputeOldValueHash,
ss.StateSummary.Storage.ComputeNewValueHash,
+ ss.StateSummary.AccumulatorStatement.CptSameTypeAsBefore,
})
+ runConcurrent([]wizard.ProverAction{
+ ss.StateSummary.Account.ComputeAddressLimbs,
+ ss.StateSummary.Storage.ComputeKeyLimbs,
+ })
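// The second batch above is new: ComputeAddressLimbs and ComputeKeyLimbs
// now decompose AddressHash and KeyHash, so they may only run once the
// hash prover actions of the first batch have completed.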
runConcurrent([]wizard.ProverAction{

View File

@@ -628,9 +628,36 @@ func (ss *Module) csStoragePeek(comp *wizard.CompiledIOP) {
sym.Mul(
ss.IsStorage,
column.Shift(ss.IsStorage, -1),
ss.AccumulatorStatement.SameTypeAsBefore, // Note: we observed that shomei was in fact sorting by type and then by storage key
sym.Sub(1, ss.Storage.KeyIncreased),
),
)
diffRoW := sym.Sub(
sym.Add(
ss.AccumulatorStatement.IsInsert,
ss.AccumulatorStatement.IsUpdate,
ss.AccumulatorStatement.IsDelete,
),
sym.Add(
column.Shift(ss.AccumulatorStatement.IsInsert, -1),
column.Shift(ss.AccumulatorStatement.IsUpdate, -1),
column.Shift(ss.AccumulatorStatement.IsDelete, -1),
),
)
comp.InsertGlobal(
0,
ifaces.QueryIDf("STATE_SUMMARY_STORAGE_READS_THEN_WRITE"),
sym.Mul(
column.Shift(ss.IsStorage, -1),
ss.IsStorage,
sym.Sub(
sym.Mul(diffRoW, diffRoW),
diffRoW,
),
),
)
}
// csAccumulatorStatementFlags constrains the accumulator statement's flags.
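Why STATE_SUMMARY_STORAGE_READS_THEN_WRITE pins writes after reads: diffRoW is a difference of two is-write indicators, so it only takes values in {-1, 0, 1}, and the polynomial x*x - x vanishes exactly on {0, 1}. The one rejected transition is therefore a write row followed by a read row. A quick sanity check of that argument:

package main

import "fmt"

func main() {
	// diffRoW = isWrite(current) - isWrite(previous), in {-1, 0, 1}; the
	// constraint keeps 0 and 1 and rejects -1 (a read following a write).
	for _, d := range []int{-1, 0, 1} {
		fmt.Printf("diffRoW=%d accepted=%v\n", d, d*d-d == 0)
	}
}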

View File

@@ -4,8 +4,10 @@ import (
"fmt"
"testing"
"github.com/consensys/zkevm-monorepo/prover/backend/execution/statemanager"
"github.com/consensys/zkevm-monorepo/prover/protocol/compiler/dummy"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils/types"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/statemanager/common"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/statemanager/mock"
)
@@ -46,7 +48,76 @@ func TestStateSummaryInternal(t *testing.T) {
t.Fatalf("verification failed: %v", err)
}
})
}
}
func TestStateSummaryReadZeroShomei(t *testing.T) {
var (
addresses = []types.EthAddress{
types.DummyAddress(32),
types.DummyAddress(64),
types.DummyAddress(23),
types.DummyAddress(54),
}
storageKeys = []types.FullBytes32{
types.DummyFullByte(102),
types.DummyFullByte(1002),
types.DummyFullByte(1012),
types.DummyFullByte(1023),
}
storageValues = []types.FullBytes32{
types.DummyFullByte(202),
types.DummyFullByte(2002),
types.DummyFullByte(2012),
types.DummyFullByte(2023),
}
)
state := mock.State{}
state.InsertContract(addresses[2], types.DummyBytes32(67), types.DummyFullByte(56), 100)
state.InsertContract(addresses[3], types.DummyBytes32(76), types.DummyFullByte(57), 102)
state.SetStorage(addresses[2], storageKeys[0], storageValues[0])
state.SetStorage(addresses[2], storageKeys[1], storageValues[1])
state.SetStorage(addresses[3], storageKeys[2], storageValues[2])
state.SetStorage(addresses[3], storageKeys[3], storageValues[3])
var (
shomeiState = mock.InitShomeiState(state)
logs = mock.NewStateLogBuilder(15, state).
WithAddress(addresses[2]).
IncNonce().
WithAddress(addresses[3]).
ReadStorage(storageKeys[2]).
ReadStorage(storageKeys[3]).
Done()
shomeiTraces = mock.StateLogsToShomeiTraces(shomeiState, logs)
ss Module
)
// Reorder the traces so that they match the order in which shomei emits them
newTraces := [][]statemanager.DecodedTrace{make([]statemanager.DecodedTrace, 4)}
newTraces[0][0] = shomeiTraces[0][0]
newTraces[0][1] = shomeiTraces[0][3]
newTraces[0][2] = shomeiTraces[0][1]
newTraces[0][3] = shomeiTraces[0][2]
define := func(b *wizard.Builder) {
ss = NewModule(b.CompiledIOP, 1<<6)
}
prove := func(run *wizard.ProverRuntime) {
ss.Assign(run, newTraces)
}
comp := wizard.Compile(define, dummy.Compile)
proof := wizard.Prove(comp, prove)
err := wizard.Verify(comp, proof)
if err != nil {
t.Fatalf("verification failed: %v", err)
}
}
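As in the other tests of the package, dummy.Compile stands in for the full compilation chain: the verifier checks every query directly against the assignment instead of producing a succinct proof, which is enough to exercise the new read-zero ordering constraints.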

View File

@@ -33,9 +33,9 @@ type StoragePeek struct {
// KeyLimbs represents the key in limb decomposition.
KeyLimbs byte32cmp.LimbColumns
- // ComputeKeyLimbsHi and ComputeKeyLimbsLo are responsible for computing the
- // "hi" and the "lo" limbs of the KeyLimbs.
- ComputeKeyLimbsHi, ComputeKeyLimbsLo wizard.ProverAction
+ // ComputeKeyLimbs is responsible for computing the KeyLimbs columns.
+ ComputeKeyLimbs wizard.ProverAction
// KeyIncreased is a column indicating whether the current storage
// key is strictly greater than the previous one.
@@ -115,10 +115,7 @@ func newStoragePeek(comp *wizard.CompiledIOP, size int, name string) StoragePeek
sym.Sub(res.NewValueHash, hashOfZeroStorage()),
)
- var keyLimbsHi, keyLimbsLo byte32cmp.LimbColumns
- keyLimbsHi, res.ComputeKeyLimbsHi = byte32cmp.Decompose(comp, res.Key.Hi, 16, 16)
- keyLimbsLo, res.ComputeKeyLimbsLo = byte32cmp.Decompose(comp, res.Key.Lo, 16, 16)
- res.KeyLimbs = byte32cmp.FuseLimbs(keyLimbsLo, keyLimbsHi)
+ res.KeyLimbs, res.ComputeKeyLimbs = byte32cmp.Decompose(comp, res.KeyHash, 16, 16)
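// With the single decomposition above, the multi-limb comparison below
// orders storage rows by the limbs of KeyHash rather than by the raw
// hi/lo key limbs.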
res.KeyIncreased, _, _, res.ComputeKeyIncreased = byte32cmp.CmpMultiLimbs(
comp,

View File

@@ -1,6 +1,7 @@
package zkevm
import (
"fmt"
"math/big"
"github.com/consensys/zkevm-monorepo/prover/backend/ethereum"
@@ -22,5 +23,17 @@ type Witness struct {
}
func (w Witness) TxSignatureGetter(txHash []byte) (r, s, v *big.Int, err error) {
panic("unimplemented")
var (
sig, found = w.TxSignatures[[32]byte(txHash)]
)
if !found {
return nil, nil, nil, fmt.Errorf("could not find signature for tx hash = 0x%x", txHash)
}
r, _ = new(big.Int).SetString(sig.R, 0)
s, _ = new(big.Int).SetString(sig.S, 0)
v, _ = new(big.Int).SetString(sig.V, 0)
return r, s, v, nil
}
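Two parsing details worth flagging: base 0 lets big.Int.SetString auto-detect the "0x" prefix the stored R, S and V strings are expected to carry, and a malformed string silently yields a nil component since the boolean return is discarded. A hedged usage sketch, with witness and txHash as placeholders:

r, s, v, err := witness.TxSignatureGetter(txHash)
if err != nil {
	return err // the hash was not in the witness's signature map
}
fmt.Println("r =", r, "s =", s, "v =", v)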

View File

@@ -1,6 +1,7 @@
package zkevm
import (
"github.com/consensys/zkevm-monorepo/prover/config"
"github.com/consensys/zkevm-monorepo/prover/protocol/serialization"
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/zkevm/arithmetization"
@@ -94,7 +95,7 @@ func newZkEVM(b *wizard.Builder, s *Settings) *ZkEvm {
comp = b.CompiledIOP
arith = arithmetization.NewArithmetization(b, s.Arithmetization)
ecdsa = ecdsa.NewEcdsaZkEvm(comp, &s.Ecdsa)
- stateManager = statemanager.NewStateManager(comp, s.Statemanager)
+ stateManager = statemanager.NewStateManagerNoHub(comp, s.Statemanager)
keccak = keccak.NewKeccakZkEVM(comp, s.Keccak, ecdsa.GetProviders())
modexp = modexp.NewModuleZkEvm(comp, s.Modexp)
ecadd = ecarith.NewEcAddZkEvm(comp, &s.Ecadd)
@@ -129,8 +130,8 @@ func (z *ZkEvm) prove(input *Witness) (prover wizard.ProverStep) {
arithmetization.Assign(run, input.ExecTracesFPath)
// Assign the state-manager module
- z.ecdsa.Assign(run, input.TxSignatureGetter, len(input.TxSignatures))
z.stateManager.Assign(run, input.SMTraces)
+ z.ecdsa.Assign(run, input.TxSignatureGetter)
z.keccak.Run(run)
z.modexp.Assign(run)
z.ecadd.Assign(run)
@@ -140,3 +141,9 @@ func (z *ZkEvm) prove(input *Witness) (prover wizard.ProverStep) {
z.PublicInput.Assign(run, input.L2BridgeAddress)
}
}
// Limits returns the configuration limits used to instantiate the current
// zk-EVM.
func (z *ZkEvm) Limits() *config.TracesLimits {
return z.arithmetization.Settings.Traces
}
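Exposing Limits() gives callers read access to the trace limits this zk-EVM instance was built with, presumably so that downstream sizing decisions stay consistent with the instantiated arithmetization.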