Prover: a couple of fixes for betav1 (#377)

* fix(execution): a few fixes in the wizard verifier

* feat(dict): pass the dict path from config

* fix: makeBw6Proof returns circuitID instead of -1

* fix(circuitID): make makeBw6Proof return the circuitID

* fix(config-testing)

* feat(config): sepolia-full uses full aggregation

* style(naming): rename the rolling hash fields and document the checks in pi-interconnection

* feat: flag for target number of constraints

* fix refactoring oversight

---------

Co-authored-by: Arya Tabaie <arya.pourtabatabaie@gmail.com>
AlexandreBelling authored 2024-12-10 15:55:20 +01:00, committed by GitHub
parent 8cf2866aa9, commit c511121317
22 changed files with 490 additions and 290 deletions

View File

@@ -57,7 +57,7 @@ zkevm/define/define.go: corset
## Generate the setup for the execution prover (to be run with S3 access)
##
setup: bin/prover
bin/prover setup --config ./config/config-integration-full.toml --dict ./lib/compressor/compressor_dict.bin --assets-dir ./prover-assets
bin/prover setup --config ./config/config-sepolia-full.toml --dict ./lib/compressor/compressor_dict.bin --assets-dir ./prover-assets
##
## Copy the prover assets to the S3 bucket (zkuat)
@@ -82,7 +82,7 @@ bin/controller:
bin/prover:
mkdir -p bin
rm -f $@
go build -o $@ ./cmd/prover
go build -tags debug -o $@ ./cmd/prover
##
## Compiles the state-manager inspector
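Note that the `bin/prover` target now builds with `-tags debug`. As a reminder of how Go build constraints gate such code, here is a minimal, purely illustrative file (not part of this commit) that would only be compiled into a tagged build:

```go
//go:build debug

package main

import "log"

// This file participates in the build only when the binary is built with
// `go build -tags debug -o bin/prover ./cmd/prover`; without the tag it is
// silently excluded from compilation.
func init() {
	log.Println("debug build: extra instrumentation enabled")
}
```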

View File

@@ -61,7 +61,7 @@ func makeProof(
proofBn254, err := makeBn254Proof(cfg, circuitID, proofBW6, publicInput)
if err != nil {
return "", fmt.Errorf("error when running the Bn254 proof: %w", err)
return "", fmt.Errorf("error when running the Bn254 proof circuitID=%v %w", circuitID, err)
}
return circuits.SerializeProofSolidityBn254(proofBn254), nil
@@ -105,6 +105,7 @@ func makePiProof(cfg *config.Config, cf *CollectedFields) (plonk.Proof, witness.
}
assignment, err := c.Assign(pi_interconnection.Request{
DictPath: cfg.BlobDecompression.DictPath,
Decompressions: cf.DecompressionPI,
Executions: cf.ExecutionPI,
Aggregation: cf.AggregationPublicInput(cfg),
@@ -172,12 +173,11 @@ func makeBw6Proof(
numProofClaims = len(cf.ProofClaims)
biggestAvailable = 0
bestSize = math.MaxInt
bestSetupPos = -1
bestAllowedVkForAggregation []string
)
// first we discover available setups
for _, maxNbProofs := range cfg.Aggregation.NumProofs {
for setupPos, maxNbProofs := range cfg.Aggregation.NumProofs {
biggestAvailable = max(biggestAvailable, maxNbProofs)
// That's the quickest reject condition we have
@@ -187,11 +187,11 @@ func makeBw6Proof(
}
// read the manifest and the allowed verifying keys digests
circuitID := circuits.CircuitID(fmt.Sprintf("%s-%d", string(circuits.AggregationCircuitID), maxNbProofs))
setupPath := cfg.PathForSetup(string(circuitID))
circuitIDStr := circuits.CircuitID(fmt.Sprintf("%s-%d", string(circuits.AggregationCircuitID), maxNbProofs))
setupPath := cfg.PathForSetup(string(circuitIDStr))
manifest, err := circuits.ReadSetupManifest(filepath.Join(setupPath, config.ManifestFileName))
if err != nil {
return nil, 0, fmt.Errorf("could not read the manifest for circuit %v: %w", circuitID, err)
return nil, 0, fmt.Errorf("could not read the manifest for circuit %v: %w", circuitIDStr, err)
}
allowedVkForAggregation, err := manifest.GetStringArray("allowedVkForAggregationDigests")
if err != nil {
@@ -205,6 +205,7 @@ func makeBw6Proof(
}
if maxNbProofs <= bestSize {
circuitID = setupPos
bestSize = maxNbProofs
bestAllowedVkForAggregation = allowedVkForAggregation
}
@@ -249,12 +250,12 @@ func makeBw6Proof(
ActualIndexes: pi_interconnection.InnerCircuitTypesToIndexes(&cfg.PublicInputInterconnection, cf.InnerCircuitTypes),
}
logrus.Infof("running the BW6 prover")
logrus.Infof("running the BW6 prover with circuit-ID=%v", circuitID)
proofBW6, err := aggregation.MakeProof(&setup, bestSize, cf.ProofClaims, piInfo, piBW6)
if err != nil {
return nil, 0, fmt.Errorf("could not create BW6 proof: %w", err)
}
return proofBW6, bestSetupPos, nil
return proofBW6, circuitID, nil
}
func makeBn254Proof(
@@ -279,7 +280,7 @@ func makeBn254Proof(
return nil, fmt.Errorf("could not parse the public input: %w", err)
}
logrus.Infof("running the Bn254 prover")
logrus.Infof("running the Bn254 prover circuitID=%v", circuitID)
proofBn254, err := emulation.MakeProof(&setup, circuitID, proofBw6, piBn254)
if err != nil {
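For readability, here is a condensed, hedged sketch of the setup-selection logic in makeBw6Proof above: among the available aggregation setups, pick the smallest one that can hold the proof claims, and return its position, which now doubles as the circuitID handed to the BN254 wrapper (manifest reading and verifying-key digests omitted; the helper name is hypothetical):

```go
package sketch

import (
	"fmt"
	"math"
)

// selectSetup picks, among the available aggregation setups (each able to
// hold maxNbProofs claims), the smallest one that fits numProofClaims and
// returns its position in cfg.Aggregation.NumProofs as the circuitID.
func selectSetup(numProofs []int, numProofClaims int) (circuitID, bestSize int, err error) {
	circuitID, bestSize = -1, math.MaxInt
	for setupPos, maxNbProofs := range numProofs {
		if maxNbProofs < numProofClaims || maxNbProofs > bestSize {
			continue // too small to hold the claims, or not the smallest fit so far
		}
		circuitID, bestSize = setupPos, maxNbProofs
	}
	if circuitID < 0 {
		return 0, 0, fmt.Errorf("no setup can hold %d proof claims", numProofClaims)
	}
	return circuitID, bestSize, nil
}
```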

View File

@@ -2,9 +2,10 @@ package execution
import (
"bytes"
public_input "github.com/consensys/linea-monorepo/prover/public-input"
"path"
public_input "github.com/consensys/linea-monorepo/prover/public-input"
"github.com/consensys/linea-monorepo/prover/backend/ethereum"
"github.com/consensys/linea-monorepo/prover/backend/execution/bridge"
"github.com/consensys/linea-monorepo/prover/backend/execution/statemanager"
@@ -200,9 +201,9 @@ func (rsp *Response) FuncInput() *public_input.Execution {
)
fi.InitialRollingHashUpdate = firstRHEvent.RollingHash
fi.FinalRollingHashUpdate = lastRHEvent.RollingHash
fi.InitialRollingHashMsgNumber = uint64(firstRHEvent.MessageNumber)
fi.FinalRollingHashMsgNumber = uint64(lastRHEvent.MessageNumber)
fi.LastRollingHashUpdate = lastRHEvent.RollingHash
fi.FirstRollingHashUpdateNumber = uint64(firstRHEvent.MessageNumber)
fi.LastRollingHashUpdateNumber = uint64(lastRHEvent.MessageNumber)
}
return fi

View File

@@ -173,7 +173,6 @@ func verifyClaimBatch(api frontend.API, vks []emVkey, claims []proofClaim) error
}
for i := 1; i < len(vks)-1; i++ { // TODO @Tabaie make sure these don't generate any constraints
fmt.Printf("checking base vk #%d/%d\n", i+1, len(vks)-1)
assertBaseKeyEquals(api, bvk, vks[i].BaseVerifyingKey)
}

View File

@@ -3,16 +3,15 @@ package main
import (
"flag"
"fmt"
"runtime"
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/frontend/cs/scs"
v1 "github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v1"
blob "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
"runtime"
)
const maxNbConstraints = 1 << 27
func nbConstraints(blobSize int) int {
fmt.Printf("*********************\nfor blob of size %dB or %.2fKB:\n", blobSize, float32(blobSize)/1024)
c := v1.Circuit{
@@ -22,15 +21,15 @@ func nbConstraints(blobSize int) int {
UseGkrMiMC: true,
}
runtime.GC()
if cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, &c, frontend.WithCapacity(maxNbConstraints*6/5)); err != nil {
if cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, &c, frontend.WithCapacity(*flagTargetNbConstraints*6/5)); err != nil {
panic(err)
} else {
res := cs.GetNbConstraints()
cmp := "match"
if res > maxNbConstraints {
if res > *flagTargetNbConstraints {
cmp = "over"
}
if res < maxNbConstraints {
if res < *flagTargetNbConstraints {
cmp = "under"
}
fmt.Printf("%d constraints (%s)\n", res, cmp)
@@ -39,8 +38,9 @@ func nbConstraints(blobSize int) int {
}
var (
flagCrawlStep = flag.Int("step", 1000, "the crawl step") // TODO @Tabaie fix mixed metaphor
flagStart = flag.Int("start", blob.MaxUncompressedBytes, "initial size in bytes")
flagCrawlStep = flag.Int("step", 1000, "the crawl step") // TODO @Tabaie fix mixed metaphor
flagStart = flag.Int("start", blob.MaxUncompressedBytes, "initial size in bytes")
flagTargetNbConstraints = flag.Int("target", 1<<27, "target number of constraints")
)
func main() {
@@ -49,24 +49,24 @@ func main() {
v := nbConstraints(*flagStart)
a, b := *flagStart, *flagStart
if v > maxNbConstraints {
if v > *flagTargetNbConstraints {
fmt.Println("crawling downward")
for v > maxNbConstraints {
for v > *flagTargetNbConstraints {
b = a
a = max(a-*flagCrawlStep, 0)
v = nbConstraints(a)
*flagCrawlStep *= 2
}
} else if v < maxNbConstraints {
} else if v < *flagTargetNbConstraints {
fmt.Println("crawling upward")
for v < maxNbConstraints {
for v < *flagTargetNbConstraints {
a = b
b += *flagCrawlStep
v = nbConstraints(b)
*flagCrawlStep *= 2
}
}
if v == maxNbConstraints {
if v == *flagTargetNbConstraints {
fmt.Println("wow what are the odds")
return
}
@@ -75,13 +75,13 @@ func main() {
for b > a {
m := (b + a) / 2
v = nbConstraints(m)
if v > maxNbConstraints {
if v > *flagTargetNbConstraints {
b = m
}
if v < maxNbConstraints {
if v < *flagTargetNbConstraints {
a = m
}
if v == maxNbConstraints {
if v == *flagTargetNbConstraints {
return
}
}
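With the target now a flag, the search above first brackets the target by doubling the crawl step, then bisects. Distilled into a standalone helper (hypothetical; the real code re-measures constraint counts at every probe):

```go
package main

import "fmt"

// bisectLargest returns the largest x in [lo, hi] with cost(x) <= target,
// assuming cost is non-decreasing; this is the invariant the bisection
// phase of main relies on once the crawl has bracketed the target.
func bisectLargest(lo, hi, target int, cost func(int) int) int {
	for lo < hi {
		mid := (lo + hi + 1) / 2 // round up so lo always makes progress
		if cost(mid) <= target {
			lo = mid // mid still fits under the constraint budget
		} else {
			hi = mid - 1 // mid is over budget
		}
	}
	return lo
}

func main() {
	// toy cost model: 3 constraints per blob byte, budget of 1<<27 constraints
	fmt.Println(bisectLargest(0, 1<<26, 1<<27, func(x int) int { return 3 * x }))
}
```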

View File

@@ -2,6 +2,7 @@ package execution
import (
"fmt"
"github.com/consensys/gnark/frontend"
gnarkHash "github.com/consensys/gnark/std/hash"
"github.com/consensys/gnark/std/rangecheck"
@@ -14,16 +15,16 @@ import (
// FunctionalPublicInputQSnark holds the information on this execution that
// cannot be extracted from other inputs in the same aggregation batch
type FunctionalPublicInputQSnark struct {
DataChecksum frontend.Variable
L2MessageHashes L2MessageHashes
InitialBlockTimestamp frontend.Variable
FinalStateRootHash frontend.Variable
FinalBlockNumber frontend.Variable
FinalBlockTimestamp frontend.Variable
InitialRollingHashUpdate [32]frontend.Variable
InitialRollingHashMsgNumber frontend.Variable
FinalRollingHashUpdate [32]frontend.Variable
FinalRollingHashMsgNumber frontend.Variable
DataChecksum frontend.Variable
L2MessageHashes L2MessageHashes
InitialBlockTimestamp frontend.Variable
FinalStateRootHash frontend.Variable
FinalBlockNumber frontend.Variable
FinalBlockTimestamp frontend.Variable
InitialRollingHashUpdate [32]frontend.Variable
FirstRollingHashUpdateNumber frontend.Variable
FinalRollingHashUpdate [32]frontend.Variable
LastRollingHashUpdateNumber frontend.Variable
}
// L2MessageHashes is a wrapper for [Var32Slice]. It is used to instantiate the
@@ -123,8 +124,8 @@ func (spiq *FunctionalPublicInputQSnark) RangeCheck(api frontend.API) {
rc.Check(spiq.FinalBlockNumber, 64)
rc.Check(spiq.FinalBlockTimestamp, 64)
rc.Check(spiq.InitialBlockTimestamp, 64)
rc.Check(spiq.InitialRollingHashMsgNumber, 64)
rc.Check(spiq.FinalRollingHashMsgNumber, 64)
rc.Check(spiq.FirstRollingHashUpdateNumber, 64)
rc.Check(spiq.LastRollingHashUpdateNumber, 64)
spiq.L2MessageHashes.RangeCheck(api)
}
@@ -139,8 +140,8 @@ func (spi *FunctionalPublicInputSnark) Sum(api frontend.API, hsh gnarkHash.Field
hsh.Reset()
hsh.Write(spi.DataChecksum, l2MessagesSum,
spi.FinalStateRootHash, spi.FinalBlockNumber, spi.FinalBlockTimestamp, finalRollingHash[0], finalRollingHash[1], spi.FinalRollingHashMsgNumber,
spi.InitialStateRootHash, spi.InitialBlockNumber, spi.InitialBlockTimestamp, initialRollingHash[0], initialRollingHash[1], spi.InitialRollingHashMsgNumber,
spi.FinalStateRootHash, spi.FinalBlockNumber, spi.FinalBlockTimestamp, finalRollingHash[0], finalRollingHash[1], spi.LastRollingHashUpdateNumber,
spi.InitialStateRootHash, spi.InitialBlockNumber, spi.InitialBlockTimestamp, initialRollingHash[0], initialRollingHash[1], spi.FirstRollingHashUpdateNumber,
spi.ChainID, spi.L2MessageServiceAddr)
return hsh.Sum()
@@ -163,10 +164,10 @@ func (spiq *FunctionalPublicInputQSnark) Assign(pi *public_input.Execution) erro
spiq.FinalStateRootHash = pi.FinalStateRootHash[:]
spiq.FinalBlockNumber = pi.FinalBlockNumber
spiq.FinalBlockTimestamp = pi.FinalBlockTimestamp
spiq.InitialRollingHashMsgNumber = pi.InitialRollingHashMsgNumber
spiq.FinalRollingHashMsgNumber = pi.FinalRollingHashMsgNumber
spiq.FirstRollingHashUpdateNumber = pi.FirstRollingHashUpdateNumber
spiq.LastRollingHashUpdateNumber = pi.LastRollingHashUpdateNumber
utils.Copy(spiq.FinalRollingHashUpdate[:], pi.FinalRollingHashUpdate[:])
utils.Copy(spiq.FinalRollingHashUpdate[:], pi.LastRollingHashUpdate[:])
utils.Copy(spiq.InitialRollingHashUpdate[:], pi.InitialRollingHashUpdate[:])
return spiq.L2MessageHashes.Assign(pi.L2MessageHashes)

View File

@@ -1,9 +1,10 @@
package execution
import (
"testing"
public_input "github.com/consensys/linea-monorepo/prover/public-input"
"github.com/stretchr/testify/require"
"testing"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/std/hash/mimc"
@@ -13,14 +14,14 @@ import (
func TestPIConsistency(t *testing.T) {
pi := public_input.Execution{
L2MessageHashes: make([][32]byte, 2),
FinalBlockNumber: 4,
FinalBlockTimestamp: 5,
FinalRollingHashMsgNumber: 6,
InitialBlockNumber: 1,
InitialBlockTimestamp: 2,
InitialRollingHashMsgNumber: 3,
ChainID: 7,
L2MessageHashes: make([][32]byte, 2),
FinalBlockNumber: 4,
FinalBlockTimestamp: 5,
LastRollingHashUpdateNumber: 6,
InitialBlockNumber: 1,
InitialBlockTimestamp: 2,
FirstRollingHashUpdateNumber: 3,
ChainID: 7,
}
utils.FillRange(pi.DataChecksum[:], 10)
@@ -29,7 +30,7 @@ func TestPIConsistency(t *testing.T) {
utils.FillRange(pi.InitialStateRootHash[:], 130)
utils.FillRange(pi.InitialRollingHashUpdate[:], 170)
utils.FillRange(pi.FinalStateRootHash[:], 210)
utils.FillRange(pi.FinalRollingHashUpdate[:], 250)
utils.FillRange(pi.LastRollingHashUpdate[:], 250)
utils.FillRange(pi.L2MessageServiceAddr[:], 40)
// state root hashes are field elements

View File

@@ -20,9 +20,9 @@ func checkPublicInputs(
) {
var (
finalRollingHash = internal.CombineBytesIntoElements(api, gnarkFuncInp.FinalRollingHashUpdate)
initialRollingHash = internal.CombineBytesIntoElements(api, gnarkFuncInp.InitialRollingHashUpdate)
execDataHash = execDataHash(api, wvc, wizardFuncInp)
lastRollingHash = internal.CombineBytesIntoElements(api, gnarkFuncInp.FinalRollingHashUpdate)
firstRollingHash = internal.CombineBytesIntoElements(api, gnarkFuncInp.InitialRollingHashUpdate)
execDataHash = execDataHash(api, wvc, wizardFuncInp)
)
// As we have this issue, the execDataHash will not match what we have in the
@@ -55,18 +55,18 @@ func checkPublicInputs(
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.InitialRollingHash[0].ID).Y,
initialRollingHash[0],
wvc.GetLocalPointEvalParams(wizardFuncInp.FirstRollingHashUpdate[0].ID).Y,
firstRollingHash[0],
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.InitialRollingHash[1].ID).Y,
initialRollingHash[1],
wvc.GetLocalPointEvalParams(wizardFuncInp.FirstRollingHashUpdate[1].ID).Y,
firstRollingHash[1],
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.InitialRollingHashNumber.ID).Y,
gnarkFuncInp.InitialRollingHashMsgNumber,
wvc.GetLocalPointEvalParams(wizardFuncInp.FirstRollingHashUpdateNumber.ID).Y,
gnarkFuncInp.FirstRollingHashUpdateNumber,
)
api.AssertIsEqual(
@@ -85,18 +85,18 @@ func checkPublicInputs(
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.FinalRollingHash[0].ID).Y,
finalRollingHash[0],
wvc.GetLocalPointEvalParams(wizardFuncInp.LastRollingHashUpdate[0].ID).Y,
lastRollingHash[0],
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.FinalRollingHash[1].ID).Y,
finalRollingHash[1],
wvc.GetLocalPointEvalParams(wizardFuncInp.LastRollingHashUpdate[1].ID).Y,
lastRollingHash[1],
)
api.AssertIsEqual(
wvc.GetLocalPointEvalParams(wizardFuncInp.FinalRollingHashNumber.ID).Y,
gnarkFuncInp.FinalRollingHashMsgNumber,
wvc.GetLocalPointEvalParams(wizardFuncInp.LastRollingHashUpdateNumber.ID).Y,
gnarkFuncInp.LastRollingHashUpdateNumber,
)
var (
@@ -113,12 +113,22 @@ func checkPublicInputs(
)
)
// In principle, we should enforce a strict equality between the purported
// chainID and the one extracted from the traces. But in case the executed
// block contains only legacy transactions (i.e. transactions without a
// specified chainID), the traces will return a chainID of zero.
api.AssertIsEqual(
api.Div(
api.Mul(
wvc.GetLocalPointEvalParams(wizardFuncInp.ChainID.ID).Y,
twoPow112,
api.Sub(
api.Div(
wvc.GetLocalPointEvalParams(wizardFuncInp.ChainID.ID).Y,
twoPow112,
),
gnarkFuncInp.ChainID,
),
),
gnarkFuncInp.ChainID,
0,
)
api.AssertIsEqual(bridgeAddress, gnarkFuncInp.L2MessageServiceAddr)
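The reworked assertion above relaxes the chainID equality: it now enforces y·(y/2^112 − chainID) = 0 over the field, so the value extracted from the traces is either zero (legacy-only block) or matches the purported chainID. A hedged standalone rendering of that constraint (the helper name is hypothetical):

```go
package execution

import (
	"math/big"

	"github.com/consensys/gnark/frontend"
)

// assertChainIDOrZero enforces y * (y/2^112 - claimed) == 0. Over a prime
// field a product vanishes iff one of its factors does, so either the
// traces-derived value y is zero, or its 112-bit downshift equals the
// claimed chainID.
func assertChainIDOrZero(api frontend.API, y, claimed frontend.Variable, twoPow112 *big.Int) {
	extracted := api.Div(y, twoPow112) // y stores the chainID shifted left by 112 bits
	api.AssertIsEqual(api.Mul(y, api.Sub(extracted, claimed)), 0)
}
```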

View File

@@ -4,9 +4,10 @@ import (
"bytes"
"encoding/base64"
"fmt"
"github.com/consensys/linea-monorepo/prover/crypto/mimc"
"hash"
"github.com/consensys/linea-monorepo/prover/crypto/mimc"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/consensys/linea-monorepo/prover/backend/blobsubmission"
decompression "github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v1"
@@ -23,6 +24,9 @@ type Request struct {
Decompressions []blobsubmission.Response
Executions []public_input.Execution
Aggregation public_input.Aggregation
// Path to the compression dictionary. Used to extract the execution data
// for each execution.
DictPath string
}
func (c *Compiled) Assign(r Request) (a Circuit, err error) {
@@ -52,9 +56,11 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
return
}
dict, err := blob.GetDict() // TODO look up dict based on checksum
// @alex: We should pass that as a parameter. And also (@arya) pass a list
// of dictionaries to this function.
dict, err := blob.GetDict(r.DictPath)
if err != nil {
return
return Circuit{}, fmt.Errorf("could not find the dictionary: path=%v err=%w", r.DictPath, err)
}
// For Shnarfs and Merkle Roots
@@ -67,6 +73,11 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
utils.Copy(a.ParentShnarf[:], prevShnarf)
hshM := mimc.NewMiMC()
// execDataChecksums is a list that we progressively fill to store the mimc
// hash of the executionData for every execution (conflation) batch. The
// list is filled as we process the decompression proofs, which store a list
// of the corresponding execution data hashes. These are then checked against
// the execution proof public inputs.
execDataChecksums := make([][]byte, 0, len(r.Executions))
shnarfs := make([][]byte, cfg.MaxNbDecompression)
// Decompression FPI
@@ -169,15 +180,17 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
return
}
// TODO @Tabaie combine the following two checks
// TODO @Tabaie combine the following two checks.
if len(r.Decompressions) != 0 && !bytes.Equal(shnarfs[len(r.Decompressions)-1], aggregationFPI.FinalShnarf[:]) { // first condition is an edge case for tests
err = fmt.Errorf("aggregation fails CHECK_FINAL_SHNARF:\n\tcomputed %x, given %x", shnarfs[len(r.Decompressions)-1], aggregationFPI.FinalShnarf)
return
}
if len(r.Decompressions) == 0 && !bytes.Equal(aggregationFPI.ParentShnarf[:], aggregationFPI.FinalShnarf[:]) {
err = fmt.Errorf("aggregation fails CHECK_FINAL_SHNARF:\n\tcomputed %x, given %x", aggregationFPI.ParentShnarf, aggregationFPI.FinalShnarf)
if len(r.Decompressions) == 0 || len(r.Executions) == 0 {
err = fmt.Errorf("aggregation fails NO EXECUTION OR NO COMPRESSION:\n\tnbDecompression %d, nbExecution %d", len(r.Decompressions), len(r.Executions))
return
}
aggregationFPI.NbDecompression = uint64(len(r.Decompressions))
a.AggregationFPIQSnark = aggregationFPI.ToSnarkType().AggregationFPIQSnark
@@ -185,7 +198,7 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
maxNbL2MessageHashes := cfg.L2MsgMaxNbMerkle * merkleNbLeaves
l2MessageHashes := make([][32]byte, 0, maxNbL2MessageHashes)
lastRollingHashUpdate, lastRollingHashMsg := aggregationFPI.LastFinalizedRollingHash, aggregationFPI.LastFinalizedRollingHashMsgNumber
lastRollingHash, lastRollingHashNumber := aggregationFPI.LastFinalizedRollingHash, aggregationFPI.LastFinalizedRollingHashMsgNumber
lastFinBlockNum, lastFinBlockTs := aggregationFPI.LastFinalizedBlockNumber, aggregationFPI.LastFinalizedBlockTimestamp
lastFinalizedStateRootHash := aggregationFPI.InitialStateRootHash
@@ -226,6 +239,10 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
err = fmt.Errorf("execution #%d fails CHECK_NUM_CONSEC:\n\tinitial block number %d is not right after to the last finalized %d", i, initial, lastFinBlockNum)
return
}
// This is asserted against a constant in the circuit. Thus we have a
// different circuit for different values of the msgSvcAddress and
// chainID.
if got, want := &executionFPI.L2MessageServiceAddr, &r.Aggregation.L2MessageServiceAddr; *got != *want {
err = fmt.Errorf("execution #%d fails CHECK_SVC_ADDR:\n\texpected L2 service address %x, encountered %x", i, *want, *got)
return
@@ -238,35 +255,44 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
err = fmt.Errorf("execution #%d fails CHECK_TIME_INCREASE:\n\tinitial block timestamp is not after the final block timestamp from previous execution %d≤%d", i, initial, lastFinBlockTs)
return
}
// @alex: This check duplicates a check already done on the execution
// proof.
if first, last := executionFPI.InitialBlockNumber, executionFPI.FinalBlockNumber; first > last {
err = fmt.Errorf("execution #%d fails CHECK_NUM_NODECREASE:\n\tinitial block number is greater than the final block number %d>%d", i, first, last)
return
}
// @alex: This check is a duplicate of an execution proof check.
if first, last := executionFPI.InitialBlockTimestamp, executionFPI.FinalBlockTimestamp; first > last {
err = fmt.Errorf("execution #%d fails CHECK_TIME_NODECREASE:\n\tinitial block timestamp is greater than the final block timestamp %d>%d", i, first, last)
return
}
// if there is a first, there shall be a last, no less than the first
if executionFPI.FinalRollingHashMsgNumber < executionFPI.InitialRollingHashMsgNumber {
err = fmt.Errorf("execution #%d fails CHECK_RHASH_NODECREASE:\n\tfinal rolling hash message number %d is less than the initial %d", i, executionFPI.FinalRollingHashMsgNumber, executionFPI.InitialRollingHashMsgNumber)
if executionFPI.LastRollingHashUpdateNumber < executionFPI.FirstRollingHashUpdateNumber {
err = fmt.Errorf("execution #%d fails CHECK_RHASH_NODECREASE:\n\tfinal rolling hash message number %d is less than the initial %d", i, executionFPI.LastRollingHashUpdateNumber, executionFPI.FirstRollingHashUpdateNumber)
return
}
if (executionFPI.InitialRollingHashMsgNumber == 0) != (executionFPI.FinalRollingHashMsgNumber == 0) {
err = fmt.Errorf("execution #%d fails CHECK_RHASH_FIRSTLAST:\n\tif there is a rolling hash update there must be both a first and a last.\n\tfirst update msg num = %d, last update msg num = %d", i, executionFPI.InitialRollingHashMsgNumber, executionFPI.FinalRollingHashMsgNumber)
// @alex: This check is a duplicate of an execution proof check.
if (executionFPI.FirstRollingHashUpdateNumber == 0) != (executionFPI.LastRollingHashUpdateNumber == 0) {
err = fmt.Errorf("execution #%d fails CHECK_RHASH_FIRSTLAST:\n\tif there is a rolling hash update there must be both a first and a last.\n\tfirst update msg num = %d, last update msg num = %d", i, executionFPI.FirstRollingHashUpdateNumber, executionFPI.LastRollingHashUpdateNumber)
return
}
// TODO @Tabaie check that if the initial and final rolling hash msg nums were equal then so should the hashes, or decide not to
// consistency check and record keeping
if executionFPI.InitialRollingHashMsgNumber != 0 { // there is an update
if executionFPI.InitialRollingHashMsgNumber != lastRollingHashMsg+1 {
err = fmt.Errorf("execution #%d fails CHECK_RHASH_CONSEC:\n\tinitial rolling hash message number %d is not right after the last finalized one %d", i, executionFPI.InitialRollingHashMsgNumber, lastRollingHashMsg)
if executionFPI.FirstRollingHashUpdateNumber != 0 { // there is an update
// @alex: Not sure whether this check is a duplicate: we already check
// that the state root hash is well-propagated, and this should be enough
// to ensure that the rolling hash update events are emitted in sequence.
if executionFPI.FirstRollingHashUpdateNumber != lastRollingHashNumber+1 {
err = fmt.Errorf("execution #%d fails CHECK_RHASH_CONSEC:\n\tinitial rolling hash message number %d is not right after the last finalized one %d", i, executionFPI.FirstRollingHashUpdateNumber, lastRollingHashNumber)
return
}
lastRollingHashMsg = executionFPI.FinalRollingHashMsgNumber
lastRollingHashUpdate = executionFPI.FinalRollingHashUpdate
lastRollingHashNumber = executionFPI.LastRollingHashUpdateNumber
lastRollingHash = executionFPI.LastRollingHashUpdate
}
lastFinBlockNum, lastFinBlockTs = executionFPI.FinalBlockNumber, executionFPI.FinalBlockTimestamp
@@ -290,13 +316,13 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
return
}
if lastRollingHashUpdate != aggregationFPI.FinalRollingHash {
err = fmt.Errorf("aggregation fails CHECK_FINAL_RHASH:\n\tfinal rolling hashes do not match: execution=%x, aggregation=%x", lastRollingHashUpdate, aggregationFPI.FinalRollingHash)
if lastRollingHash != aggregationFPI.FinalRollingHash {
err = fmt.Errorf("aggregation fails CHECK_FINAL_RHASH:\n\tfinal rolling hashes do not match: execution=%x, aggregation=%x", lastRollingHash, aggregationFPI.FinalRollingHash)
return
}
if lastRollingHashMsg != aggregationFPI.FinalRollingHashNumber {
err = fmt.Errorf("aggregation fails CHECK_FINAL_RHASH_NUM:\n\tfinal rolling hash numbers do not match: execution=%v, aggregation=%v", lastRollingHashMsg, aggregationFPI.FinalRollingHashNumber)
if lastRollingHashNumber != aggregationFPI.FinalRollingHashNumber {
err = fmt.Errorf("aggregation fails CHECK_FINAL_RHASH_NUM:\n\tfinal rolling hash numbers do not match: execution=%v, aggregation=%v", lastRollingHashNumber, aggregationFPI.FinalRollingHashNumber)
return
}
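The rolling-hash bookkeeping above (now named lastRollingHash/lastRollingHashNumber) enforces that update numbers chain consecutively across executions, skipping executions that perform no update. Distilled into a plain-Go checker (hypothetical helper, per-execution numbers passed as slices):

```go
package sketch

import "fmt"

// checkRollingHashChain mirrors CHECK_RHASH_CONSEC above: whenever an
// execution performs rolling-hash updates (firstNums[i] != 0), its first
// update number must directly follow the last one seen; executions without
// updates are transparent to the chain.
func checkRollingHashChain(lastFinalized uint64, firstNums, lastNums []uint64) error {
	last := lastFinalized
	for i := range firstNums {
		if firstNums[i] == 0 {
			continue // no rolling hash update in this execution
		}
		if firstNums[i] != last+1 {
			return fmt.Errorf("execution #%d fails CHECK_RHASH_CONSEC: first update %d does not follow %d", i, firstNums[i], last)
		}
		last = lastNums[i]
	}
	return nil
}
```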

View File

@@ -2,10 +2,11 @@ package pi_interconnection
import (
"errors"
"github.com/sirupsen/logrus"
"math/big"
"slices"
"github.com/sirupsen/logrus"
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/constraint"
"github.com/consensys/gnark/frontend/cs/scs"
@@ -83,6 +84,12 @@ func (c *Circuit) Define(api frontend.API) error {
nbBatchesSums := rDecompression.PartialSumsF(func(i int) frontend.Variable { return c.DecompressionFPIQ[i].NbBatches })
nbExecution := nbBatchesSums[len(nbBatchesSums)-1] // implicit: CHECK_NB_EXEC
// These two checks prevent constructing a proof where no execution or no
// compression proofs are provided. This is to prevent corner cases from
// arising.
api.AssertIsDifferent(c.NbDecompression, 0)
api.AssertIsDifferent(nbExecution, 0)
if c.MaxNbCircuits > 0 { // CHECK_CIRCUIT_LIMIT
api.AssertIsLessOrEqual(api.Add(nbExecution, c.NbDecompression), c.MaxNbCircuits)
}
@@ -156,6 +163,9 @@ func (c *Circuit) Define(api frontend.API) error {
for i, piq := range c.ExecutionFPIQ {
piq.RangeCheck(api) // CHECK_MSG_LIMIT
// inRange is a binary value indicating whether the execution considered
// in the current iteration is an actual execution and not some padding.
inRange := rExecution.InRange[i]
pi := execution.FunctionalPublicInputSnark{
@@ -166,18 +176,18 @@ func (c *Circuit) Define(api frontend.API) error {
L2MessageServiceAddr: c.L2MessageServiceAddr[:], // implicit CHECK_SVC_ADDR
}
comparator.AssertIsLessEq(pi.InitialBlockTimestamp, pi.FinalBlockTimestamp) // CHECK_TIME_NODECREASE
comparator.AssertIsLessEq(pi.InitialBlockNumber, pi.FinalBlockNumber) // CHECK_NUM_NODECREASE
comparator.AssertIsLess(finalBlockTime, pi.InitialBlockTimestamp) // CHECK_TIME_INCREASE
comparator.AssertIsLessEq(pi.InitialRollingHashMsgNumber, pi.FinalRollingHashMsgNumber) // CHECK_RHASH_NODECREASE
comparator.AssertIsLessEq(pi.InitialBlockTimestamp, pi.FinalBlockTimestamp) // CHECK_TIME_NODECREASE
comparator.AssertIsLessEq(pi.InitialBlockNumber, pi.FinalBlockNumber) // CHECK_NUM_NODECREASE
comparator.AssertIsLess(finalBlockTime, pi.InitialBlockTimestamp) // CHECK_TIME_INCREASE
comparator.AssertIsLessEq(pi.FirstRollingHashUpdateNumber, pi.LastRollingHashUpdateNumber) // CHECK_RHASH_NODECREASE
finalRhMsgNumZero := api.IsZero(piq.FinalRollingHashMsgNumber)
api.AssertIsEqual(finalRhMsgNumZero, api.IsZero(piq.InitialRollingHashMsgNumber)) // CHECK_RHASH_FIRSTLAST
finalRhMsgNumZero := api.IsZero(piq.LastRollingHashUpdateNumber)
api.AssertIsEqual(finalRhMsgNumZero, api.IsZero(piq.FirstRollingHashUpdateNumber)) // CHECK_RHASH_FIRSTLAST
rollingHashUpdated := api.Mul(inRange, api.Sub(1, finalRhMsgNumZero))
// CHECK_RHASH_CONSEC
internal.AssertEqualIf(api, rollingHashUpdated, pi.InitialRollingHashMsgNumber, api.Add(finalRollingHashMsgNum, 1))
finalRollingHashMsgNum = api.Select(rollingHashUpdated, pi.FinalRollingHashMsgNumber, finalRollingHashMsgNum)
internal.AssertEqualIf(api, rollingHashUpdated, pi.FirstRollingHashUpdateNumber, api.Add(finalRollingHashMsgNum, 1))
finalRollingHashMsgNum = api.Select(rollingHashUpdated, pi.LastRollingHashUpdateNumber, finalRollingHashMsgNum)
copy(finalRollingHash[:], internal.SelectMany(api, rollingHashUpdated, pi.FinalRollingHashUpdate[:], finalRollingHash[:]))
finalBlockTime = pi.FinalBlockTimestamp
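The circuit-side analogue of these checks gates each assertion on rollingHashUpdated, so padded executions are ignored. Assuming internal.AssertEqualIf(api, cond, a, b) enforces a == b whenever cond is 1, it reduces to the classic product trick, sketched here:

```go
package sketch

import "github.com/consensys/gnark/frontend"

// assertEqualIf enforces a == b whenever the boolean cond is 1: the product
// cond*(a-b) is zero iff cond == 0 or a == b. This is presumably the pattern
// behind internal.AssertEqualIf used for CHECK_RHASH_CONSEC above.
func assertEqualIf(api frontend.API, cond, a, b frontend.Variable) {
	api.AssertIsEqual(api.Mul(cond, api.Sub(a, b)), 0)
}
```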

View File

@@ -5,10 +5,11 @@ package pi_interconnection_test
import (
"encoding/base64"
"fmt"
"github.com/stretchr/testify/require"
"slices"
"testing"
"github.com/stretchr/testify/require"
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/frontend/cs/scs"
@@ -72,27 +73,27 @@ func TestTinyTwoBatchBlob(t *testing.T) {
}
execReq := []public_input.Execution{{
InitialBlockTimestamp: 6,
FinalStateRootHash: stateRootHashes[1],
FinalBlockNumber: 5,
FinalBlockTimestamp: 6,
FinalRollingHashUpdate: internal.Uint64To32Bytes(7),
FinalRollingHashMsgNumber: 8,
InitialRollingHashMsgNumber: 8,
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(3)},
InitialStateRootHash: stateRootHashes[0],
InitialBlockNumber: 5,
InitialBlockTimestamp: 6,
FinalStateRootHash: stateRootHashes[1],
FinalBlockNumber: 5,
FinalBlockTimestamp: 6,
LastRollingHashUpdate: internal.Uint64To32Bytes(7),
LastRollingHashUpdateNumber: 8,
FirstRollingHashUpdateNumber: 8,
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(3)},
InitialStateRootHash: stateRootHashes[0],
InitialBlockNumber: 5,
}, {
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(9)},
InitialBlockTimestamp: 7,
FinalStateRootHash: stateRootHashes[2],
FinalBlockNumber: 11,
FinalBlockTimestamp: 12,
FinalRollingHashUpdate: internal.Uint64To32Bytes(13),
FinalRollingHashMsgNumber: 14,
InitialRollingHashMsgNumber: 9,
InitialStateRootHash: stateRootHashes[1],
InitialBlockNumber: 6,
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(9)},
InitialBlockTimestamp: 7,
FinalStateRootHash: stateRootHashes[2],
FinalBlockNumber: 11,
FinalBlockTimestamp: 12,
LastRollingHashUpdate: internal.Uint64To32Bytes(13),
LastRollingHashUpdateNumber: 14,
FirstRollingHashUpdateNumber: 9,
InitialStateRootHash: stateRootHashes[1],
InitialBlockNumber: 6,
}}
blobReq := blobsubmission.Request{
@@ -120,9 +121,9 @@ func TestTinyTwoBatchBlob(t *testing.T) {
LastFinalizedBlockNumber: 4,
FinalBlockNumber: uint(execReq[1].FinalBlockNumber),
LastFinalizedL1RollingHash: utils.FmtIntHex32Bytes(13),
L1RollingHash: utils.HexEncodeToString(execReq[1].FinalRollingHashUpdate[:]),
L1RollingHash: utils.HexEncodeToString(execReq[1].LastRollingHashUpdate[:]),
LastFinalizedL1RollingHashMessageNumber: 7,
L1RollingHashMessageNumber: uint(execReq[1].FinalRollingHashMsgNumber),
L1RollingHashMessageNumber: uint(execReq[1].LastRollingHashUpdateNumber),
L2MsgRootHashes: merkleRoots,
L2MsgMerkleTreeDepth: 5,
},
@@ -135,49 +136,49 @@ func TestTwoTwoBatchBlobs(t *testing.T) {
blobs := blobtesting.ConsecutiveBlobs(t, 2, 2)
execReq := []public_input.Execution{{
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(3)},
InitialBlockTimestamp: 6,
FinalStateRootHash: internal.Uint64To32Bytes(4),
FinalBlockNumber: 5,
FinalBlockTimestamp: 6,
FinalRollingHashUpdate: internal.Uint64To32Bytes(7),
FinalRollingHashMsgNumber: 8,
InitialStateRootHash: internal.Uint64To32Bytes(1),
InitialBlockNumber: 5,
InitialRollingHashMsgNumber: 8,
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(3)},
InitialBlockTimestamp: 6,
FinalStateRootHash: internal.Uint64To32Bytes(4),
FinalBlockNumber: 5,
FinalBlockTimestamp: 6,
LastRollingHashUpdate: internal.Uint64To32Bytes(7),
LastRollingHashUpdateNumber: 8,
InitialStateRootHash: internal.Uint64To32Bytes(1),
InitialBlockNumber: 5,
FirstRollingHashUpdateNumber: 8,
}, {
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(9)},
InitialBlockTimestamp: 7,
InitialStateRootHash: internal.Uint64To32Bytes(4),
InitialBlockNumber: 6,
InitialRollingHashMsgNumber: 9,
FinalStateRootHash: internal.Uint64To32Bytes(10),
FinalBlockNumber: 11,
FinalBlockTimestamp: 12,
FinalRollingHashUpdate: internal.Uint64To32Bytes(13),
FinalRollingHashMsgNumber: 14,
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(9)},
InitialBlockTimestamp: 7,
InitialStateRootHash: internal.Uint64To32Bytes(4),
InitialBlockNumber: 6,
FirstRollingHashUpdateNumber: 9,
FinalStateRootHash: internal.Uint64To32Bytes(10),
FinalBlockNumber: 11,
FinalBlockTimestamp: 12,
LastRollingHashUpdate: internal.Uint64To32Bytes(13),
LastRollingHashUpdateNumber: 14,
}, {
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(15)},
InitialBlockTimestamp: 13,
InitialBlockNumber: 12,
InitialStateRootHash: internal.Uint64To32Bytes(10),
InitialRollingHashMsgNumber: 15,
FinalStateRootHash: internal.Uint64To32Bytes(16),
FinalBlockNumber: 17,
FinalBlockTimestamp: 18,
FinalRollingHashUpdate: internal.Uint64To32Bytes(19),
FinalRollingHashMsgNumber: 20,
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(15)},
InitialBlockTimestamp: 13,
InitialBlockNumber: 12,
InitialStateRootHash: internal.Uint64To32Bytes(10),
FirstRollingHashUpdateNumber: 15,
FinalStateRootHash: internal.Uint64To32Bytes(16),
FinalBlockNumber: 17,
FinalBlockTimestamp: 18,
LastRollingHashUpdate: internal.Uint64To32Bytes(19),
LastRollingHashUpdateNumber: 20,
}, {
InitialBlockNumber: 18,
InitialStateRootHash: internal.Uint64To32Bytes(16),
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(21)},
InitialBlockTimestamp: 19,
InitialRollingHashMsgNumber: 21,
FinalStateRootHash: internal.Uint64To32Bytes(22),
FinalBlockNumber: 23,
FinalBlockTimestamp: 24,
FinalRollingHashUpdate: internal.Uint64To32Bytes(25),
FinalRollingHashMsgNumber: 26,
InitialBlockNumber: 18,
InitialStateRootHash: internal.Uint64To32Bytes(16),
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(21)},
InitialBlockTimestamp: 19,
FirstRollingHashUpdateNumber: 21,
FinalStateRootHash: internal.Uint64To32Bytes(22),
FinalBlockNumber: 23,
FinalBlockTimestamp: 24,
LastRollingHashUpdate: internal.Uint64To32Bytes(25),
LastRollingHashUpdateNumber: 26,
}}
blobReq0 := blobsubmission.Request{
@@ -216,9 +217,9 @@ func TestTwoTwoBatchBlobs(t *testing.T) {
LastFinalizedBlockNumber: 4,
FinalBlockNumber: uint(execReq[3].FinalBlockNumber),
LastFinalizedL1RollingHash: utils.FmtIntHex32Bytes(7),
L1RollingHash: utils.HexEncodeToString(execReq[3].FinalRollingHashUpdate[:]),
L1RollingHash: utils.HexEncodeToString(execReq[3].LastRollingHashUpdate[:]),
LastFinalizedL1RollingHashMessageNumber: 7,
L1RollingHashMessageNumber: uint(execReq[3].FinalRollingHashMsgNumber),
L1RollingHashMessageNumber: uint(execReq[3].LastRollingHashUpdateNumber),
L2MsgRootHashes: merkleRoots,
L2MsgMerkleTreeDepth: 5,
},

View File

@@ -33,16 +33,16 @@ func AssignSingleBlockBlob(t require.TestingT) pi_interconnection.Request {
assert.NoError(t, err)
execReq := public_input.Execution{
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(4)},
InitialBlockTimestamp: 7,
FinalStateRootHash: finalStateRootHash,
FinalBlockNumber: 9,
FinalBlockTimestamp: 10,
FinalRollingHashUpdate: internal.Uint64To32Bytes(11),
FinalRollingHashMsgNumber: 9,
InitialRollingHashMsgNumber: 9,
InitialBlockNumber: 6,
InitialStateRootHash: internal.Uint64To32Bytes(1),
L2MessageHashes: [][32]byte{internal.Uint64To32Bytes(4)},
InitialBlockTimestamp: 7,
FinalStateRootHash: finalStateRootHash,
FinalBlockNumber: 9,
FinalBlockTimestamp: 10,
LastRollingHashUpdate: internal.Uint64To32Bytes(11),
LastRollingHashUpdateNumber: 9,
FirstRollingHashUpdateNumber: 9,
InitialBlockNumber: 6,
InitialStateRootHash: internal.Uint64To32Bytes(1),
}
merkleRoots := aggregation.PackInMiniTrees(test_utils.BlocksToHex(execReq.L2MessageHashes))
@@ -59,9 +59,9 @@ func AssignSingleBlockBlob(t require.TestingT) pi_interconnection.Request {
LastFinalizedBlockNumber: 5,
FinalBlockNumber: uint(execReq.FinalBlockNumber),
LastFinalizedL1RollingHash: utils.FmtIntHex32Bytes(7),
L1RollingHash: utils.HexEncodeToString(execReq.FinalRollingHashUpdate[:]),
L1RollingHash: utils.HexEncodeToString(execReq.LastRollingHashUpdate[:]),
LastFinalizedL1RollingHashMessageNumber: 8,
L1RollingHashMessageNumber: uint(execReq.FinalRollingHashMsgNumber),
L1RollingHashMessageNumber: uint(execReq.LastRollingHashUpdateNumber),
L2MsgRootHashes: merkleRoots,
L2MsgMerkleTreeDepth: 5,
},

View File

@@ -0,0 +1,137 @@
environment = "sepolia"
version = "4.0.0" # TODO @gbotrel hunt all version definitions.
assets_dir = "./prover-assets"
log_level = 4 # TODO @gbotrel will be refactored with new logger.
[controller]
retry_delays = [0, 1]
[execution]
prover_mode = "full"
conflated_traces_dir = "/home/ubuntu/sepolia-testing-full/traces/conflated"
requests_root_dir = "/home/ubuntu/sepolia-testing-full/prover-execution"
[blob_decompression]
prover_mode = "full"
requests_root_dir = "/home/ubuntu/sepolia-testing-full/prover-compression"
dict_path = "lib/compressor/compressor_dict.bin"
[aggregation]
prover_mode = "full"
requests_root_dir = "/home/ubuntu/sepolia-testing-full/prover-aggregation"
num_proofs = [10, 20, 50, 100, 200, 400]
allowed_inputs = ["execution-dummy", "execution", "execution-large", "blob-decompression-dummy", "blob-decompression-v0", "blob-decompression-v1"]
verifier_id = 1
[public_input_interconnection]
max_nb_decompression = 400
max_nb_execution = 400
max_nb_circuits = 400
execution_max_nb_msg = 16
l2_msg_merkle_depth = 5
l2_msg_max_nb_merkle = 200
[layer2]
chain_id = 59141
message_service_contract = "0x971e727e956690b9957be6d51Ec16E73AcAC83A7"
[traces_limits]
ADD = 524288
BIN = 262144
BLAKE_MODEXP_DATA = 16384
BLOCK_DATA = 1024
BLOCK_HASH = 512
EC_DATA = 262144
EUC = 65536
EXP = 8192
EXT = 1048576
GAS = 65536
HUB = 2097152
LOG_DATA = 65536
LOG_INFO = 4096
MMIO = 4194304
MMU = 4194304
MOD = 131072
MUL = 65536
MXP = 524288
OOB = 262144
RLP_ADDR = 4096
RLP_TXN = 131072
RLP_TXN_RCPT = 65536
ROM = 4194304
ROM_LEX = 1024
SHAKIRA_DATA = 32768
SHF = 65536
STP = 16384
TRM = 32768
TXN_DATA = 8192
WCP = 262144
PRECOMPILE_ECRECOVER_EFFECTIVE_CALLS = 128
PRECOMPILE_SHA2_BLOCKS = 671
PRECOMPILE_RIPEMD_BLOCKS = 671
PRECOMPILE_MODEXP_EFFECTIVE_CALLS = 4
PRECOMPILE_ECADD_EFFECTIVE_CALLS = 16384
PRECOMPILE_ECMUL_EFFECTIVE_CALLS = 32
PRECOMPILE_ECPAIRING_FINAL_EXPONENTIATIONS = 16
PRECOMPILE_ECPAIRING_MILLER_LOOPS = 64
PRECOMPILE_ECPAIRING_G2_MEMBERSHIP_CALLS = 64
PRECOMPILE_BLAKE_EFFECTIVE_CALLS = 600
PRECOMPILE_BLAKE_ROUNDS = 600
BLOCK_KECCAK = 8192
BLOCK_L1_SIZE = 1000000
BLOCK_L2_L1_LOGS = 16
BLOCK_TRANSACTIONS = 200
BIN_REFERENCE_TABLE = 262144
SHF_REFERENCE_TABLE = 4096
INSTRUCTION_DECODER = 512
[traces_limits_large]
ADD = 1048576
BIN = 524288
BLAKE_MODEXP_DATA = 32768
BLOCK_DATA = 2048
BLOCK_HASH = 1024
EC_DATA = 524288
EUC = 131072
EXP = 16384
EXT = 2097152
GAS = 131072
HUB = 4194304
LOG_DATA = 131072
LOG_INFO = 8192
MMIO = 8388608
MMU = 8388608
MOD = 262144
MUL = 131072
MXP = 1048576
OOB = 524288
RLP_ADDR = 8192
RLP_TXN = 262144
RLP_TXN_RCPT = 131072
ROM = 8388608
ROM_LEX = 2048
SHAKIRA_DATA = 65536
SHF = 131072
STP = 32768
TRM = 65536
TXN_DATA = 16384
WCP = 524288
PRECOMPILE_ECRECOVER_EFFECTIVE_CALLS = 256
PRECOMPILE_SHA2_BLOCKS = 671
PRECOMPILE_RIPEMD_BLOCKS = 671
PRECOMPILE_MODEXP_EFFECTIVE_CALLS = 8
PRECOMPILE_ECADD_EFFECTIVE_CALLS = 32768
PRECOMPILE_ECMUL_EFFECTIVE_CALLS = 64
PRECOMPILE_ECPAIRING_FINAL_EXPONENTIATIONS = 32
PRECOMPILE_ECPAIRING_MILLER_LOOPS = 128
PRECOMPILE_ECPAIRING_G2_MEMBERSHIP_CALLS = 128
PRECOMPILE_BLAKE_EFFECTIVE_CALLS = 600
PRECOMPILE_BLAKE_ROUNDS = 600
BLOCK_KECCAK = 8192
BLOCK_L1_SIZE = 1000000
BLOCK_L2_L1_LOGS = 16
BLOCK_TRANSACTIONS = 200
BIN_REFERENCE_TABLE = 262144
SHF_REFERENCE_TABLE = 4096
INSTRUCTION_DECODER = 512
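For reference, this new sepolia-full profile is consumed like the other configurations; a hedged usage sketch (assuming the prover's config package path; NewConfigFromFile and the fields used appear elsewhere in this commit):

```go
package main

import (
	"log"

	"github.com/consensys/linea-monorepo/prover/config"
)

func main() {
	// load the full-aggregation Sepolia profile, as the Makefile's setup
	// target does via --config
	cfg, err := config.NewConfigFromFile("./config/config-sepolia-full.toml")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("environment=%s dict=%s", cfg.Environment, cfg.BlobDecompression.DictPath)
}
```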

View File

@@ -3,6 +3,7 @@ package config
import (
"os"
"regexp"
"strings"
"testing"
"github.com/spf13/viper"
@@ -43,8 +44,15 @@ func TestEnvironment(t *testing.T) {
config, err := NewConfigFromFile(file.Name())
assert.NoError(err, "when processing %s", file.Name())
// take the first word of both the match and the environment
// sepolia-full -> sepolia
var (
matchFirst = strings.Split(matches[1], "-")[0]
envFirst = strings.Split(config.Environment, "-")[0]
)
// check that the environment is set
assert.Equal(matches[1], config.Environment)
assert.Equal(matchFirst, envFirst)
})
}

View File

@@ -4,8 +4,6 @@ import (
"bytes"
"errors"
"os"
"path/filepath"
"strings"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
@@ -25,27 +23,7 @@ func GetVersion(blob []byte) uint16 {
return 0
}
// GetRepoRootPath assumes that the current working directory is within the repo
func GetRepoRootPath() (string, error) {
wd, err := os.Getwd()
if err != nil {
return "", err
}
const repoName = "linea-monorepo"
i := strings.LastIndex(wd, repoName)
if i == -1 {
return "", errors.New("could not find repo root")
}
i += len(repoName)
return wd[:i], nil
}
func GetDict() ([]byte, error) {
repoRoot, err := GetRepoRootPath()
if err != nil {
return nil, err
}
dictPath := filepath.Join(repoRoot, "prover/lib/compressor/compressor_dict.bin")
func GetDict(dictPath string) ([]byte, error) {
return os.ReadFile(dictPath)
}
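Callers now supply the dictionary location explicitly, typically from configuration; a hedged sketch of the new call shape (the wrapper function is illustrative, not part of the commit):

```go
package sketch

import (
	"fmt"

	"github.com/consensys/linea-monorepo/prover/config"
	"github.com/consensys/linea-monorepo/prover/lib/compressor/blob"
)

// loadDict shows the migrated call: blob.GetDict no longer locates the
// dictionary from the repository root but reads whatever path it is given,
// here the configured one.
func loadDict(cfg *config.Config) ([]byte, error) {
	dict, err := blob.GetDict(cfg.BlobDecompression.DictPath)
	if err != nil {
		return nil, fmt.Errorf("loading compressor dictionary %q: %w", cfg.BlobDecompression.DictPath, err)
	}
	return dict, nil
}
```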

View File

@@ -9,15 +9,15 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
"math/big"
"math/rand"
"os"
"path/filepath"
"testing"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
v1 "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1/test_utils"
@@ -484,7 +484,7 @@ func BenchmarkWrite(b *testing.B) {
var testBlocks [][]byte
func init() {
rootPath, err := blob.GetRepoRootPath()
rootPath, err := test_utils.GetRepoRootPath()
if err != nil {
panic(err)
}

View File

@@ -5,6 +5,7 @@ import (
"crypto/rand"
"encoding/binary"
"encoding/json"
"errors"
"os"
"path/filepath"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/consensys/compress/lzss"
fr381 "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/consensys/linea-monorepo/prover/backend/execution"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
v1 "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
"github.com/stretchr/testify/assert"
@@ -84,7 +84,7 @@ func RandIntn(n int) int { // TODO @Tabaie remove
func EmptyBlob(t require.TestingT) []byte {
var headerB bytes.Buffer
repoRoot, err := blob.GetRepoRootPath()
repoRoot, err := GetRepoRootPath()
assert.NoError(t, err)
// Init bm
bm, err := v1.NewBlobMaker(1000, filepath.Join(repoRoot, "prover/lib/compressor/compressor_dict.bin"))
@@ -151,7 +151,7 @@ func ConsecutiveBlobs(t require.TestingT, n ...int) [][]byte {
}
func TestBlocksAndBlobMaker(t require.TestingT) ([][]byte, *v1.BlobMaker) {
repoRoot, err := blob.GetRepoRootPath()
repoRoot, err := GetRepoRootPath()
assert.NoError(t, err)
testBlocks, err := LoadTestBlocks(filepath.Join(repoRoot, "testdata/prover-v2/prover-execution/requests"))
assert.NoError(t, err)
@@ -162,7 +162,31 @@ func TestBlocksAndBlobMaker(t require.TestingT) ([][]byte, *v1.BlobMaker) {
}
func GetDict(t require.TestingT) []byte {
dict, err := blob.GetDict()
dict, err := getDictForTest()
require.NoError(t, err)
return dict
}
// GetRepoRootPath assumes that the current working directory is within the repo
func GetRepoRootPath() (string, error) {
wd, err := os.Getwd()
if err != nil {
return "", err
}
const repoName = "linea-monorepo"
i := strings.LastIndex(wd, repoName)
if i == -1 {
return "", errors.New("could not find repo root")
}
i += len(repoName)
return wd[:i], nil
}
func getDictForTest() ([]byte, error) {
repoRoot, err := GetRepoRootPath()
if err != nil {
return nil, err
}
dictPath := filepath.Join(repoRoot, "prover/lib/compressor/compressor_dict.bin")
return os.ReadFile(dictPath)
}

View File

@@ -179,39 +179,42 @@ func (c *WizardVerifierCircuit) generateAllRandomCoins(_ frontend.API) {
toBeConsumed := c.Spec.Coins.AllKeysAt(currRound - 1)
c.Coins.Exists(toBeConsumed...)
// Make sure that all messages have been written and use them
// to update the FS state. Note that we do not need to update
// FS using the last round of the prover because he is always
// the last one to "talk" in the protocol.
toUpdateFS := c.Spec.Columns.AllKeysProofAt(currRound - 1)
for _, msg := range toUpdateFS {
if !c.Spec.DummyCompiled {
msgID := c.columnsIDs.MustGet(msg)
msgContent := c.Columns[msgID]
// Make sure that all messages have been written and use them
// to update the FS state. Note that we do not need to update
// FS using the last round of the prover because he is always
// the last one to "talk" in the protocol.
toUpdateFS := c.Spec.Columns.AllKeysProofAt(currRound - 1)
for _, msg := range toUpdateFS {
logrus.Tracef("VERIFIER CIRCUIT : Updating the FS oracle with a message - %v", msg)
c.FS.UpdateVec(msgContent)
}
msgID := c.columnsIDs.MustGet(msg)
msgContent := c.Columns[msgID]
toUpdateFS = c.Spec.Columns.AllKeysPublicInputAt(currRound - 1)
for _, msg := range toUpdateFS {
logrus.Tracef("VERIFIER CIRCUIT : Updating the FS oracle with a message - %v", msg)
c.FS.UpdateVec(msgContent)
}
msgID := c.columnsIDs.MustGet(msg)
msgContent := c.Columns[msgID]
toUpdateFS = c.Spec.Columns.AllKeysPublicInputAt(currRound - 1)
for _, msg := range toUpdateFS {
logrus.Tracef("VERIFIER CIRCUIT : Updating the FS oracle with public input - %v", msg)
c.FS.UpdateVec(msgContent)
}
msgID := c.columnsIDs.MustGet(msg)
msgContent := c.Columns[msgID]
/*
Also include the prover's allegations for all evaluations
*/
queries := c.Spec.QueriesParams.AllKeysAt(currRound - 1)
for _, qName := range queries {
// Implicitly, this will panic whenever we start supporting
// a new type of query params
params := c.GetParams(qName)
params.UpdateFS(c.FS)
logrus.Tracef("VERIFIER CIRCUIT : Updating the FS oracle with public input - %v", msg)
c.FS.UpdateVec(msgContent)
}
/*
Also include the prover's allegations for all evaluations
*/
queries := c.Spec.QueriesParams.AllKeysAt(currRound - 1)
for _, qName := range queries {
// Implicitly, this will panic whenever we start supporting
// a new type of query params
params := c.GetParams(qName)
params.UpdateFS(c.FS)
}
}
}
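Net effect of the hunk above: the message lookup and the Fiat-Shamir update now happen inside each loop body, instead of once outside it. A simplified sketch of the corrected per-round flow, using the method names visible above (the DummyCompiled guard and tracing are elided):

```go
// absorb the previous round's proof messages, public-input messages, and
// query-parameter allegations into the Fiat-Shamir transcript, one update
// per message, each fetched within its own iteration
for _, msg := range c.Spec.Columns.AllKeysProofAt(currRound - 1) {
	msgContent := c.Columns[c.columnsIDs.MustGet(msg)]
	c.FS.UpdateVec(msgContent)
}
for _, msg := range c.Spec.Columns.AllKeysPublicInputAt(currRound - 1) {
	msgContent := c.Columns[c.columnsIDs.MustGet(msg)]
	c.FS.UpdateVec(msgContent)
}
for _, qName := range c.Spec.QueriesParams.AllKeysAt(currRound - 1) {
	c.GetParams(qName).UpdateFS(c.FS)
}
```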

View File

@@ -1,10 +1,11 @@
package public_input
import (
"golang.org/x/crypto/sha3"
"hash"
"slices"
"golang.org/x/crypto/sha3"
bn254fr "github.com/consensys/gnark-crypto/ecc/bn254/fr"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/std/math/emulated"
@@ -105,7 +106,6 @@ type AggregationFPI struct {
LastFinalizedRollingHashMsgNumber uint64
ChainID uint64 // for now we're forcing all executions to have the same chain ID
L2MessageServiceAddr types.EthAddress
NbL2Messages uint64 // TODO not used in hash. delete if not necessary
L2MsgMerkleTreeRoots [][32]byte
FinalBlockNumber uint64
FinalBlockTimestamp uint64

View File

@@ -11,20 +11,20 @@ import (
)
type Execution struct {
L2MessageServiceAddr types.EthAddress
ChainID uint64
InitialBlockTimestamp uint64
FinalStateRootHash [32]byte
FinalBlockNumber uint64
FinalBlockTimestamp uint64
FinalRollingHashUpdate [32]byte
FinalRollingHashMsgNumber uint64
InitialRollingHashUpdate [32]byte
InitialRollingHashMsgNumber uint64
DataChecksum [32]byte
L2MessageHashes [][32]byte
InitialStateRootHash [32]byte
InitialBlockNumber uint64
L2MessageServiceAddr types.EthAddress
ChainID uint64
InitialBlockTimestamp uint64
FinalStateRootHash [32]byte
FinalBlockNumber uint64
FinalBlockTimestamp uint64
LastRollingHashUpdate [32]byte
LastRollingHashUpdateNumber uint64
InitialRollingHashUpdate [32]byte
FirstRollingHashUpdateNumber uint64
DataChecksum [32]byte
L2MessageHashes [][32]byte
InitialStateRootHash [32]byte
InitialBlockNumber uint64
}
func (pi *Execution) Sum(hsh hash.Hash) []byte {
@@ -47,15 +47,15 @@ func (pi *Execution) Sum(hsh hash.Hash) []byte {
writeNum(hsh, pi.FinalBlockNumber)
writeNum(hsh, pi.FinalBlockTimestamp)
hsh.Write(pi.FinalRollingHashUpdate[:16])
hsh.Write(pi.FinalRollingHashUpdate[16:])
writeNum(hsh, pi.FinalRollingHashMsgNumber)
hsh.Write(pi.LastRollingHashUpdate[:16])
hsh.Write(pi.LastRollingHashUpdate[16:])
writeNum(hsh, pi.LastRollingHashUpdateNumber)
hsh.Write(pi.InitialStateRootHash[:])
writeNum(hsh, pi.InitialBlockNumber)
writeNum(hsh, pi.InitialBlockTimestamp)
hsh.Write(pi.InitialRollingHashUpdate[:16])
hsh.Write(pi.InitialRollingHashUpdate[16:])
writeNum(hsh, pi.InitialRollingHashMsgNumber)
writeNum(hsh, pi.FirstRollingHashUpdateNumber)
writeNum(hsh, pi.ChainID)
hsh.Write(pi.L2MessageServiceAddr[:])
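Note how Sum absorbs each 32-byte rolling hash as two 16-byte halves, so each half fits below the scalar-field modulus; this presumably matches CombineBytesIntoElements on the circuit side. A sketch of the split:

```go
package public_input

import "math/big"

// splitHiLo maps a 32-byte word to the two 128-bit integers that the
// public-input hash absorbs separately; 128 bits sit well below the
// ~254-bit scalar fields used here, so neither half overflows an element.
func splitHiLo(b [32]byte) (hi, lo *big.Int) {
	hi = new(big.Int).SetBytes(b[:16])
	lo = new(big.Int).SetBytes(b[16:])
	return hi, lo
}
```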

View File

@@ -28,11 +28,11 @@ type FunctionalInputExtractor struct {
// InitialStateRootHash and FinalStateRootHash are resp the initial and
// root hash of the state for the
InitialStateRootHash, FinalStateRootHash query.LocalOpening
InitialBlockNumber, FinalBlockNumber query.LocalOpening
InitialBlockTimestamp, FinalBlockTimestamp query.LocalOpening
InitialRollingHash, FinalRollingHash [2]query.LocalOpening
InitialRollingHashNumber, FinalRollingHashNumber query.LocalOpening
InitialStateRootHash, FinalStateRootHash query.LocalOpening
InitialBlockNumber, FinalBlockNumber query.LocalOpening
InitialBlockTimestamp, FinalBlockTimestamp query.LocalOpening
FirstRollingHashUpdate, LastRollingHashUpdate [2]query.LocalOpening
FirstRollingHashUpdateNumber, LastRollingHashUpdateNumber query.LocalOpening
ChainID query.LocalOpening
NBytesChainID query.LocalOpening
@@ -53,15 +53,15 @@ func (fie *FunctionalInputExtractor) Run(run *wizard.ProverRuntime) {
assignLO(fie.InitialStateRootHash)
assignLO(fie.InitialBlockNumber)
assignLO(fie.InitialBlockTimestamp)
assignLO(fie.InitialRollingHash[0])
assignLO(fie.InitialRollingHash[1])
assignLO(fie.InitialRollingHashNumber)
assignLO(fie.FirstRollingHashUpdate[0])
assignLO(fie.FirstRollingHashUpdate[1])
assignLO(fie.FirstRollingHashUpdateNumber)
assignLO(fie.FinalStateRootHash)
assignLO(fie.FinalBlockNumber)
assignLO(fie.FinalBlockTimestamp)
assignLO(fie.FinalRollingHash[0])
assignLO(fie.FinalRollingHash[1])
assignLO(fie.FinalRollingHashNumber)
assignLO(fie.LastRollingHashUpdate[0])
assignLO(fie.LastRollingHashUpdate[1])
assignLO(fie.LastRollingHashUpdateNumber)
assignLO(fie.ChainID)
assignLO(fie.NBytesChainID)
}

View File

@@ -284,22 +284,22 @@ func (pi *PublicInput) generateExtractor(comp *wizard.CompiledIOP) {
}
pi.Extractor = FunctionalInputExtractor{
DataNbBytes: createNewLocalOpening(pi.DataNbBytes),
DataChecksum: createNewLocalOpening(pi.ExecMiMCHasher.HashFinal),
L2MessageHash: createNewLocalOpening(pi.LogHasher.HashFinal),
InitialStateRootHash: createNewLocalOpening(pi.RootHashFetcher.First),
FinalStateRootHash: createNewLocalOpening(pi.RootHashFetcher.Last),
InitialBlockNumber: createNewLocalOpening(pi.TimestampFetcher.FirstBlockID),
FinalBlockNumber: createNewLocalOpening(pi.TimestampFetcher.LastBlockID),
InitialBlockTimestamp: createNewLocalOpening(pi.TimestampFetcher.First),
FinalBlockTimestamp: createNewLocalOpening(pi.TimestampFetcher.Last),
InitialRollingHash: initialRollingHash,
FinalRollingHash: finalRollingHash,
InitialRollingHashNumber: createNewLocalOpening(pi.RollingHashFetcher.FirstMessageNo),
FinalRollingHashNumber: createNewLocalOpening(pi.RollingHashFetcher.LastMessageNo),
ChainID: createNewLocalOpening(pi.ChainID),
NBytesChainID: createNewLocalOpening(pi.ChainIDNBytes),
L2MessageServiceAddrHi: accessors.NewFromPublicColumn(pi.Aux.logSelectors.L2BridgeAddressColHI, 0),
L2MessageServiceAddrLo: accessors.NewFromPublicColumn(pi.Aux.logSelectors.L2BridgeAddressColLo, 0),
DataNbBytes: createNewLocalOpening(pi.DataNbBytes),
DataChecksum: createNewLocalOpening(pi.ExecMiMCHasher.HashFinal),
L2MessageHash: createNewLocalOpening(pi.LogHasher.HashFinal),
InitialStateRootHash: createNewLocalOpening(pi.RootHashFetcher.First),
FinalStateRootHash: createNewLocalOpening(pi.RootHashFetcher.Last),
InitialBlockNumber: createNewLocalOpening(pi.TimestampFetcher.FirstBlockID),
FinalBlockNumber: createNewLocalOpening(pi.TimestampFetcher.LastBlockID),
InitialBlockTimestamp: createNewLocalOpening(pi.TimestampFetcher.First),
FinalBlockTimestamp: createNewLocalOpening(pi.TimestampFetcher.Last),
FirstRollingHashUpdate: initialRollingHash,
LastRollingHashUpdate: finalRollingHash,
FirstRollingHashUpdateNumber: createNewLocalOpening(pi.RollingHashFetcher.FirstMessageNo),
LastRollingHashUpdateNumber: createNewLocalOpening(pi.RollingHashFetcher.LastMessageNo),
ChainID: createNewLocalOpening(pi.ChainID),
NBytesChainID: createNewLocalOpening(pi.ChainIDNBytes),
L2MessageServiceAddrHi: accessors.NewFromPublicColumn(pi.Aux.logSelectors.L2BridgeAddressColHI, 0),
L2MessageServiceAddrLo: accessors.NewFromPublicColumn(pi.Aux.logSelectors.L2BridgeAddressColLo, 0),
}
}