Feat/multi dict prover (#553)

* refactor: disallow explicit dictionaries for anything other than v0 decompression setup

* refactor: pass around dictionary stores instead of individual dictionaries

* feat: add new dictionary

---------

Co-authored-by: Arya Tabaie <15056835+Tabaie@users.noreply.github.com>
Author: Arya Tabaie
Date: 2025-01-21 09:38:45 -06:00 (committed by GitHub)
Parent: 39679c7cfb
Commit: 543aedd23f

24 changed files with 111 additions and 189 deletions
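
In short: call sites that previously took a raw []byte dictionary now take a dictionary.Store, which can hold several dictionaries and serves them by checksum, so each blob can identify the dictionary it was compressed with. A minimal sketch of building such a store, assuming the import path shown in the diffs below; the second file path is purely hypothetical:

package main

import (
	"fmt"

	"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
)

func main() {
	// NewStore accepts any number of dictionary paths (see the config change
	// below, where dict_path becomes the list dict_paths); blobs are later
	// matched to a dictionary via the checksum in their header.
	store := dictionary.NewStore(
		"/opt/linea/prover/lib/compressor/compressor_dict.bin",
		"/opt/linea/prover/lib/compressor/compressor_dict_new.bin", // hypothetical
	)
	fmt.Printf("loaded dictionary store: %T\n", store)
}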

View File

@@ -13,7 +13,7 @@ requests_root_dir = "/data/prover/v3/execution"
[blob_decompression]
prover_mode = "dev"
requests_root_dir = "/data/prover/v3/compression"
dict_path = "/opt/linea/prover/lib/compressor/compressor_dict.bin"
dict_paths = ["/opt/linea/prover/lib/compressor/compressor_dict.bin"]
[aggregation]
prover_mode = "dev"

View File

@@ -105,11 +105,10 @@ func makePiProof(cfg *config.Config, cf *CollectedFields) (plonk.Proof, witness.
}
assignment, err := c.Assign(pi_interconnection.Request{
DictPath: cfg.BlobDecompression.DictPath,
Decompressions: cf.DecompressionPI,
Executions: cf.ExecutionPI,
Aggregation: cf.AggregationPublicInput(cfg),
})
}, cfg.BlobDecompressionDictStore(string(circuits.BlobDecompressionV1CircuitID))) // TODO @Tabaie: when there is a version 2, input the compressor version to use here
if err != nil {
return nil, nil, fmt.Errorf("could not assign the public input circuit: %w", err)
}

View File

@@ -4,8 +4,6 @@ import (
"bytes"
"encoding/base64"
"fmt"
"os"
blob_v0 "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v0"
blob_v1 "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
@@ -68,14 +66,9 @@ func Prove(cfg *config.Config, req *Request) (*Response, error) {
return nil, fmt.Errorf("unsupported blob version: %v", version)
}
dictPath := cfg.BlobDecompressionDictPath(string(circuitID))
logrus.Info("reading dictionaries")
logrus.Infof("reading the dictionary at %v", dictPath)
dict, err := os.ReadFile(dictPath)
if err != nil {
return nil, fmt.Errorf("error reading the dictionary: %w", err)
}
dictStore := cfg.BlobDecompressionDictStore(string(circuitID))
// This computes the assignment
@@ -88,7 +81,7 @@ func Prove(cfg *config.Config, req *Request) (*Response, error) {
assignment, pubInput, _snarkHash, err := blobdecompression.Assign(
utils.RightPad(blobBytes, expectedMaxUsableBytes),
dict,
dictStore,
req.Eip4844Enabled,
xBytes,
y,

View File

@@ -2,6 +2,7 @@ package blobdecompression
import (
"errors"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
fr381 "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
@@ -19,12 +20,12 @@ func Compile(dictionaryNbBytes int) constraint.ConstraintSystem {
// Assign the circuit with concrete data. Returns the assigned circuit and the
// public input computed during the assignment.
func Assign(blobData []byte, dict []byte, eip4844Enabled bool, x [32]byte, y fr381.Element) (circuit frontend.Circuit, publicInput fr.Element, snarkHash []byte, err error) {
func Assign(blobData []byte, dictStore dictionary.Store, eip4844Enabled bool, x [32]byte, y fr381.Element) (circuit frontend.Circuit, publicInput fr.Element, snarkHash []byte, err error) {
switch blob.GetVersion(blobData) {
case 1:
return v1.Assign(blobData, dict, eip4844Enabled, x, y)
return v1.Assign(blobData, dictStore, eip4844Enabled, x, y)
case 0:
return v0.Assign(blobData, dict, eip4844Enabled, x, y)
return v0.Assign(blobData, dictStore, eip4844Enabled, x, y)
}
err = errors.New("decompression circuit assignment : unsupported blob version")
return
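
Callers that still hold a single raw dictionary wrap it in a singleton store before calling Assign, as the updated tests do. A hedged sketch of the new calling convention; the wrapper function and file path are illustrative, not part of the commit:

import (
	"os"

	fr381 "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
	"github.com/consensys/gnark/frontend"
	"github.com/consensys/linea-monorepo/prover/circuits/blobdecompression"
	"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
)

// assignWithSingleDict is hypothetical and only demonstrates the new signature.
func assignWithSingleDict(blobBytes []byte, x [32]byte, y fr381.Element) (frontend.Circuit, error) {
	dict, err := os.ReadFile("compressor_dict.bin") // illustrative path
	if err != nil {
		return nil, err
	}
	store, err := dictionary.SingletonStore(dict, 0) // 0 = v0 blob format, as in the tests
	if err != nil {
		return nil, err
	}
	circ, _, _, err := blobdecompression.Assign(blobBytes, store, true /* eip4844Enabled */, x, y)
	return circ, err
}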

View File

@@ -23,7 +23,11 @@ import (
)
func TestBlobV0(t *testing.T) {
resp, blobBytes, dict := mustGetTestCompressedData(t)
dict := lzss.AugmentDict(test_utils.GetDict(t))
dictStore, err := dictionary.SingletonStore(dict, 0)
assert.NoError(t, err)
resp, blobBytes := mustGetTestCompressedData(t, dictStore)
circ := v0.Allocate(dict)
logrus.Infof("Building the constraint system")
@@ -46,7 +50,7 @@ func TestBlobV0(t *testing.T) {
givenSnarkHash, err := utils.HexDecodeString(resp.SnarkHash)
assert.NoError(t, err)
a, _, snarkHash, err := blobdecompression.Assign(blobBytes, dict, true, x, y)
a, _, snarkHash, err := blobdecompression.Assign(blobBytes, dictStore, true, x, y)
assert.NoError(t, err)
_, ok := a.(*v0.Circuit)
assert.True(t, ok)
@@ -64,9 +68,7 @@ func TestBlobV0(t *testing.T) {
// mustGetTestCompressedData is a test utility function that we use to get
// actual compressed data from the sample-blob.json fixture.
func mustGetTestCompressedData(t *testing.T) (resp blobsubmission.Response, blobBytes []byte, dict []byte) {
dict = lzss.AugmentDict(test_utils.GetDict(t))
func mustGetTestCompressedData(t *testing.T, dictStore dictionary.Store) (resp blobsubmission.Response, blobBytes []byte) {
respJson, err := os.ReadFile("sample-blob.json")
assert.NoError(t, err)
@@ -75,8 +77,6 @@ func mustGetTestCompressedData(t *testing.T) (resp blobsubmission.Response, blob
blobBytes, err = base64.StdEncoding.DecodeString(resp.CompressedData)
assert.NoError(t, err)
dictStore, err := dictionary.SingletonStore(dict, 0)
assert.NoError(t, err)
_, _, _, err = blob.DecompressBlob(blobBytes, dictStore)
assert.NoError(t, err)

View File

@@ -46,7 +46,7 @@ func MakeCS(dict []byte) constraint.ConstraintSystem {
// Assign the circuit with concrete data. Returns the assigned circuit and the
// public input computed during the assignment.
// @alexandre.belling should we instead compute snarkHash independently here? Seems like it doesn't need to be included in the req received by Prove
func Assign(blobData, dict []byte, eip4844Enabled bool, x [32]byte, y fr381.Element) (assignment frontend.Circuit, publicInput fr.Element, snarkHash []byte, err error) {
func Assign(blobData []byte, dictStore dictionary.Store, eip4844Enabled bool, x [32]byte, y fr381.Element) (assignment frontend.Circuit, publicInput fr.Element, snarkHash []byte, err error) {
const maxCLen = blob.MaxUsableBytes
const maxDLen = blob.MaxUncompressedBytes
@@ -56,11 +56,6 @@ func Assign(blobData, dict []byte, eip4844Enabled bool, x [32]byte, y fr381.Elem
return
}
dictStore, err := dictionary.SingletonStore(dict, 0)
if err != nil {
err = fmt.Errorf("failed to create dictionary store %w", err)
return
}
header, uncompressedData, _, err := blob.DecompressBlob(blobData, dictStore)
if err != nil {
err = fmt.Errorf("decompression circuit assignment : could not decompress the data : %w", err)

View File

@@ -30,7 +30,7 @@ func prepare(t require.TestingT, blobBytes []byte) (c *v1.Circuit, a frontend.Ci
dictStore, err := dictionary.SingletonStore(blobtestutils.GetDict(t), 1)
assert.NoError(t, err)
_, payload, _, err := blobcompressorv1.DecompressBlob(blobBytes, dictStore)
_, payload, _, dict, err := blobcompressorv1.DecompressBlob(blobBytes, dictStore)
assert.NoError(t, err)
resp, err := blobsubmission.CraftResponse(&blobsubmission.Request{
@@ -50,8 +50,7 @@ func prepare(t require.TestingT, blobBytes []byte) (c *v1.Circuit, a frontend.Ci
y.SetBytes(b)
blobBytes = append(blobBytes, make([]byte, blobcompressorv1.MaxUsableBytes-len(blobBytes))...)
dict := blobtestutils.GetDict(t)
a, _, snarkHash, err := blobdecompression.Assign(blobBytes, dict, true, x, y)
a, _, snarkHash, err := blobdecompression.Assign(blobBytes, dictStore, true, x, y)
assert.NoError(t, err)
_, ok := a.(*v1.Circuit)

View File

@@ -247,18 +247,13 @@ func Compile(dictionaryLength int) constraint.ConstraintSystem {
}
}
func AssignFPI(blobBytes, dict []byte, eip4844Enabled bool, x [32]byte, y fr381.Element) (fpi FunctionalPublicInput, err error) {
func AssignFPI(blobBytes []byte, dictStore dictionary.Store, eip4844Enabled bool, x [32]byte, y fr381.Element) (fpi FunctionalPublicInput, dict []byte, err error) {
if len(blobBytes) != blob.MaxUsableBytes {
err = fmt.Errorf("decompression circuit assignment : invalid blob length : %d. expected %d", len(blobBytes), blob.MaxUsableBytes)
return
}
dictStore, err := dictionary.SingletonStore(dict, 1)
if err != nil {
err = fmt.Errorf("failed to create dictionary store %w", err)
return
}
header, payload, _, err := blob.DecompressBlob(blobBytes, dictStore)
header, payload, _, dict, err := blob.DecompressBlob(blobBytes, dictStore)
if err != nil {
return
}
@@ -294,9 +289,9 @@ func AssignFPI(blobBytes, dict []byte, eip4844Enabled bool, x [32]byte, y fr381.
return
}
func Assign(blobBytes, dict []byte, eip4844Enabled bool, x [32]byte, y fr381.Element) (assignment frontend.Circuit, publicInput fr377.Element, snarkHash []byte, err error) {
func Assign(blobBytes []byte, dictStore dictionary.Store, eip4844Enabled bool, x [32]byte, y fr381.Element) (assignment frontend.Circuit, publicInput fr377.Element, snarkHash []byte, err error) {
fpi, err := AssignFPI(blobBytes, dict, eip4844Enabled, x, y)
fpi, dict, err := AssignFPI(blobBytes, dictStore, eip4844Enabled, x, y)
if err != nil {
return
}

View File

@@ -55,7 +55,7 @@ func TestParseHeader(t *testing.T) {
for _, blobData := range blobs {
header, _, blocks, err := blob.DecompressBlob(blobData, dictStore)
header, _, blocks, _, err := blob.DecompressBlob(blobData, dictStore)
assert.NoError(t, err)
assert.LessOrEqual(t, len(blocks), MaxNbBatches, "too many batches")
@@ -347,7 +347,7 @@ func TestDictHash(t *testing.T) {
dict := blobtestutils.GetDict(t)
dictStore, err := dictionary.SingletonStore(blobtestutils.GetDict(t), 1)
assert.NoError(t, err)
header, _, _, err := blob.DecompressBlob(blobBytes, dictStore) // a bit roundabout, but the header field is not public
header, _, _, _, err := blob.DecompressBlob(blobBytes, dictStore) // a bit roundabout, but the header field is not public
assert.NoError(t, err)
circuit := testDataDictHashCircuit{

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/base64"
"fmt"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"hash"
"github.com/consensys/linea-monorepo/prover/crypto/mimc"
@@ -13,7 +14,6 @@ import (
decompression "github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v1"
"github.com/consensys/linea-monorepo/prover/circuits/internal"
"github.com/consensys/linea-monorepo/prover/circuits/pi-interconnection/keccak"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob"
public_input "github.com/consensys/linea-monorepo/prover/public-input"
"github.com/consensys/linea-monorepo/prover/utils"
"github.com/sirupsen/logrus"
@@ -24,12 +24,9 @@ type Request struct {
Decompressions []blobsubmission.Response
Executions []public_input.Execution
Aggregation public_input.Aggregation
// Path to the compression dictionary. Used to extract the execution data
// for each execution.
DictPath string
}
func (c *Compiled) Assign(r Request) (a Circuit, err error) {
func (c *Compiled) Assign(r Request, dictStore dictionary.Store) (a Circuit, err error) {
internal.RegisterHints()
keccak.RegisterHints()
utils.RegisterHints()
@@ -56,13 +53,6 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
return
}
// @alex: We should pass that as a parameter. And also (@arya) pass a list
// of dictionnary because this function.
dict, err := blob.GetDict(r.DictPath)
if err != nil {
return Circuit{}, fmt.Errorf("could not find the dictionnary: path=%v err=%v", r.DictPath, err)
}
// For Shnarfs and Merkle Roots
hshK := c.Keccak.GetHasher()
@@ -111,7 +101,7 @@ func (c *Compiled) Assign(r Request) (a Circuit, err error) {
fpi decompression.FunctionalPublicInput
sfpi decompression.FunctionalPublicInputSnark
)
if fpi, err = decompression.AssignFPI(blobData[:], dict, p.Eip4844Enabled, x, y); err != nil {
if fpi, _, err = decompression.AssignFPI(blobData[:], dictStore, p.Eip4844Enabled, x, y); err != nil {
return
}
execDataChecksums = append(execDataChecksums, fpi.BatchSums...) // len(execDataChecksums) = index of the first execution associated with the next blob
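
For pi-interconnection, the Request struct loses its DictPath field and Assign takes the store as a second argument. A sketch matching the test usage further below; the helper itself is hypothetical:

// assignPI demonstrates the new two-argument Assign; tests build a singleton
// store, while production code resolves one from the config (see makePiProof).
func assignPI(c *pi_interconnection.Compiled, req pi_interconnection.Request, dict []byte) (pi_interconnection.Circuit, error) {
	store, err := dictionary.SingletonStore(dict, 1) // 1 = v1 blob format
	if err != nil {
		return pi_interconnection.Circuit{}, err
	}
	return c.Assign(req, store)
}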

View File

@@ -1,57 +0,0 @@
package main
import (
"fmt"
"time"
"github.com/consensys/linea-monorepo/prover/utils/test_utils"
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/backend/plonk"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/frontend/cs/scs"
"github.com/consensys/gnark/test/unsafekzg"
pi_interconnection "github.com/consensys/linea-monorepo/prover/circuits/pi-interconnection"
pitesting "github.com/consensys/linea-monorepo/prover/circuits/pi-interconnection/test_utils"
"github.com/consensys/linea-monorepo/prover/config"
"github.com/consensys/linea-monorepo/prover/protocol/compiler/dummy"
"github.com/stretchr/testify/assert"
)
func main() {
var b test_utils.FakeTestingT
req := pitesting.AssignSingleBlockBlob(b)
c, err := pi_interconnection.Compile(config.PublicInput{
MaxNbDecompression: 400,
MaxNbExecution: 400,
ExecutionMaxNbMsg: 16,
L2MsgMerkleDepth: 5,
L2MsgMaxNbMerkle: 10,
}, dummy.Compile) // note that the solving/proving time will not reflect the wizard proof or verification
assert.NoError(b, err)
a, err := c.Assign(req)
assert.NoError(b, err)
c.Circuit.UseGkrMimc = true
cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, c.Circuit, frontend.WithCapacity(40_000_000))
assert.NoError(b, err)
kzgc, kzgl, err := unsafekzg.NewSRS(cs)
assert.NoError(b, err)
pk, _, err := plonk.Setup(cs, kzgc, kzgl)
assert.NoError(b, err)
secondsStart := time.Now().Unix()
w, err := frontend.NewWitness(&a, ecc.BLS12_377.ScalarField())
assert.NoError(b, err)
_, err = plonk.Prove(cs, pk, w)
assert.NoError(b, err)
fmt.Println(time.Now().Unix()-secondsStart, "seconds")
}

View File

@@ -5,6 +5,7 @@ package pi_interconnection_test
import (
"encoding/base64"
"fmt"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"slices"
"testing"
@@ -47,7 +48,10 @@ func TestSingleBlockBlobE2E(t *testing.T) {
compiled, err := pi_interconnection.Compile(cfg, dummy.Compile)
assert.NoError(t, err)
a, err := compiled.Assign(req)
dictStore, err := dictionary.SingletonStore(blobtesting.GetDict(t), 1)
assert.NoError(t, err)
a, err := compiled.Assign(req, dictStore)
assert.NoError(t, err)
cs, err := frontend.Compile(ecc.BLS12_377.ScalarField(), scs.NewBuilder, compiled.Circuit, frontend.WithCapacity(3_000_000))
@@ -112,7 +116,6 @@ func TestTinyTwoBatchBlob(t *testing.T) {
req := pi_interconnection.Request{
Decompressions: []blobsubmission.Response{*blobResp},
Executions: execReq,
DictPath: "../../lib/compressor/compressor_dict.bin",
Aggregation: public_input.Aggregation{
FinalShnarf: blobResp.ExpectedShnarf,
ParentAggregationFinalShnarf: blobReq.PrevShnarf,
@@ -209,7 +212,6 @@ func TestTwoTwoBatchBlobs(t *testing.T) {
req := pi_interconnection.Request{
Decompressions: []blobsubmission.Response{*blobResp0, *blobResp1},
Executions: execReq,
DictPath: "../../lib/compressor/compressor_dict.bin",
Aggregation: public_input.Aggregation{
FinalShnarf: blobResp1.ExpectedShnarf,
ParentAggregationFinalShnarf: blobReq0.PrevShnarf,
@@ -255,6 +257,9 @@ func testPI(t *testing.T, req pi_interconnection.Request, options ...testPIOptio
slackIterationNum := len(cfg.slack) * len(cfg.slack)
slackIterationNum *= slackIterationNum
dictStore, err := dictionary.SingletonStore(blobtesting.GetDict(t), 1)
assert.NoError(t, err)
var slack [4]int
for i := 0; i < slackIterationNum; i++ {
@@ -277,7 +282,7 @@ func testPI(t *testing.T, req pi_interconnection.Request, options ...testPIOptio
compiled, err := pi_interconnection.Compile(cfg, dummy.Compile)
assert.NoError(t, err)
a, err := compiled.Assign(req)
a, err := compiled.Assign(req, dictStore)
assert.NoError(t, err)
assert.NoError(t, test.IsSolved(compiled.Circuit, &a, ecc.BLS12_377.ScalarField()))

View File

@@ -48,7 +48,6 @@ func AssignSingleBlockBlob(t require.TestingT) pi_interconnection.Request {
merkleRoots := aggregation.PackInMiniTrees(test_utils.BlocksToHex(execReq.L2MessageHashes))
return pi_interconnection.Request{
DictPath: "../../lib/compressor/compressor_dict.bin",
Decompressions: []blobsubmission.Response{*blobResp},
Executions: []public_input.Execution{execReq},
Aggregation: public_input.Aggregation{

View File

@@ -3,6 +3,7 @@ package cmd
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
@@ -32,7 +33,8 @@ import (
type SetupArgs struct {
Force bool
Circuits string
DictPath string
DictPath string // to be deprecated; only used for compiling the v0 blob decompression circuit
DictSize int
AssetsDir string
ConfigFile string
}
@@ -66,7 +68,7 @@ func Setup(context context.Context, args SetupArgs) error {
// parse inCircuits
inCircuits := make(map[circuits.CircuitID]bool)
for _, c := range AllCircuits {
inCircuits[circuits.CircuitID(c)] = false
inCircuits[c] = false
}
_inCircuits := strings.Split(args.Circuits, ",")
for _, c := range _inCircuits {
@@ -85,6 +87,8 @@ func Setup(context context.Context, args SetupArgs) error {
if err != nil {
return fmt.Errorf("%s failed to create SRS provider: %w", cmdName, err)
}
var foundDecompressionV0 bool // this is a temporary mechanism to make sure we phase out the practice
// of providing entire dictionaries for setup.
// for each circuit, we start by compiling the circuit
// then we do a sha sum and compare against the one in the manifest.json
@@ -98,7 +102,6 @@ func Setup(context context.Context, args SetupArgs) error {
logrus.Infof("setting up %s", c)
var builder circuits.Builder
var dict []byte
extraFlags := make(map[string]any)
// let's compile the circuit.
@@ -111,21 +114,21 @@ func Setup(context context.Context, args SetupArgs) error {
extraFlags["cfg_checksum"] = limits.Checksum()
zkEvm := zkevm.FullZkEvm(&limits)
builder = execution.NewBuilder(zkEvm)
case circuits.BlobDecompressionV0CircuitID, circuits.BlobDecompressionV1CircuitID:
dict, err = os.ReadFile(args.DictPath)
case circuits.BlobDecompressionV0CircuitID:
dict, err := os.ReadFile(args.DictPath)
if err != nil {
return fmt.Errorf("%s failed to read dictionary file: %w", cmdName, err)
}
foundDecompressionV0 = true
extraFlags["maxUsableBytes"] = blob_v0.MaxUsableBytes
extraFlags["maxUncompressedBytes"] = blob_v0.MaxUncompressedBytes
builder = v0.NewBuilder(dict)
case circuits.BlobDecompressionV1CircuitID:
extraFlags["maxUsableBytes"] = blob_v1.MaxUsableBytes
extraFlags["maxUncompressedBytes"] = blob_v1.MaxUncompressedBytes
builder = v1.NewBuilder(args.DictSize)
if c == circuits.BlobDecompressionV0CircuitID {
extraFlags["maxUsableBytes"] = blob_v0.MaxUsableBytes
extraFlags["maxUncompressedBytes"] = blob_v0.MaxUncompressedBytes
builder = v0.NewBuilder(dict)
} else if c == circuits.BlobDecompressionV1CircuitID {
extraFlags["maxUsableBytes"] = blob_v1.MaxUsableBytes
extraFlags["maxUncompressedBytes"] = blob_v1.MaxUncompressedBytes
builder = v1.NewBuilder(len(dict))
}
case circuits.PublicInputInterconnectionCircuitID:
builder = pi_interconnection.NewBuilder(cfg.PublicInputInterconnection)
case circuits.EmulationDummyCircuitID:
@@ -138,14 +141,10 @@ func Setup(context context.Context, args SetupArgs) error {
if err := updateSetup(context, cfg, args.Force, srsProvider, c, builder, extraFlags); err != nil {
return err
}
if dict != nil {
// we save the dictionary to disk
dictPath := cfg.BlobDecompressionDictPath(string(c))
if err := os.WriteFile(dictPath, dict, 0600); err != nil {
return fmt.Errorf("%s failed to write dictionary file: %w", cmdName, err)
}
}
}
if !foundDecompressionV0 && args.DictPath != "" {
return errors.New("explicit provision of a dictionary is only allowed for backwards compatibility with v0 blob decompression")
}
if !(inCircuits[circuits.AggregationCircuitID] || inCircuits[circuits.EmulationCircuitID]) {

View File

@@ -55,7 +55,8 @@ func init() {
rootCmd.AddCommand(setupCmd)
setupCmd.Flags().BoolVar(&setupArgs.Force, "force", false, "overwrites existing files")
setupCmd.Flags().StringVar(&setupArgs.Circuits, "circuits", strings.Join(allCircuitList(), ","), "comma separated list of circuits to setup")
setupCmd.Flags().StringVar(&setupArgs.DictPath, "dict", "", "path to the dictionary file used in blob (de)compression")
setupCmd.Flags().StringVar(&setupArgs.DictPath, "dict", "", "path to the dictionary file used in blob (de)compression (for v0 only)")
setupCmd.Flags().IntVar(&setupArgs.DictSize, "dict-size", 65536, "size in bytes of the dictionary used in blob (de)compression")
setupCmd.Flags().StringVar(&setupArgs.AssetsDir, "assets-dir", "", "path to the directory where the assets are stored (override conf)")
viper.BindPFlag("assets_dir", setupCmd.Flags().Lookup("assets-dir"))

View File

@@ -1,7 +1,7 @@
environment = "sepolia"
version = "4.0.0" # TODO @gbotrel hunt all version definitions.
assets_dir = "./prover-assets"
log_level = 4 # TODO @gbotrel will be refactored with new logger.
log_level = 4 # TODO @gbotrel will be refactored with new logger.
[controller]
retry_delays = [0, 1]
@@ -15,7 +15,7 @@ requests_root_dir = "/home/ubuntu/sepolia-testing-full/prover-execution"
[blob_decompression]
prover_mode = "full"
requests_root_dir = "/home/ubuntu/sepolia-testing-full/prover-compression"
dict_path = "lib/compressor/compressor_dict.bin"
dict_paths = ["lib/compressor/compressor_dict.bin"]
[aggregation]
prover_mode = "full"
@@ -134,4 +134,4 @@ BLOCK_L2_L1_LOGS = 16
BLOCK_TRANSACTIONS = 200
BIN_REFERENCE_TABLE = 262144
SHF_REFERENCE_TABLE = 4096
INSTRUCTION_DECODER = 512
INSTRUCTION_DECODER = 512

View File

@@ -2,6 +2,7 @@ package config
import (
"fmt"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"os"
"path"
"path/filepath"
@@ -218,13 +219,13 @@ type BlobDecompression struct {
// ProverMode stores the kind of prover to use.
ProverMode ProverMode `mapstructure:"prover_mode" validate:"required,oneof=dev full"`
// DictPath is an optional parameters allowing the user to specificy explicitly
// where to look for the compression dictionary. If the input is not provided
// DictPaths is an optional parameter allowing the user to specify explicitly
// where to look for the compression dictionaries. If the input is not provided
// then the dictionary will be fetched from <assets_dir>/<version>/<circuitID>/compression_dict.bin.
//
// We stress that the feature should not be used in production and should
// only be used in E2E testing context.
DictPath string `mapstructure:"dict_path"`
// only be used in E2E testing context. TODO @Tabaie @alexandre.belling revise this warning, seems to no longer apply
DictPaths []string `mapstructure:"dict_paths"`
}
type Aggregation struct {
@@ -280,15 +281,16 @@ type PublicInput struct {
}
// BlobDecompressionDictPath returns the filepath where to look for the blob
// decompression dictionary file. If provided in the config, the function returns
// in priority the provided [BlobDecompression.DictPath] or it returns a
// BlobDecompressionDictStore returns a decompression dictionary store
// loaded from paths specified in [BlobDecompression.DictPaths].
// If no such path is provided, it loads one from the
// prover assets path depending on the provided circuitID.
func (cfg *Config) BlobDecompressionDictPath(circuitID string) string {
func (cfg *Config) BlobDecompressionDictStore(circuitID string) dictionary.Store {
if len(cfg.BlobDecompression.DictPath) > 0 {
return cfg.BlobDecompression.DictPath
paths := cfg.BlobDecompression.DictPaths
if len(paths) == 0 {
paths = []string{filepath.Join(cfg.PathForSetup(circuitID), DefaultDictionaryFileName)}
}
return filepath.Join(cfg.PathForSetup(string(circuitID)), DefaultDictionaryFileName)
return dictionary.NewStore(paths...)
}
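
Resolution order is unchanged in spirit: explicit config paths win, otherwise the store is built from the conventional per-circuit asset location. An illustrative call, assuming the circuits package constant used elsewhere in this commit:

// loads every dictionary listed under dict_paths in the TOML config, or falls
// back to <assets_dir>/<version>/<circuitID>/compression_dict.bin
store := cfg.BlobDecompressionDictStore(string(circuits.BlobDecompressionV1CircuitID))
_ = store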

View File

@@ -23,7 +23,7 @@ func GetVersion(blob []byte) uint16 {
return 0
}
func GetDict(dictPath string) ([]byte, error) {
func LoadDict(dictPath string) ([]byte, error) {
return os.ReadFile(dictPath)
}
@@ -43,7 +43,7 @@ func DecompressBlob(blob []byte, dictStore dictionary.Store) ([]byte, error) {
_, _, blocks, err = v0.DecompressBlob(blob, dictStore)
blockDecoder = v0.DecodeBlockFromUncompressed
case 1:
_, _, blocks, err = v1.DecompressBlob(blob, dictStore)
_, _, blocks, _, err = v1.DecompressBlob(blob, dictStore)
blockDecoder = v1.DecodeBlockFromUncompressed
default:
return nil, errors.New("unrecognized blob version")

View File

@@ -115,7 +115,7 @@ func (bm *BlobMaker) Written() int {
func (bm *BlobMaker) Bytes() []byte {
if bm.currentBlobLength > 0 {
// sanity check that we can always decompress.
header, rawBlocks, _, err := DecompressBlob(bm.currentBlob[:bm.currentBlobLength], bm.dictStore)
header, rawBlocks, _, _, err := DecompressBlob(bm.currentBlob[:bm.currentBlobLength], bm.dictStore)
if err != nil {
var sbb strings.Builder
fmt.Fprintf(&sbb, "invalid blob: %v\n", err)
@@ -302,23 +302,22 @@ func (bm *BlobMaker) Equals(other *BlobMaker) bool {
}
// DecompressBlob decompresses a blob and returns the header and the blocks as they were compressed.
func DecompressBlob(b []byte, dictStore dictionary.Store) (blobHeader *Header, rawPayload []byte, blocks [][]byte, err error) {
func DecompressBlob(b []byte, dictStore dictionary.Store) (blobHeader *Header, rawPayload []byte, blocks [][]byte, dict []byte, err error) {
// UnpackAlign the blob
b, err = encode.UnpackAlign(b, fr381.Bits-1, false)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
// read the header
blobHeader = new(Header)
read, err := blobHeader.ReadFrom(bytes.NewReader(b))
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to read blob header: %w", err)
return nil, nil, nil, nil, fmt.Errorf("failed to read blob header: %w", err)
}
// retrieve dict
dict, err := dictStore.Get(blobHeader.DictChecksum[:], 1)
if err != nil {
return nil, nil, nil, err
if dict, err = dictStore.Get(blobHeader.DictChecksum[:], 1); err != nil {
return nil, nil, nil, nil, err
}
b = b[read:]
@@ -326,7 +325,7 @@ func DecompressBlob(b []byte, dictStore dictionary.Store) (blobHeader *Header, r
// decompress the data
rawPayload, err = lzss.Decompress(b, dict)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to decompress blob body: %w", err)
return nil, nil, nil, nil, fmt.Errorf("failed to decompress blob body: %w", err)
}
offset := 0
@@ -335,7 +334,7 @@ func DecompressBlob(b []byte, dictStore dictionary.Store) (blobHeader *Header, r
batchOffset := offset
for offset < batchOffset+batchLen {
if blockLen, err := ScanBlockByteLen(rawPayload[offset:]); err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
} else {
blocks = append(blocks, rawPayload[offset:offset+blockLen])
offset += blockLen
@@ -343,11 +342,11 @@ func DecompressBlob(b []byte, dictStore dictionary.Store) (blobHeader *Header, r
}
if offset != batchOffset+batchLen {
return nil, nil, nil, errors.New("incorrect batch length")
return nil, nil, nil, nil, errors.New("incorrect batch length")
}
}
return blobHeader, rawPayload, blocks, nil
return blobHeader, rawPayload, blocks, dict, nil
}
// WorstCompressedBlockSize returns the size of the given block, as compressed by an "empty" blob maker.
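
Because the store may hold several dictionaries, v1.DecompressBlob now also returns the dictionary it matched against the checksum in the blob header; AssignFPI forwards it into the circuit assignment. A hedged usage sketch (the wrapper is illustrative):

import (
	"fmt"

	"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
	v1 "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
)

// inspectBlob is hypothetical; it shows the new five-value return.
func inspectBlob(blobBytes []byte, store dictionary.Store) error {
	_, payload, blocks, dict, err := v1.DecompressBlob(blobBytes, store)
	if err != nil {
		return err
	}
	fmt.Printf("%d blocks, %d payload bytes, compressed with a %d-byte dictionary\n",
		len(blocks), len(payload), len(dict))
	return nil
}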

View File

@@ -7,6 +7,7 @@ import (
cRand "crypto/rand"
"encoding/binary"
"encoding/hex"
"github.com/consensys/linea-monorepo/prover/utils/test_utils"
"math/big"
"math/rand/v2"
"os"
@@ -17,7 +18,7 @@ import (
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
v1 "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1/test_utils"
v1Testing "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1/test_utils"
fr381 "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
@@ -55,7 +56,7 @@ func testCompressorSingleSmallBatch(t *testing.T, blocks [][]byte) {
dict, err := os.ReadFile(testDictPath)
assert.NoError(t, err)
dictStore, err := dictionary.SingletonStore(dict, 1)
_, _, blocksBack, err := v1.DecompressBlob(bm.Bytes(), dictStore)
_, _, blocksBack, _, err := v1.DecompressBlob(bm.Bytes(), dictStore)
assert.NoError(t, err)
assert.Equal(t, len(blocks), len(blocksBack), "number of blocks should match")
// TODO compare the blocks
@@ -484,7 +485,7 @@ func init() {
panic(err)
}
if testBlocks, err = test_utils.LoadTestBlocks(filepath.Join(rootPath, "testdata/prover-v2/prover-execution/requests")); err != nil {
if testBlocks, err = v1Testing.LoadTestBlocks(filepath.Join(rootPath, "testdata/prover-v2/prover-execution/requests")); err != nil {
panic(err)
}

View File

@@ -247,7 +247,7 @@ func decompressBlob(b []byte) ([][][]byte, error) {
if err != nil {
return nil, err
}
header, _, blocks, err := v1.DecompressBlob(b, dictStore)
header, _, blocks, _, err := v1.DecompressBlob(b, dictStore)
if err != nil {
return nil, fmt.Errorf("can't decompress blob: %w", err)
}

View File

@@ -5,7 +5,7 @@ import (
"crypto/rand"
"encoding/binary"
"encoding/json"
"errors"
"github.com/consensys/linea-monorepo/prover/utils/test_utils"
"os"
"path/filepath"
"strings"
@@ -125,7 +125,7 @@ func ConsecutiveBlobs(t require.TestingT, n ...int) [][]byte {
}
func TestBlocksAndBlobMaker(t require.TestingT) ([][]byte, *v1.BlobMaker) {
repoRoot, err := GetRepoRootPath()
repoRoot, err := test_utils.GetRepoRootPath()
assert.NoError(t, err)
testBlocks, err := LoadTestBlocks(filepath.Join(repoRoot, "testdata/prover-v2/prover-execution/requests"))
assert.NoError(t, err)
@@ -141,23 +141,8 @@ func GetDict(t require.TestingT) []byte {
return dict
}
// GetRepoRootPath assumes that current working directory is within the repo
func GetRepoRootPath() (string, error) {
wd, err := os.Getwd()
if err != nil {
return "", err
}
const repoName = "linea-monorepo"
i := strings.LastIndex(wd, repoName)
if i == -1 {
return "", errors.New("could not find repo root")
}
i += len(repoName)
return wd[:i], nil
}
func getDictForTest() ([]byte, error) {
repoRoot, err := GetRepoRootPath()
repoRoot, err := test_utils.GetRepoRootPath()
if err != nil {
return nil, err
}

Binary file not shown.

View File

@@ -6,6 +6,7 @@ import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"hash"
"io"
@@ -347,3 +348,18 @@ func spaceOutFromRight(s string) string {
}
return bb.String()
}
// GetRepoRootPath assumes that current working directory is within the repo
func GetRepoRootPath() (string, error) {
wd, err := os.Getwd()
if err != nil {
return "", err
}
const repoName = "linea-monorepo"
i := strings.LastIndex(wd, repoName)
if i == -1 {
return "", errors.New("could not find repo root")
}
i += len(repoName)
return wd[:i], nil
}