refactor: Response object for DecompressBlob (#738)

* refactor: response object for DecompressBlob
Arya Tabaie
2025-03-06 09:32:13 -06:00
committed by GitHub
parent 3446e364ae
commit 9cd4ace5ba
13 changed files with 71 additions and 58 deletions
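
In short: v1.DecompressBlob used to return a five-value tuple (header, raw payload, blocks, dictionary, error) and now returns a single BlobDecompressionResponse struct plus an error. A minimal before/after sketch of a call site (illustrative, not copied verbatim from any file in this diff):

// before: positional results, easy to misorder
header, rawPayload, blocks, dict, err := v1.DecompressBlob(blobBytes, dictStore)

// after: one named response object
r, err := v1.DecompressBlob(blobBytes, dictStore)
// fields: r.Header, r.RawPayload, r.Blocks, r.Dict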

View File

@@ -20,7 +20,7 @@ import (
emPlonk "github.com/consensys/gnark/std/recursion/plonk"
)
-// Generates a concrete proof for the decompression of the blob
+// Prove generates a concrete proof for the decompression of the blob
func Prove(cfg *config.Config, req *Request) (*Response, error) {
// Parsing / validating the request

View File

@@ -1,7 +1,7 @@
package blobdecompression
import (
"errors"
"fmt"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/dictionary"
"github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
@@ -21,12 +21,13 @@ func Compile(dictionaryNbBytes int) constraint.ConstraintSystem {
// Assign the circuit with concrete data. Returns the assigned circuit and the
// public input computed during the assignment.
func Assign(blobData []byte, dictStore dictionary.Store, eip4844Enabled bool, x [32]byte, y fr381.Element) (circuit frontend.Circuit, publicInput fr.Element, snarkHash []byte, err error) {
-switch blob.GetVersion(blobData) {
+vsn := blob.GetVersion(blobData)
+switch vsn {
case 1:
return v1.Assign(blobData, dictStore, eip4844Enabled, x, y)
case 0:
return v0.Assign(blobData, dictStore, eip4844Enabled, x, y)
}
-err = errors.New("decompression circuit assignment : unsupported blob version")
+err = fmt.Errorf("decompression circuit assignment : unsupported blob version %d", vsn)
return
}

View File

@@ -30,7 +30,7 @@ func prepare(t require.TestingT, blobBytes []byte) (c *v1.Circuit, a frontend.Ci
dictStore, err := dictionary.SingletonStore(blobtestutils.GetDict(t), 1)
assert.NoError(t, err)
-_, payload, _, dict, err := blobcompressorv1.DecompressBlob(blobBytes, dictStore)
+r, err := blobcompressorv1.DecompressBlob(blobBytes, dictStore)
assert.NoError(t, err)
resp, err := blobsubmission.CraftResponse(&blobsubmission.Request{
@@ -59,9 +59,9 @@ func prepare(t require.TestingT, blobBytes []byte) (c *v1.Circuit, a frontend.Ci
assert.Equal(t, resp.SnarkHash[2:], hex.EncodeToString(snarkHash))
return &v1.Circuit{
-Dict: make([]frontend.Variable, len(dict)),
+Dict: make([]frontend.Variable, len(r.Dict)),
BlobBytes: make([]frontend.Variable, blobcompressorv1.MaxUsableBytes),
-MaxBlobPayloadNbBytes: len(payload) * 3 / 2, // small max blobcompressorv1 size so it compiles in manageable time
+MaxBlobPayloadNbBytes: len(r.RawPayload) * 3 / 2, // small max blobcompressorv1 size so it compiles in manageable time
}, a
}

View File

@@ -253,24 +253,24 @@ func AssignFPI(blobBytes []byte, dictStore dictionary.Store, eip4844Enabled bool
return
}
-header, payload, _, dict, err := blob.DecompressBlob(blobBytes, dictStore)
+r, err := blob.DecompressBlob(blobBytes, dictStore)
if err != nil {
return
}
-if header.NbBatches() > MaxNbBatches {
-err = fmt.Errorf("decompression circuit assignment : too many batches in the header : %d. max %d", header.NbBatches(), MaxNbBatches)
+if r.Header.NbBatches() > MaxNbBatches {
+err = fmt.Errorf("decompression circuit assignment : too many batches in the header : %d. max %d", r.Header.NbBatches(), MaxNbBatches)
return
}
-batchEnds := make([]int, header.NbBatches())
-if header.NbBatches() > 0 {
-batchEnds[0] = header.BatchSizes[0]
+batchEnds := make([]int, r.Header.NbBatches())
+if r.Header.NbBatches() > 0 {
+batchEnds[0] = r.Header.BatchSizes[0]
}
-for i := 1; i < len(header.BatchSizes); i++ {
-batchEnds[i] = batchEnds[i-1] + header.BatchSizes[i]
+for i := 1; i < len(r.Header.BatchSizes); i++ {
+batchEnds[i] = batchEnds[i-1] + r.Header.BatchSizes[i]
}
-fpi.BatchSums = BatchesChecksumAssign(batchEnds, payload)
+fpi.BatchSums = BatchesChecksumAssign(batchEnds, r.RawPayload)
fpi.X = x

View File

@@ -55,24 +55,24 @@ func TestParseHeader(t *testing.T) {
for _, blobData := range blobs {
-header, _, blocks, _, err := blob.DecompressBlob(blobData, dictStore)
+r, err := blob.DecompressBlob(blobData, dictStore)
assert.NoError(t, err)
-assert.LessOrEqual(t, len(blocks), MaxNbBatches, "too many batches")
+assert.LessOrEqual(t, len(r.Blocks), MaxNbBatches, "too many batches")
unpacked, err := encode.UnpackAlign(blobData, fr381.Bits-1, false)
require.NoError(t, err)
assignment := &testParseHeaderCircuit{
Blob: test_utils.PadBytes(unpacked, maxBlobSize),
-HeaderLen: header.ByteSize(),
-NbBatches: header.NbBatches(),
+HeaderLen: r.Header.ByteSize(),
+NbBatches: r.Header.NbBatches(),
BlobLen: len(unpacked),
}
for i := range assignment.BlocksPerBatch {
-if i < header.NbBatches() {
-assignment.BlocksPerBatch[i] = header.BatchSizes[i]
+if i < r.Header.NbBatches() {
+assignment.BlocksPerBatch[i] = r.Header.BatchSizes[i]
} else {
assignment.BlocksPerBatch[i] = 0
}
@@ -347,7 +347,7 @@ func TestDictHash(t *testing.T) {
dict := blobtestutils.GetDict(t)
dictStore, err := dictionary.SingletonStore(blobtestutils.GetDict(t), 1)
assert.NoError(t, err)
-header, _, _, _, err := blob.DecompressBlob(blobBytes, dictStore) // a bit roundabout, but the header field is not public
+r, err := blob.DecompressBlob(blobBytes, dictStore) // a bit roundabout, but the header field is not public
assert.NoError(t, err)
circuit := testDataDictHashCircuit{
@@ -355,7 +355,7 @@ func TestDictHash(t *testing.T) {
}
assignment := testDataDictHashCircuit{
DictBytes: utils.ToVariableSlice(dict),
-Checksum: header.DictChecksum[:],
+Checksum: r.Header.DictChecksum[:],
}
assert.NoError(t, test.IsSolved(&circuit, &assignment, ecc.BLS12_377.ScalarField()))

View File

@@ -6,6 +6,7 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/consensys/gnark/std/compress"
"math/big"
"slices"
"strings"
@@ -14,7 +15,6 @@ import (
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/test"
"github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v0/compress"
"github.com/consensys/linea-monorepo/prover/utils"
"github.com/stretchr/testify/assert"
)

View File

@@ -2,6 +2,7 @@ package keccak
import (
"errors"
"github.com/consensys/gnark/std/compress"
"github.com/consensys/linea-monorepo/prover/circuits/internal"
"github.com/sirupsen/logrus"
"math/big"
@@ -9,7 +10,6 @@ import (
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/std/lookup/logderivlookup"
"github.com/consensys/gnark/std/rangecheck"
"github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v0/compress"
"github.com/consensys/linea-monorepo/prover/circuits/internal/plonk"
"github.com/consensys/linea-monorepo/prover/protocol/serialization"
"github.com/consensys/linea-monorepo/prover/protocol/wizard"

View File

@@ -2,6 +2,7 @@ package keccak
import (
"fmt"
"github.com/consensys/gnark/std/compress"
"github.com/consensys/linea-monorepo/prover/circuits/internal/test_utils"
"math/big"
"testing"
@@ -11,7 +12,6 @@ import (
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/test"
"github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v0/compress"
"github.com/consensys/linea-monorepo/prover/circuits/internal"
"github.com/consensys/linea-monorepo/prover/protocol/wizard"
"github.com/consensys/linea-monorepo/prover/utils"

View File

@@ -43,7 +43,9 @@ func DecompressBlob(blob []byte, dictStore dictionary.Store) ([]byte, error) {
_, _, blocks, err = v0.DecompressBlob(blob, dictStore)
blockDecoder = v0.DecodeBlockFromUncompressed
case 1:
-_, _, blocks, _, err = v1.DecompressBlob(blob, dictStore)
+r, _err := v1.DecompressBlob(blob, dictStore)
+blocks = r.Blocks
+err = _err
blockDecoder = v1.DecodeBlockFromUncompressed
default:
return nil, errors.New("unrecognized blob version")
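
Aside on the r, _err := pattern above: blocks and err are declared earlier in the enclosing function, so a plain = would not compile (r is new to this case block) while r, err := would shadow the outer err. Capturing the error into a temporary and assigning through is one way out; a hypothetical equivalent with an explicit declaration:

// assumes the response type is exported from the v1 package
var r v1.BlobDecompressionResponse
r, err = v1.DecompressBlob(blob, dictStore)
blocks = r.Blocks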

View File

@@ -5,7 +5,7 @@ import (
"errors"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/consensys/gnark-crypto/hash"
"github.com/consensys/linea-monorepo/prover/circuits/blobdecompression/v0/compress"
"github.com/consensys/gnark/std/compress"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
"os"
)

View File

@@ -115,7 +115,7 @@ func (bm *BlobMaker) Written() int {
func (bm *BlobMaker) Bytes() []byte {
if bm.currentBlobLength > 0 {
// sanity check that we can always decompress.
-header, rawBlocks, _, _, err := DecompressBlob(bm.currentBlob[:bm.currentBlobLength], bm.dictStore)
+resp, err := DecompressBlob(bm.currentBlob[:bm.currentBlobLength], bm.dictStore)
if err != nil {
var sbb strings.Builder
fmt.Fprintf(&sbb, "invalid blob: %v\n", err)
@@ -126,11 +126,11 @@ func (bm *BlobMaker) Bytes() []byte {
panic(sbb.String())
}
// compare the header
-if !header.Equals(&bm.header) {
+if !resp.Header.Equals(&bm.header) {
panic("invalid blob: header mismatch")
}
-if !bytes.Equal(rawBlocks, bm.compressor.WrittenBytes()) {
-panic(fmt.Sprintf("invalid blob: body mismatch expected %x, got %x", rawBlocks, bm.compressor.WrittenBytes()))
+if !bytes.Equal(resp.RawPayload, bm.compressor.WrittenBytes()) {
+panic(fmt.Sprintf("invalid blob: body mismatch expected %x, got %x", resp.RawPayload, bm.compressor.WrittenBytes()))
}
}
return bm.currentBlob[:bm.currentBlobLength]
@@ -301,52 +301,62 @@ func (bm *BlobMaker) Equals(other *BlobMaker) bool {
return true
}
+type BlobDecompressionResponse struct {
+Header *Header
+Blocks [][]byte
+RawPayload []byte
+Dict []byte
+}
// DecompressBlob decompresses a blob and returns the header and the blocks as they were compressed.
-func DecompressBlob(b []byte, dictStore dictionary.Store) (blobHeader *Header, rawPayload []byte, blocks [][]byte, dict []byte, err error) {
+func DecompressBlob(b []byte, dictStore dictionary.Store) (resp BlobDecompressionResponse, err error) {
// UnpackAlign the blob
b, err = encode.UnpackAlign(b, fr381.Bits-1, false)
if err != nil {
-return nil, nil, nil, nil, err
+return
}
// read the header
-blobHeader = new(Header)
-read, err := blobHeader.ReadFrom(bytes.NewReader(b))
+resp.Header = new(Header)
+read, err := resp.Header.ReadFrom(bytes.NewReader(b))
if err != nil {
-return nil, nil, nil, nil, fmt.Errorf("failed to read blob header: %w", err)
+err = fmt.Errorf("failed to read blob header: %w", err)
+return
}
-// retrieve dict
-if dict, err = dictStore.Get(blobHeader.DictChecksum[:], 1); err != nil {
-return nil, nil, nil, nil, err
+// retrieve dictionary
+if resp.Dict, err = dictStore.Get(resp.Header.DictChecksum[:], 1); err != nil {
+return
}
b = b[read:]
// decompress the data
-rawPayload, err = lzss.Decompress(b, dict)
+resp.RawPayload, err = lzss.Decompress(b, resp.Dict)
if err != nil {
-return nil, nil, nil, nil, fmt.Errorf("failed to decompress blob body: %w", err)
+err = fmt.Errorf("failed to decompress blob body: %w", err)
+return
}
offset := 0
-for _, batchLen := range blobHeader.BatchSizes {
+for _, batchLen := range resp.Header.BatchSizes {
batchOffset := offset
for offset < batchOffset+batchLen {
-if blockLen, err := ScanBlockByteLen(rawPayload[offset:]); err != nil {
-return nil, nil, nil, nil, err
+if blockLen, err := ScanBlockByteLen(resp.RawPayload[offset:]); err != nil {
+return resp, err
} else {
-blocks = append(blocks, rawPayload[offset:offset+blockLen])
+resp.Blocks = append(resp.Blocks, resp.RawPayload[offset:offset+blockLen])
offset += blockLen
}
}
if offset != batchOffset+batchLen {
-return nil, nil, nil, nil, errors.New("incorrect batch length")
+err = errors.New("incorrect batch length")
+return
}
}
-return blobHeader, rawPayload, blocks, dict, nil
+return
}
// WorstCompressedBlockSize returns the size of the given block, as compressed by an "empty" blob maker.
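
For reference, a hedged usage sketch of the new signature (function name and error wrapping are illustrative, not part of this commit; assumes code living in the same v1 package):

func inspectBlob(blobBytes []byte, dictStore dictionary.Store) error {
	r, err := DecompressBlob(blobBytes, dictStore)
	if err != nil {
		return fmt.Errorf("decompression failed: %w", err)
	}
	// every value formerly returned positionally is now a named member
	fmt.Printf("%d batches, %d blocks, %d payload bytes, %d dict bytes\n",
		r.Header.NbBatches(), len(r.Blocks), len(r.RawPayload), len(r.Dict))
	return nil
}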

View File

@@ -56,9 +56,9 @@ func testCompressorSingleSmallBatch(t *testing.T, blocks [][]byte) {
dict, err := os.ReadFile(testDictPath)
assert.NoError(t, err)
dictStore, err := dictionary.SingletonStore(dict, 1)
-_, _, blocksBack, _, err := v1.DecompressBlob(bm.Bytes(), dictStore)
+r, err := v1.DecompressBlob(bm.Bytes(), dictStore)
assert.NoError(t, err)
-assert.Equal(t, len(blocks), len(blocksBack), "number of blocks should match")
+assert.Equal(t, len(blocks), len(r.Blocks), "number of blocks should match")
// TODO compare the blocks
}

View File

@@ -247,25 +247,25 @@ func decompressBlob(b []byte) ([][][]byte, error) {
if err != nil {
return nil, err
}
-header, _, blocks, _, err := v1.DecompressBlob(b, dictStore)
+r, err := v1.DecompressBlob(b, dictStore)
if err != nil {
return nil, fmt.Errorf("can't decompress blob: %w", err)
}
-batches := make([][][]byte, len(header.BatchSizes))
-for i, batchNbBytes := range header.BatchSizes {
+batches := make([][][]byte, len(r.Header.BatchSizes))
+for i, batchNbBytes := range r.Header.BatchSizes {
batches[i] = make([][]byte, 0)
batchLenYet := 0
for batchLenYet < batchNbBytes {
-batches[i] = append(batches[i], blocks[0])
-batchLenYet += len(blocks[0])
-blocks = blocks[1:]
+batches[i] = append(batches[i], r.Blocks[0])
+batchLenYet += len(r.Blocks[0])
+r.Blocks = r.Blocks[1:]
}
if batchLenYet != batchNbBytes {
return nil, errors.New("invalid batch size")
}
}
-if len(blocks) != 0 {
+if len(r.Blocks) != 0 {
return nil, errors.New("not all blocks were consumed")
}