Implement parallel verification for KZG proofs in VerifyCellKZGProofBatch

Author: Manu NALEPA
Date: 2026-01-06 23:20:34 +01:00
parent 1a6252ade4
commit c4eeb043a4
4 changed files with 176 additions and 6 deletions


@@ -17,6 +17,7 @@ go_library(
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@org_golang_x_sync//errgroup:go_default_library",
     ],
 )


@@ -1,7 +1,10 @@
 package kzg
 
 import (
+	"runtime"
+
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 
 	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
 	"github.com/ethereum/go-ethereum/crypto/kzg4844"
@@ -25,6 +28,9 @@ type Cell [BytesPerCell]byte
 
 // Commitment represent a KZG commitment to a Blob.
 type Commitment [48]byte
 
+// errInvalidProof is returned when KZG proof verification fails.
+var errInvalidProof = errors.New("invalid KZG proof")
+
 // Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
 type Proof [BytesPerProof]byte
@@ -103,16 +109,69 @@ func ComputeCellsAndKZGProofs(blob *Blob) ([]Cell, []Proof, error) {
 	return cells, proofs, nil
 }
 
-// VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cells indices, cells and proofs.
-// Note: It is way more efficient to call once this function with big slices than calling it multiple times with small slices.
-func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
-	// Convert `Cell` type to `ckzg4844.Cell`
-	ckzgCells := make([]ckzg4844.Cell, len(cells))
+// chunkBounds represents the start and end indices of a chunk.
+type chunkBounds struct {
+	start, end int
+}
+
+// VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cell indices, cells and proofs.
+// The verification is parallelized across CPU cores by splitting the input into chunks.
+func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
+	count := len(cells)
+
+	// Validate that all input slices have the same length.
+	if len(commitmentsBytes) != count || len(cellIndices) != count || len(proofsBytes) != count {
+		return false, errors.New("input slices must have equal length")
+	}
+
+	// Convert `Cell` type to `ckzg4844.Cell`.
+	ckzgCells := make([]ckzg4844.Cell, count)
 	for i := range cells {
 		copy(ckzgCells[i][:], cells[i][:])
 	}
-	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
+
+	if count == 0 {
+		return true, nil
+	}
+
+	workerCount := min(count, runtime.GOMAXPROCS(0))
+	chunks := computeChunkBounds(count, workerCount)
+
+	var wg errgroup.Group
+	for workerIdx := range workerCount {
+		bounds := chunks[workerIdx]
+
+		wg.Go(func() error {
+			// Verify this chunk.
+			valid, err := ckzg4844.VerifyCellKZGProofBatch(
+				commitmentsBytes[bounds.start:bounds.end],
+				cellIndices[bounds.start:bounds.end],
+				ckzgCells[bounds.start:bounds.end],
+				proofsBytes[bounds.start:bounds.end],
+			)
+			if err != nil {
+				return err
+			}
+
+			if !valid {
+				return errInvalidProof
+			}
+
+			return nil
+		})
+	}
+
+	// Wait for all workers to complete.
+	if err := wg.Wait(); err != nil {
+		if errors.Is(err, errInvalidProof) {
+			return false, nil
+		}
+
+		return false, err
+	}
+
+	return true, nil
 }
 
 // RecoverCells recovers the complete cells from a given set of cell indices and partial cells.
@@ -164,3 +223,30 @@ func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) ([]Cell, []Proof, error) {
 	return cells, proofs, nil
 }
+
+// computeChunkBounds calculates evenly distributed chunk boundaries for parallel processing.
+// It splits itemsCount into chunks, distributing any remainder across the first chunks.
+func computeChunkBounds(itemsCount, workerCount int) []chunkBounds {
+	actualWorkers := min(itemsCount, workerCount)
+	if actualWorkers == 0 {
+		return []chunkBounds{}
+	}
+
+	chunkSize := itemsCount / actualWorkers
+	remainder := itemsCount % actualWorkers
+
+	chunks := make([]chunkBounds, 0, actualWorkers)
+	offset := 0
+
+	for i := range actualWorkers {
+		size := chunkSize
+		if i < remainder {
+			size++
+		}
+
+		chunks = append(chunks, chunkBounds{start: offset, end: offset + size})
+		offset += size
+	}
+
+	return chunks
+}


@@ -111,6 +111,48 @@ func TestVerifyCellKZGProofBatch(t *testing.T) {
 		require.NotNil(t, err)
 		require.Equal(t, false, valid)
 	})
+
+	t.Run("empty inputs should return true", func(t *testing.T) {
+		// Empty slices should be considered valid.
+		commitmentsBytes := []Bytes48{}
+		cellIndices := []uint64{}
+		cells := []Cell{}
+		proofsBytes := []Bytes48{}
+
+		valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, cells, proofsBytes)
+		require.NoError(t, err)
+		require.Equal(t, true, valid)
+	})
+
+	t.Run("mismatched input lengths should fail", func(t *testing.T) {
+		randBlob := random.GetRandBlob(123)
+		var blob Blob
+		copy(blob[:], randBlob[:])
+
+		commitment, err := BlobToKZGCommitment(&blob)
+		require.NoError(t, err)
+
+		cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
+		require.NoError(t, err)
+
+		// Create mismatched length inputs.
+		cellIndices := []uint64{0, 1, 2}
+		selectedCells := []Cell{cells[0], cells[1], cells[2]}
+		commitmentsBytes := make([]Bytes48, 3)
+		for i := range commitmentsBytes {
+			copy(commitmentsBytes[i][:], commitment[:])
+		}
+
+		// Only 2 proofs instead of 3.
+		proofsBytes := make([]Bytes48, 2)
+		copy(proofsBytes[0][:], proofs[0][:])
+		copy(proofsBytes[1][:], proofs[1][:])
+
+		valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, selectedCells, proofsBytes)
+		require.NotNil(t, err)
+		require.Equal(t, false, valid)
+		require.Equal(t, "input slices must have equal length", err.Error())
+	})
 }
 
 func TestRecoverCells(t *testing.T) {
@@ -234,3 +276,41 @@ func TestBlobToKZGCommitment(t *testing.T) {
 		require.Equal(t, commitment, commitment2)
 	})
 }
+
+func TestComputeChunkBounds(t *testing.T) {
+	t.Run("evenly divisible items", func(t *testing.T) {
+		chunks := computeChunkBounds(100, 4)
+		require.Equal(t, 4, len(chunks))
+		require.Equal(t, chunkBounds{start: 0, end: 25}, chunks[0])
+		require.Equal(t, chunkBounds{start: 25, end: 50}, chunks[1])
+		require.Equal(t, chunkBounds{start: 50, end: 75}, chunks[2])
+		require.Equal(t, chunkBounds{start: 75, end: 100}, chunks[3])
+	})
+
+	t.Run("items with remainder distributed to first chunks", func(t *testing.T) {
+		chunks := computeChunkBounds(10, 3)
+		require.Equal(t, 3, len(chunks))
+		require.Equal(t, chunkBounds{start: 0, end: 4}, chunks[0]) // gets the extra item
+		require.Equal(t, chunkBounds{start: 4, end: 7}, chunks[1]) // normal size
+		require.Equal(t, chunkBounds{start: 7, end: 10}, chunks[2]) // normal size
+	})
+
+	t.Run("fewer items than workers returns min(items, workers) chunks", func(t *testing.T) {
+		chunks := computeChunkBounds(3, 5)
+		require.Equal(t, 3, len(chunks)) // Only 3 chunks, not 5.
+		require.Equal(t, chunkBounds{start: 0, end: 1}, chunks[0])
+		require.Equal(t, chunkBounds{start: 1, end: 2}, chunks[1])
+		require.Equal(t, chunkBounds{start: 2, end: 3}, chunks[2])
+	})
+
+	t.Run("single worker gets all items", func(t *testing.T) {
+		chunks := computeChunkBounds(100, 1)
+		require.Equal(t, 1, len(chunks))
+		require.Equal(t, chunkBounds{start: 0, end: 100}, chunks[0])
+	})
+
+	t.Run("no items produces no chunks", func(t *testing.T) {
+		chunks := computeChunkBounds(0, 4)
+		require.Equal(t, 0, len(chunks)) // No chunks when no items.
+	})
+}
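Not part of this commit, but a benchmark along the following lines could quantify the speedup. This is a sketch that reuses the setup pattern of the tests above and assumes the trusted setup is initialized by the package's test harness:

	func BenchmarkVerifyCellKZGProofBatch(b *testing.B) {
		// Build a full batch of cells and proofs from one random blob.
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])

		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(b, err)

		cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
		require.NoError(b, err)

		count := len(cells)
		commitmentsBytes := make([]Bytes48, count)
		cellIndices := make([]uint64, count)
		proofsBytes := make([]Bytes48, count)
		for i := range count {
			copy(commitmentsBytes[i][:], commitment[:])
			cellIndices[i] = uint64(i)
			copy(proofsBytes[i][:], proofs[i][:])
		}

		b.ResetTimer()
		for range b.N {
			valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, cells, proofsBytes)
			require.NoError(b, err)
			require.Equal(b, true, valid)
		}
	}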


@@ -0,0 +1,3 @@
+### Changed
+
+- Parallelized KZG proof batch verification across CPU cores.
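Since the worker count is derived from runtime.GOMAXPROCS(0), the parallelism of this path can be bounded without code changes, either by launching the process with the GOMAXPROCS environment variable or programmatically (illustration only, not part of this commit):

	// Cap the whole process at 4 OS threads; batch verification will then
	// use at most min(len(cells), 4) workers.
	previous := runtime.GOMAXPROCS(4)
	defer runtime.GOMAXPROCS(previous)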