PeerDAS: Refactor reconstruction and improve DataColumnStorage.Get. (#15309)

* PeerDAS: Refactor reconstruction.

* DB Columns filesystem: Add missing tests and exit early in `Get`.

* Add changelog.

* Fix Preston's comment.

* Fix `TestDataColumnsAlignWithBlock`.
This commit is contained in:
Manu NALEPA
2025-05-23 15:22:53 +02:00
committed by GitHub
parent 58f08672c0
commit 58b5aac201
21 changed files with 951 additions and 478 deletions

View File

@@ -8,8 +8,8 @@ go_library(
"metrics.go", "metrics.go",
"p2p_interface.go", "p2p_interface.go",
"reconstruction.go", "reconstruction.go",
"util.go",
"validator.go", "validator.go",
"verification.go",
], ],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas", importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
@@ -47,6 +47,7 @@ go_test(
"reconstruction_test.go", "reconstruction_test.go",
"utils_test.go", "utils_test.go",
"validator_test.go", "validator_test.go",
"verification_test.go",
], ],
deps = [ deps = [
":go_default_library", ":go_default_library",
@@ -67,5 +68,6 @@ go_test(
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_pkg_errors//:go_default_library", "@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
], ],
) )

View File

@@ -20,10 +20,12 @@ import (
var ( var (
// Custom errors // Custom errors
ErrCustodyGroupTooLarge = errors.New("custody group too large") ErrCustodyGroupTooLarge = errors.New("custody group too large")
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large") ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
ErrMismatchSize = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs") ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen") ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
// maxUint256 is the maximum value of an uint256. // maxUint256 is the maximum value of an uint256.
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64} maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
@@ -139,7 +141,7 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsA
} }
if len(blobKzgCommitments) != len(cellsAndProofs) { if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, ErrMismatchSize return nil, ErrSizeMismatch
} }
signedBlockHeader, err := signedBlock.Header() signedBlockHeader, err := signedBlock.Header()
@@ -152,19 +154,72 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsA
return nil, errors.Wrap(err, "merkle proof ZKG commitments") return nil, errors.Wrap(err, "merkle proof ZKG commitments")
} }
dataColumnSidecars, err := DataColumnsSidecarsFromItems(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs) dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "data column sidecars from items") return nil, errors.Wrap(err, "data column sidecars")
} }
return dataColumnSidecars, nil return dataColumnSidecars, nil
} }
// DataColumnsSidecarsFromItems computes the data column sidecars from the signed block header, the blob KZG commiments, // ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
beaconConfig := params.BeaconConfig()
numberOfColumns := beaconConfig.NumberOfColumns
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
if columnIndex >= numberOfColumns {
return 0, ErrIndexTooLarge
}
return columnIndex % numberOfCustodyGroups, nil
}
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
if ct == Actual {
custodyGroupCount = custodyInfo.ActualGroupCount()
}
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
return max(samplesPerSlot, custodyGroupCount)
}
// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
custodyGroupCount := len(custodyGroups)
// Compute the columns for each custody group.
columns := make(map[uint64]bool, custodyGroupCount)
for _, group := range custodyGroups {
if group >= numberOfCustodyGroups {
return nil, ErrCustodyGroupTooLarge
}
groupColumns, err := ComputeColumnsForCustodyGroup(group)
if err != nil {
return nil, errors.Wrap(err, "compute columns for custody group")
}
for _, column := range groupColumns {
columns[column] = true
}
}
return columns, nil
}
// dataColumnsSidecars computes the data column sidecars from the signed block header, the blob KZG commiments,
// the KZG commitment includion proofs and cells and cell proofs. // the KZG commitment includion proofs and cells and cell proofs.
// The returned value contains pointers to function parameters. // The returned value contains pointers to function parameters.
// (If the caller alterates input parameters afterwards, the returned value will be modified as well.) // (If the caller alterates input parameters afterwards, the returned value will be modified as well.)
func DataColumnsSidecarsFromItems( func dataColumnsSidecars(
signedBlockHeader *ethpb.SignedBeaconBlockHeader, signedBlockHeader *ethpb.SignedBeaconBlockHeader,
blobKzgCommitments [][]byte, blobKzgCommitments [][]byte,
kzgCommitmentsInclusionProof [][]byte, kzgCommitmentsInclusionProof [][]byte,
@@ -172,7 +227,7 @@ func DataColumnsSidecarsFromItems(
) ([]*ethpb.DataColumnSidecar, error) { ) ([]*ethpb.DataColumnSidecar, error) {
start := time.Now() start := time.Now()
if len(blobKzgCommitments) != len(cellsAndProofs) { if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, ErrMismatchSize return nil, ErrSizeMismatch
} }
numberOfColumns := params.BeaconConfig().NumberOfColumns numberOfColumns := params.BeaconConfig().NumberOfColumns
@@ -219,184 +274,3 @@ func DataColumnsSidecarsFromItems(
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds())) dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return sidecars, nil return sidecars, nil
} }
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
beaconConfig := params.BeaconConfig()
numberOfColumns := beaconConfig.NumberOfColumns
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
if columnIndex >= numberOfColumns {
return 0, ErrIndexTooLarge
}
return columnIndex % numberOfCustodyGroups, nil
}
// Blobs extract blobs from `dataColumnsSidecar`.
// This can be seen as the reciprocal function of DataColumnSidecars.
// `dataColumnsSidecar` needs to contain the datacolumns corresponding to the non-extended matrix,
// else an error will be returned.
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Compute the number of needed columns, including the number of columns is odd case.
neededColumnCount := (numberOfColumns + 1) / 2
// Check if all needed columns are present.
sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
for i := range dataColumnsSidecar {
dataColumnSideCar := dataColumnsSidecar[i]
index := dataColumnSideCar.Index
if index < neededColumnCount {
sliceIndexFromColumnIndex[index] = i
}
}
actualColumnCount := uint64(len(sliceIndexFromColumnIndex))
// Get missing columns.
if actualColumnCount < neededColumnCount {
var missingColumnsSlice []uint64
for i := range neededColumnCount {
if _, ok := sliceIndexFromColumnIndex[i]; !ok {
missingColumnsSlice = append(missingColumnsSlice, i)
}
}
slices.Sort[[]uint64](missingColumnsSlice)
return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
}
// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
firstDataColumnSidecar := dataColumnsSidecar[0]
blobCount := uint64(len(firstDataColumnSidecar.Column))
// Check all colums have te same length.
for i := range dataColumnsSidecar {
if uint64(len(dataColumnsSidecar[i].Column)) != blobCount {
return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].Column))
}
}
// Reconstruct verified RO blobs from columns.
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
// Populate and filter indices.
indicesSlice := populateAndFilterIndices(indices, blobCount)
for _, blobIndex := range indicesSlice {
var blob kzg.Blob
// Compute the content of the blob.
for columnIndex := range neededColumnCount {
sliceIndex, ok := sliceIndexFromColumnIndex[columnIndex]
if !ok {
return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
}
dataColumnSideCar := dataColumnsSidecar[sliceIndex]
cell := dataColumnSideCar.Column[blobIndex]
for i := range cell {
blob[columnIndex*kzg.BytesPerCell+uint64(i)] = cell[i]
}
}
// Retrieve the blob KZG commitment.
blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])
// Compute the blob KZG proof.
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
if err != nil {
return nil, errors.Wrap(err, "compute blob KZG proof")
}
blobSidecar := &ethpb.BlobSidecar{
Index: blobIndex,
Blob: blob[:],
KzgCommitment: blobKZGCommitment[:],
KzgProof: blobKzgProof[:],
SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader,
CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
}
roBlob, err := blocks.NewROBlob(blobSidecar)
if err != nil {
return nil, errors.Wrap(err, "new RO blob")
}
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
}
return verifiedROBlobs, nil
}
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
if ct == Actual {
custodyGroupCount = custodyInfo.ActualGroupCount()
}
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
return max(samplesPerSlot, custodyGroupCount)
}
// CustodyColumns computes the custody columns from the custody groups.
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
custodyGroupCount := len(custodyGroups)
// Compute the columns for each custody group.
columns := make(map[uint64]bool, custodyGroupCount)
for _, group := range custodyGroups {
if group >= numberOfCustodyGroups {
return nil, ErrCustodyGroupTooLarge
}
groupColumns, err := ComputeColumnsForCustodyGroup(group)
if err != nil {
return nil, errors.Wrap(err, "compute columns for custody group")
}
for _, column := range groupColumns {
columns[column] = true
}
}
return columns, nil
}
// populateAndFilterIndices returns a sorted slices of indices, setting all indices if none are provided,
// and filtering out indices higher than the blob count.
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
// If no indices are provided, provide all blobs.
if len(indices) == 0 {
for i := range blobCount {
indices[i] = true
}
}
// Filter blobs index higher than the blob count.
indicesSlice := make([]uint64, 0, len(indices))
for i := range indices {
if i < blobCount {
indicesSlice = append(indicesSlice, i)
}
}
// Sort the indices.
slices.Sort[[]uint64](indicesSlice)
return indicesSlice
}

View File

@@ -11,18 +11,21 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require" "github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util" "github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/pkg/errors"
) )
func TestCustodyGroups(t *testing.T) { func TestCustodyGroups(t *testing.T) {
// --------------------------------------------
// The happy path is unit tested in spec tests. // The happy path is unit tested in spec tests.
// --------------------------------------------
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
_, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1) _, err := peerdas.CustodyGroups(enode.ID{}, numberOfCustodyGroup+1)
require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge) require.ErrorIs(t, err, peerdas.ErrCustodyGroupCountTooLarge)
} }
func TestComputeColumnsForCustodyGroup(t *testing.T) { func TestComputeColumnsForCustodyGroup(t *testing.T) {
// --------------------------------------------
// The happy path is unit tested in spec tests. // The happy path is unit tested in spec tests.
// --------------------------------------------
numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups numberOfCustodyGroup := params.BeaconConfig().NumberOfCustodyGroups
_, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup) _, err := peerdas.ComputeColumnsForCustodyGroup(numberOfCustodyGroup)
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge) require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
@@ -62,14 +65,10 @@ func TestDataColumnSidecars(t *testing.T) {
cellsAndProofs := make([]kzg.CellsAndProofs, 1) cellsAndProofs := make([]kzg.CellsAndProofs, 1)
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs) _, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorIs(t, err, peerdas.ErrMismatchSize) require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
}) })
} }
// --------------------------------------------------------------------------------------------------------------------------------------
// DataColumnsSidecarsFromItems is tested as part of the DataColumnSidecars tests, in the TestDataColumnsSidecarsBlobsRoundtrip function.
// --------------------------------------------------------------------------------------------------------------------------------------
func TestComputeCustodyGroupForColumn(t *testing.T) { func TestComputeCustodyGroupForColumn(t *testing.T) {
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
config := params.BeaconConfig() config := params.BeaconConfig()
@@ -105,136 +104,6 @@ func TestComputeCustodyGroupForColumn(t *testing.T) {
}) })
} }
func TestBlobs(t *testing.T) {
blobsIndice := map[uint64]bool{}
numberOfColumns := params.BeaconConfig().NumberOfColumns
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
for i := uint64(2); i < numberOfColumns/2+2; i++ {
almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
Index: i,
})
}
testCases := []struct {
name string
input []*ethpb.DataColumnSidecar
expected []*blocks.VerifiedROBlob
err error
}{
{
name: "empty input",
input: []*ethpb.DataColumnSidecar{},
expected: nil,
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
},
{
name: "missing columns",
input: almostAllColumns,
expected: nil,
err: errors.New("some columns are missing: [0 1]"),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual, err := peerdas.Blobs(blobsIndice, tc.input)
if tc.err != nil {
require.Equal(t, tc.err.Error(), err.Error())
} else {
require.NoError(t, err)
}
require.DeepSSZEqual(t, tc.expected, actual)
})
}
}
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
const blobCount = 5
blobsIndex := map[uint64]bool{}
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
// Generate random blobs and their corresponding commitments and proofs.
blobs := make([]kzg.Blob, 0, blobCount)
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
for blobIndex := range blobCount {
// Create a random blob.
blob := getRandBlob(int64(blobIndex))
blobs = append(blobs, blob)
// Generate a blobKZGCommitment for the blob.
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
require.NoError(t, err)
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
blobKzgProofs = append(blobKzgProofs, proof)
}
// Set the commitments into the block.
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
for _, blobKZGCommitment := range blobKzgCommitments {
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
}
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
// Generate verified RO blobs.
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
require.NoError(t, err)
for blobIndex := range blobCount {
blob := blobs[blobIndex]
blobKZGCommitment := blobKzgCommitments[blobIndex]
blobKzgProof := blobKzgProofs[blobIndex]
// Get the signed beacon block header.
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
require.NoError(t, err)
blobSidecar := &ethpb.BlobSidecar{
Index: uint64(blobIndex),
Blob: blob[:],
KzgCommitment: blobKZGCommitment[:],
KzgProof: blobKzgProof[:],
SignedBlockHeader: signedBeaconBlockHeader,
CommitmentInclusionProof: commitmentInclusionProof,
}
roBlob, err := blocks.NewROBlob(blobSidecar)
require.NoError(t, err)
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
}
// Compute data columns sidecars from the signed beacon block and from the blobs.
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.NoError(t, err)
// Compute the blobs from the data columns sidecar.
roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
require.NoError(t, err)
// Check that the blobs are the same.
require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
}
func TestCustodyGroupSamplingSize(t *testing.T) { func TestCustodyGroupSamplingSize(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string

View File

@@ -117,7 +117,7 @@ func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
// CustodyGroupCount returns the number of groups we should participate in for custody. // CustodyGroupCount returns the number of groups we should participate in for custody.
func (tcgc *targetCustodyGroupCount) Get() uint64 { func (tcgc *targetCustodyGroupCount) Get() uint64 {
// If subscribed to all subnets, return the number of custody groups. // If subscribed to all subnets, return the number of custody groups.
if flags.Get().SubscribeToAllSubnets { if flags.Get().SubscribeAllDataSubnets {
return params.BeaconConfig().NumberOfCustodyGroups return params.BeaconConfig().NumberOfCustodyGroups
} }
@@ -144,7 +144,7 @@ func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint6
// Get returns the to advertise custody group count. // Get returns the to advertise custody group count.
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 { func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
// If subscribed to all subnets, return the number of custody groups. // If subscribed to all subnets, return the number of custody groups.
if flags.Get().SubscribeToAllSubnets { if flags.Get().SubscribeAllDataSubnets {
return params.BeaconConfig().NumberOfCustodyGroups return params.BeaconConfig().NumberOfCustodyGroups
} }

View File

@@ -30,25 +30,25 @@ func TestInfo(t *testing.T) {
func TestTargetCustodyGroupCount(t *testing.T) { func TestTargetCustodyGroupCount(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
subscribeToAllSubnets bool subscribeToAllColumns bool
validatorsCustodyRequirement uint64 validatorsCustodyRequirement uint64
expected uint64 expected uint64
}{ }{
{ {
name: "subscribed to all subnets", name: "subscribed to all data subnets",
subscribeToAllSubnets: true, subscribeToAllColumns: true,
validatorsCustodyRequirement: 100, validatorsCustodyRequirement: 100,
expected: 128, expected: 128,
}, },
{ {
name: "no validators attached", name: "no validators attached",
subscribeToAllSubnets: false, subscribeToAllColumns: false,
validatorsCustodyRequirement: 0, validatorsCustodyRequirement: 0,
expected: 4, expected: 4,
}, },
{ {
name: "some validators attached", name: "some validators attached",
subscribeToAllSubnets: false, subscribeToAllColumns: false,
validatorsCustodyRequirement: 100, validatorsCustodyRequirement: 100,
expected: 100, expected: 100,
}, },
@@ -57,10 +57,10 @@ func TestTargetCustodyGroupCount(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
// Subscribe to all subnets if needed. // Subscribe to all subnets if needed.
if tc.subscribeToAllSubnets { if tc.subscribeToAllColumns {
resetFlags := flags.Get() resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags) gFlags := new(flags.GlobalFlags)
gFlags.SubscribeToAllSubnets = true gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags) flags.Init(gFlags)
defer flags.Init(resetFlags) defer flags.Init(resetFlags)
} }
@@ -82,25 +82,25 @@ func TestTargetCustodyGroupCount(t *testing.T) {
func TestToAdvertiseCustodyGroupCount(t *testing.T) { func TestToAdvertiseCustodyGroupCount(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
subscribeToAllSubnets bool subscribeToAllColumns bool
toAdvertiseCustodyGroupCount uint64 toAdvertiseCustodyGroupCount uint64
expected uint64 expected uint64
}{ }{
{ {
name: "subscribed to all subnets", name: "subscribed to all subnets",
subscribeToAllSubnets: true, subscribeToAllColumns: true,
toAdvertiseCustodyGroupCount: 100, toAdvertiseCustodyGroupCount: 100,
expected: 128, expected: 128,
}, },
{ {
name: "higher than custody requirement", name: "higher than custody requirement",
subscribeToAllSubnets: false, subscribeToAllColumns: false,
toAdvertiseCustodyGroupCount: 100, toAdvertiseCustodyGroupCount: 100,
expected: 100, expected: 100,
}, },
{ {
name: "lower than custody requirement", name: "lower than custody requirement",
subscribeToAllSubnets: false, subscribeToAllColumns: false,
toAdvertiseCustodyGroupCount: 1, toAdvertiseCustodyGroupCount: 1,
expected: 4, expected: 4,
}, },
@@ -109,10 +109,10 @@ func TestToAdvertiseCustodyGroupCount(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
// Subscribe to all subnets if needed. // Subscribe to all subnets if needed.
if tc.subscribeToAllSubnets { if tc.subscribeToAllColumns {
resetFlags := flags.Get() resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags) gFlags := new(flags.GlobalFlags)
gFlags.SubscribeToAllSubnets = true gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags) flags.Init(gFlags)
defer flags.Init(resetFlags) defer flags.Init(resetFlags)
} }

View File

@@ -2,75 +2,321 @@ package peerdas
import ( import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg" "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params" "github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1" ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
// CanSelfReconstruct returns true if the node can self-reconstruct all the data columns from its custody group count. var (
func CanSelfReconstruct(custodyGroupCount uint64) bool { ErrColumnLengthsDiffer = errors.New("columns do not have the same length")
total := params.BeaconConfig().NumberOfCustodyGroups ErrBlobIndexTooHigh = errors.New("blob index is too high")
// If total is odd, then we need total / 2 + 1 columns to reconstruct. ErrBlockRootMismatch = errors.New("block root mismatch")
// If total is even, then we need total / 2 columns to reconstruct. ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
return custodyGroupCount >= (total+1)/2 )
// MinimumColumnsCountToReconstruct return the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnsCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
return (params.BeaconConfig().NumberOfColumns + 1) / 2
} }
// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars. // ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
func RecoverCellsAndProofs(dataColumnSideCars []*ethpb.DataColumnSidecar) ([]kzg.CellsAndProofs, error) { // All input sidecars must be committed to the same block.
var wg errgroup.Group // `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
dataColumnSideCarsCount := len(dataColumnSideCars) // Check if there is at least one input sidecar.
if len(inVerifiedRoSidecars) == 0 {
if dataColumnSideCarsCount == 0 { return nil, ErrNotEnoughDataColumnSidecars
return nil, errors.New("no data column sidecars")
} }
// Check if all columns have the same length. // Safely retrieve the first sidecar as a reference.
blobCount := len(dataColumnSideCars[0].Column) referenceSidecar := inVerifiedRoSidecars[0]
for _, sidecar := range dataColumnSideCars {
length := len(sidecar.Column)
if length != blobCount { // Check if all columns have the same length and are commmitted to the same block.
return nil, errors.New("columns do not have the same length") blobCount := len(referenceSidecar.Column)
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range inVerifiedRoSidecars[1:] {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
if sidecar.BlockRoot() != blockRoot {
return nil, ErrBlockRootMismatch
} }
} }
// Deduplicate sidecars.
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(inVerifiedRoSidecars))
for _, inVerifiedRoSidecar := range inVerifiedRoSidecars {
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
}
// Check if there is enough sidecars to reconstruct the missing columns.
sidecarCount := len(sidecarByIndex)
if uint64(sidecarCount) < MinimumColumnsCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}
// Sidecars are verified and are committed to the same block.
// All signed block headers, KZG commitments, and inclusion proofs are the same.
signedBlockHeader := referenceSidecar.SignedBlockHeader
kzgCommitments := referenceSidecar.KzgCommitments
kzgCommitmentsInclusionProof := referenceSidecar.KzgCommitmentsInclusionProof
// Recover cells and compute proofs in parallel. // Recover cells and compute proofs in parallel.
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount) var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
for blobIndex := 0; blobIndex < blobCount; blobIndex++ { for blobIndex := range uint64(blobCount) {
bIndex := blobIndex
wg.Go(func() error { wg.Go(func() error {
cellsIndices := make([]uint64, 0, dataColumnSideCarsCount) cellsIndices := make([]uint64, 0, sidecarCount)
cells := make([]kzg.Cell, 0, dataColumnSideCarsCount) cells := make([]kzg.Cell, 0, sidecarCount)
for _, sidecar := range dataColumnSideCars {
// Build the cell indices.
cellsIndices = append(cellsIndices, sidecar.Index)
// Get the cell.
column := sidecar.Column
cell := column[bIndex]
for columnIndex, sidecar := range sidecarByIndex {
cell := sidecar.Column[blobIndex]
cells = append(cells, kzg.Cell(cell)) cells = append(cells, kzg.Cell(cell))
cellsIndices = append(cellsIndices, columnIndex)
} }
// Recover the cells and proofs for the corresponding blob // Recover the cells and proofs for the corresponding blob
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells) cellsAndProofsForBlob, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
if err != nil { if err != nil {
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex) return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex)
} }
recoveredCellsAndProofs[bIndex] = cellsAndProofs // It is safe for multiple goroutines to concurrently write to the same slice,
// as long as they are writing to different indices, which is the case here.
cellsAndProofs[blobIndex] = cellsAndProofsForBlob
return nil return nil
}) })
} }
if err := wg.Wait(); err != nil { if err := wg.Wait(); err != nil {
return nil, err return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
} }
return recoveredCellsAndProofs, nil outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars from items")
}
// Input sidecars are verified, and we reconstructed ourselves the missing sidecars.
// As a consequence, reconstructed sidecars are also verified.
outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
for _, sidecar := range outSidecars {
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "new RO data column with root")
}
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
}
return outVerifiedRoSidecars, nil
}
// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
// construct data columns sidecars from data obtained from the execution client via:
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
// cell proofs are "flattened": proof `i*numberOfColumns + j` is the proof for cell `j` of blob `i`.
func ConstructDataColumnSidecars(block interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
	// Check if the cells count is equal to the cell proofs count.
	// Each blob must come with exactly one proof per column (flattened layout).
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	blobCount := uint64(len(blobs))
	cellProofsCount := uint64(len(cellProofs))
	cellsCount := blobCount * numberOfColumns
	if cellsCount != cellProofsCount {
		return nil, ErrBlobsCellsProofsMismatch
	}
	cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
	for i, blob := range blobs {
		var kzgBlob kzg.Blob
		if copy(kzgBlob[:], blob) != len(kzgBlob) {
			return nil, errors.New("wrong blob size - should never happen")
		}
		// Compute the extended cells from the (non-extended) blob.
		cells, err := kzg.ComputeCells(&kzgBlob)
		if err != nil {
			return nil, errors.Wrap(err, "compute cells")
		}
		// Collect this blob's proofs out of the flattened proof list.
		proofs := make([]kzg.Proof, 0, numberOfColumns)
		for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
			var kzgProof kzg.Proof
			if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
				return nil, errors.New("wrong KZG proof size - should never happen")
			}
			proofs = append(proofs, kzgProof)
		}
		cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{Cells: cells, Proofs: proofs})
	}
	dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
	if err != nil {
		// Fixed typo in the wrapped error message ("sidcars" -> "sidecars").
		return nil, errors.Wrap(err, "data column sidecars")
	}
	return dataColumnSidecars, nil
}
// ReconstructBlobs constructs verified read only blob sidecars from verified read only data column sidecars.
// The following constraints must be satisfied:
// - All `verifiedDataColumnSidecars` have to be committed to the same block,
// - `verifiedDataColumnSidecars` must be sorted by index and must not contain duplicates, and
// - `verifiedDataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
//   or enough sidecars to reconstruct the blobs.
func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
	// Return early if no blobs are requested.
	if len(indices) == 0 {
		return nil, nil
	}
	if len(verifiedDataColumnSidecars) == 0 {
		return nil, ErrNotEnoughDataColumnSidecars
	}
	// Check if the sidecars are sorted by index and do not contain duplicates.
	previousColumnIndex := verifiedDataColumnSidecars[0].Index
	for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
		columnIndex := dataColumnSidecar.Index
		if columnIndex <= previousColumnIndex {
			return nil, ErrDataColumnSidecarsNotSortedByIndex
		}
		previousColumnIndex = columnIndex
	}
	// Check if we have enough columns (at least as many as there are cells in a non-extended blob).
	cellsPerBlob := fieldparams.CellsPerBlob
	if len(verifiedDataColumnSidecars) < cellsPerBlob {
		return nil, ErrNotEnoughDataColumnSidecars
	}
	// Check if any requested blob index is too high.
	commitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}
	for _, blobIndex := range indices {
		if blobIndex >= len(commitments) {
			return nil, ErrBlobIndexTooHigh
		}
	}
	// Check if the data column sidecars are aligned with the block.
	dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
	for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
		dataColumnSidecars = append(dataColumnSidecars, verifiedDataColumnSidecar.RODataColumn)
	}
	if err := DataColumnsAlignWithBlock(block, dataColumnSidecars); err != nil {
		return nil, errors.Wrap(err, "data columns align with block")
	}
	// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
	// Since sidecars are sorted and deduplicated, sidecar `cellsPerBlob-1` carrying index `cellsPerBlob-1`
	// implies the first `cellsPerBlob` columns are all present.
	if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
		// Convert verified data column sidecars to verified blob sidecars.
		blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, verifiedDataColumnSidecars, indices)
		if err != nil {
			return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
		}
		return blobSidecars, nil
	}
	// We need to reconstruct the missing columns first.
	reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
	if err != nil {
		return nil, errors.Wrap(err, "reconstruct data column sidecars")
	}
	// Convert verified data column sidecars to verified blob sidecars.
	blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, reconstructedDataColumnSidecars, indices)
	if err != nil {
		return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
	}
	return blobSidecars, nil
}
// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
referenceSidecar := dataColumnSidecars[0]
kzgCommitments := referenceSidecar.KzgCommitments
signedBlockHeader := referenceSidecar.SignedBlockHeader
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
for _, blobIndex := range indices {
var blob kzg.Blob
// Compute the content of the blob.
for columnIndex := range fieldparams.CellsPerBlob {
dataColumnSidecar := dataColumnSidecars[columnIndex]
cell := dataColumnSidecar.Column[blobIndex]
if copy(blob[kzg.BytesPerCell*columnIndex:], cell) != kzg.BytesPerCell {
return nil, errors.New("wrong cell size - should never happen")
}
}
// Extract the KZG commitment.
var kzgCommitment kzg.Commitment
if copy(kzgCommitment[:], kzgCommitments[blobIndex]) != len(kzgCommitment) {
return nil, errors.New("wrong KZG commitment size - should never happen")
}
// Compute the blob KZG proof.
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, kzgCommitment)
if err != nil {
return nil, errors.Wrap(err, "compute blob KZG proof")
}
// Build the inclusion proof for the blob.
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob[:]) != len(kzgBlob) {
return nil, errors.New("wrong blob size - should never happen")
}
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitment(roBlock.Block().Body(), blobIndex)
if err != nil {
return nil, errors.Wrap(err, "merkle proof KZG commitment")
}
// Build the blob sidecar.
blobSidecar := &ethpb.BlobSidecar{
Index: uint64(blobIndex),
Blob: blob[:],
KzgCommitment: kzgCommitment[:],
KzgProof: blobKzgProof[:],
SignedBlockHeader: signedBlockHeader,
CommitmentInclusionProof: commitmentInclusionProof,
}
roBlob, err := blocks.NewROBlob(blobSidecar)
if err != nil {
return nil, errors.Wrap(err, "new RO blob")
}
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
}
return verifiedROBlobs, nil
} }

View File

@@ -1,43 +1,41 @@
package peerdas_test package peerdas_test
import ( import (
"encoding/binary"
"testing" "testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params" "github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require" "github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
) )
func TestCanSelfReconstruct(t *testing.T) { func TestMinimumColumnsCountToReconstruct(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
totalNumberOfCustodyGroups uint64 numberOfColumns uint64
custodyNumberOfGroups uint64 expected uint64
expected bool
}{ }{
{ {
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=31", name: "numberOfColumns=128",
totalNumberOfCustodyGroups: 64, numberOfColumns: 128,
custodyNumberOfGroups: 31, expected: 64,
expected: false,
}, },
{ {
name: "totalNumberOfCustodyGroups=64, custodyNumberOfGroups=32", name: "numberOfColumns=129",
totalNumberOfCustodyGroups: 64, numberOfColumns: 129,
custodyNumberOfGroups: 32, expected: 65,
expected: true,
}, },
{ {
name: "totalNumberOfCustodyGroups=65, custodyNumberOfGroups=32", name: "numberOfColumns=130",
totalNumberOfCustodyGroups: 65, numberOfColumns: 130,
custodyNumberOfGroups: 32, expected: 65,
expected: false,
},
{
name: "totalNumberOfCustodyGroups=63, custodyNumberOfGroups=33",
totalNumberOfCustodyGroups: 65,
custodyNumberOfGroups: 33,
expected: true,
}, },
} }
@@ -46,12 +44,278 @@ func TestCanSelfReconstruct(t *testing.T) {
// Set the total number of columns. // Set the total number of columns.
params.SetupTestConfigCleanup(t) params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy() cfg := params.BeaconConfig().Copy()
cfg.NumberOfCustodyGroups = tc.totalNumberOfCustodyGroups cfg.NumberOfColumns = tc.numberOfColumns
params.OverrideBeaconConfig(cfg) params.OverrideBeaconConfig(cfg)
// Check if reconstuction is possible. // Compute the minimum number of columns needed to reconstruct.
actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfGroups) actual := peerdas.MinimumColumnsCountToReconstruct()
require.Equal(t, tc.expected, actual) require.Equal(t, tc.expected, actual)
}) })
} }
} }
// TestReconstructDataColumnSidecars exercises error paths (empty input, ragged columns,
// mixed roots, too few columns) and the nominal reconstruction round-trip.
func TestReconstructDataColumnSidecars(t *testing.T) {
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)
	t.Run("empty input", func(t *testing.T) {
		_, err := peerdas.ReconstructDataColumnSidecars(nil)
		require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
	})
	t.Run("columns lengths differ", func(t *testing.T) {
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
		// Arbitrarily alter the column with index 3
		verifiedRoSidecars[3].Column = verifiedRoSidecars[3].Column[1:]
		_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars)
		require.ErrorIs(t, err, peerdas.ErrColumnLengthsDiffer)
	})
	t.Run("roots differ", func(t *testing.T) {
		// Two blocks with different parent roots produce sidecars with different block roots.
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
		_, _, verifiedRoSidecarsAlter := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
		// Arbitrarily alter the column with index 3
		verifiedRoSidecars[3] = verifiedRoSidecarsAlter[3]
		_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockRootMismatch)
	})
	// NOTE(review): the block/commitments setup below does not appear to be used by the
	// subtests that follow — confirm whether it is leftover scaffolding.
	const blobCount = 6
	signedBeaconBlockPb := util.NewBeaconBlockFulu()
	block := signedBeaconBlockPb.Block
	commitments := make([][]byte, 0, blobCount)
	for i := range uint64(blobCount) {
		var commitment [fieldparams.KzgCommitmentSize]byte
		binary.BigEndian.PutUint64(commitment[:], i)
		commitments = append(commitments, commitment[:])
	}
	block.Body.BlobKzgCommitments = commitments
	t.Run("not enough columns to enable reconstruction", func(t *testing.T) {
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
		minimum := peerdas.MinimumColumnsCountToReconstruct()
		// One column short of the minimum must fail.
		_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars[:minimum-1])
		require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
	})
	t.Run("nominal", func(t *testing.T) {
		// Build a full set of verified data column sidecars.
		_, _, inputVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
		// Arbitrarily keep only the even sidecars.
		filteredVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(inputVerifiedRoSidecars)/2)
		for i := 0; i < len(inputVerifiedRoSidecars); i += 2 {
			filteredVerifiedRoSidecars = append(filteredVerifiedRoSidecars, inputVerifiedRoSidecars[i])
		}
		// Reconstruct the data column sidecars.
		reconstructedVerifiedRoSidecars, err := peerdas.ReconstructDataColumnSidecars(filteredVerifiedRoSidecars)
		require.NoError(t, err)
		// Verify that the reconstructed sidecars are equal to the original ones.
		require.DeepSSZEqual(t, inputVerifiedRoSidecars, reconstructedVerifiedRoSidecars)
	})
}
// TestConstructDataColumnSidecars checks that sidecars built from (non-extended) blobs and
// flattened cell proofs match the sidecars generated directly for the block.
func TestConstructDataColumnSidecars(t *testing.T) {
	const (
		blobCount    = 3
		cellsPerBlob = fieldparams.CellsPerBlob
	)
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)
	roBlock, _, baseVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
	// Extract blobs and (flattened) cell proofs from the sidecars.
	blobs := make([][]byte, 0, blobCount)
	// Capacity fixed: the flattened proof list holds one proof per column per blob.
	cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
	for blobIndex := range blobCount {
		// Rebuild the (non-extended) blob from the first `cellsPerBlob` cells.
		// Capacity fixed: each cell contributes `BytesPerCell` bytes.
		blob := make([]byte, 0, cellsPerBlob*kzg.BytesPerCell)
		for columnIndex := range cellsPerBlob {
			cell := baseVerifiedRoSidecars[columnIndex].Column[blobIndex]
			blob = append(blob, cell...)
		}
		blobs = append(blobs, blob)
		// Flatten the proofs in blob-major order, as `BlobsBundleV2` does.
		for columnIndex := range numberOfColumns {
			cellProof := baseVerifiedRoSidecars[columnIndex].KzgProofs[blobIndex]
			cellProofs = append(cellProofs, cellProof)
		}
	}
	actual, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
	require.NoError(t, err)
	// Extract the base verified ro sidecars into sidecars.
	expected := make([]*ethpb.DataColumnSidecar, 0, len(baseVerifiedRoSidecars))
	for _, verifiedRoSidecar := range baseVerifiedRoSidecars {
		expected = append(expected, verifiedRoSidecar.DataColumnSidecar)
	}
	require.DeepSSZEqual(t, expected, actual)
}
// TestReconstructBlobs exercises the error paths of ReconstructBlobs (no indices, empty
// input, unsorted sidecars, too few columns, out-of-range blob index, mixed roots) and the
// nominal path both with a full column set and with only half of the columns.
func TestReconstructBlobs(t *testing.T) {
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)
	var emptyBlock blocks.ROBlock
	t.Run("no index", func(t *testing.T) {
		actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
		require.NoError(t, err)
		require.IsNil(t, actual)
	})
	t.Run("empty input", func(t *testing.T) {
		_, err := peerdas.ReconstructBlobs(emptyBlock, nil, []int{0})
		require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
	})
	t.Run("not sorted", func(t *testing.T) {
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
		// Arbitrarily change the order of the sidecars.
		verifiedRoSidecars[3], verifiedRoSidecars[2] = verifiedRoSidecars[2], verifiedRoSidecars[3]
		_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
		require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
	})
	t.Run("not enough columns", func(t *testing.T) {
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
		// One column short of `CellsPerBlob` must fail.
		inputSidecars := verifiedRoSidecars[:fieldparams.CellsPerBlob-1]
		_, err := peerdas.ReconstructBlobs(emptyBlock, inputSidecars, []int{0})
		require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
	})
	t.Run("index too high", func(t *testing.T) {
		const blobCount = 3
		roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
		// Blob indices are 0-based, so `blobCount` itself is out of range.
		_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{1, blobCount})
		require.ErrorIs(t, err, peerdas.ErrBlobIndexTooHigh)
	})
	t.Run("not committed to the same block", func(t *testing.T) {
		// Sidecars and block come from blocks with different parent roots.
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
		roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
		_, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
		require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
	})
	t.Run("nominal", func(t *testing.T) {
		const blobCount = 3
		numberOfColumns := params.BeaconConfig().NumberOfColumns
		roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
		// Compute cells and proofs from blob sidecars.
		var wg errgroup.Group
		blobs := make([][]byte, blobCount)
		cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
		for i := range blobCount {
			blob := roBlobSidecars[i].Blob
			blobs[i] = blob
			wg.Go(func() error {
				var kzgBlob kzg.Blob
				count := copy(kzgBlob[:], blob)
				require.Equal(t, len(kzgBlob), count)
				cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
				if err != nil {
					return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
				}
				// It is safe for multiple goroutines to concurrently write to the same slice,
				// as long as they are writing to different indices, which is the case here.
				cellsAndProofs[i] = cp
				return nil
			})
		}
		err := wg.Wait()
		require.NoError(t, err)
		// Flatten proofs.
		cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
		for _, cp := range cellsAndProofs {
			for _, proof := range cp.Proofs {
				cellProofs = append(cellProofs, proof[:])
			}
		}
		// Construct data column sidecars.
		// It is OK to use the public function `ConstructDataColumnSidecars`, as long as
		// `TestConstructDataColumnSidecars` tests pass.
		dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
		require.NoError(t, err)
		// Convert to verified data column sidecars.
		verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
		for _, dataColumnSidecar := range dataColumnSidecars {
			roSidecar, err := blocks.NewRODataColumn(dataColumnSidecar)
			require.NoError(t, err)
			verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
			verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
		}
		indices := []int{2, 0}
		t.Run("no reconstruction needed", func(t *testing.T) {
			// Reconstruct blobs.
			reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, indices)
			require.NoError(t, err)
			// Compare blobs.
			for i, blobIndex := range indices {
				expected := roBlobSidecars[blobIndex]
				actual := reconstructedVerifiedRoBlobSidecars[i].ROBlob
				require.DeepSSZEqual(t, expected, actual)
			}
		})
		t.Run("reconstruction needed", func(t *testing.T) {
			// Arbitrarily keep only the even sidecars.
			filteredSidecars := make([]blocks.VerifiedRODataColumn, 0, len(verifiedRoSidecars)/2)
			for i := 0; i < len(verifiedRoSidecars); i += 2 {
				filteredSidecars = append(filteredSidecars, verifiedRoSidecars[i])
			}
			// Reconstruct blobs.
			reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(roBlock, filteredSidecars, indices)
			require.NoError(t, err)
			// Compare blobs.
			for i, blobIndex := range indices {
				expected := roBlobSidecars[blobIndex]
				actual := reconstructedVerifiedRoBlobSidecars[i].ROBlob
				require.DeepSSZEqual(t, expected, actual)
			}
		})
	})
}

View File

@@ -1,54 +0,0 @@
package peerdas
import (
"fmt"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/pkg/errors"
)
// ConstructDataColumnSidecars constructs data column sidecars from a block, blobs and their cell proofs.
// This is a convenience method as blob and cell proofs are common inputs.
func ConstructDataColumnSidecars(block interfaces.SignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
	// Check if the block is at least a Fulu block.
	// Pre-Fulu blocks carry no data columns: return nothing, without error.
	if block.Version() < version.Fulu {
		return nil, nil
	}
	// Each blob must come with exactly one cell proof per column (flattened layout).
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	if uint64(len(blobs))*numberOfColumns != uint64(len(cellProofs)) {
		return nil, fmt.Errorf("number of blobs and cell proofs do not match: %d * %d != %d", len(blobs), numberOfColumns, len(cellProofs))
	}
	cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
	for i, blob := range blobs {
		var b kzg.Blob
		copy(b[:], blob)
		// Compute the extended cells from the (non-extended) blob.
		cells, err := kzg.ComputeCells(&b)
		if err != nil {
			return nil, err
		}
		// Collect this blob's proofs from the flattened proof list.
		var proofs []kzg.Proof
		for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
			proofs = append(proofs, kzg.Proof(cellProofs[idx]))
		}
		cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
			Cells:  cells,
			Proofs: proofs,
		})
	}
	dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidcars")
	}
	return dataColumnSidecars, nil
}

View File

@@ -0,0 +1,65 @@
package peerdas
import (
"bytes"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/pkg/errors"
)
var (
	// ErrBlockColumnSizeMismatch is returned when a sidecar's column, commitment or proof
	// list length does not match the block's commitment count.
	ErrBlockColumnSizeMismatch = errors.New("size mismatch between data column and block")
	// ErrTooManyCommitments is returned when a block carries more blob KZG commitments
	// than allowed at its slot.
	ErrTooManyCommitments = errors.New("too many commitments")
	// ErrRootMismatch is returned when a data column sidecar does not reference the block's root.
	ErrRootMismatch = errors.New("root mismatch between data column and block")
	// ErrCommitmentMismatch is returned when a sidecar's KZG commitments differ from the block's.
	ErrCommitmentMismatch = errors.New("commitment mismatch between data column and block")
)
// DataColumnsAlignWithBlock checks if the data columns align with the block.
func DataColumnsAlignWithBlock(block blocks.ROBlock, dataColumns []blocks.RODataColumn) error {
	// Data columns only exist from Fulu onwards.
	if block.Version() < version.Fulu {
		return nil
	}
	// The block may not commit to more blobs than allowed at its slot.
	commitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return errors.Wrap(err, "blob KZG commitments")
	}
	commitmentCount := len(commitments)
	slot := block.Block().Slot()
	if commitmentCount > params.BeaconConfig().MaxBlobsPerBlock(slot) {
		return ErrTooManyCommitments
	}
	root := block.Root()
	for _, column := range dataColumns {
		// The sidecar must reference the same block.
		if column.BlockRoot() != root {
			return ErrRootMismatch
		}
		// Every per-blob list in the sidecar must have one entry per block commitment.
		sizesMatch := len(column.Column) == commitmentCount &&
			len(column.KzgCommitments) == commitmentCount &&
			len(column.KzgProofs) == commitmentCount
		if !sizesMatch {
			return ErrBlockColumnSizeMismatch
		}
		// The sidecar must carry exactly the block's commitments.
		for i, commitment := range commitments {
			if !bytes.Equal(commitment, column.KzgCommitments[i]) {
				return ErrCommitmentMismatch
			}
		}
	}
	return nil
}

View File

@@ -0,0 +1,77 @@
package peerdas_test
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
// TestDataColumnsAlignWithBlock covers every error path of DataColumnsAlignWithBlock
// plus the pre-Fulu no-op and the nominal case.
func TestDataColumnsAlignWithBlock(t *testing.T) {
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)
	t.Run("pre fulu", func(t *testing.T) {
		// Pre-Fulu blocks are not checked at all.
		block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
		err := peerdas.DataColumnsAlignWithBlock(block, nil)
		require.NoError(t, err)
	})
	// Fixed typo in the subtest name ("commitmnets" -> "commitments").
	t.Run("too many commitments", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		config := params.BeaconConfig()
		// A zero-valued blob schedule entry makes any commitment count too large.
		config.BlobSchedule = []params.BlobScheduleEntry{{}}
		params.OverrideBeaconConfig(config)
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
		err := peerdas.DataColumnsAlignWithBlock(block, nil)
		require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
	})
	t.Run("root mismatch", func(t *testing.T) {
		// Sidecars from one block checked against a different block.
		_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrRootMismatch)
	})
	t.Run("column size mismatch", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		sidecars[0].Column = [][]byte{}
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
	})
	t.Run("KZG commitments size mismatch", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		sidecars[0].KzgCommitments = [][]byte{}
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
	})
	t.Run("KZG proofs mismatch", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		sidecars[0].KzgProofs = [][]byte{}
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
	})
	t.Run("commitment mismatch", func(t *testing.T) {
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
		err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
		require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
	})
	t.Run("nominal", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.NoError(t, err)
	})
}

View File

@@ -403,6 +403,11 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
return nil, nil return nil, nil
} }
// Exit early if no data column sidecars for this root are stored.
if !summary.HasAtLeastOneIndex(indices) {
return nil, nil
}
// Compute the file path. // Compute the file path.
filePath := filePath(root, summary.epoch) filePath := filePath(root, summary.epoch)

View File

@@ -33,6 +33,17 @@ func (s DataColumnStorageSummary) HasIndex(index uint64) bool {
return s.mask[index] return s.mask[index]
} }
// HasAtLeastOneIndex returns true if at least one of the DataColumnSidecars at the given indices is available in the filesystem.
func (s DataColumnStorageSummary) HasAtLeastOneIndex(indices []uint64) bool {
	// Stop at the first index that is present in the mask.
	for _, columnIndex := range indices {
		if s.HasIndex(columnIndex) {
			return true
		}
	}
	return false
}
// Count returns the number of available data columns. // Count returns the number of available data columns.
func (s DataColumnStorageSummary) Count() uint64 { func (s DataColumnStorageSummary) Count() uint64 {
count := uint64(0) count := uint64(0)
@@ -61,6 +72,18 @@ func (s DataColumnStorageSummary) AllAvailable(indices map[uint64]bool) bool {
return true return true
} }
// Stored returns a map of all stored data columns.
func (s DataColumnStorageSummary) Stored() map[uint64]bool {
	stored := make(map[uint64]bool, fieldparams.NumberOfColumns)
	// Collect every column index whose mask bit is set.
	for columnIndex := uint64(0); columnIndex < uint64(len(s.mask)); columnIndex++ {
		if !s.mask[columnIndex] {
			continue
		}
		stored[columnIndex] = true
	}
	return stored
}
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root. // DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root. // The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
type DataColumnStorageSummarizer interface { type DataColumnStorageSummarizer interface {

View File

@@ -22,6 +22,16 @@ func TestHasIndex(t *testing.T) {
require.Equal(t, true, hasIndex) require.Equal(t, true, hasIndex)
} }
// TestHasAtLeastOneIndex checks detection of at least one stored index, and the negative case.
func TestHasAtLeastOneIndex(t *testing.T) {
	// Only index 1 is marked as stored in the mask.
	summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true})
	require.Equal(t, true, summary.HasAtLeastOneIndex([]uint64{3, 1, 2}))
	require.Equal(t, false, summary.HasAtLeastOneIndex([]uint64{3, 4, 2}))
}
func TestCount(t *testing.T) { func TestCount(t *testing.T) {
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true}) summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})
@@ -51,6 +61,18 @@ func TestAllAvailableDataColumns(t *testing.T) {
require.Equal(t, true, allAvailable) require.Equal(t, true, allAvailable)
} }
// TestStored checks that Stored returns exactly the set of masked-in indices.
func TestStored(t *testing.T) {
	// Only indices 1 and 2 are marked as stored in the mask.
	summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, true, false})
	actual := summary.Stored()
	want := map[uint64]bool{1: true, 2: true}
	require.Equal(t, len(want), len(actual))
	for index, stored := range want {
		require.Equal(t, stored, actual[index])
	}
}
func TestSummary(t *testing.T) { func TestSummary(t *testing.T) {
root := [fieldparams.RootLength]byte{} root := [fieldparams.RootLength]byte{}

View File

@@ -352,7 +352,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
} }
func TestGetDataColumnSidecars(t *testing.T) { func TestGetDataColumnSidecars(t *testing.T) {
t.Run("not found", func(t *testing.T) { t.Run("root not found", func(t *testing.T) {
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t) _, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14}) verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
@@ -360,6 +360,26 @@ func TestGetDataColumnSidecars(t *testing.T) {
require.Equal(t, 0, len(verifiedRODataColumnSidecars)) require.Equal(t, 0, len(verifiedRODataColumnSidecars))
}) })
t.Run("indices not found", func(t *testing.T) {
_, savedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
t,
util.DataColumnsParamsByRoot{
{1}: {
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
},
},
)
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(savedVerifiedRoDataColumnSidecars)
require.NoError(t, err)
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{3, 1, 2})
require.NoError(t, err)
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
})
t.Run("nominal", func(t *testing.T) { t.Run("nominal", func(t *testing.T) {
_, expectedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars( _, expectedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
t, t,

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"testing" "testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params" "github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives" "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots" "github.com/OffchainLabs/prysm/v6/time/slots"
@@ -117,3 +118,14 @@ func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...Dat
return bs return bs
} }
// NewMockDataColumnStorageSummarizer builds a summarizer pre-populated with the given
// root -> indices mapping, for use in tests.
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
	cache := newDataColumnStorageSummaryCache()
	for root, indices := range set {
		ident := DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}
		if err := cache.set(ident); err != nil {
			t.Fatal(err)
		}
	}
	return cache
}

View File

@@ -0,0 +1,3 @@
### Changed
- PeerDAS: Refactor the reconstruction pipeline.
- PeerDAS: `DataColumnStorage.Get` - Exit early if no columns are available. - PeerDAS: `DataColumnStorage.Get` - Exit early if no columns are available.

View File

@@ -321,4 +321,9 @@ var (
Usage: "Specifies the retention period for the pruner service in terms of epochs. " + Usage: "Specifies the retention period for the pruner service in terms of epochs. " +
"If this value is less than MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored.", "If this value is less than MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored.",
} }
// SubscribeAllDataSubnets enables subscription to all data subnets.
SubscribeAllDataSubnets = &cli.BoolFlag{
Name: "subscribe-all-data-subnets",
Usage: "Enable subscription to all data subnets.",
}
) )

View File

@@ -9,6 +9,7 @@ import (
// beacon node. // beacon node.
type GlobalFlags struct { type GlobalFlags struct {
SubscribeToAllSubnets bool SubscribeToAllSubnets bool
SubscribeAllDataSubnets bool
MinimumSyncPeers int MinimumSyncPeers int
MinimumPeersPerSubnet int MinimumPeersPerSubnet int
MaxConcurrentDials int MaxConcurrentDials int
@@ -37,10 +38,17 @@ func Init(c *GlobalFlags) {
// based on the provided cli context. // based on the provided cli context.
func ConfigureGlobalFlags(ctx *cli.Context) { func ConfigureGlobalFlags(ctx *cli.Context) {
cfg := &GlobalFlags{} cfg := &GlobalFlags{}
if ctx.Bool(SubscribeToAllSubnets.Name) { if ctx.Bool(SubscribeToAllSubnets.Name) {
log.Warn("Subscribing to All Attestation Subnets") log.Warn("Subscribing to All Attestation Subnets")
cfg.SubscribeToAllSubnets = true cfg.SubscribeToAllSubnets = true
} }
if ctx.Bool(SubscribeAllDataSubnets.Name) {
log.Warning("Subscribing to all data subnets")
cfg.SubscribeAllDataSubnets = true
}
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name) cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name) cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name) cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)

View File

@@ -47,4 +47,5 @@ const (
// Introduced in Fulu network upgrade. // Introduced in Fulu network upgrade.
NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network. NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
CellsPerBlob = 64 // CellsPerBlob refers to the number of cells in a (non-extended) blob.
) )

View File

@@ -47,4 +47,5 @@ const (
// Introduced in Fulu network upgrade. // Introduced in Fulu network upgrade.
NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network. NumberOfColumns = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
CellsPerBlob = 64 // CellsPerBlob refers to the number of cells in a (non-extended) blob.
) )

View File

@@ -1,10 +1,10 @@
package util package util
import ( import (
"encoding/binary"
"math/big" "math/big"
"testing" "testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing" "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams" fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params" "github.com/OffchainLabs/prysm/v6/config/params"
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil" "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks" "github.com/OffchainLabs/prysm/v6/network/forks"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1" enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require" "github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/time/slots" "github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@@ -96,22 +97,38 @@ func GenerateTestElectraBlockWithSidecar(t *testing.T, parent [32]byte, slot pri
block.Block.Slot = g.slot block.Block.Slot = g.slot
block.Block.ParentRoot = g.parent[:] block.Block.ParentRoot = g.parent[:]
block.Block.ProposerIndex = g.proposer block.Block.ProposerIndex = g.proposer
commitments := make([][48]byte, g.nblobs)
block.Block.Body.BlobKzgCommitments = make([][]byte, g.nblobs) blobs := make([][]byte, 0, g.nblobs)
for i := range commitments { commitments := make([][]byte, 0, g.nblobs)
binary.LittleEndian.PutUint16(commitments[i][0:16], uint16(i)) kzgProofs := make([][]byte, 0, g.nblobs)
binary.LittleEndian.PutUint16(commitments[i][16:32], uint16(g.slot))
block.Block.Body.BlobKzgCommitments[i] = commitments[i][:] for i := range g.nblobs {
blob := kzg.Blob{uint8(i)}
commitment, err := kzg.BlobToKZGCommitment(&blob)
require.NoError(t, err)
kzgProof, err := kzg.ComputeBlobKZGProof(&blob, commitment)
require.NoError(t, err)
blobs = append(blobs, blob[:])
commitments = append(commitments, commitment[:])
kzgProofs = append(kzgProofs, kzgProof[:])
} }
block.Block.Body.BlobKzgCommitments = commitments
body, err := blocks.NewBeaconBlockBody(block.Block.Body) body, err := blocks.NewBeaconBlockBody(block.Block.Body)
require.NoError(t, err) require.NoError(t, err)
inclusion := make([][][]byte, len(commitments))
for i := range commitments { inclusionProofs := make([][][]byte, 0, g.nblobs)
proof, err := blocks.MerkleProofKZGCommitment(body, i) for i := range g.nblobs {
inclusionProof, err := blocks.MerkleProofKZGCommitment(body, i)
require.NoError(t, err) require.NoError(t, err)
inclusion[i] = proof
inclusionProofs = append(inclusionProofs, inclusionProof)
} }
if g.sign { if g.sign {
epoch := slots.ToEpoch(block.Block.Slot) epoch := slots.ToEpoch(block.Block.Slot)
schedule := forks.NewOrderedSchedule(params.BeaconConfig()) schedule := forks.NewOrderedSchedule(params.BeaconConfig())
@@ -128,17 +145,30 @@ func GenerateTestElectraBlockWithSidecar(t *testing.T, parent [32]byte, slot pri
root, err := block.Block.HashTreeRoot() root, err := block.Block.HashTreeRoot()
require.NoError(t, err) require.NoError(t, err)
sidecars := make([]blocks.ROBlob, len(commitments))
sbb, err := blocks.NewSignedBeaconBlock(block) sbb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err) require.NoError(t, err)
sh, err := sbb.Header() sh, err := sbb.Header()
require.NoError(t, err) require.NoError(t, err)
for i, c := range block.Block.Body.BlobKzgCommitments {
sidecars[i] = GenerateTestDenebBlobSidecar(t, root, sh, i, c, inclusion[i]) roSidecars := make([]blocks.ROBlob, 0, g.nblobs)
for i := range g.nblobs {
pbSidecar := ethpb.BlobSidecar{
Index: uint64(i),
Blob: blobs[i],
KzgCommitment: commitments[i],
KzgProof: kzgProofs[i],
SignedBlockHeader: sh,
CommitmentInclusionProof: inclusionProofs[i],
}
roSidecar, err := blocks.NewROBlobWithRoot(&pbSidecar, root)
require.NoError(t, err)
roSidecars = append(roSidecars, roSidecar)
} }
rob, err := blocks.NewROBlock(sbb) rob, err := blocks.NewROBlock(sbb)
require.NoError(t, err) require.NoError(t, err)
return rob, sidecars return rob, roSidecars
} }