Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: peerDAS...peerdas-de (29 commits)
| SHA1 |
|---|
| 5960a6bbab |
| 725dda51c3 |
| 6a44fef25e |
| 654f927fc2 |
| 08bb7c72fb |
| 9f91957ec3 |
| 9a80f043de |
| 8192e653e7 |
| 613c9c5de9 |
| 731a6fec5c |
| ffc794182f |
| c7bd2d11b2 |
| 02a6c034a7 |
| 584ef60ab0 |
| bc017ed493 |
| 22b3327a97 |
| aa84fbc597 |
| c6e8c1a380 |
| 7b9eef4a45 |
| 5dfef04a6b |
| 6d16ed4a89 |
| 799ed429ca |
| 30ee3a7225 |
| 07f73857f9 |
| 5c76dc6c3a |
| fa81334983 |
| 7fd169da5f |
| ee4e09effe |
| 91738a24fa |
@@ -144,6 +144,7 @@ go_test(
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/light-client:go_default_library",
+        "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
         "//beacon-chain/das:go_default_library",
@@ -165,6 +166,7 @@ go_test(
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
         "//beacon-chain/state/stategen:go_default_library",
+        "//beacon-chain/verification:go_default_library",
         "//config/features:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
@@ -444,6 +444,9 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
             // Blobs may not exist for some blocks, leading to deletion failures. Log such errors at debug level.
             log.WithError(err).Debug("Could not remove blob from blob storage")
         }
+        if err := s.dataColumnStorage.Remove(root); err != nil {
+            log.WithError(err).Debug("Could not remove data columns from data column storage")
+        }
     }
     return nil
 }
@@ -99,7 +99,7 @@ func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsA
     ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
     if err != nil {
-        return CellsAndProofs{}, err
+        return CellsAndProofs{}, errors.Wrap(err, "recover cells and KZG proofs")
     }

     return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
@@ -1,6 +1,8 @@
 package blockchain

 import (
+    "time"
+
     "github.com/prysmaticlabs/prysm/v5/async/event"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
     statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -208,6 +210,14 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
     }
 }

+// WithDataColumnStorage sets the data column storage backend for the blockchain service.
+func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
+    return func(s *Service) error {
+        s.dataColumnStorage = b
+        return nil
+    }
+}
+
 func WithSyncChecker(checker Checker) Option {
     return func(s *Service) error {
         s.cfg.SyncChecker = checker
@@ -221,3 +231,10 @@ func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
         return nil
     }
 }
+
+func WithGenesisTime(genesisTime time.Time) Option {
+    return func(s *Service) error {
+        s.genesisTime = genesisTime
+        return nil
+    }
+}
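The two new options above extend the functional-option pattern this file already uses. A minimal, self-contained sketch of that pattern, with illustrative names rather than Prysm's actual types:

```go
package main

import (
	"fmt"
	"time"
)

// service stands in for the blockchain Service; option mirrors the
// file's Option type: a function that mutates the service or fails.
type service struct {
	genesisTime time.Time
}

type option func(*service) error

// withGenesisTime mirrors WithGenesisTime above.
func withGenesisTime(t time.Time) option {
	return func(s *service) error {
		s.genesisTime = t
		return nil
	}
}

// newService applies each option in order, aborting on the first error.
func newService(opts ...option) (*service, error) {
	s := &service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newService(withGenesisTime(time.Unix(1606824023, 0)))
	if err != nil {
		panic(err)
	}
	fmt.Println(s.genesisTime.UTC())
}
```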
@@ -17,6 +17,7 @@ import (
     forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
     "github.com/prysmaticlabs/prysm/v5/config/features"
+    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
     "github.com/prysmaticlabs/prysm/v5/config/params"
     consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -237,9 +238,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
         }
     }

-    nodeID := s.cfg.P2P.NodeID()
-    if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), b); err != nil {
-        return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
+    if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
+        return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
     }
     args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
         JustifiedCheckpoint: jCheckpoints[i],
@@ -526,7 +526,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
     return missing, nil
 }

-func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
+func missingDataColumns(bs *filesystem.DataColumnStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
     if len(expected) == 0 {
         return nil, nil
     }
@@ -541,7 +541,7 @@ func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[
     // Check all expected data columns against the summary.
     missing := make(map[uint64]bool)
     for column := range expected {
-        if !summary.HasDataColumnIndex(column) {
+        if !summary.HasIndex(column) {
             missing[column] = true
         }
     }
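The refactored missingDataColumns reduces to a set difference between the expected custody columns and what the on-disk summary reports. A minimal sketch of that computation, with an illustrative predicate standing in for summary.HasIndex:

```go
package main

import "fmt"

// missingColumns returns the expected column indices that the has
// predicate (a stand-in for the storage summary) does not report as stored.
func missingColumns(expected map[uint64]bool, has func(uint64) bool) map[uint64]bool {
	missing := make(map[uint64]bool)
	for column := range expected {
		if !has(column) {
			missing[column] = true
		}
	}
	return missing
}

func main() {
	expected := map[uint64]bool{1: true, 17: true, 42: true}
	stored := map[uint64]bool{17: true}
	fmt.Println(missingColumns(expected, func(i uint64) bool { return stored[i] }))
	// map[1:true 42:true]
}
```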
@@ -549,112 +549,32 @@ func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[
     return missing, nil
 }

-// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
+// isDataAvailable blocks until all sidecars committed to in the block are available,
 // or an error or context cancellation occurs. A nil result means that the data availability check is successful.
 // The function will first check the database to see if all sidecars have been persisted. If any
-// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
+// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
-func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
-    if coreTime.PeerDASIsActive(signed.Block().Slot()) {
-        return s.areDataColumnsAvailable(ctx, root, signed)
-    }
-
-    if signed.Version() < version.Deneb {
-        return nil
-    }
-
-    block := signed.Block()
-    if block == nil {
-        return errors.New("invalid nil beacon block")
-    }
-    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
-    if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
-        return nil
-    }
-
-    body := block.Body()
-    if body == nil {
-        return errors.New("invalid nil beacon block body")
-    }
-    kzgCommitments, err := body.BlobKzgCommitments()
-    if err != nil {
-        return errors.Wrap(err, "could not get KZG commitments")
-    }
-    // expected is the number of kzg commitments observed in the block.
-    expected := len(kzgCommitments)
-    if expected == 0 {
-        return nil
-    }
-    // get a map of BlobSidecar indices that are not currently available.
-    missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
-    if err != nil {
-        return errors.Wrap(err, "missing indices")
-    }
-    // If there are no missing indices, all BlobSidecars are available.
-    if len(missing) == 0 {
-        return nil
-    }
-
-    // The gossip handler for blobs writes the index of each verified blob referencing the given
-    // root to the channel returned by blobNotifiers.forRoot.
-    nc := s.blobNotifiers.forRoot(root, block.Slot())
-
-    // Log for DA checks that cross over into the next slot; helpful for debugging.
-    nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
-    // Avoid logging if DA check is called after next slot start.
-    if nextSlot.After(time.Now()) {
-        nst := time.AfterFunc(time.Until(nextSlot), func() {
-            if len(missing) == 0 {
-                return
-            }
-
-            log.WithFields(logrus.Fields{
-                "slot":          signed.Block().Slot(),
-                "root":          fmt.Sprintf("%#x", root),
-                "blobsExpected": expected,
-                "blobsWaiting":  len(missing),
-            }).Error("Still waiting for blobs DA check at slot end.")
-        })
-        defer nst.Stop()
-    }
-    for {
-        select {
-        case idx := <-nc:
-            // Delete each index seen in the notification channel.
-            delete(missing, idx)
-            // Read from the channel until there are no more missing sidecars.
-            if len(missing) > 0 {
-                continue
-            }
-            // Once all sidecars have been observed, clean up the notification channel.
-            s.blobNotifiers.delete(root)
-            return nil
-        case <-ctx.Done():
-            return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
-        }
-    }
-}
-
-// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
-func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
-    output := make([]uint64, 0, len(input))
-    for idx := range input {
-        output = append(output, idx)
-    }
-    slices.Sort[[]uint64](output)
-    return output
-}
-
-func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
-    if signedBlock.Version() < version.Fulu {
-        return nil
-    }
-
+func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
+    block := signedBlock.Block()
+    if block == nil {
+        return errors.New("invalid nil beacon block")
+    }
+
+    blockVersion := block.Version()
+    if blockVersion >= version.Fulu {
+        return s.areDataColumnsAvailable(ctx, root, block)
+    }
+
+    if blockVersion >= version.Deneb {
+        return s.areBlobsAvailable(ctx, root, block)
+    }
+
+    return nil
+}
+
+// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
+// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
+func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
+    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+    blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
+    blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
@@ -693,37 +613,25 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
         return errors.Wrap(err, "peer info")
     }

     // Exit early if the node is not expected to custody any data columns.
     if len(peerInfo.CustodyColumns) == 0 {
         return nil
     }

-    // Subscribe to newsly data columns stored in the database.
-    rootIndexChan := make(chan filesystem.RootIndexPair)
-    subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
+    // Subscribe to data columns newly stored in the database.
+    identsChan := make(chan filesystem.DataColumnsIdent)
+    subscription := s.dataColumnStorage.DataColumnFeed.Subscribe(identsChan)
     defer subscription.Unsubscribe()

     // Get the count of data columns we already have in the store.
-    summary := s.blobStorage.Summary(root)
-    numberOfColumns := params.BeaconConfig().NumberOfColumns
-
-    retrievedDataColumnsCount := uint64(0)
-    for column := range numberOfColumns {
-        if summary.HasDataColumnIndex(column) {
-            retrievedDataColumnsCount++
-        }
-    }
+    summary := s.dataColumnStorage.Summary(root)
+    storedDataColumnsCount := summary.Count()

     // As soon as we have more than half of the data columns, we can reconstruct the missing ones.
     // We don't need to wait for the rest of the data columns to declare the block as available.
-    if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
+    if peerdas.CanSelfReconstruct(storedDataColumnsCount) {
        return nil
     }

     // Get a map of data column indices that are not currently available.
-    missingMap, err := missingDataColumns(s.blobStorage, root, peerInfo.CustodyColumns)
+    missingMap, err := missingDataColumns(s.dataColumnStorage, root, peerInfo.CustodyColumns)
     if err != nil {
-        return err
+        return errors.Wrap(err, "missing data columns")
     }

     // If there are no missing indices, all data column sidecars are available.
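The early return via peerdas.CanSelfReconstruct encodes the erasure-coding property behind PeerDAS: with a 1/2 coding rate, any half of the columns determines the rest, so the availability check can succeed before every custody column arrives. A sketch of the assumed threshold (the tests later in this diff save exactly NumberOfColumns/2 columns and expect success, so "half" is taken to suffice):

```go
package main

import "fmt"

const numberOfColumns = 128 // illustrative; Prysm reads this from the beacon config

// canSelfReconstruct reports whether enough columns are stored to recover
// the rest locally (assumed semantics of peerdas.CanSelfReconstruct).
func canSelfReconstruct(storedColumnCount uint64) bool {
	return storedColumnCount >= numberOfColumns/2
}

func main() {
	fmt.Println(canSelfReconstruct(63)) // false: one column short
	fmt.Println(canSelfReconstruct(64)) // true: half is enough
}
```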
@@ -733,10 +641,11 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
     }

     // Log for DA checks that cross over into the next slot; helpful for debugging.
-    nextSlot := slots.BeginsAt(signedBlock.Block().Slot()+1, s.genesisTime)
+    nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)

     // Avoid logging if DA check is called after next slot start.
     if nextSlot.After(time.Now()) {
-        nst := time.AfterFunc(time.Until(nextSlot), func() {
+        timer := time.AfterFunc(time.Until(nextSlot), func() {
             missingMapCount := uint64(len(missingMap))

             if missingMapCount == 0 {
@@ -760,42 +669,44 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
             }

             log.WithFields(logrus.Fields{
-                "slot":            signedBlock.Block().Slot(),
+                "slot":            block.Slot(),
                 "root":            fmt.Sprintf("%#x", root),
                 "columnsExpected": expected,
                 "columnsWaiting":  missing,
-            }).Error("Some data columns are still unavailable at slot end")
+            }).Warning("Data columns still missing at slot end")
         })

-        defer nst.Stop()
+        defer timer.Stop()
     }

     for {
         select {
-        case rootIndex := <-rootIndexChan:
-            if rootIndex.Root != root {
+        case idents := <-identsChan:
+            if idents.Root != root {
                 // This is not the root we are looking for.
                 continue
             }

-            // This is a data column we are expecting.
-            if _, ok := missingMap[rootIndex.Index]; ok {
-                retrievedDataColumnsCount++
+            for _, index := range idents.Indices {
+                // This is a data column we are expecting.
+                if _, ok := missingMap[index]; ok {
+                    storedDataColumnsCount++
+                }
+
+                // As soon as we have more than half of the data columns, we can reconstruct the missing ones.
+                // We don't need to wait for the rest of the data columns to declare the block as available.
+                if peerdas.CanSelfReconstruct(storedDataColumnsCount) {
+                    return nil
+                }
+
+                // Remove the index from the missing map.
+                delete(missingMap, index)
+
+                // Return if there are no more missing data columns.
+                if len(missingMap) == 0 {
+                    return nil
+                }
             }
-
-            // As soon as we have more than half of the data columns, we can reconstruct the missing ones.
-            // We don't need to wait for the rest of the data columns to declare the block as available.
-            if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
-                return nil
-            }
-
-            // Remove the index from the missing map.
-            delete(missingMap, rootIndex.Index)
-
-            // Exit if there is no more missing data columns.
-            if len(missingMap) == 0 {
-                return nil
-            }
         case <-ctx.Done():
             var missingIndices interface{} = "all"
             numberOfColumns := params.BeaconConfig().NumberOfColumns
@@ -810,6 +721,89 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
         }
     }
 }

+// areBlobsAvailable blocks until all BlobSidecars committed to in the block are available,
+// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
+func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
+    blockSlot := block.Slot()
+
+    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+    if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
+        return nil
+    }
+
+    body := block.Body()
+    if body == nil {
+        return errors.New("invalid nil beacon block body")
+    }
+    kzgCommitments, err := body.BlobKzgCommitments()
+    if err != nil {
+        return errors.Wrap(err, "could not get KZG commitments")
+    }
+    // expected is the number of kzg commitments observed in the block.
+    expected := len(kzgCommitments)
+    if expected == 0 {
+        return nil
+    }
+    // get a map of BlobSidecar indices that are not currently available.
+    missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
+    if err != nil {
+        return errors.Wrap(err, "missing indices")
+    }
+    // If there are no missing indices, all BlobSidecars are available.
+    if len(missing) == 0 {
+        return nil
+    }
+
+    // The gossip handler for blobs writes the index of each verified blob referencing the given
+    // root to the channel returned by blobNotifiers.forRoot.
+    nc := s.blobNotifiers.forRoot(root, block.Slot())
+
+    // Log for DA checks that cross over into the next slot; helpful for debugging.
+    nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
+    // Avoid logging if DA check is called after next slot start.
+    if nextSlot.After(time.Now()) {
+        nst := time.AfterFunc(time.Until(nextSlot), func() {
+            if len(missing) == 0 {
+                return
+            }
+
+            log.WithFields(logrus.Fields{
+                "slot":          blockSlot,
+                "root":          fmt.Sprintf("%#x", root),
+                "blobsExpected": expected,
+                "blobsWaiting":  len(missing),
+            }).Error("Still waiting for blobs DA check at slot end.")
+        })
+        defer nst.Stop()
+    }
+    for {
+        select {
+        case idx := <-nc:
+            // Delete each index seen in the notification channel.
+            delete(missing, idx)
+            // Read from the channel until there are no more missing sidecars.
+            if len(missing) > 0 {
+                continue
+            }
+            // Once all sidecars have been observed, clean up the notification channel.
+            s.blobNotifiers.delete(root)
+            return nil
+        case <-ctx.Done():
+            return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
+        }
+    }
+}
+
+// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
+func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
+    output := make([]uint64, 0, len(input))
+    for idx := range input {
+        output = append(output, idx)
+    }
+    slices.Sort[[]uint64](output)
+    return output
+}
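uint64MapToSortedSlice exists because Go map iteration order is randomized; sorting makes the missing-column indices in the timeout logs deterministic. For example:

```go
// uint64MapToSortedSlice(map[uint64]bool{42: true, 3: true, 17: true})
// always yields []uint64{3, 17, 42}, regardless of map iteration order.
```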
 // lateBlockTasks is called 4 seconds into the slot and performs tasks
 // related to late blocks. It emits a MissedSlot state feed event.
 // It calls FCU and sets the right attributes if we are proposing next slot
@@ -2,6 +2,7 @@ package blockchain

 import (
     "context"
+    "crypto/rand"
     "fmt"
     "math/big"
     "strconv"
@@ -15,6 +16,7 @@ import (
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
     lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -26,6 +28,7 @@ import (
     doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
     forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     "github.com/prysmaticlabs/prysm/v5/config/features"
     fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
     "github.com/prysmaticlabs/prysm/v5/config/params"
@@ -3148,3 +3151,151 @@ func TestSaveLightClientBootstrap(t *testing.T) {

     reset()
 }
+type testIsAvailableParams struct {
+    options                 []Option
+    blobKzgCommitmentsCount uint64
+    columnsToSave           []uint64
+}
+
+func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
+    ctx, cancel := context.WithCancel(context.Background())
+    dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
+
+    options := append(params.options, WithDataColumnStorage(dataColumnStorage))
+    service, _ := minimalTestService(t, options...)
+
+    genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)
+
+    err := service.saveGenesisData(ctx, genesisState)
+    require.NoError(t, err)
+
+    conf := util.DefaultBlockGenConfig()
+    conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
+
+    signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
+    require.NoError(t, err)
+
+    root, err := signedBeaconBlock.Block.HashTreeRoot()
+    require.NoError(t, err)
+
+    dataColumnsParams := make([]verification.DataColumnParams, 0, len(params.columnsToSave))
+    for _, i := range params.columnsToSave {
+        dataColumnParam := verification.DataColumnParams{ColumnIndex: i}
+        dataColumnsParams = append(dataColumnsParams, dataColumnParam)
+    }
+
+    dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
+    _, verifiedRODataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
+
+    err = dataColumnStorage.Save(verifiedRODataColumns)
+    require.NoError(t, err)
+
+    signed, err := consensusblocks.NewSignedBeaconBlock(signedBeaconBlock)
+    require.NoError(t, err)
+
+    return ctx, cancel, service, root, signed
+}
+
+func TestIsDataAvailable(t *testing.T) {
+    t.Run("Fulu - out of retention window", func(t *testing.T) {
+        params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
+        ctx, _, service, root, signed := testIsAvailableSetup(t, params)
+
+        err := service.isDataAvailable(ctx, root, signed)
+        require.NoError(t, err)
+    })
+
+    t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
+        ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
+
+        err := service.isDataAvailable(ctx, root, signed)
+        require.NoError(t, err)
+    })
+
+    t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
+        halfNumberOfColumns := params.BeaconConfig().NumberOfColumns / 2
+        indices := make([]uint64, 0, halfNumberOfColumns)
+        for i := range halfNumberOfColumns {
+            indices = append(indices, i)
+        }
+
+        params := testIsAvailableParams{
+            options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
+            columnsToSave:           indices,
+            blobKzgCommitmentsCount: 3,
+        }
+
+        ctx, _, service, root, signed := testIsAvailableSetup(t, params)
+
+        err := service.isDataAvailable(ctx, root, signed)
+        require.NoError(t, err)
+    })
+
+    t.Run("Fulu - no missing data columns", func(t *testing.T) {
+        params := testIsAvailableParams{
+            options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
+            columnsToSave:           []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
+            blobKzgCommitmentsCount: 3,
+        }
+
+        ctx, _, service, root, signed := testIsAvailableSetup(t, params)
+
+        err := service.isDataAvailable(ctx, root, signed)
+        require.NoError(t, err)
+    })
+
+    t.Run("Fulu - some initially missing data columns (no reconstruction)", func(t *testing.T) {
+        var custodyInfo peerdas.CustodyInfo
+        custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(128)
+        custodyInfo.ToAdvertiseGroupCount.Set(128)
+
+        testParams := testIsAvailableParams{
+            options:                 []Option{WithCustodyInfo(&custodyInfo)},
+            blobKzgCommitmentsCount: 3,
+        }
+
+        ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
+
+        // If needed, generate a random root that is different from the block root.
+        var randomRoot [fieldparams.RootLength]byte
+        for randomRoot == root {
+            randomRootSlice := make([]byte, fieldparams.RootLength)
+            _, err := rand.Read(randomRootSlice)
+            require.NoError(t, err)
+            copy(randomRoot[:], randomRootSlice)
+        }
+
+        // TODO: Achieve the same result without using time.AfterFunc.
+        time.AfterFunc(10*time.Millisecond, func() {
+            halfNumberOfColumns := params.BeaconConfig().NumberOfColumns / 2
+            indices := make([]uint64, 0, halfNumberOfColumns)
+            for i := range halfNumberOfColumns {
+                indices = append(indices, i)
+            }
+
+            withSomeRequiredColumns := filesystem.DataColumnsIdent{Root: root, Indices: indices}
+            service.dataColumnStorage.DataColumnFeed.Send(withSomeRequiredColumns)
+        })
+
+        err := service.isDataAvailable(ctx, root, signed)
+        require.NoError(t, err)
+    })
+
+    t.Run("Fulu - some columns are definitively missing", func(t *testing.T) {
+        params := testIsAvailableParams{
+            options:                 []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
+            blobKzgCommitmentsCount: 3,
+        }
+
+        ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)
+
+        // TODO: Achieve the same result without using time.AfterFunc.
+        time.AfterFunc(10*time.Millisecond, func() {
+            cancel()
+        })
+
+        err := service.isDataAvailable(ctx, root, signed)
+        require.NotNil(t, err)
+    })
+}
@@ -56,6 +56,7 @@ type BlobReceiver interface {
 // data columns
 type DataColumnReceiver interface {
     ReceiveDataColumn(blocks.VerifiedRODataColumn) error
+    ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
 }

 // SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -235,26 +236,32 @@ func (s *Service) handleDA(
     block interfaces.SignedBeaconBlock,
     blockRoot [32]byte,
     avs das.AvailabilityStore,
-) (time.Duration, error) {
-    daStartTime := time.Now()
-    if avs != nil {
-        rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
-        if err != nil {
-            return 0, err
+) (elapsed time.Duration, err error) {
+    defer func(start time.Time) {
+        elapsed := time.Since(start)
+
+        if err == nil {
+            dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
         }
+    }(time.Now())
+
+    if avs == nil {
+        if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
+            return
+        }

-        nodeID := s.cfg.P2P.NodeID()
-        if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), rob); err != nil {
-            return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
-        }
-    } else {
-        if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
-            return 0, errors.Wrap(err, "is data available")
-        }
+        return
     }
-    daWaitedTime := time.Since(daStartTime)
-    dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
-    return daWaitedTime, nil
+
+    var rob blocks.ROBlock
+    rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
+    if err != nil {
+        return
+    }
+
+    err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
+
+    return
 }
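The handleDA rewrite replaces explicit stopwatch bookkeeping with a named return and a single deferred closure, so every exit path is covered. A self-contained sketch of the pattern, with the metric call replaced by a print (note that a deferred closure can read and write named results):

```go
package main

import (
	"fmt"
	"time"
)

// timed runs work and reports how long it took; the deferred closure sets
// the named return and only "observes" the metric when work succeeded.
func timed(work func() error) (elapsed time.Duration, err error) {
	defer func(start time.Time) {
		elapsed = time.Since(start)
		if err == nil {
			// Stand-in for dataAvailWaitedTime.Observe(...).
			fmt.Printf("observed %d ms\n", elapsed.Milliseconds())
		}
	}(time.Now())

	err = work()
	return
}

func main() {
	_, _ = timed(func() error {
		time.Sleep(5 * time.Millisecond)
		return nil
	})
}
```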
 func (s *Service) reportPostBlockProcessing(
@@ -5,9 +5,20 @@ import (
     "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 )

-func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
-    if err := s.blobStorage.SaveDataColumn(ds); err != nil {
-        return errors.Wrap(err, "save data column")
+// ReceiveDataColumns receives a batch of data columns.
+func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
+    if err := s.dataColumnStorage.Save(dataColumnSidecars); err != nil {
+        return errors.Wrap(err, "save data column sidecars")
     }

     return nil
 }
+
+// ReceiveDataColumn receives a single data column.
+// (It is only a wrapper around ReceiveDataColumns.)
+func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
+    if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
+        return errors.Wrap(err, "save data column sidecars")
+    }
+
+    return nil
@@ -67,6 +67,7 @@ type Service struct {
     blobNotifiers     *blobNotifierMap
     blockBeingSynced  *currentlySyncingBlock
     blobStorage       *filesystem.BlobStorage
+    dataColumnStorage *filesystem.DataColumnStorage
 }

 // config options for the service.
@@ -132,6 +132,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
     WithDepositCache(dc),
     WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
     WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
+    WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
     WithSyncChecker(mock.MockChecker{}),
     WithExecutionEngineCaller(&mockExecution.EngineClient{}),
     WithP2PBroadcaster(&mockAccesser{}),
@@ -709,6 +709,11 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
     return nil
 }

+// ReceiveDataColumns implements the same method in chain service
+func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
+    return nil
+}
+
 // TargetRootForEpoch mocks the same method in the chain service
 func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
     return c.TargetRoot, nil
@@ -230,14 +230,17 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
     if body.Version() < version.Deneb {
         return nil
     }

     kzgs, err := body.BlobKzgCommitments()
     if err != nil {
         return err
     }
-    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
-    if len(kzgs) > maxBlobsPerBlock {
-        return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
+
+    commitmentCount, maxBlobsPerBlock := len(kzgs), params.BeaconConfig().MaxBlobsPerBlock(slot)
+    if commitmentCount > maxBlobsPerBlock {
+        return fmt.Errorf("too many kzg commitments in block: actual count %d - max allowed %d", commitmentCount, maxBlobsPerBlock)
     }

     return nil
 }
@@ -926,8 +926,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
     require.NoError(t, err)
     require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))

-    b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
+    maxCommitmentsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())
+
+    b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, maxCommitmentsPerBlock+1)}}
     rb, err = consensusblocks.NewBeaconBlock(b)
     require.NoError(t, err)
-    require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
+    require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: actual count %d - max allowed %d", maxCommitmentsPerBlock+1, maxCommitmentsPerBlock), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
 }
@@ -69,6 +69,10 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
     if err != nil {
         return nil, err
     }
+    depostiRequestsStartIndex, err := beaconState.DepositRequestsStartIndex()
+    if err != nil {
+        return nil, err
+    }
     depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
     if err != nil {
         return nil, err
@@ -154,7 +158,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
     NextWithdrawalValidatorIndex: vi,
     HistoricalSummaries:          summaries,

-    DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
+    DepositRequestsStartIndex: depostiRequestsStartIndex,
     DepositBalanceToConsume:   depositBalanceToConsume,
     ExitBalanceToConsume:      exitBalanceToConsume,
     EarliestExitEpoch:         earliestExitEpoch,
@@ -9,6 +9,7 @@ go_library(
         "p2p_interface.go",
         "peer_sampling.go",
         "reconstruction.go",
+        "util.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
     visibility = ["//visibility:public"],
@@ -24,6 +25,7 @@ go_library(
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
+        "//runtime/version:go_default_library",
         "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
         "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
         "@com_github_hashicorp_golang_lru//:go_default_library",
@@ -1,7 +1,6 @@
 package peerdas

 import (
-    "context"
     "encoding/binary"
     "math"
     "slices"
@@ -20,7 +19,6 @@ import (
     "github.com/prysmaticlabs/prysm/v5/crypto/hash"
     "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
     ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
-    "golang.org/x/sync/errgroup"
 )

 var (
@@ -105,59 +103,36 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
     return columns, nil
 }

-// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
-// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#get_data_column_sidecars
-func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
-    startTime := time.Now()
-    blobsCount := len(blobs)
-    if blobsCount == 0 {
+// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
+// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/fulu/das-core.md#get_data_column_sidecars
+func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
+    start := time.Now()
+    if signedBlock == nil || len(cellsAndProofs) == 0 {
         return nil, nil
     }

-    // Get the signed block header.
-    signedBlockHeader, err := signedBlock.Header()
-    if err != nil {
-        return nil, errors.Wrap(err, "signed block header")
-    }
-
     // Get the block body.
     block := signedBlock.Block()
     blockBody := block.Body()

     // Get the blob KZG commitments.
     blobKzgCommitments, err := blockBody.BlobKzgCommitments()
     if err != nil {
         return nil, errors.Wrap(err, "blob KZG commitments")
     }

-    // Compute the KZG commitments inclusion proof.
+    if len(blobKzgCommitments) != len(cellsAndProofs) {
+        return nil, errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
+    }
+
+    signedBlockHeader, err := signedBlock.Header()
+    if err != nil {
+        return nil, errors.Wrap(err, "signed block header")
+    }
+
     kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
     if err != nil {
         return nil, errors.Wrap(err, "merkle proof ZKG commitments")
     }

-    // Compute cells and proofs.
-    cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)
-
-    eg, _ := errgroup.WithContext(context.Background())
-    for i := range blobs {
-        blobIndex := i
-        eg.Go(func() error {
-            blob := &blobs[blobIndex]
-            blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
-            if err != nil {
-                return errors.Wrap(err, "compute cells and KZG proofs")
-            }
-
-            cellsAndProofs[blobIndex] = blobCellsAndProofs
-            return nil
-        })
-    }
-    if err := eg.Wait(); err != nil {
-        return nil, err
-    }
-
     // Get the column sidecars.
+    blobsCount := len(cellsAndProofs)
     sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
     for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
         column := make([]kzg.Cell, 0, blobsCount)
@@ -186,17 +161,18 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs
     }

     sidecar := &ethpb.DataColumnSidecar{
-        ColumnIndex: columnIndex,
-        DataColumn:  columnBytes,
+        Index:  columnIndex,
+        Column: columnBytes,
         KzgCommitments: blobKzgCommitments,
-        KzgProof: kzgProofOfColumnBytes,
+        KzgProofs: kzgProofOfColumnBytes,
         SignedBlockHeader: signedBlockHeader,
         KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
     }

     sidecars = append(sidecars, sidecar)
     }
-    dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
+
+    dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
     return sidecars, nil
 }
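DataColumnSidecars is essentially a transpose: the KZG library produces cells blob-major (one row of cells per blob), while sidecar i needs cell i from every blob (one column across all rows). A minimal sketch of that reshaping with plain byte slices:

```go
package main

import "fmt"

// columnCells gathers cell columnIndex from every blob's row of cells,
// mirroring the inner loop of DataColumnSidecars.
func columnCells(cellsPerBlob [][][]byte, columnIndex int) [][]byte {
	column := make([][]byte, 0, len(cellsPerBlob))
	for _, cells := range cellsPerBlob {
		column = append(column, cells[columnIndex])
	}
	return column
}

func main() {
	// Two blobs, three columns each (toy sizes).
	cells := [][][]byte{
		{{0x00}, {0x01}, {0x02}}, // blob 0
		{{0x10}, {0x11}, {0x12}}, // blob 1
	}
	fmt.Println(columnCells(cells, 1)) // [[1] [17]]: cell 1 of blob 0 and of blob 1
}
```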
@@ -275,10 +251,10 @@ func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSideca
     sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
     for i := range dataColumnsSidecar {
         dataColumnSideCar := dataColumnsSidecar[i]
-        columnIndex := dataColumnSideCar.ColumnIndex
+        index := dataColumnSideCar.Index

-        if columnIndex < uint64(neededColumnCount) {
-            sliceIndexFromColumnIndex[columnIndex] = i
+        if index < uint64(neededColumnCount) {
+            sliceIndexFromColumnIndex[index] = i
         }
     }

@@ -305,12 +281,12 @@ func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSideca
     // It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
     firstDataColumnSidecar := dataColumnsSidecar[0]

-    blobCount := uint64(len(firstDataColumnSidecar.DataColumn))
+    blobCount := uint64(len(firstDataColumnSidecar.Column))

     // Check all columns have the same length.
     for i := range dataColumnsSidecar {
-        if uint64(len(dataColumnsSidecar[i].DataColumn)) != blobCount {
-            return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].DataColumn))
+        if uint64(len(dataColumnsSidecar[i].Column)) != blobCount {
+            return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].Column))
         }
     }

@@ -331,7 +307,7 @@ func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSideca
     }

     dataColumnSideCar := dataColumnsSidecar[sliceIndex]
-    cell := dataColumnSideCar.DataColumn[blobIndex]
+    cell := dataColumnSideCar.Column[blobIndex]

     for i := 0; i < len(cell); i++ {
         blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
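The copy in the last hunk relies on a blob being the concatenation of its cells: cell c of a blob occupies bytes [c*BytesPerCell, (c+1)*BytesPerCell). A worked check of that layout; treat the 2048 constant here as an assumption standing in for kzg.BytesPerCell:

```go
package main

import "fmt"

const bytesPerCell = 2048 // assumed value of kzg.BytesPerCell

// cellRange returns the byte range cell columnIndex occupies inside a blob.
func cellRange(columnIndex uint64) (start, end uint64) {
	return columnIndex * bytesPerCell, (columnIndex + 1) * bytesPerCell
}

func main() {
	fmt.Println(cellRange(0)) // 0 2048
	fmt.Println(cellRange(3)) // 6144 8192
}
```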
@@ -17,7 +17,7 @@ import (

 func TestDataColumnSidecars(t *testing.T) {
     var expected []*ethpb.DataColumnSidecar = nil
-    actual, err := peerdas.DataColumnSidecars(nil, []kzg.Blob{})
+    actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
     require.NoError(t, err)

     require.DeepSSZEqual(t, expected, actual)
@@ -29,7 +29,7 @@ func TestBlobs(t *testing.T) {
     almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
     for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
         almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
-            ColumnIndex: uint64(i),
+            Index: uint64(i),
         })
     }

@@ -139,7 +139,8 @@ func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
     }

     // Compute data columns sidecars from the signed beacon block and from the blobs.
-    dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
+    cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
+    dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
     require.NoError(t, err)

     // Compute the blobs from the data columns sidecar.
@@ -34,7 +34,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, er
     // Compute the total count.
     count := 0
     for _, sidecar := range sidecars {
-        count += len(sidecar.DataColumn)
+        count += len(sidecar.Column)
     }

     commitments := make([]kzg.Bytes48, 0, count)
@@ -44,25 +44,25 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, er

     for _, sidecar := range sidecars {
         // Check if the column index is not too large.
-        if sidecar.ColumnIndex >= numberOfColumns {
+        if sidecar.Index >= numberOfColumns {
             return false, errIndexTooLarge
         }

         // Check if the KZG commitments size and data column size match.
-        if len(sidecar.DataColumn) != len(sidecar.KzgCommitments) {
+        if len(sidecar.Column) != len(sidecar.KzgCommitments) {
             return false, errMismatchLength
         }

         // Check if the KZG proofs size and data column size match.
-        if len(sidecar.DataColumn) != len(sidecar.KzgProof) {
+        if len(sidecar.Column) != len(sidecar.KzgProofs) {
             return false, errMismatchLength
         }

-        for i := range sidecar.DataColumn {
+        for i := range sidecar.Column {
             commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
-            indices = append(indices, sidecar.ColumnIndex)
-            cells = append(cells, kzg.Cell(sidecar.DataColumn[i]))
-            proofs = append(proofs, kzg.Bytes48(sidecar.KzgProof[i]))
+            indices = append(indices, sidecar.Index)
+            cells = append(cells, kzg.Cell(sidecar.Column[i]))
+            proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i]))
         }
     }
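The loop above flattens every sidecar into four parallel slices so a single batched KZG verification can be issued: entry i pairs one cell with its column index, blob commitment, and proof. A sketch of that flattening invariant, with toy types standing in for the kzg package's:

```go
package main

import "fmt"

// sidecar is a toy stand-in for a data column sidecar after the rename:
// Column, KzgCommitments and KzgProofs must all have the same length.
type sidecar struct {
	Index          uint64
	Column         [][]byte
	KzgCommitments [][]byte
	KzgProofs      [][]byte
}

// flatten builds the parallel slices a batch verifier consumes.
func flatten(sidecars []sidecar) (indices []uint64, cells, commitments, proofs [][]byte) {
	for _, sc := range sidecars {
		for i := range sc.Column {
			indices = append(indices, sc.Index) // every cell in a column shares its index
			cells = append(cells, sc.Column[i])
			commitments = append(commitments, sc.KzgCommitments[i])
			proofs = append(proofs, sc.KzgProofs[i])
		}
	}
	return
}

func main() {
	idx, cells, comms, proofs := flatten([]sidecar{{
		Index:          7,
		Column:         [][]byte{{1}, {2}},
		KzgCommitments: [][]byte{{3}, {4}},
		KzgProofs:      [][]byte{{5}, {6}},
	}})
	fmt.Println(len(idx), len(cells), len(comms), len(proofs)) // 2 2 2 2
}
```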
@@ -31,7 +31,8 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
     dbBlock.Block.Body.BlobKzgCommitments = comms
     sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
     require.NoError(t, err)
-    sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
+    cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
+    sCars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
     require.NoError(t, err)

     for i, sidecar := range sCars {
@@ -32,9 +32,9 @@ func RecoverCellsAndProofs(
     }

     // Check if all columns have the same length.
-    blobCount := len(dataColumnSideCars[0].DataColumn)
+    blobCount := len(dataColumnSideCars[0].Column)
     for _, sidecar := range dataColumnSideCars {
-        length := len(sidecar.DataColumn)
+        length := len(sidecar.Column)

         if length != blobCount {
             return nil, errors.New("columns do not have the same length")
@@ -52,10 +52,10 @@ func RecoverCellsAndProofs(

     for _, sidecar := range dataColumnSideCars {
         // Build the cell indices.
-        cellsIndices = append(cellsIndices, sidecar.ColumnIndex)
+        cellsIndices = append(cellsIndices, sidecar.Index)

         // Get the cell.
-        column := sidecar.DataColumn
+        column := sidecar.Column
         cell := column[bIndex]

         cells = append(cells, kzg.Cell(cell))
@@ -132,10 +132,10 @@ func DataColumnSidecarsForReconstruct(
     }

     sidecar := &ethpb.DataColumnSidecar{
-        ColumnIndex: uint64(columnIndex),
-        DataColumn:  columnBytes,
+        Index:  uint64(columnIndex),
+        Column: columnBytes,
         KzgCommitments: blobKzgCommitments,
-        KzgProof: kzgProofOfColumnBytes,
+        KzgProofs: kzgProofOfColumnBytes,
         SignedBlockHeader: signedBlockHeader,
         KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
     }

@@ -95,7 +95,8 @@ func TestReconstructionRoundTrip(t *testing.T) {
     require.NoError(t, err)

     // Convert data columns sidecars from signed block and blobs.
-    dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
+    cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
+    dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
     require.NoError(t, err)

     // Create verified RO data columns.
@@ -114,8 +115,8 @@ func TestReconstructionRoundTrip(t *testing.T) {

     var noDataColumns []*ethpb.DataColumnSidecar
     dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
-        {DataColumn: [][]byte{{}, {}}},
-        {DataColumn: [][]byte{{}}},
+        {Column: [][]byte{{}, {}}},
+        {Column: [][]byte{{}}},
     }
     notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
     originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
beacon-chain/core/peerdas/util.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+package peerdas
+
+import (
+    "fmt"
+
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
+    "github.com/prysmaticlabs/prysm/v5/config/params"
+    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
+    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v5/runtime/version"
+)
+
+// ConstructDataColumnSidecars constructs data column sidecars from a block, blobs and their cell proofs.
+// This is a convenience method as blob and cell proofs are common inputs.
+func ConstructDataColumnSidecars(block interfaces.SignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
+    // Check if the block is at least a Fulu block.
+    if block.Version() < version.Fulu {
+        return nil, nil
+    }
+
+    cellsAndProofs, err := constructCellsAndProofs(blobs, cellProofs)
+    if err != nil {
+        return nil, err
+    }
+
+    return DataColumnSidecars(block, cellsAndProofs)
+}
+
+func constructCellsAndProofs(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
+    numColumns := int(params.BeaconConfig().NumberOfColumns)
+    if len(blobs)*numColumns != len(cellProofs) {
+        return nil, fmt.Errorf("number of blobs and cell proofs do not match: %d * %d != %d", len(blobs), numColumns, len(cellProofs))
+    }
+
+    cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobs))
+
+    for i, blob := range blobs {
+        var b kzg.Blob
+        copy(b[:], blob)
+        cells, err := kzg.ComputeCells(&b)
+        if err != nil {
+            return nil, err
+        }
+
+        var proofs []kzg.Proof
+        for idx := i * numColumns; idx < (i+1)*numColumns; idx++ {
+            proofs = append(proofs, kzg.Proof(cellProofs[idx]))
+        }
+
+        cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{
+            Cells:  cells,
+            Proofs: proofs,
+        })
+    }
+
+    return cellsAndProofs, nil
+}
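constructCellsAndProofs assumes the cell proofs arrive as one flat, blob-major list, which is what the len(blobs)*numColumns check enforces: blob i owns proofs [i*numColumns, (i+1)*numColumns). A worked example of that indexing:

```go
package main

import "fmt"

// proofRange returns the half-open slice bounds of blob blobIndex's proofs
// inside the flat cellProofs list used above.
func proofRange(blobIndex, numColumns int) (start, end int) {
	return blobIndex * numColumns, (blobIndex + 1) * numColumns
}

func main() {
	fmt.Println(proofRange(0, 128)) // 0 128
	fmt.Println(proofRange(2, 128)) // 256 384
}
```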
@@ -6,6 +6,7 @@ go_library(
         "availability.go",
         "availability_columns.go",
         "cache.go",
+        "data_column_cache.go",
         "iface.go",
         "mock.go",
     ],
@@ -34,6 +35,7 @@ go_test(
         "availability_columns_test.go",
         "availability_test.go",
         "cache_test.go",
+        "data_column_cache_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
@@ -41,6 +43,7 @@ go_test(
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
+        "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/primitives:go_default_library",
@@ -4,7 +4,6 @@ import (
     "context"
     "fmt"

-    "github.com/ethereum/go-ethereum/p2p/enode"
     errors "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
@@ -54,10 +53,16 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
 // Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
 // for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
 // by the given block are guaranteed to be persisted for the remainder of the retention period.
-func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
-    if len(sc) == 0 {
+func (s *LazilyPersistentStore) Persist(current primitives.Slot, scg ...blocks.ROSidecar) error {
+    if len(scg) == 0 {
         return nil
     }
+
+    sc, err := blocks.BlobSidecarsFromSidecars(scg)
+    if err != nil {
+        return errors.Wrap(err, "blob sidecars from sidecars")
+    }
+
     if len(sc) > 1 {
         first := sc[0].BlockRoot()
         for i := 1; i < len(sc); i++ {
@@ -81,7 +86,7 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO

 // IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
 // BlobSidecars already in the db are assumed to have been previously verified against the block.
-func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
+func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
     blockCommitments, err := commitmentsToCheck(b, current)
     if err != nil {
         return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
@@ -15,53 +15,63 @@ import (
 )

 // LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
-// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
+// This implementation will hold any data columns passed to Persist until IsDataAvailable is called for their
 // block, at which time they will undergo full verification and be saved to the disk.
 type LazilyPersistentStoreColumn struct {
-    store       *filesystem.BlobStorage
-    cache       *cache
+    store       *filesystem.DataColumnStorage
+    nodeID      enode.ID
+    cache       *dataColumnCache
     custodyInfo *peerdas.CustodyInfo
 }

-func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
+func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
     return &LazilyPersistentStoreColumn{
         store:       store,
-        cache:       newCache(),
+        nodeID:      nodeID,
+        cache:       newDataColumnCache(),
         custodyInfo: custodyInfo,
     }
 }

-// Persist do nothing at the moment.
-// TODO: Very Ugly, change interface to allow for columns and blobs
-func (*LazilyPersistentStoreColumn) Persist(_ primitives.Slot, _ ...blocks.ROBlob) error {
-    return nil
-}
-
-// PersistColumns adds columns to the working column cache. columns stored in this cache will be persisted
-// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
+// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
+// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
 // by the given block are guaranteed to be persisted for the remainder of the retention period.
-func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
-    if len(sc) == 0 {
+func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
+    if len(sidecars) == 0 {
         return nil
     }
-    if len(sc) > 1 {
-        first := sc[0].BlockRoot()
-        for i := 1; i < len(sc); i++ {
-            if first != sc[i].BlockRoot() {
+
+    dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
+    if err != nil {
+        return errors.Wrap(err, "data column sidecars from sidecars")
+    }
+
+    // It is safe to retrieve the first sidecar.
+    firstSidecar := dataColumnSidecars[0]
+
+    if len(sidecars) > 1 {
+        firstRoot := firstSidecar.BlockRoot()
+        for _, sidecar := range dataColumnSidecars[1:] {
+            if sidecar.BlockRoot() != firstRoot {
                 return errMixedRoots
             }
         }
     }
-    if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
+
+    firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
+    if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
         return nil
     }
-    key := keyFromColumn(sc[0])
+
+    key := dataColumnCacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
     entry := s.cache.ensure(key)
-    for i := range sc {
-        if err := entry.stashColumns(&sc[i]); err != nil {
-            return err
+
+    for i := range sidecars {
+        if err := entry.stash(&dataColumnSidecars[i]); err != nil {
+            return errors.Wrap(err, "stash DataColumnSidecar")
         }
     }

     return nil
 }
@@ -69,11 +79,10 @@ func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc
 // DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
 func (s *LazilyPersistentStoreColumn) IsDataAvailable(
     ctx context.Context,
-    nodeID enode.ID,
     currentSlot primitives.Slot,
     block blocks.ROBlock,
 ) error {
-    blockCommitments, err := s.fullCommitmentsToCheck(nodeID, block, currentSlot)
+    blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
     if err != nil {
         return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
     }
@@ -83,8 +92,11 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(
         return nil
     }

+    // Get the root of the block.
+    blockRoot := block.Root()
+
     // Build the cache key for the block.
-    key := keyFromBlock(block)
+    key := dataColumnCacheKey{slot: block.Block().Slot(), root: blockRoot}

     // Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
     entry := s.cache.ensure(key)
@@ -92,16 +104,13 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(
     // Delete the cache entry for the block at the end.
     defer s.cache.delete(key)

-    // Get the root of the block.
-    blockRoot := block.Root()
-
     // Set the disk summary for the block in the cache entry.
     entry.setDiskSummary(s.store.Summary(blockRoot))

     // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
     // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
     // ignore their response and decrease their peer score.
-    roDataColumns, err := entry.filterColumns(blockRoot, blockCommitments)
+    roDataColumns, err := entry.filter(blockRoot, blockCommitments)
     if err != nil {
         return errors.Wrap(err, "incomplete DataColumnSidecar batch")
     }
@@ -114,11 +123,9 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(
         verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
     }

-    // Ensure that each column sidecar is written to disk.
-    for _, verifiedRODataColumn := range verifiedRODataColumns {
-        if err := s.store.SaveDataColumn(verifiedRODataColumn); err != nil {
-            return errors.Wrapf(err, "save data columns for index `%d` for block `%#x`", verifiedRODataColumn.ColumnIndex, blockRoot)
-        }
+    // Ensure that column sidecars are written to disk.
+    if err := s.store.Save(verifiedRODataColumns); err != nil {
+        return errors.Wrapf(err, "save data column sidecars")
     }

     // All ColumnSidecars are persisted - data availability check succeeds.
@@ -163,6 +170,7 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
     if err != nil {
         return nil, errors.Wrap(err, "peer info")
     }

+    // Create a safe commitments array for the custody columns.
     commitmentsArray := &safeCommitmentsArray{}
     for column := range peerInfo.CustodyColumns {
@@ -1,11 +1,15 @@
package das

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -15,82 +19,235 @@ import (
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, dataColumnParamsByBlockRoot verification.DataColumnsParamsByRoot) ([]blocks.ROSidecar, []blocks.RODataColumn) {
	roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)

	roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
	for _, roDataColumn := range roDataColumns {
		roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
	}

	return roSidecars, roDataColumns
}

func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
	sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
	require.NoError(t, err)

	rb, err := blocks.NewROBlock(sb)
	require.NoError(t, err)

	return rb
}

var commitments = [][]byte{
	bytesutil.PadTo([]byte("a"), 48),
	bytesutil.PadTo([]byte("b"), 48),
	bytesutil.PadTo([]byte("c"), 48),
	bytesutil.PadTo([]byte("d"), 48),
}

func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
|
||||
{1}: {{ColumnIndex: 1}},
|
||||
{2}: {{ColumnIndex: 2}},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
|
||||
{1}: {{ColumnIndex: 1}},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
|
||||
{}: {{ColumnIndex: 1}, {ColumnIndex: 5}},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
|
||||
key := dataColumnCacheKey{slot: 0, root: [32]byte{}}
|
||||
entry := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), entry.diskSummary.Count())
|
||||
|
||||
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
|
||||
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[int]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
t.Run("No commitments", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Some sidecars are not available", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("All sidecars are available", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
indices := [...]uint64{1, 17, 87, 102}
|
||||
dataColumnsParams := make([]verification.DataColumnParams, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := verification.DataColumnParams{
|
||||
ColumnIndex: index,
|
||||
KzgCommitments: commitments,
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
dataColumnsParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
|
||||
_, verifiedRoDataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParamsByBlockRoot)
|
||||
|
||||
key := dataColumnCacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
defer lazilyPersistentStoreColumns.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary(root)
|
||||
require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "pre fulu",
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
bb := util.NewBeaconBlockElectra()
|
||||
sb, err := blocks.NewSignedBeaconBlock(bb)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "commitments within da",
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockFulu()
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
d.Block.Slot = 100
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
commits: commits,
|
||||
slot: 100,
|
||||
},
|
||||
{
|
||||
name: "commitments outside da",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
d := util.NewBeaconBlockElectra()
|
||||
// block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
d.Block.Body.BlobKzgCommitments = commits
|
||||
sb, err := blocks.NewSignedBeaconBlock(d)
|
||||
require.NoError(t, err)
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = 100
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: 100,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
b := c.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, &peerdas.CustodyInfo{})
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, &peerdas.CustodyInfo{})
|
||||
|
||||
co, err := s.fullCommitmentsToCheck(enode.ID{}, b, c.slot)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
for i := 0; i < len(co); i++ {
|
||||
require.DeepEqual(t, c.commits, co[i])
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, commitments := range commitmentsArray {
|
||||
require.DeepEqual(t, tc.commitments, commitments)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
@@ -117,46 +116,53 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := filesystem.NewEphemeralBlobStorage(t)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: scs}
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[2]))
|
||||
err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All but one persisted, return missing idx
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
err = as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All persisted, return nil
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
|
||||
require.NoError(t, as.IsDataAvailable(ctx, enode.ID{}, 1, blk))
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
}
|
||||
|
||||
func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := filesystem.NewEphemeralBlobStorage(t)
|
||||
|
||||
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
|
||||
scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
require.ErrorIs(t, err, errCommitmentMismatch)
|
||||
}
|
||||
|
||||
func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
|
||||
// stashes as expected
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
@@ -164,10 +170,13 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
|
||||
|
||||
// ignores index out of bound
|
||||
scs[0].Index = 6
|
||||
require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds)
|
||||
blobSidecars[0].Index = 6
|
||||
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
|
||||
|
||||
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
|
||||
more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
|
||||
|
||||
_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
// ignores sidecars before the retention period
|
||||
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -2,22 +2,15 @@ package das

import (
	"bytes"
	"reflect"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

var (
	ErrDuplicateSidecar   = errors.New("duplicate sidecar stashed in AvailabilityStore")
	errIndexOutOfBounds   = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
	errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
	errMissingSidecar     = errors.New("no sidecar in cache for block commitment")
)
var errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")

// cacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
@@ -40,10 +33,6 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

func keyFromColumn(sc blocks.RODataColumn) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
func keyFromBlock(b blocks.ROBlock) cacheKey {
	return cacheKey{slot: b.Block().Slot(), root: b.Root()}
@@ -67,7 +56,6 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
	scs         []*blocks.ROBlob
	colScs      [fieldparams.NumberOfColumns]*blocks.RODataColumn
	diskSummary filesystem.BlobStorageSummary
}

@@ -93,17 +81,6 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
	return nil
}

func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
	if sc.ColumnIndex >= fieldparams.NumberOfColumns {
		return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
	}
	if e.colScs[sc.ColumnIndex] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
	}
	e.colScs[sc.ColumnIndex] = sc
	return nil
}

// filter evicts sidecars that are not committed to by the block and returns custom
// errors if the cache is missing any of the commitments, or if the commitments in
// the cache do not match those found in the block. If err is nil, then all expected
@@ -139,66 +116,3 @@ func (e *cacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot) ([

	return scs, nil
}

func (e *cacheEntry) filterColumns(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
	nonEmptyIndices := commitmentsArray.nonEmptyIndices()
	if e.diskSummary.AllDataColumnsAvailable(nonEmptyIndices) {
		return nil, nil
	}

	commitmentsCount := commitmentsArray.count()
	sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)

	for i := range uint64(fieldparams.NumberOfColumns) {
		// Skip if we already store this data column.
		if e.diskSummary.HasIndex(i) {
			continue
		}

		if commitmentsArray[i] == nil {
			continue
		}

		if e.colScs[i] == nil {
			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
		}

		if !reflect.DeepEqual(commitmentsArray[i], e.colScs[i].KzgCommitments) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, commitmentsArray[i])
		}

		sidecars = append(sidecars, *e.colScs[i])
	}

	return sidecars, nil
}

// safeCommitmentsArray is a fixed size array of commitments.
// This is helpful for avoiding gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

// count returns the number of commitments in the array.
func (s *safeCommitmentsArray) count() int {
	count := 0

	for i := range s {
		if s[i] != nil {
			count++
		}
	}

	return count
}

// nonEmptyIndices returns a map of indices that are non-nil in the array.
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
	columns := make(map[uint64]bool)

	for i := range s {
		if s[i] != nil {
			columns[uint64(i)] = true
		}
	}

	return columns
}

@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
	entry := &cacheEntry{}
	if len(onDisk) > 0 {
		od := map[[32]byte][]int{blk.Root(): onDisk}
		sumz := filesystem.NewMockBlobStorageSummarizer(t, od, 0)
		sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
		sum := sumz.Summary(blk.Root())
		entry.setDiskSummary(sum)
	}

beacon-chain/das/data_column_cache.go (Normal file, 135 lines)
@@ -0,0 +1,135 @@
package das

import (
	"reflect"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

var (
	ErrDuplicateSidecar   = errors.New("duplicate sidecar stashed in AvailabilityStore")
	errColumnIndexTooHigh = errors.New("column index too high")
	errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
	errMissingSidecar     = errors.New("no sidecar in cache for block commitment")
)

// dataColumnCacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
type dataColumnCacheKey struct {
	slot primitives.Slot
	root [32]byte
}

type dataColumnCache struct {
	entries map[dataColumnCacheKey]*dataColumnCacheEntry
}

func newDataColumnCache() *dataColumnCache {
	return &dataColumnCache{entries: make(map[dataColumnCacheKey]*dataColumnCacheEntry)}
}

// ensure returns the entry for the given key, creating it if it isn't already present.
func (c *dataColumnCache) ensure(key dataColumnCacheKey) *dataColumnCacheEntry {
	entry, ok := c.entries[key]
	if !ok {
		entry = &dataColumnCacheEntry{}
		c.entries[key] = entry
	}

	return entry
}

// delete removes the cache entry from the cache.
func (c *dataColumnCache) delete(key dataColumnCacheKey) {
	delete(c.entries, key)
}

// dataColumnCacheEntry holds a fixed-length cache of DataColumnSidecars.
type dataColumnCacheEntry struct {
	scs         [fieldparams.NumberOfColumns]*blocks.RODataColumn
	diskSummary filesystem.DataColumnStorageSummary
}

func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
	e.diskSummary = sum
}

// stash adds an item to the in-memory cache of DataColumnSidecars.
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
// stash will return an error if the given data column is already in the cache, or if the Index is out of bounds.
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
	if sc.Index >= fieldparams.NumberOfColumns {
		return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
	}

	if e.scs[sc.Index] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
	}

	e.scs[sc.Index] = sc

	return nil
}

func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
	nonEmptyIndices := commitmentsArray.nonEmptyIndices()
	if e.diskSummary.AllAvailable(nonEmptyIndices) {
		return nil, nil
	}

	commitmentsCount := commitmentsArray.count()
	sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)

	for i := range nonEmptyIndices {
		if e.diskSummary.HasIndex(i) {
			continue
		}

		if e.scs[i] == nil {
			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
		}

		if !reflect.DeepEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
		}

		sidecars = append(sidecars, *e.scs[i])
	}

	return sidecars, nil
}

// safeCommitmentsArray is a fixed size array of commitments.
// This is helpful for avoiding gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

// count returns the number of commitments in the array.
func (s *safeCommitmentsArray) count() int {
	count := 0

	for i := range s {
		if s[i] != nil {
			count++
		}
	}

	return count
}

// nonEmptyIndices returns a map of indices that are non-nil in the array.
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
	columns := make(map[uint64]bool)

	for i := range s {
		if s[i] != nil {
			columns[uint64(i)] = true
		}
	}

	return columns
}

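A sketch of the cache contract above, using only names from this file: stash rejects duplicate or out-of-range indices, and filter returns only the columns that the block's commitments expect but that are not yet on disk.

func pendingColumns(c *dataColumnCache, sc blocks.RODataColumn, commitments *safeCommitmentsArray, summary filesystem.DataColumnStorageSummary) ([]blocks.RODataColumn, error) {
	key := dataColumnCacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
	entry := c.ensure(key)
	if err := entry.stash(&sc); err != nil {
		return nil, err // ErrDuplicateSidecar or errColumnIndexTooHigh
	}
	entry.setDiskSummary(summary)
	return entry.filter(sc.BlockRoot(), commitments)
}
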
beacon-chain/das/data_column_cache_test.go (Normal file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
c := newDataColumnCache()
|
||||
key := dataColumnCacheKey{}
|
||||
entry := c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
|
||||
entry.setDiskSummary(diskSummary)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)
|
||||
|
||||
c.delete(key)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
}
|
||||
|
||||
func TestStash(t *testing.T) {
|
||||
t.Run("Index too high", func(t *testing.T) {
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 10_000}}}
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal and already existing", func(t *testing.T) {
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 1}}}
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
|
||||
err = entry.stash(&roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilterDataColumns(t *testing.T) {
|
||||
t.Run("All available", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("Some scs missing", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Commitments not equal", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 1}}}
|
||||
roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[1] = &roDataColumns[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
|
||||
dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 3, KzgCommitments: [][]byte{[]byte{3}}}}}
|
||||
expected, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[3] = &expected[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
require.Equal(t, 2, s.count())
|
||||
}
|
||||
|
||||
func TestNonEmptyIndices(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
|
||||
actual := s.nonEmptyIndices()
|
||||
require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package das

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -15,6 +14,6 @@ import (
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
	IsDataAvailable(ctx context.Context, nodeID enode.ID, current primitives.Slot, b blocks.ROBlock) error
	Persist(current primitives.Slot, sc ...blocks.ROBlob) error
	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
	Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
}

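With enode.ID dropped from the interface, both store flavors should satisfy it; a compile-time assertion makes that explicit. That LazilyPersistentStore (the blob-backed store) still satisfies the interface is an assumption based on the test changes elsewhere in this diff.

var (
	_ AvailabilityStore = (*LazilyPersistentStore)(nil)       // blob-backed store
	_ AvailabilityStore = (*LazilyPersistentStoreColumn)(nil) // column-backed store
)
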
@@ -3,7 +3,7 @@ package das

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -17,7 +17,7 @@ type MockAvailabilityStore struct {
var _ AvailabilityStore = &MockAvailabilityStore{}

// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	if m.VerifyAvailabilityCallback != nil {
		return m.VerifyAvailabilityCallback(ctx, current, b)
	}
@@ -25,9 +25,13 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, _ enode.ID,
}

// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
	blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
	if err != nil {
		return errors.Wrap(err, "blob sidecars from sidecars")
	}
	if m.PersistBlobsCallback != nil {
		return m.PersistBlobsCallback(current, sc...)
		return m.PersistBlobsCallback(current, blobSidecars...)
	}
	return nil
}

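A sketch of how a test might use the mock with the callback fields shown above; the block value is left zero for brevity.

func TestMockAvailabilityStoreSketch(t *testing.T) {
	avs := &MockAvailabilityStore{
		VerifyAvailabilityCallback: func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
			return nil // treat every block as available
		},
	}
	var blk blocks.ROBlock // zero value; any ROBlock works for this callback
	require.NoError(t, avs.IsDataAvailable(context.Background(), 1, blk))
}
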
@@ -5,6 +5,9 @@ go_library(
|
||||
srcs = [
|
||||
"blob.go",
|
||||
"cache.go",
|
||||
"data_column.go",
|
||||
"data_column_cache.go",
|
||||
"doc.go",
|
||||
"iteration.go",
|
||||
"layout.go",
|
||||
"layout_by_epoch.go",
|
||||
@@ -17,6 +20,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
@@ -43,6 +47,8 @@ go_test(
|
||||
srcs = [
|
||||
"blob_test.go",
|
||||
"cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
"data_column_test.go",
|
||||
"iteration_test.go",
|
||||
"layout_test.go",
|
||||
"migration_test.go",
|
||||
@@ -50,12 +56,10 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -8,9 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -30,15 +28,8 @@ var (
|
||||
errNoBasePath = errors.New("BlobStorage base path not specified in init")
|
||||
)
|
||||
|
||||
type (
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
RootIndexPair struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
Index uint64
|
||||
}
|
||||
)
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
type BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
// WithBasePath is a required option that sets the base path of blob storage.
|
||||
func WithBasePath(base string) BlobStorageOption {
|
||||
@@ -85,10 +76,7 @@ func WithLayout(name string) BlobStorageOption {
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
b := &BlobStorage{
|
||||
DataColumnFeed: new(event.Feed),
|
||||
}
|
||||
|
||||
b := &BlobStorage{}
|
||||
for _, o := range opts {
|
||||
if err := o(b); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create blob storage")
|
||||
@@ -127,7 +115,6 @@ type BlobStorage struct {
|
||||
fs afero.Fs
|
||||
layout fsLayout
|
||||
cache *blobStorageSummaryCache
|
||||
DataColumnFeed *event.Feed
|
||||
}
|
||||
|
||||
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
|
||||
@@ -214,51 +201,6 @@ func (bs *BlobStorage) writePart(sidecar blocks.VerifiedROBlob) (ppath string, e
|
||||
return ppath, nil
|
||||
}
|
||||
|
||||
func (bs *BlobStorage) writeDataColumnPart(sidecar blocks.VerifiedRODataColumn) (ppath string, err error) {
|
||||
ident := identForDataColumnSidecar(sidecar)
|
||||
sidecarData, err := sidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to serialize sidecar data")
|
||||
}
|
||||
if len(sidecarData) == 0 {
|
||||
return "", errSidecarEmptySSZData
|
||||
}
|
||||
|
||||
if err := bs.fs.MkdirAll(bs.layout.dir(ident), directoryPermissions()); err != nil {
|
||||
return "", err
|
||||
}
|
||||
ppath = bs.layout.partPath(ident, fmt.Sprintf("%p", sidecarData))
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(ppath)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
defer func() {
|
||||
cerr := partialFile.Close()
|
||||
// The close error is probably less important than any existing error, so only overwrite nil err.
|
||||
if cerr != nil && err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
n, err := partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
return ppath, errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
if bs.fsync {
|
||||
if err := partialFile.Sync(); err != nil {
|
||||
return ppath, err
|
||||
}
|
||||
}
|
||||
|
||||
if n != len(sidecarData) {
|
||||
return ppath, fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
|
||||
}
|
||||
|
||||
return ppath, nil
|
||||
}
|
||||
|
||||
// Save saves blobs given a list of sidecars.
|
||||
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
startTime := time.Now()
|
||||
@@ -309,65 +251,6 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveDataColumn saves dataColumns given a list of sidecars.
|
||||
func (bs *BlobStorage) SaveDataColumn(verifiedRODataColumns blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
|
||||
ident := identForDataColumnSidecar(verifiedRODataColumns)
|
||||
sszPath := bs.layout.sszPath(ident)
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "afero exists")
|
||||
}
|
||||
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
partialMoved := false
|
||||
partPath, err := bs.writeDataColumnPart(verifiedRODataColumns)
|
||||
|
||||
// Ensure the partial file is deleted.
|
||||
defer func() {
|
||||
if partialMoved || partPath == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// It's expected to error if the save is successful.
|
||||
if err := bs.fs.Remove(partPath); err == nil {
|
||||
log.WithField("partPath", partPath).Debug("Removed partial file")
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Atomically rename the partial file to its final name.
|
||||
err = bs.fs.Rename(partPath, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "rename")
|
||||
}
|
||||
partialMoved = true
|
||||
|
||||
if err := bs.layout.notify(ident); err != nil {
|
||||
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", verifiedRODataColumns.BlockRoot())
|
||||
}
|
||||
|
||||
// Notify the data column notifier that a new data column has been saved.
|
||||
if bs.DataColumnFeed != nil {
|
||||
bs.DataColumnFeed.Send(RootIndexPair{
|
||||
Root: verifiedRODataColumns.BlockRoot(),
|
||||
Index: verifiedRODataColumns.ColumnIndex,
|
||||
})
|
||||
}
|
||||
|
||||
blobsWrittenCounter.Inc()
|
||||
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves a single BlobSidecar by its root and index.
|
||||
// Since BlobStorage only writes blobs that have undergone full verification, the return
|
||||
// value is always a VerifiedROBlob.
|
||||
@@ -383,25 +266,7 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
|
||||
return verification.VerifiedROBlobFromDisk(bs.fs, root, bs.layout.sszPath(ident))
|
||||
}
|
||||
|
||||
// GetColumn retrieves a single DataColumnSidecar by its root and index.
|
||||
// Since BlobStorage only writes blobs that have undergone full verification, the return
|
||||
// value is always a VerifiedRODataColumn.
|
||||
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (blocks.VerifiedRODataColumn, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
ident, err := bs.layout.ident(root, idx)
|
||||
if err != nil {
|
||||
return verification.VerifiedRODataColumnError(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}()
|
||||
|
||||
return verification.VerifiedRODataColumnFromDisk(bs.fs, root, bs.layout.sszPath(ident))
|
||||
}
|
||||
|
||||
// Remove removes all blobs or data columns for a given root.
|
||||
// Remove removes all blobs for a given root.
|
||||
func (bs *BlobStorage) Remove(root [32]byte) error {
|
||||
dirIdent, err := bs.layout.dirIdent(root)
|
||||
if err != nil {
|
||||
|
||||
@@ -2,7 +2,6 @@ package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
@@ -10,11 +9,9 @@ import (
|
||||
"testing"
|
||||
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -59,7 +56,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
|
||||
require.NoError(t, bs.Save(sc))
|
||||
actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
|
||||
require.NoError(t, err)
|
||||
expectedIdx := dataIndexMask{false, false, true, false, false, false}
|
||||
expectedIdx := blobIndexMask{false, false, true, false, false, false}
|
||||
actualIdx := bs.Summary(actualSc.BlockRoot()).mask
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedIdx, actualIdx)
|
||||
@@ -224,158 +221,3 @@ func TestLayoutNames(t *testing.T) {
|
||||
_, err := newLayout(badLayoutName, nil, nil, nil)
|
||||
require.ErrorIs(t, err, errInvalidLayoutName)
|
||||
}
|
||||
|
||||
func TestBlobStorage_DataColumn_WithAllLayouts(t *testing.T) {
|
||||
for _, layout := range LayoutNames {
|
||||
t.Run(fmt.Sprintf("layout=%s", layout), func(t *testing.T) {
|
||||
sidecars := setupDataColumnTest(t)
|
||||
|
||||
t.Run("no error for duplicate", func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(layout))
|
||||
sidecar := sidecars[0]
|
||||
|
||||
columnPath := bs.layout.sszPath(identForDataColumnSidecar(sidecar))
|
||||
data, err := ssz.MarshalSSZ(sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, bs.SaveDataColumn(sidecar))
|
||||
// No error when attempting to write twice.
|
||||
require.NoError(t, bs.SaveDataColumn(sidecar))
|
||||
|
||||
content, err := afero.ReadFile(fs, columnPath)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(data, content))
|
||||
|
||||
// Deserialize the DataColumnSidecar from the saved file data.
|
||||
saved := ðpb.DataColumnSidecar{}
|
||||
err = saved.UnmarshalSSZ(content)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compare the original Sidecar and the saved Sidecar.
|
||||
require.DeepSSZEqual(t, sidecar.DataColumnSidecar, saved)
|
||||
})
|
||||
|
||||
t.Run("indices", func(t *testing.T) {
|
||||
bs := NewEphemeralBlobStorage(t, WithLayout(layout))
|
||||
sidecar := sidecars[2]
|
||||
require.NoError(t, bs.SaveDataColumn(sidecar))
|
||||
actual, err := bs.GetColumn(sidecar.BlockRoot(), sidecar.ColumnIndex)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, sidecar, actual)
|
||||
|
||||
expectedIdx := make(dataIndexMask, params.BeaconConfig().NumberOfColumns)
|
||||
expectedIdx[2] = true
|
||||
actualIdx := bs.Summary(actual.BlockRoot()).mask
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedIdx, actualIdx)
|
||||
|
||||
sidecar = sidecars[10]
|
||||
expectedIdx[10] = true
|
||||
require.NoError(t, bs.SaveDataColumn(sidecar))
|
||||
actual, err = bs.GetColumn(sidecar.BlockRoot(), sidecar.ColumnIndex)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, sidecar, actual)
|
||||
actualIdx = bs.Summary(actual.BlockRoot()).mask
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedIdx, actualIdx)
|
||||
})
|
||||
|
||||
t.Run("write -> read -> delete", func(t *testing.T) {
|
||||
bs := NewEphemeralBlobStorage(t, WithLayout(layout))
|
||||
err := bs.SaveDataColumn(sidecars[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := sidecars[0]
|
||||
actual, err := bs.GetColumn(expected.BlockRoot(), expected.ColumnIndex)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
|
||||
require.NoError(t, bs.Remove(expected.BlockRoot()))
|
||||
for i := range params.BeaconConfig().NumberOfColumns {
|
||||
_, err = bs.GetColumn(expected.BlockRoot(), uint64(i))
|
||||
require.Equal(t, true, db.IsNotFound(err))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("clear", func(t *testing.T) {
|
||||
bs := NewEphemeralBlobStorage(t, WithLayout(layout))
|
||||
err := bs.SaveDataColumn(sidecars[0])
|
||||
require.NoError(t, err)
|
||||
res, err := bs.GetColumn(sidecars[0].BlockRoot(), sidecars[0].ColumnIndex)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NoError(t, bs.Clear())
|
||||
// After clearing, the blob should not exist in the db.
|
||||
_, err = bs.GetColumn(sidecars[0].BlockRoot(), sidecars[0].ColumnIndex)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobStorage_DataColumn_WithMigrationFromFlatToByEpoch(t *testing.T) {
|
||||
sidecars := setupDataColumnTest(t)
|
||||
|
||||
// Setup flat layout
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameFlat))
|
||||
sidecar := sidecars[0]
|
||||
columnPath := bs.layout.sszPath(identForDataColumnSidecar(sidecar))
|
||||
data, err := ssz.MarshalSSZ(sidecar)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bs.SaveDataColumn(sidecar))
|
||||
content, err := afero.ReadFile(fs, columnPath)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(data, content))
|
||||
|
||||
// Setup by-epoch layout
|
||||
bs = NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
|
||||
|
||||
// Verify data is the same
|
||||
columnPath = bs.layout.sszPath(identForDataColumnSidecar(sidecar))
|
||||
content, err = afero.ReadFile(fs, columnPath)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(data, content))
|
||||
}
|
||||
|
||||
func TestBlobStorage_DataColumn_WithMigrationFromByEpochToFlat(t *testing.T) {
|
||||
sidecars := setupDataColumnTest(t)
|
||||
|
||||
// Setup by-epoch layout
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameFlat))
|
||||
for _, sidecar := range sidecars {
|
||||
require.NoError(t, bs.SaveDataColumn(sidecar))
|
||||
}
|
||||
columnPath := bs.layout.sszPath(identForDataColumnSidecar(sidecars[0]))
|
||||
content, err := afero.ReadFile(fs, columnPath)
|
||||
require.NoError(t, err)
|
||||
data, err := ssz.MarshalSSZ(sidecars[0])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(data, content))
|
||||
|
||||
// Setup flat layout
|
||||
bs = NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
|
||||
|
||||
// Verify data is the same
|
||||
columnPath = bs.layout.sszPath(identForDataColumnSidecar(sidecars[0]))
|
||||
content, err = afero.ReadFile(fs, columnPath)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(data, content))
|
||||
}
|
||||
|
||||
func setupDataColumnTest(t *testing.T) []blocks.VerifiedRODataColumn {
|
||||
// load trusted setup
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup right fork epoch
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 0
|
||||
cfg.DenebForkEpoch = 0
|
||||
cfg.ElectraForkEpoch = 0
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
_, scs := util.GenerateTestFuluBlockWithSidecar(t, [32]byte{}, 0, 1)
|
||||
return verification.FakeVerifyDataColumnSliceForTest(t, scs)
|
||||
}
|
||||
|
||||
@@ -10,16 +10,16 @@ import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// dataIndexMask is a bitmask representing the set of blob or data column indices that are currently set.
type dataIndexMask []bool
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask []bool

// BlobStorageSummary represents cached information about the BlobSidecars or DataColumnSidecars on disk for each root the cache knows about.
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
	epoch primitives.Epoch
	mask  dataIndexMask
	mask  blobIndexMask
}

// HasIndex returns true if the BlobSidecar or DataColumnSidecar at the given index is available in the filesystem.
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
	if idx >= uint64(len(s.mask)) {
		return false
@@ -27,21 +27,6 @@ func (s BlobStorageSummary) HasIndex(idx uint64) bool {
	return s.mask[idx]
}

// HasDataColumnIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasDataColumnIndex(idx uint64) bool {
	// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	if idx >= numberOfColumns {
		return false
	}

	if idx >= uint64(len(s.mask)) {
		return false
	}

	return s.mask[idx]
}

// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
	if count > len(s.mask) {
@@ -55,21 +40,6 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
	return true
}

// AllDataColumnsAvailable returns true if we have all data columns for corresponding indices.
func (s BlobStorageSummary) AllDataColumnsAvailable(indices map[uint64]bool) bool {
	if len(indices) > len(s.mask) {
		return false
	}

	for indice := range indices {
		if !s.mask[indice] {
			return false
		}
	}

	return true
}

func (s BlobStorageSummary) MaxBlobsForEpoch() uint64 {
	return uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(s.epoch))
}
@@ -115,23 +85,16 @@ func (s *blobStorageSummaryCache) Summary(root [32]byte) BlobStorageSummary {
}

func (s *blobStorageSummaryCache) ensure(ident blobIdent) error {
	maskSize := uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(ident.epoch))

	fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
	if ident.epoch >= fuluForkEpoch {
		maskSize = params.BeaconConfig().NumberOfColumns
	}

	if ident.index >= maskSize {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(ident.epoch)
	if ident.index >= uint64(maxBlobsPerBlock) {
		return errIndexOutOfBounds
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	v := s.cache[ident.root]
	v.epoch = ident.epoch
	if v.mask == nil {
		v.mask = make(dataIndexMask, maskSize)
		v.mask = make(blobIndexMask, maxBlobsPerBlock)
	}
	if !v.mask[ident.index] {
		s.updateMetrics(1)

@@ -3,7 +3,6 @@ package filesystem
import (
	"testing"

	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -24,7 +23,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
	cases := []struct {
		name     string
		root     [32]byte
		expected dataIndexMask
		expected blobIndexMask
	}{
		{
			name: "not found",
@@ -153,124 +152,3 @@ func TestAllAvailable(t *testing.T) {
		})
	}
}

func TestHasDataColumnIndex(t *testing.T) {
	storedIndices := map[uint64]bool{
		1: true,
		3: true,
		5: true,
	}

	cases := []struct {
		name     string
		idx      uint64
		expected bool
	}{
		{
			name:     "index is too high",
			idx:      fieldparams.NumberOfColumns,
			expected: false,
		},
		{
			name:     "non existing index",
			idx:      2,
			expected: false,
		},
		{
			name:     "existing index",
			idx:      3,
			expected: true,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Get the maximum index that is stored.
			maxIndex := uint64(0)
			for index := range storedIndices {
				if index > maxIndex {
					maxIndex = index
				}
			}

			mask := make(dataIndexMask, maxIndex+1)

			for idx := range storedIndices {
				mask[idx] = true
			}

			sum := BlobStorageSummary{mask: mask}
			require.Equal(t, c.expected, sum.HasDataColumnIndex(c.idx))
		})
	}
}

func TestAllDataColumnAvailable(t *testing.T) {
	tooManyColumns := make(map[uint64]bool, fieldparams.NumberOfColumns+1)
	for i := uint64(0); i < fieldparams.NumberOfColumns+1; i++ {
		tooManyColumns[i] = true
	}

	columns346 := map[uint64]bool{
		3: true,
		4: true,
		6: true,
	}

	columns36 := map[uint64]bool{
		3: true,
		6: true,
	}

	cases := []struct {
		name          string
		storedIndices map[uint64]bool
		testedIndices map[uint64]bool
		expected      bool
	}{
		{
			name:          "no tested indices",
			storedIndices: columns346,
			testedIndices: map[uint64]bool{},
			expected:      true,
		},
		{
			name:          "too many tested indices",
			storedIndices: columns346,
			testedIndices: tooManyColumns,
			expected:      false,
		},
		{
			name:          "not all tested indices are stored",
			storedIndices: columns36,
			testedIndices: columns346,
			expected:      false,
		},
		{
			name:          "all tested indices are stored",
			storedIndices: columns346,
			testedIndices: columns36,
			expected:      true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Get the maximum index that is stored.
			maxIndex := uint64(0)
			for index := range c.storedIndices {
				if index > maxIndex {
					maxIndex = index
				}
			}

			mask := make(dataIndexMask, maxIndex+1)

			for idx := range c.storedIndices {
				mask[idx] = true
			}

			sum := BlobStorageSummary{mask: mask}
			require.Equal(t, c.expected, sum.AllDataColumnsAvailable(c.testedIndices))
		})
	}
}

999	beacon-chain/db/filesystem/data_column.go	Normal file
@@ -0,0 +1,999 @@
package filesystem

import (
	"context"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/async"
	"github.com/prysmaticlabs/prysm/v5/async/event"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/io/file"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/spf13/afero"
)

const (
	version                                      = 0x01
	versionOffset                                = 0           // bytes
	versionSize                                  = 1           // bytes
	maxSszEncodedDataColumnSidecarSize           = 536_870_912 // 2**(4*8) / 8 bytes
	encodedSszEncodedDataColumnSidecarSizeOffset = versionOffset + versionSize
	encodedSszEncodedDataColumnSidecarSizeSize   = 4   // bytes (size of the encoded size of the SSZ encoded data column sidecar)
	mandatoryNumberOfColumns                     = 128 // 2**7
	indicesOffset                                = encodedSszEncodedDataColumnSidecarSizeOffset + encodedSszEncodedDataColumnSidecarSizeSize
	indicesSize                                  = mandatoryNumberOfColumns
	nonZeroOffset                                = mandatoryNumberOfColumns
	headerSize                                   = versionSize + encodedSszEncodedDataColumnSidecarSizeSize + mandatoryNumberOfColumns
	dataColumnsFileExtension                     = "sszs"
	prunePeriod                                  = 1 * time.Minute
)

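// The constants above define the on-disk file layout: a fixed-size header followed by
// fixed-size SSZ payloads.
//
//	[0]       version (1 byte)
//	[1:5]     big-endian uint32 size of a single SSZ encoded sidecar
//	[5:133]   one index byte per possible column (mandatoryNumberOfColumns bytes)
//	[133:]    SSZ encoded sidecars, in the order they were written
//
// exampleSidecarOffset is an illustrative sketch (not used by the storage code itself) showing
// how the byte offset of the sidecar stored at a given position follows from that layout.
func exampleSidecarOffset(position int64, sszEncodedSidecarSize uint32) int64 {
	return headerSize + position*int64(sszEncodedSidecarSize)
}
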
var (
	errWrongNumberOfColumns                 = errors.New("wrong number of data columns")
	errDataColumnIndexTooLarge              = errors.New("data column index too large")
	errWrongBytesWritten                    = errors.New("wrong number of bytes written")
	errWrongVersion                         = errors.New("wrong version")
	errWrongBytesHeaderRead                 = errors.New("wrong number of bytes header read")
	errTooManyDataColumns                   = errors.New("too many data columns")
	errWrongSszEncodedDataColumnSidecarSize = errors.New("wrong SSZ encoded data column sidecar size")
	errDataColumnSidecarsFromDifferentSlots = errors.New("data column sidecars from different slots")
)

type (
	// DataColumnStorage is the concrete implementation of the filesystem backend for saving and retrieving DataColumnSidecars.
	DataColumnStorage struct {
		base            string
		retentionEpochs primitives.Epoch
		fs              afero.Fs
		cache           *dataColumnStorageSummaryCache
		DataColumnFeed  *event.Feed
		muChans         map[[fieldparams.RootLength]byte]*muChan
		mu              sync.Mutex // protects muChans
		pruneMu         sync.RWMutex
	}

	// DataColumnStorageOption is a functional option for configuring a DataColumnStorage.
	DataColumnStorageOption func(*DataColumnStorage) error

	// DataColumnsIdent uniquely identifies a set of data column sidecars belonging to the same block.
	DataColumnsIdent struct {
		Root    [fieldparams.RootLength]byte
		Epoch   primitives.Epoch
		Indices []uint64
	}

	storageIndices struct {
		indices [mandatoryNumberOfColumns]byte
	}

	metadata struct {
		indices                         *storageIndices
		savedDataColumnSidecarCount     int64
		sszEncodedDataColumnSidecarSize uint32
		fileSize                        int64
	}

	fileMetadata struct {
		period    uint64
		epoch     primitives.Epoch
		blockRoot [fieldparams.RootLength]byte
	}

	muChan struct {
		mu      *sync.RWMutex
		toStore chan []blocks.VerifiedRODataColumn
	}
)

// WithDataColumnBasePath is a required option that sets the base path of data column storage.
func WithDataColumnBasePath(base string) DataColumnStorageOption {
	return func(b *DataColumnStorage) error {
		b.base = base
		return nil
	}
}

// WithDataColumnRetentionEpochs is an option that changes the number of epochs data columns will be persisted.
func WithDataColumnRetentionEpochs(e primitives.Epoch) DataColumnStorageOption {
	return func(b *DataColumnStorage) error {
		b.retentionEpochs = e
		return nil
	}
}

// WithDataColumnFs allows the afero.Fs implementation to be customized.
// Used by tests to substitute an in-memory filesystem.
func WithDataColumnFs(fs afero.Fs) DataColumnStorageOption {
	return func(b *DataColumnStorage) error {
		b.fs = fs
		return nil
	}
}

// NewDataColumnStorage creates a new instance of the DataColumnStorage object. Note that the implementation of DataColumnStorage may
// attempt to hold a file lock to guarantee exclusive control of the data column storage directory, so this should only be
// initialized once per beacon node.
func NewDataColumnStorage(ctx context.Context, opts ...DataColumnStorageOption) (*DataColumnStorage, error) {
	storage := &DataColumnStorage{
		DataColumnFeed: new(event.Feed),
		muChans:        make(map[[fieldparams.RootLength]byte]*muChan),
	}

	for _, o := range opts {
		if err := o(storage); err != nil {
			return nil, errors.Wrap(err, "failed to create data column storage")
		}
	}

	// Allow tests to set up a different fs using WithDataColumnFs.
	if storage.fs == nil {
		if storage.base == "" {
			return nil, errNoBasePath
		}

		storage.base = path.Clean(storage.base)
		if err := file.MkdirAll(storage.base); err != nil {
			return nil, errors.Wrapf(err, "failed to create data column storage at %s", storage.base)
		}

		storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
	}

	storage.cache = newDataColumnStorageSummaryCache()

	async.RunEvery(ctx, prunePeriod, func() {
		storage.pruneMu.Lock()
		defer storage.pruneMu.Unlock()

		storage.prune()
	})

	return storage, nil
}

// WarmCache warms the cache of the data column filesystem.
// It holds the prune (write) lock for the whole time it is running.
func (dcs *DataColumnStorage) WarmCache() {
	start := time.Now()
	log.Info("Data column filesystem cache warm-up started")

	dcs.pruneMu.Lock()
	defer dcs.pruneMu.Unlock()

	highestStoredEpoch := primitives.Epoch(0)

	// Walk the data column filesystem to warm up the cache.
	if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
		if fileErr != nil {
			return fileErr
		}

		// If not a leaf, skip.
		if info.IsDir() {
			return nil
		}

		// Extract metadata from the file path.
		// Skip the file on failure, since the metadata cannot be used.
		fileMetadata, err := extractFileMetadata(path)
		if err != nil {
			log.WithError(err).Error("Error encountered while extracting file metadata")
			return nil
		}

		// Open the data column filesystem file.
		file, err := dcs.fs.Open(path)
		if err != nil {
			log.WithError(err).Error("Error encountered while opening data column filesystem file")
			return nil
		}

		// Close the file.
		defer func() {
			// Overwrite the existing error only if it is nil, since the close error is less important.
			closeErr := file.Close()
			if closeErr != nil && err == nil {
				err = closeErr
			}
		}()

		// Read the metadata of the file.
		metadata, err := dcs.metadata(file)
		if err != nil {
			log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
			return nil
		}

		// Check the indices.
		indices := metadata.indices.all()
		if len(indices) == 0 {
			return nil
		}

		// Build the ident.
		dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}

		// Update the highest stored epoch.
		highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)

		// Set the ident in the cache.
		if err := dcs.cache.set(dataColumnsIdent); err != nil {
			log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
		}

		return nil
	}); err != nil {
		log.WithError(err).Error("Error encountered while walking data column filesystem.")
	}

	// Prune the cache and the filesystem.
	dcs.prune()

	log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
}

// Summary returns the DataColumnStorageSummary for the given root.
func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary {
	return dcs.cache.Summary(root)
}

// Save saves data column sidecars into the database.
// Pruning is not performed here: it runs asynchronously on a periodic schedule.
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
	startTime := time.Now()

	if len(dataColumnSidecars) == 0 {
		return nil
	}

	// Check that the number of columns is the expected one.
	// While implementing this, we expect the number of columns won't change.
	// If it does, we will need to create a new version of the data column sidecar file.
	if params.BeaconConfig().NumberOfColumns != mandatoryNumberOfColumns {
		return errWrongNumberOfColumns
	}

	highestEpoch := primitives.Epoch(0)
	dataColumnSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)

	// Group data column sidecars by root.
	for _, dataColumnSidecar := range dataColumnSidecars {
		// Check if the data column index is too large.
		if dataColumnSidecar.Index >= mandatoryNumberOfColumns {
			return errDataColumnIndexTooLarge
		}

		root := dataColumnSidecar.BlockRoot()
		dataColumnSidecarsByRoot[root] = append(dataColumnSidecarsByRoot[root], dataColumnSidecar)
	}

	for root, dataColumnSidecars := range dataColumnSidecarsByRoot {
		// Safety check: all data column sidecars for this root must be from the same slot.
		firstSlot := dataColumnSidecars[0].SignedBlockHeader.Header.Slot
		for _, dataColumnSidecar := range dataColumnSidecars[1:] {
			if dataColumnSidecar.SignedBlockHeader.Header.Slot != firstSlot {
				return errDataColumnSidecarsFromDifferentSlots
			}
		}

		// Set the highest epoch.
		epoch := slots.ToEpoch(dataColumnSidecars[0].Slot())
		highestEpoch = max(highestEpoch, epoch)

		// Save data columns in the filesystem.
		if err := dcs.saveFilesystem(root, epoch, dataColumnSidecars); err != nil {
			return errors.Wrap(err, "save filesystem")
		}

		// Get all indices.
		indices := make([]uint64, 0, len(dataColumnSidecars))
		for _, dataColumnSidecar := range dataColumnSidecars {
			indices = append(indices, dataColumnSidecar.Index)
		}

		// Compute the data columns ident.
		dataColumnsIdent := DataColumnsIdent{Root: root, Epoch: epoch, Indices: indices}

		// Set data columns in the cache.
		if err := dcs.cache.set(dataColumnsIdent); err != nil {
			return errors.Wrap(err, "cache set")
		}

		// Notify the data column feed.
		dcs.DataColumnFeed.Send(dataColumnsIdent)
	}

	dataColumnSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))

	return nil
}

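// exampleSaveAndGet is an illustrative sketch (not part of the original change) of the intended
// round trip: Save persists verified sidecars grouped by block root, and Get returns whatever
// subset of the requested indices is on disk, silently skipping missing ones.
func exampleSaveAndGet(dcs *DataColumnStorage, sidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	if len(sidecars) == 0 {
		return nil, nil
	}

	if err := dcs.Save(sidecars); err != nil {
		return nil, errors.Wrap(err, "save")
	}

	// Passing nil indices asks Get for every stored column of this block.
	return dcs.Get(sidecars[0].BlockRoot(), nil)
}
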
// saveFilesystem saves data column sidecars into the database.
// This function expects all data column sidecars to belong to the same block.
func (dcs *DataColumnStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, dataColumnSidecars []blocks.VerifiedRODataColumn) error {
	// Compute the file path.
	filePath := filePath(root, epoch)

	dcs.pruneMu.RLock()
	defer dcs.pruneMu.RUnlock()

	fileMu, toStore := dcs.fileMutex(root)
	toStore <- dataColumnSidecars

	fileMu.Lock()
	defer fileMu.Unlock()

	// Check if the file exists.
	exists, err := afero.Exists(dcs.fs, filePath)
	if err != nil {
		return errors.Wrap(err, "afero exists")
	}

	if exists {
		if err := dcs.saveDataColumnSidecarsExistingFile(filePath, toStore); err != nil {
			return errors.Wrap(err, "save data column sidecars existing file")
		}

		return nil
	}

	if err := dcs.saveDataColumnSidecarsNewFile(filePath, toStore); err != nil {
		return errors.Wrap(err, "save data column sidecars new file")
	}

	return nil
}

// Get retrieves data column sidecars from the database.
// If one of the requested data column sidecars is not found, it is just skipped.
// If indices is nil, then all stored data column sidecars are returned.
// Since DataColumnStorage only writes data columns that have undergone full verification, the return
// value is always a VerifiedRODataColumn.
func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
	dcs.pruneMu.RLock()
	defer dcs.pruneMu.RUnlock()

	fileMu, _ := dcs.fileMutex(root)
	fileMu.RLock()
	defer fileMu.RUnlock()

	startTime := time.Now()

	// Build all indices if none are provided.
	if indices == nil {
		indices = make([]uint64, mandatoryNumberOfColumns)
		for i := range indices {
			indices[i] = uint64(i)
		}
	}

	summary, ok := dcs.cache.get(root)
	if !ok {
		// Nothing found in db. Exit early.
		return nil, nil
	}

	// Compute the file path.
	filePath := filePath(root, summary.epoch)

	// Open the data column sidecars file.
	// We do not specially check if the file exists since we have already checked the cache.
	file, err := dcs.fs.Open(filePath)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars file path open")
	}

	// Close the file when done. A failed close on a read-only file is only logged.
	defer func() {
		if closeErr := file.Close(); closeErr != nil {
			log.WithError(closeErr).Error("Error encountered while closing data column sidecars file")
		}
	}()

	// Read file metadata.
	metadata, err := dcs.metadata(file)
	if err != nil {
		return nil, errors.Wrap(err, "metadata")
	}

	// Retrieve data column sidecars from the file.
	verifiedRODataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(indices))
	for _, index := range indices {
		ok, position, err := metadata.indices.get(index)
		if err != nil {
			return nil, errors.Wrap(err, "get index")
		}

		// Skip if the data column is not saved.
		if !ok {
			continue
		}

		// Compute the offset of the data column sidecar.
		offset := headerSize + position*int64(metadata.sszEncodedDataColumnSidecarSize)

		// Seek to the beginning of the data column sidecar.
		_, err = file.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, errors.Wrap(err, "seek")
		}

		verifiedRODataColumn, err := verification.VerifiedRODataColumnFromDisk(file, root, metadata.sszEncodedDataColumnSidecarSize)
		if err != nil {
			return nil, errors.Wrap(err, "verified RO data column from disk")
		}

		// Append the verified RO data column to the data column sidecars.
		verifiedRODataColumnSidecars = append(verifiedRODataColumnSidecars, verifiedRODataColumn)
	}

	dataColumnFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))

	return verifiedRODataColumnSidecars, nil
}

// Remove deletes all data column sidecars for a given root.
func (dcs *DataColumnStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
	dcs.pruneMu.RLock()
	defer dcs.pruneMu.RUnlock()

	fileMu, _ := dcs.fileMutex(blockRoot)
	fileMu.Lock()
	defer fileMu.Unlock()

	summary, ok := dcs.cache.get(blockRoot)
	if !ok {
		// Nothing found in db. Exit early.
		return nil
	}

	// Remove the data column sidecars from the cache.
	dcs.cache.evict(blockRoot)

	// Remove the data column sidecars file.
	filePath := filePath(blockRoot, summary.epoch)
	if err := dcs.fs.Remove(filePath); err != nil {
		return errors.Wrap(err, "remove")
	}

	return nil
}

// Clear deletes all files on the filesystem.
func (dcs *DataColumnStorage) Clear() error {
	dcs.pruneMu.Lock()
	defer dcs.pruneMu.Unlock()

	dirs, err := listDir(dcs.fs, ".")
	if err != nil {
		return errors.Wrap(err, "list dir")
	}

	dcs.cache.clear()

	for _, dir := range dirs {
		if err := dcs.fs.RemoveAll(dir); err != nil {
			return errors.Wrap(err, "remove all")
		}
	}

	return nil
}

// prune cleans the cache, the filesystem, and the per-root mutexes.
func (dcs *DataColumnStorage) prune() {
	dcs.mu.Lock()
	defer dcs.mu.Unlock()

	highestStoredEpoch := dcs.cache.HighestEpoch()

	// Check if we need to prune.
	if highestStoredEpoch < dcs.retentionEpochs {
		return
	}

	highestEpochToPrune := highestStoredEpoch - dcs.retentionEpochs
	highestPeriodToPrune := period(highestEpochToPrune)

	// Prune the cache.
	prunedCount := dcs.cache.pruneUpTo(highestEpochToPrune)

	if prunedCount == 0 {
		return
	}

	dataColumnPrunedCounter.Add(float64(prunedCount))

	// Prune the filesystem.
	periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
	if err != nil {
		log.WithError(err).Error("Error encountered while reading top directory")
		return
	}

	for _, periodFileInfo := range periodFileInfos {
		periodStr := periodFileInfo.Name()
		period, err := strconv.ParseUint(periodStr, 10, 64)
		if err != nil {
			log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
			continue
		}

		if period < highestPeriodToPrune {
			// Remove everything lower than the highest period to prune.
			if err := dcs.fs.RemoveAll(periodStr); err != nil {
				log.WithError(err).Error("Error encountered while removing period directory")
			}

			continue
		}

		if period > highestPeriodToPrune {
			// Do not remove anything higher than the highest period to prune.
			continue
		}

		// Here, period == highestPeriodToPrune: inspect the period directory epoch by epoch.
		epochFileInfos, err := afero.ReadDir(dcs.fs, periodStr)
		if err != nil {
			log.WithError(err).Error("Error encountered while reading epoch directory")
			continue
		}

		for _, epochFileInfo := range epochFileInfos {
			epochStr := epochFileInfo.Name()
			epochDir := path.Join(periodStr, epochStr)

			epoch, err := strconv.ParseUint(epochStr, 10, 64)
			if err != nil {
				log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
				continue
			}

			if primitives.Epoch(epoch) > highestEpochToPrune {
				continue
			}

			if err := dcs.fs.RemoveAll(epochDir); err != nil {
				log.WithError(err).Error("Error encountered while removing epoch directory")
				continue
			}
		}
	}

	clear(dcs.muChans)
}

// saveDataColumnSidecarsExistingFile saves data column sidecars into an existing file.
// This function expects all data column sidecars to belong to the same block.
func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string, inputDataColumnSidecars chan []blocks.VerifiedRODataColumn) (err error) {
	// Open the data column sidecars file.
	file, err := dcs.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
	if err != nil {
		return errors.Wrap(err, "data column sidecars file path open")
	}

	defer func() {
		closeErr := file.Close()

		// Overwrite the existing error only if it is nil, since the close error is less important.
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}()

	metadata, err := dcs.metadata(file)
	if err != nil {
		return errors.Wrap(err, "metadata")
	}

	// Create the SSZ encoded data column sidecars.
	var sszEncodedDataColumnSidecars []byte

	for {
		dataColumnSidecars := pullChan(inputDataColumnSidecars)
		if len(dataColumnSidecars) == 0 {
			break
		}

		for _, dataColumnSidecar := range dataColumnSidecars {
			// Extract the data column index.
			dataColumnIndex := dataColumnSidecar.Index

			ok, _, err := metadata.indices.get(dataColumnIndex)
			if err != nil {
				return errors.Wrap(err, "get index")
			}

			// Skip if the data column is already saved.
			if ok {
				continue
			}

			// Check if the number of saved data columns is too large.
			// This cannot happen in practice when this function is called via Save.
			if metadata.savedDataColumnSidecarCount >= mandatoryNumberOfColumns {
				return errTooManyDataColumns
			}

			// SSZ encode the data column sidecar.
			sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
			if err != nil {
				return errors.Wrap(err, "data column sidecar marshal SSZ")
			}

			// Compute the size of the SSZ encoded data column sidecar.
			incomingSszEncodedDataColumnSidecarSize := uint32(len(sszEncodedDataColumnSidecar))

			// Check if the incoming encoded data column sidecar size corresponds to the one read from the file.
			if incomingSszEncodedDataColumnSidecarSize != metadata.sszEncodedDataColumnSidecarSize {
				return errWrongSszEncodedDataColumnSidecarSize
			}

			// Alter indices to mark the data column as saved.
			if err := metadata.indices.set(dataColumnIndex, uint8(metadata.savedDataColumnSidecarCount)); err != nil {
				return errors.Wrap(err, "set index")
			}

			metadata.savedDataColumnSidecarCount++

			// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
			sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
		}
	}

	// Save indices to the file.
	indices := metadata.indices.raw()
	count, err := file.WriteAt(indices[:], int64(versionSize+encodedSszEncodedDataColumnSidecarSizeSize))
	if err != nil {
		return errors.Wrap(err, "write indices")
	}
	if count != mandatoryNumberOfColumns {
		return errWrongBytesWritten
	}

	// Append the SSZ encoded data column sidecars to the end of the file.
	count, err = file.WriteAt(sszEncodedDataColumnSidecars, metadata.fileSize)
	if err != nil {
		return errors.Wrap(err, "write SSZ encoded data column sidecars")
	}
	if count != len(sszEncodedDataColumnSidecars) {
		return errWrongBytesWritten
	}

	return nil
}

// saveDataColumnSidecarsNewFile saves data column sidecars into a new file.
// This function expects all data column sidecars to belong to the same block.
func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inputDataColumnSidecars chan []blocks.VerifiedRODataColumn) (err error) {
	// Initialize the indices.
	var indices storageIndices

	var (
		sszEncodedDataColumnSidecarRefSize int
		sszEncodedDataColumnSidecars       []byte
	)

	// Initialize the count of saved SSZ encoded data column sidecars.
	storedCount := uint8(0)

	for {
		dataColumnSidecars := pullChan(inputDataColumnSidecars)
		if len(dataColumnSidecars) == 0 {
			break
		}

		for _, dataColumnSidecar := range dataColumnSidecars {
			// Extract the data column index.
			dataColumnIndex := dataColumnSidecar.Index

			// Skip if the data column is already stored.
			ok, _, err := indices.get(dataColumnIndex)
			if err != nil {
				return errors.Wrap(err, "get index")
			}
			if ok {
				continue
			}

			// Alter the indices to mark the data column sidecar as saved.
			// storedCount is a uint8 that stays below mandatoryNumberOfColumns, so it is a valid position.
			if err := indices.set(dataColumnIndex, storedCount); err != nil {
				return errors.Wrap(err, "set index")
			}

			// Increment the count of saved SSZ encoded data column sidecars.
			storedCount++

			// SSZ encode the data column sidecar.
			sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
			if err != nil {
				return errors.Wrap(err, "data column sidecar marshal SSZ")
			}

			// Check if the size of the SSZ encoded data column sidecar is correct.
			if sszEncodedDataColumnSidecarRefSize != 0 && len(sszEncodedDataColumnSidecar) != sszEncodedDataColumnSidecarRefSize {
				return errWrongSszEncodedDataColumnSidecarSize
			}

			// Set the SSZ encoded data column sidecar reference size.
			sszEncodedDataColumnSidecarRefSize = len(sszEncodedDataColumnSidecar)

			// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
			sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
		}
	}

	if storedCount == 0 {
		// Nothing to save.
		return nil
	}

	// Create the data column sidecars file.
	dir := filepath.Dir(filePath)
	if err := dcs.fs.MkdirAll(dir, directoryPermissions()); err != nil {
		return errors.Wrap(err, "mkdir all")
	}

	file, err := dcs.fs.Create(filePath)
	if err != nil {
		return errors.Wrap(err, "data column sidecars file path create")
	}

	defer func() {
		closeErr := file.Close()

		// Overwrite the existing error only if it is nil, since the close error is less important.
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}()

	// Encode the SSZ encoded data column sidecar size.
	var encodedSszEncodedDataColumnSidecarSize [encodedSszEncodedDataColumnSidecarSizeSize]byte
	binary.BigEndian.PutUint32(encodedSszEncodedDataColumnSidecarSize[:], uint32(sszEncodedDataColumnSidecarRefSize))

	// Get the raw indices.
	rawIndices := indices.raw()

	// Concatenate the version, the data column sidecar size, the data column indices and the SSZ encoded data column sidecars.
	countToWrite := headerSize + len(sszEncodedDataColumnSidecars)
	bytes := make([]byte, 0, countToWrite)
	bytes = append(bytes, byte(version))
	bytes = append(bytes, encodedSszEncodedDataColumnSidecarSize[:]...)
	bytes = append(bytes, rawIndices[:]...)
	bytes = append(bytes, sszEncodedDataColumnSidecars...)

	countWritten, err := file.Write(bytes)
	if err != nil {
		return errors.Wrap(err, "write")
	}
	if countWritten != countToWrite {
		return errWrongBytesWritten
	}

	return nil
}

// metadata runs file sanity checks and retrieves metadata of the file.
// Since ReadAt is used, the file's seek offset is not modified.
func (dcs *DataColumnStorage) metadata(file afero.File) (*metadata, error) {
	var header [headerSize]byte
	countRead, err := file.ReadAt(header[:], 0)
	if err != nil {
		return nil, errors.Wrap(err, "read at")
	}
	if countRead != headerSize {
		return nil, errWrongBytesHeaderRead
	}

	// Read the encoded file version.
	encodedFileVersion := header[versionOffset : versionOffset+versionSize]

	// Convert the version to an int.
	fileVersion := int(encodedFileVersion[0])

	// Check if the version is the expected one.
	if fileVersion != version {
		return nil, errWrongVersion
	}

	// DataColumnSidecar is a variable sized SSZ object, but all data columns for a block will be the same size.
	encodedSszEncodedDataColumnSidecarSize := header[encodedSszEncodedDataColumnSidecarSizeOffset : encodedSszEncodedDataColumnSidecarSizeOffset+encodedSszEncodedDataColumnSidecarSizeSize]

	// Convert the SSZ encoded data column sidecar size to a uint32.
	sszEncodedDataColumnSidecarSize := binary.BigEndian.Uint32(encodedSszEncodedDataColumnSidecarSize)

	// Read the data column indices.
	indices, err := newStorageIndices(header[indicesOffset : indicesOffset+mandatoryNumberOfColumns])
	if err != nil {
		return nil, errors.Wrap(err, "new storage indices")
	}

	// Compute the saved columns count.
	savedDataColumnSidecarCount := int64(indices.len())

	// Compute the size of the file.
	// It is safe to cast the SSZ encoded data column sidecar size to int64 since it is less than 2**63.
	fileSize := int64(headerSize) + savedDataColumnSidecarCount*int64(sszEncodedDataColumnSidecarSize) // lint:ignore uintcast

	metadata := &metadata{
		indices:                         indices,
		savedDataColumnSidecarCount:     savedDataColumnSidecarCount,
		sszEncodedDataColumnSidecarSize: sszEncodedDataColumnSidecarSize,
		fileSize:                        fileSize,
	}

	return metadata, nil
}

func (dcs *DataColumnStorage) fileMutex(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []blocks.VerifiedRODataColumn) {
	dcs.mu.Lock()
	defer dcs.mu.Unlock()

	mc, ok := dcs.muChans[root]
	if !ok {
		mc = &muChan{
			mu:      new(sync.RWMutex),
			toStore: make(chan []blocks.VerifiedRODataColumn, 1),
		}
		dcs.muChans[root] = mc
	}

	return mc.mu, mc.toStore
}

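// Note: the muChan pairing above implements a small write-combining scheme. Callers first push
// their sidecars onto the per-root channel, then compete for the per-root mutex. Whichever caller
// wins the lock drains the channel (see pullChan below) and writes every queued sidecar in one
// pass, so concurrent saves for the same block can be batched into a single file operation.
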
func newStorageIndices(indices []byte) (*storageIndices, error) {
	if len(indices) != mandatoryNumberOfColumns {
		return nil, errWrongNumberOfColumns
	}

	var storageIndices storageIndices
	copy(storageIndices.indices[:], indices)

	return &storageIndices, nil
}

// get returns a boolean indicating if the data column sidecar is saved,
// and the position of the data column sidecar in the file.
func (si *storageIndices) get(dataColumnIndex uint64) (bool, int64, error) {
	if dataColumnIndex >= mandatoryNumberOfColumns {
		return false, 0, errDataColumnIndexTooLarge
	}

	if si.indices[dataColumnIndex] < nonZeroOffset {
		return false, 0, nil
	}

	return true, int64(si.indices[dataColumnIndex] - nonZeroOffset), nil
}

// len returns the number of saved data column sidecars.
func (si *storageIndices) len() int {
	count := 0

	for _, i := range si.indices {
		if i >= nonZeroOffset {
			count++
		}
	}

	return count
}

// all returns the indices of all saved data column sidecars.
func (si *storageIndices) all() []uint64 {
	indices := make([]uint64, 0, len(si.indices))

	for index, i := range si.indices {
		if i >= nonZeroOffset {
			indices = append(indices, uint64(index))
		}
	}

	return indices
}

// raw returns a copy of the raw data column sidecar indices.
// It can be safely modified by the caller.
func (si *storageIndices) raw() [mandatoryNumberOfColumns]byte {
	var result [mandatoryNumberOfColumns]byte
	copy(result[:], si.indices[:])
	return result
}

// set marks the data column sidecar as saved at the given position.
func (si *storageIndices) set(dataColumnIndex uint64, position uint8) error {
	if dataColumnIndex >= mandatoryNumberOfColumns || position >= mandatoryNumberOfColumns {
		return errDataColumnIndexTooLarge
	}

	si.indices[dataColumnIndex] = nonZeroOffset + position

	return nil
}

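// exampleStorageIndices is an illustrative sketch (not part of the original change) of the index
// byte encoding used above: a byte below nonZeroOffset (128) means "column not stored", while a
// byte b >= 128 means "stored at position b-128 in the file".
func exampleStorageIndices() (int64, error) {
	var si storageIndices

	// Mark column 7 as the third sidecar written to the file (position 2).
	if err := si.set(7, 2); err != nil {
		return 0, errors.Wrap(err, "set")
	}

	ok, position, err := si.get(7)
	if err != nil {
		return 0, errors.Wrap(err, "get")
	}
	if !ok {
		return 0, errors.New("column 7 unexpectedly missing")
	}

	return position, nil // position == 2
}
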
// pullChan pulls data column sidecars from the input channel until it is empty.
func pullChan(inputRoDataColumns chan []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	dataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns)

	for {
		select {
		case dataColumnSidecar := <-inputRoDataColumns:
			dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar...)
		default:
			return dataColumnSidecars
		}
	}
}

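// Note: pullChan relies on the buffered (capacity 1) per-root channel created in fileMutex. The
// non-blocking select drains everything queued at the moment the writer holds the lock and
// returns as soon as the channel is empty, so the writer never blocks waiting for producers.
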
// filePath builds the file path in database for a given root and epoch.
func filePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
	return path.Join(
		fmt.Sprintf("%d", period(epoch)),
		fmt.Sprintf("%d", epoch),
		fmt.Sprintf("%#x.%s", root, dataColumnsFileExtension),
	)
}

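// exampleFilePath is an illustrative sketch (not part of the original change). Assuming the
// mainnet value MinEpochsForBlobsSidecarsRequest = 4096, a root at epoch 10000 lands in period
// 10000/4096 = 2, so the returned path is "2/10000/0x<hex-encoded root>.sszs".
func exampleFilePath(root [fieldparams.RootLength]byte) string {
	return filePath(root, primitives.Epoch(10_000))
}
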
// extractFileMetadata extracts the metadata from a file path.
// It returns an error if the path does not look like a leaf of the storage layout.
func extractFileMetadata(path string) (*fileMetadata, error) {
	// NOTE: this assumes forward slash separators and may not be Windows friendly.
	parts := strings.Split(path, "/")
	if len(parts) != 3 {
		return nil, errors.Errorf("unexpected file %s", path)
	}

	period, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse period from %s", path)
	}

	epoch, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse epoch from %s", path)
	}

	partsRoot := strings.Split(parts[2], ".")
	if len(partsRoot) != 2 {
		return nil, errors.Errorf("failed to parse root from %s", path)
	}

	blockRootString := partsRoot[0]
	if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
		return nil, errors.Errorf("unexpected file name %s", path)
	}

	if partsRoot[1] != dataColumnsFileExtension {
		return nil, errors.Errorf("unexpected extension %s", path)
	}

	blockRootSlice, err := hex.DecodeString(blockRootString[2:])
	if err != nil {
		return nil, errors.Wrapf(err, "decode string from %s", path)
	}

	var blockRoot [fieldparams.RootLength]byte
	copy(blockRoot[:], blockRootSlice)

	result := &fileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}
	return result, nil
}

// period computes the period of a given epoch.
func period(epoch primitives.Epoch) uint64 {
	return uint64(epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
}

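// examplePruneTarget is an illustrative sketch (not part of the original change) of the retention
// arithmetic used by prune: every epoch at or below highestStored-retention is prunable, and any
// period directory strictly below period(target) can be removed wholesale.
func examplePruneTarget(highestStored, retention primitives.Epoch) (primitives.Epoch, uint64) {
	if highestStored < retention {
		// Nothing is old enough to be pruned yet.
		return 0, 0
	}

	target := highestStored - retention
	return target, period(target)
}
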
219	beacon-chain/db/filesystem/data_column_cache.go	Normal file
@@ -0,0 +1,219 @@
package filesystem

import (
	"sync"

	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

var errDataColumnIndexOutOfBounds = errors.New("data column index too high")

// DataColumnStorageSummary represents cached information about the DataColumnSidecars on disk for each root the cache knows about.
type DataColumnStorageSummary struct {
	epoch primitives.Epoch
	mask  [fieldparams.NumberOfColumns]bool
}

// NewDataColumnStorageSummary creates a new DataColumnStorageSummary for a given epoch and mask.
func NewDataColumnStorageSummary(epoch primitives.Epoch, mask [fieldparams.NumberOfColumns]bool) DataColumnStorageSummary {
	return DataColumnStorageSummary{
		epoch: epoch,
		mask:  mask,
	}
}

// HasIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
func (s DataColumnStorageSummary) HasIndex(index uint64) bool {
	if index >= uint64(fieldparams.NumberOfColumns) {
		return false
	}
	return s.mask[index]
}

// Count returns the number of available data columns.
func (s DataColumnStorageSummary) Count() uint64 {
	count := uint64(0)

	for _, available := range s.mask {
		if available {
			count++
		}
	}

	return count
}

// AllAvailable returns true if we have all data columns for the corresponding indices.
func (s DataColumnStorageSummary) AllAvailable(indices map[uint64]bool) bool {
	if len(indices) > len(s.mask) {
		return false
	}

	for index := range indices {
		// Guard against out-of-range indices instead of panicking.
		if index >= uint64(len(s.mask)) || !s.mask[index] {
			return false
		}
	}

	return true
}

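// exampleAllAvailable is an illustrative sketch (not part of the original change): a summary
// whose mask covers columns 1 and 3 reports AllAvailable only for index sets contained in {1, 3}.
func exampleAllAvailable() bool {
	var mask [fieldparams.NumberOfColumns]bool
	mask[1], mask[3] = true, true

	summary := NewDataColumnStorageSummary(0, mask)
	return summary.AllAvailable(map[uint64]bool{1: true, 3: true}) // true
}
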
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
type DataColumnStorageSummarizer interface {
	Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
}

type dataColumnStorageSummaryCache struct {
	mu                 sync.RWMutex
	dataColumnCount    float64
	lowestCachedEpoch  primitives.Epoch
	highestCachedEpoch primitives.Epoch
	cache              map[[fieldparams.RootLength]byte]DataColumnStorageSummary
}

var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}

func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
	return &dataColumnStorageSummaryCache{
		cache:             make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
		lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
	}
}

// Summary returns the DataColumnStorageSummary for `root`.
// The DataColumnStorageSummary can be used to check for the presence of DataColumnSidecars based on Index.
func (sc *dataColumnStorageSummaryCache) Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary {
	sc.mu.RLock()
	defer sc.mu.RUnlock()

	return sc.cache[root]
}

// HighestEpoch returns the highest epoch seen by the cache.
func (sc *dataColumnStorageSummaryCache) HighestEpoch() primitives.Epoch {
	sc.mu.RLock()
	defer sc.mu.RUnlock()
	return sc.highestCachedEpoch
}

// set updates the cache.
func (sc *dataColumnStorageSummaryCache) set(dataColumnsIdent DataColumnsIdent) error {
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	sc.mu.Lock()
	defer sc.mu.Unlock()

	summary := sc.cache[dataColumnsIdent.Root]
	summary.epoch = dataColumnsIdent.Epoch

	count := uint64(0)
	for _, index := range dataColumnsIdent.Indices {
		if index >= numberOfColumns {
			return errDataColumnIndexOutOfBounds
		}

		if summary.mask[index] {
			continue
		}

		count++

		summary.mask[index] = true
		sc.cache[dataColumnsIdent.Root] = summary
		sc.lowestCachedEpoch = min(sc.lowestCachedEpoch, dataColumnsIdent.Epoch)
		sc.highestCachedEpoch = max(sc.highestCachedEpoch, dataColumnsIdent.Epoch)
	}

	countFloat := float64(count)
	sc.dataColumnCount += countFloat
	dataColumnDiskCount.Set(sc.dataColumnCount)
	dataColumnWrittenCounter.Add(countFloat)

	return nil
}

// get returns the DataColumnStorageSummary for the given block root.
// If the root is not in the cache, the second return value will be false.
func (sc *dataColumnStorageSummaryCache) get(blockRoot [fieldparams.RootLength]byte) (DataColumnStorageSummary, bool) {
	sc.mu.RLock()
	defer sc.mu.RUnlock()

	v, ok := sc.cache[blockRoot]
	return v, ok
}

// evict removes the DataColumnStorageSummary for the given block root from the cache.
// It returns the number of data columns that were removed.
func (sc *dataColumnStorageSummaryCache) evict(blockRoot [fieldparams.RootLength]byte) int {
	deleted := 0

	sc.mu.Lock()
	defer sc.mu.Unlock()

	summary, ok := sc.cache[blockRoot]
	if !ok {
		return 0
	}

	for i := range summary.mask {
		if summary.mask[i] {
			deleted++
		}
	}

	delete(sc.cache, blockRoot)
	if deleted > 0 {
		sc.dataColumnCount -= float64(deleted)
		dataColumnDiskCount.Set(sc.dataColumnCount)
	}

	// The lowest and highest cached epochs may no longer be valid here,
	// but it is not worth the effort to recalculate them.

	return deleted
}

// pruneUpTo removes all entries from the cache up to and including the given target epoch.
func (sc *dataColumnStorageSummaryCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
	sc.mu.Lock()
	defer sc.mu.Unlock()

	prunedCount := uint64(0)
	newLowestCachedEpoch := params.BeaconConfig().FarFutureEpoch
	newHighestCachedEpoch := primitives.Epoch(0)

	for blockRoot, summary := range sc.cache {
		epoch := summary.epoch

		if epoch > targetEpoch {
			newLowestCachedEpoch = min(newLowestCachedEpoch, epoch)
			newHighestCachedEpoch = max(newHighestCachedEpoch, epoch)
		}

		if epoch <= targetEpoch {
			for i := range summary.mask {
				if summary.mask[i] {
					prunedCount++
				}
			}

			delete(sc.cache, blockRoot)
		}
	}

	if prunedCount > 0 {
		sc.lowestCachedEpoch = newLowestCachedEpoch
		sc.highestCachedEpoch = newHighestCachedEpoch
		sc.dataColumnCount -= float64(prunedCount)
		dataColumnDiskCount.Set(sc.dataColumnCount)
	}

	return prunedCount
}

// clear removes all entries from the cache.
func (sc *dataColumnStorageSummaryCache) clear() uint64 {
	return sc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
}

235	beacon-chain/db/filesystem/data_column_cache_test.go	Normal file
@@ -0,0 +1,235 @@
package filesystem

import (
	"testing"

	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestHasIndex(t *testing.T) {
	summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true})

	hasIndex := summary.HasIndex(1_000_000)
	require.Equal(t, false, hasIndex)

	hasIndex = summary.HasIndex(0)
	require.Equal(t, false, hasIndex)

	hasIndex = summary.HasIndex(1)
	require.Equal(t, true, hasIndex)
}

func TestCount(t *testing.T) {
	summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})

	count := summary.Count()
	require.Equal(t, uint64(2), count)
}

func TestAllAvailableDataColumns(t *testing.T) {
	const count = uint64(1_000)

	summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true, false, true})

	indices := make(map[uint64]bool, count)
	for i := range count {
		indices[i] = true
	}

	allAvailable := summary.AllAvailable(indices)
	require.Equal(t, false, allAvailable)

	indices = map[uint64]bool{1: true, 2: true}
	allAvailable = summary.AllAvailable(indices)
	require.Equal(t, false, allAvailable)

	indices = map[uint64]bool{1: true, 3: true}
	allAvailable = summary.AllAvailable(indices)
	require.Equal(t, true, allAvailable)
}

func TestSummary(t *testing.T) {
	root := [fieldparams.RootLength]byte{}

	summaryCache := newDataColumnStorageSummaryCache()
	expected := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{})
	actual := summaryCache.Summary(root)
	require.DeepEqual(t, expected, actual)

	summaryCache = newDataColumnStorageSummaryCache()
	expected = NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{true, false, true, false})
	summaryCache.cache[root] = expected
	actual = summaryCache.Summary(root)
	require.DeepEqual(t, expected, actual)
}

func TestHighestEpoch(t *testing.T) {
	root1 := [fieldparams.RootLength]byte{1}
	root2 := [fieldparams.RootLength]byte{2}
	root3 := [fieldparams.RootLength]byte{3}

	summaryCache := newDataColumnStorageSummaryCache()
	actual := summaryCache.HighestEpoch()
	require.Equal(t, primitives.Epoch(0), actual)

	err := summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{1, 3}})
	require.NoError(t, err)
	require.Equal(t, primitives.Epoch(42), summaryCache.HighestEpoch())

	err = summaryCache.set(DataColumnsIdent{Root: root2, Epoch: 43, Indices: []uint64{1, 3}})
	require.NoError(t, err)
	require.Equal(t, primitives.Epoch(43), summaryCache.HighestEpoch())

	err = summaryCache.set(DataColumnsIdent{Root: root3, Epoch: 40, Indices: []uint64{1, 3}})
	require.NoError(t, err)
	require.Equal(t, primitives.Epoch(43), summaryCache.HighestEpoch())
}

func TestSet(t *testing.T) {
	t.Run("Index out of bounds", func(t *testing.T) {
		summaryCache := newDataColumnStorageSummaryCache()
		err := summaryCache.set(DataColumnsIdent{Indices: []uint64{1_000_000}})
		require.ErrorIs(t, err, errDataColumnIndexOutOfBounds)
		require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
		require.Equal(t, 0, len(summaryCache.cache))
	})

	t.Run("Nominal", func(t *testing.T) {
		root1 := [fieldparams.RootLength]byte{1}
		root2 := [fieldparams.RootLength]byte{2}

		summaryCache := newDataColumnStorageSummaryCache()

		err := summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{1, 3}})
		require.NoError(t, err)
		require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
		require.Equal(t, 1, len(summaryCache.cache))
		expected := DataColumnStorageSummary{epoch: 42, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}
		actual := summaryCache.cache[root1]
		require.DeepEqual(t, expected, actual)

		err = summaryCache.set(DataColumnsIdent{Root: root1, Epoch: 42, Indices: []uint64{0, 1}})
		require.NoError(t, err)
		require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
		require.Equal(t, 1, len(summaryCache.cache))
		expected = DataColumnStorageSummary{epoch: 42, mask: [fieldparams.NumberOfColumns]bool{true, true, false, true}}
		actual = summaryCache.cache[root1]
		require.DeepEqual(t, expected, actual)

		err = summaryCache.set(DataColumnsIdent{Root: root2, Epoch: 43, Indices: []uint64{1}})
		require.NoError(t, err)
		require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch) // Epoch 42 is still the lowest.
		require.Equal(t, 2, len(summaryCache.cache))
		expected = DataColumnStorageSummary{epoch: 43, mask: [fieldparams.NumberOfColumns]bool{false, true}}
		actual = summaryCache.cache[root2]
		require.DeepEqual(t, expected, actual)
	})
}

func TestGet(t *testing.T) {
	t.Run("Not in cache", func(t *testing.T) {
		summaryCache := newDataColumnStorageSummaryCache()
		root := [fieldparams.RootLength]byte{}
		_, ok := summaryCache.get(root)
		require.Equal(t, false, ok)
	})

	t.Run("In cache", func(t *testing.T) {
		root := [fieldparams.RootLength]byte{}
		summaryCache := newDataColumnStorageSummaryCache()
		summaryCache.cache[root] = NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
		actual, ok := summaryCache.get(root)
		require.Equal(t, true, ok)
		expected := NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
		require.DeepEqual(t, expected, actual)
	})
}

func TestEvict(t *testing.T) {
	t.Run("No eviction", func(t *testing.T) {
		root := [fieldparams.RootLength]byte{}
		summaryCache := newDataColumnStorageSummaryCache()

		evicted := summaryCache.evict(root)
		require.Equal(t, 0, evicted)
	})

	t.Run("Eviction", func(t *testing.T) {
		root1 := [fieldparams.RootLength]byte{1}
		root2 := [fieldparams.RootLength]byte{2}
		summaryCache := newDataColumnStorageSummaryCache()
		summaryCache.cache[root1] = NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true, false, true, false})
		summaryCache.cache[root2] = NewDataColumnStorageSummary(43, [fieldparams.NumberOfColumns]bool{false, true, false, true})

		evicted := summaryCache.evict(root1)
		require.Equal(t, 2, evicted)
		require.Equal(t, 1, len(summaryCache.cache))

		_, ok := summaryCache.cache[root1]
		require.Equal(t, false, ok)

		_, ok = summaryCache.cache[root2]
		require.Equal(t, true, ok)
	})
}

func TestPruneUpTo(t *testing.T) {
	t.Run("No pruning", func(t *testing.T) {
		summaryCache := newDataColumnStorageSummaryCache()
		err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
		require.NoError(t, err)

		err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 43, Indices: []uint64{2, 4}})
		require.NoError(t, err)

		count := summaryCache.pruneUpTo(41)
		require.Equal(t, uint64(0), count)
		require.Equal(t, 2, len(summaryCache.cache))
		require.Equal(t, primitives.Epoch(42), summaryCache.lowestCachedEpoch)
	})

	t.Run("Pruning", func(t *testing.T) {
		summaryCache := newDataColumnStorageSummaryCache()
		err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
		require.NoError(t, err)

		err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 44, Indices: []uint64{2, 4}})
		require.NoError(t, err)

		err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{3}, Epoch: 45, Indices: []uint64{2, 4}})
		require.NoError(t, err)

		count := summaryCache.pruneUpTo(42)
		require.Equal(t, uint64(1), count)
		require.Equal(t, 2, len(summaryCache.cache))
		require.Equal(t, primitives.Epoch(44), summaryCache.lowestCachedEpoch)

		count = summaryCache.pruneUpTo(45)
		require.Equal(t, uint64(4), count)
		require.Equal(t, 0, len(summaryCache.cache))
		require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
		require.Equal(t, primitives.Epoch(0), summaryCache.highestCachedEpoch)
	})

	t.Run("Clear", func(t *testing.T) {
		summaryCache := newDataColumnStorageSummaryCache()
		err := summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{1}, Epoch: 42, Indices: []uint64{1}})
		require.NoError(t, err)

		err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{2}, Epoch: 44, Indices: []uint64{2, 4}})
		require.NoError(t, err)

		err = summaryCache.set(DataColumnsIdent{Root: [fieldparams.RootLength]byte{3}, Epoch: 45, Indices: []uint64{2, 4}})
		require.NoError(t, err)

		count := summaryCache.clear()
		require.Equal(t, uint64(5), count)
		require.Equal(t, 0, len(summaryCache.cache))
		require.Equal(t, params.BeaconConfig().FarFutureEpoch, summaryCache.lowestCachedEpoch)
		require.Equal(t, primitives.Epoch(0), summaryCache.highestCachedEpoch)
	})
}

|
||||
beacon-chain/db/filesystem/data_column_test.go (new file, 734 lines)
@@ -0,0 +1,734 @@
package filesystem

import (
"context"
"encoding/binary"
"os"
"testing"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/spf13/afero"
)

func TestNewDataColumnStorage(t *testing.T) {
ctx := context.Background()

t.Run("No base path", func(t *testing.T) {
_, err := NewDataColumnStorage(ctx)
require.ErrorIs(t, err, errNoBasePath)
})

t.Run("Nominal", func(t *testing.T) {
dir := t.TempDir()

storage, err := NewDataColumnStorage(ctx, WithDataColumnBasePath(dir))
require.NoError(t, err)
require.Equal(t, dir, storage.base)
})
}

func TestWarmCache(t *testing.T) {
storage, err := NewDataColumnStorage(
context.Background(),
WithDataColumnBasePath(t.TempDir()),
WithDataColumnRetentionEpochs(10_000),
)
require.NoError(t, err)

_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{0}: {
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
},
{1}: {
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{2}: {
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{3}: {
{Slot: 128_034, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4001
{Slot: 128_034, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4001
},
{4}: {
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
},
{5}: {
{Slot: 131_138, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
},
{6}: {
{Slot: 131_168, ColumnIndex: 0, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
},
},
)

err = storage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

storage.retentionEpochs = 4_096

storage.WarmCache()
require.Equal(t, primitives.Epoch(4_000), storage.cache.lowestCachedEpoch)
require.Equal(t, 6, len(storage.cache.cache))

summary, ok := storage.cache.get([fieldparams.RootLength]byte{1})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)

summary, ok = storage.cache.get([fieldparams.RootLength]byte{2})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}, summary)

summary, ok = storage.cache.get([fieldparams.RootLength]byte{3})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_001, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)

summary, ok = storage.cache.get([fieldparams.RootLength]byte{4})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, false, true}}, summary)

summary, ok = storage.cache.get([fieldparams.RootLength]byte{5})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true}}, summary)

summary, ok = storage.cache.get([fieldparams.RootLength]byte{6})
require.Equal(t, true, ok)
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_099, mask: [fieldparams.NumberOfColumns]bool{true}}, summary)
}

func TestSaveDataColumnsSidecars(t *testing.T) {
t.Run("wrong number of columns", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.NumberOfColumns = 0
params.OverrideBeaconConfig(cfg)
params.SetupTestConfigCleanup(t)

_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}},
},
)

_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errWrongNumberOfColumns)
})

t.Run("one of the column indices is too large", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}}},
)

_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.ErrorIs(t, err, errDataColumnIndexTooLarge)
})

t.Run("different slots", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{Slot: 1, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 2, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errDataColumnSidecarsFromDifferentSlots)
|
||||
})
|
||||
|
||||
t.Run("new file - no data columns to save", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{}: {}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("new file - different data column size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{1, 2, 3, 4}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongSszEncodedDataColumnSidecarSize)
|
||||
})
|
||||
|
||||
t.Run("existing file - wrong incoming SSZ encoded size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}}},
|
||||
)
|
||||
|
||||
// Save data columns into a file.
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Build a data column sidecar for the same block but with a different
|
||||
// column index and an different SSZ encoded size.
|
||||
_, verifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 13, DataColumn: []byte{1, 2, 3, 4}}}},
|
||||
)
|
||||
|
||||
// Try to rewrite the file.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errWrongSszEncodedDataColumnSidecarSize)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
{2}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, inputVerifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{3}: {
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
err = dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
type fixture struct {
|
||||
fileName string
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
expectedIndices [mandatoryNumberOfColumns]byte
|
||||
dataColumnParams []verification.DataColumnParams
|
||||
}
|
||||
|
||||
fixtures := []fixture{
|
||||
{
|
||||
fileName: "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{1},
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, nonZeroOffset + 4, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, nonZeroOffset + 1, nonZeroOffset, nonZeroOffset + 2, 0, nonZeroOffset + 3,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []verification.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{2},
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, nonZeroOffset, nonZeroOffset + 1, 0, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []verification.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0300000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{3},
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, nonZeroOffset + 1, 0, 0, 0, nonZeroOffset, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []verification.DataColumnParams{
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, fixture := range fixtures {
|
||||
// Build expected data column sidecars.
|
||||
_, expectedDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
verification.DataColumnsParamsByRoot{fixture.blockRoot: fixture.dataColumnParams},
|
||||
)
|
||||
|
||||
// Build expected bytes.
|
||||
firstSszEncodedDataColumnSidecar, err := expectedDataColumnSidecars[0].MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnSidecarsCount := len(expectedDataColumnSidecars)
|
||||
sszEncodedDataColumnSidecarSize := len(firstSszEncodedDataColumnSidecar)
|
||||
|
||||
sszEncodedDataColumnSidecars := make([]byte, 0, dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, firstSszEncodedDataColumnSidecar...)
|
||||
for _, dataColumnSidecar := range expectedDataColumnSidecars[1:] {
|
||||
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
|
||||
}
|
||||
|
||||
var encodedSszEncodedDataColumnSidecarSize [encodedSszEncodedDataColumnSidecarSizeSize]byte
|
||||
binary.BigEndian.PutUint32(encodedSszEncodedDataColumnSidecarSize[:], uint32(sszEncodedDataColumnSidecarSize))
|
||||
|
||||
expectedBytes := make([]byte, 0, headerSize+dataColumnSidecarsCount*sszEncodedDataColumnSidecarSize)
|
||||
expectedBytes = append(expectedBytes, []byte{0x01}...)
|
||||
expectedBytes = append(expectedBytes, encodedSszEncodedDataColumnSidecarSize[:]...)
|
||||
expectedBytes = append(expectedBytes, fixture.expectedIndices[:]...)
|
||||
expectedBytes = append(expectedBytes, sszEncodedDataColumnSidecars...)
|
||||
|
||||
// Check the actual content of the file.
|
||||
actualBytes, err := afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBytes, actualBytes)
|
||||
|
||||
// Check the summary.
|
||||
indices := map[uint64]bool{}
|
||||
for _, dataColumnParam := range fixture.dataColumnParams {
|
||||
indices[dataColumnParam.ColumnIndex] = true
|
||||
}
|
||||
|
||||
summary := dataColumnStorage.Summary(fixture.blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, indices[index], summary.HasIndex(index))
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Remove(fixture.blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary = dataColumnStorage.Summary(fixture.blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, false, summary.HasIndex(index))
|
||||
}
|
||||
|
||||
_, err = afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetDataColumnSidecars(t *testing.T) {
t.Run("not found", func(t *testing.T) {
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)

verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
require.NoError(t, err)
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
})

t.Run("nominal", func(t *testing.T) {
_, expectedVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
},
},
)

_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(expectedVerifiedRoDataColumnSidecars)
require.NoError(t, err)

verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
require.NoError(t, err)
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)

verifiedRODataColumnSidecars, err = dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
require.NoError(t, err)
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
})
}

func TestRemove(t *testing.T) {
t.Run("not found", func(t *testing.T) {
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
require.NoError(t, err)
})

t.Run("nominal", func(t *testing.T) {
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {
{Slot: 32, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
{Slot: 32, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
},
{2}: {
{Slot: 33, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
{Slot: 33, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
},
},
)

_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
require.NoError(t, err)

err = dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
require.NoError(t, err)

summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
require.Equal(t, primitives.Epoch(0), summary.epoch)
require.Equal(t, uint64(0), summary.Count())

summary = dataColumnStorage.Summary([fieldparams.RootLength]byte{2})
require.Equal(t, primitives.Epoch(1), summary.epoch)
require.Equal(t, uint64(2), summary.Count())

actual, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
require.NoError(t, err)
require.Equal(t, 0, len(actual))

actual, err = dataColumnStorage.Get([fieldparams.RootLength]byte{2}, nil)
require.NoError(t, err)
require.Equal(t, 2, len(actual))
})
}

func TestClear(t *testing.T) {
_, inputVerifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
{2}: {{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}}},
},
)

_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
require.NoError(t, err)

filePaths := []string{
"0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
"0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
}

for _, filePath := range filePaths {
_, err = afero.ReadFile(dataColumnStorage.fs, filePath)
require.NoError(t, err)
}

err = dataColumnStorage.Clear()
require.NoError(t, err)

summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
for index := range uint64(mandatoryNumberOfColumns) {
require.Equal(t, false, summary.HasIndex(index))
}

for _, filePath := range filePaths {
_, err = afero.ReadFile(dataColumnStorage.fs, filePath)
require.ErrorIs(t, err, os.ErrNotExist)
}
}

func TestMetadata(t *testing.T) {
t.Run("wrong version", func(t *testing.T) {
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
},
)

// Save data columns into a file.
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

// Alter the version.
const filePath = "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs"
file, err := dataColumnStorage.fs.OpenFile(filePath, os.O_WRONLY, os.FileMode(0600))
require.NoError(t, err)

count, err := file.Write([]byte{42})
require.NoError(t, err)
require.Equal(t, 1, count)

// Try to read the metadata.
_, err = dataColumnStorage.metadata(file)
require.ErrorIs(t, err, errWrongVersion)

err = file.Close()
require.NoError(t, err)
})
}

func TestNewStorageIndices(t *testing.T) {
t.Run("wrong number of columns", func(t *testing.T) {
_, err := newStorageIndices(nil)
require.ErrorIs(t, err, errWrongNumberOfColumns)
})

t.Run("nominal", func(t *testing.T) {
var indices [mandatoryNumberOfColumns]byte
indices[0] = 1

storageIndices, err := newStorageIndices(indices[:])
require.NoError(t, err)
require.Equal(t, indices, storageIndices.indices)
})
}

func TestStorageIndicesGet(t *testing.T) {
t.Run("index too large", func(t *testing.T) {
var indices storageIndices
_, _, err := indices.get(1_000_000)
require.ErrorIs(t, errDataColumnIndexTooLarge, err)
})

t.Run("index not set", func(t *testing.T) {
const expected = false
var indices storageIndices
actual, _, err := indices.get(0)
require.NoError(t, err)
require.Equal(t, expected, actual)
})

t.Run("index set", func(t *testing.T) {
const (
expectedOk = true
expectedPosition = int64(3)
)

indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131}}
actualOk, actualPosition, err := indices.get(1)
require.NoError(t, err)
require.Equal(t, expectedOk, actualOk)
require.Equal(t, expectedPosition, actualPosition)
})
}

func TestStorageIndicesLen(t *testing.T) {
const expectedLen = 2
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131, 0, 128}}
actualLen := indices.len()
require.Equal(t, expectedLen, actualLen)
}

func TestStorageIndicesAll(t *testing.T) {
expectedIndices := []uint64{1, 3}
indices := storageIndices{indices: [mandatoryNumberOfColumns]byte{0, 131, 0, 128}}
actualIndices := indices.all()
require.DeepEqual(t, expectedIndices, actualIndices)
}

func TestStorageIndicesSet(t *testing.T) {
t.Run("data column index too large", func(t *testing.T) {
var indices storageIndices
err := indices.set(1_000_000, 0)
require.ErrorIs(t, errDataColumnIndexTooLarge, err)
})

t.Run("position too large", func(t *testing.T) {
var indices storageIndices
err := indices.set(0, 255)
require.ErrorIs(t, errDataColumnIndexTooLarge, err)
})

t.Run("nominal", func(t *testing.T) {
expected := [mandatoryNumberOfColumns]byte{0, 0, 128, 0, 131}
var indices storageIndices

err := indices.set(2, 0)
require.NoError(t, err)

err = indices.set(4, 3)
require.NoError(t, err)

require.Equal(t, expected, indices.indices)
})
}

func TestPrune(t *testing.T) {
t.Run("nothing to prune", func(t *testing.T) {
dir := t.TempDir()
dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir))
require.NoError(t, err)

dataColumnStorage.prune()
})
t.Run("nominal", func(t *testing.T) {
var compareSlices = func(left, right []string) bool {
if len(left) != len(right) {
return false
}

leftMap := make(map[string]bool, len(left))
for _, leftItem := range left {
leftMap[leftItem] = true
}

for _, rightItem := range right {
if _, ok := leftMap[rightItem]; !ok {
return false
}
}

return true
}
_, verifiedRoDataColumnSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{0}: {
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
},
{1}: {
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{2}: {
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
},
{3}: {
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
{Slot: 131_138, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
},
{4}: {
{Slot: 131_169, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
{Slot: 131_169, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
},
{5}: {
{Slot: 262_144, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
{Slot: 262_144, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
},
},
)

dir := t.TempDir()
dataColumnStorage, err := NewDataColumnStorage(context.Background(), WithDataColumnBasePath(dir), WithDataColumnRetentionEpochs(10_000))
require.NoError(t, err)

err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

dirs, err := listDir(dataColumnStorage.fs, ".")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0", "1", "2"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "0")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"1", "4000"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "1")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"4099", "4098"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "2")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"8192"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "0/1")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0000000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "0/4000")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{
"0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
"0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "1/4098")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0300000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "1/4099")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "2/8192")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))

_, verifiedRoDataColumnSidecars = verification.CreateTestVerifiedRoDataColumnSidecars(
t,
verification.DataColumnsParamsByRoot{
{6}: {{Slot: 451_141, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}}, // Period 3 - Epoch 14_098
},
)

err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

dataColumnStorage.prune()

dirs, err = listDir(dataColumnStorage.fs, ".")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"1", "2", "3"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "1")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"4099"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "2")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"8192"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "3")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"14098"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "1/4099")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "2/8192")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))

dirs, err = listDir(dataColumnStorage.fs, "3/14098")
require.NoError(t, err)
require.Equal(t, true, compareSlices([]string{"0x0600000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
})
}
beacon-chain/db/filesystem/doc.go (new file, 104 lines)
@@ -0,0 +1,104 @@
package filesystem

// nolint:dupword
/*
Data column sidecars storage documentation
==========================================

File organisation
-----------------
- The first byte represents the version of the file structure (up to 0xff = 255).
We set it to 0x01.
Note: This is not strictly needed, but it will help a lot if, in the future,
we want to modify the file structure.
- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
(See the `Computation of the maximum size of a DataColumnSidecar` section for a description
of how this value is computed.)
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (values 0 to 127) represent the position of the data column in the file.
This sentinel bit is needed to distinguish between the column with index 0 and no column at all
(see the decoding sketch after this list).
Example: If the column with index 5 is in the 3rd position in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is a sequence of SSZ encoded data column sidecars.
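
A minimal decoding sketch for one index byte (illustrative only: `nonZeroOffset`
mirrors the sentinel value 0x80 used by the tests in this diff, the other names
are assumptions, not the package's actual API):

	const nonZeroOffset = 0x80
	b := indices[columnIndex]
	present := b&nonZeroOffset != 0       // sentinel bit set => the column is stored
	position := int64(b &^ nonZeroOffset) // 0..127, position of the sidecar in the file
	offset := 133 + position*int64(sszEncodedDataColumnSidecarSize)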


|-------------------------------------------|----------------------------------------------------------------------|
| Byte offset                               | Description                                                          |
|-------------------------------------------|----------------------------------------------------------------------|
| 0                                         | version (1 byte)                                                     |
| 1                                         | sszEncodedDataColumnSidecarSize (4 bytes)                            |
| 5                                         | indices (128 bytes)                                                  |
| 133 + 0*sszEncodedDataColumnSidecarSize   | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
| 133 + 1*sszEncodedDataColumnSidecarSize   | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
| 133 + 2*sszEncodedDataColumnSidecarSize   | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
| ...                                       | ...                                                                  |
| 133 + 127*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)  |
|-------------------------------------------|----------------------------------------------------------------------|

Each file is named after the block root the data columns are committed to.
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`

Database organisation
---------------------
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which is the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory (a path-derivation sketch follows this list).
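
A minimal path-derivation sketch (illustrative only; the exact formatting code
is an assumption, not the storage package's actual implementation):

	period := epoch / 4096
	filePath := fmt.Sprintf("%d/%d/%#x.sszs", period, epoch, blockRoot)
	// e.g. epoch 3638 (period 0) -> "0/3638/0x259c...sszs"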

Example:
data-columns
├── 0
│ ├── 3638
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ ├── ...
│ │ ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│ │ └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│ └── 3639
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│ ├── ...
│ ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│ └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│ ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│ ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│ ├── ...
│ ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│ └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
├── ...
├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
└── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs

Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#datacolumnsidecar


class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]


- index: 8 bytes (ColumnIndex is a uint64)
- `column`: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 8 bytes (Slot) + 8 bytes (ValidatorIndex) + 3 * 32 bytes (Root) + 96 bytes (BLSSignature) = 208 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes

TOTAL: 8,782,168 bytes
log(8,782,168) / log(2) ~= 23.07

==> 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.

A 4-byte size field can represent sizes up to 2**32 - 1 = 4,294,967,295 bytes,
which leaves ample headroom above the 8,782,168 byte maximum for the extra bytes needed by SSZ encoding (more than enough).
*/
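
The index-region bookkeeping documented above is easy to exercise in isolation.
Below is a hedged, standalone sketch: the constants mirror the layout described
in `File organisation` and the `nonZeroOffset` value used by the tests earlier
in this diff, but the function names are illustrative assumptions, not the
storage package's actual API.

package main

import "fmt"

const (
	numberOfColumns = 128  // columns tracked by the 128-byte index region
	nonZeroOffset   = 0x80 // sentinel bit: the column is present in the file
	headerSize      = 133  // 1 (version) + 4 (sidecar size) + 128 (indices)
)

// set records that the sidecar for columnIndex is stored at the given
// position (0..127) inside the file.
func set(indices *[numberOfColumns]byte, columnIndex uint64, position byte) error {
	if columnIndex >= numberOfColumns || position >= nonZeroOffset {
		return fmt.Errorf("column index or position too large")
	}
	indices[columnIndex] = nonZeroOffset | position
	return nil
}

// get reports whether the column is stored and, if so, the byte offset of its
// SSZ encoded sidecar for a given per-sidecar size.
func get(indices *[numberOfColumns]byte, columnIndex uint64, sidecarSize int64) (bool, int64) {
	b := indices[columnIndex]
	if b&nonZeroOffset == 0 {
		return false, 0
	}
	position := int64(b &^ nonZeroOffset)
	return true, headerSize + position*sidecarSize
}

func main() {
	var indices [numberOfColumns]byte
	// Column 5 stored in the 3rd position, as in the example above.
	if err := set(&indices, 5, 3); err != nil {
		panic(err)
	}
	fmt.Printf("indices[5] = %#x\n", indices[5]) // 0x83
	ok, offset := get(&indices, 5, 1000)
	fmt.Println(ok, offset) // true 3133
}
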
@@ -56,10 +56,6 @@ func identForSidecar(sc blocks.VerifiedROBlob) blobIdent {
return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.Index)
}

func identForDataColumnSidecar(sc blocks.VerifiedRODataColumn) blobIdent {
return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.ColumnIndex)
}

func (n blobIdent) sszFname() string {
return fmt.Sprintf("%d.%s", n.index, sszExt)
}

@@ -6,6 +6,7 @@ import (
)

var (
// Blobs
blobBuckets = []float64{3, 5, 7, 9, 11, 13}
blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "blob_storage_save_latency",
@@ -33,4 +34,29 @@ var (
Name: "blob_disk_bytes",
Help: "Approximate number of bytes occupied by blobs in storage",
})

// Data columns
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_save_latency",
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
Buckets: dataColumnBuckets,
})
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_get_latency",
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
Buckets: dataColumnBuckets,
})
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_pruned",
Help: "Number of DataColumnSidecar pruned.",
})
dataColumnWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_written",
Help: "Number of DataColumnSidecar written",
})
dataColumnDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "data_column_disk_count",
Help: "Approximate number of data columns in storage",
})
)

@@ -1,14 +1,19 @@
package filesystem

import (
"context"
"testing"

fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/spf13/afero"
)

// Blobs
// -----

// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
@@ -64,14 +69,63 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
return &BlobMocker{fs: fs, bs: bs}, bs
}

func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int, epoch primitives.Epoch) BlobStorageSummarizer {
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
t.Fatal(err)
}
}
}
return c
}

// Data columns
// ------------

// NewEphemeralDataColumnStorage should only be used for tests.
// The instance of DataColumnStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralDataColumnStorage(t testing.TB, opts ...DataColumnStorageOption) *DataColumnStorage {
return NewWarmedEphemeralDataColumnStorageUsingFs(t, afero.NewMemMapFs(), opts...)
}

// NewEphemeralDataColumnStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the DataColumnStorage API.
func NewEphemeralDataColumnStorageAndFs(t testing.TB, opts ...DataColumnStorageOption) (afero.Fs, *DataColumnStorage) {
fs := afero.NewMemMapFs()
dcs := NewWarmedEphemeralDataColumnStorageUsingFs(t, fs, opts...)
return fs, dcs
}
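
// A minimal usage sketch (illustrative only; TestExample is a hypothetical
// test, not part of this change): the returned afero.Fs lets a test inspect
// the files the storage writes, outside the DataColumnStorage API.
//
//	func TestExample(t *testing.T) {
//		fs, storage := NewEphemeralDataColumnStorageAndFs(t)
//		// Interact with storage through its API...
//		_ = storage
//		// ...then inspect the backing filesystem directly.
//		exists, err := afero.Exists(fs, "0")
//		_, _ = exists, err
//	}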

func NewWarmedEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
bs := NewEphemeralDataColumnStorageUsingFs(t, fs, opts...)
bs.WarmCache()
return bs
}

func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
opts = append(opts,
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
WithDataColumnFs(fs),
)

bs, err := NewDataColumnStorage(context.Background(), opts...)
if err != nil {
t.Fatal(err)
}

return bs
}

func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
c := newDataColumnStorageSummaryCache()
for root, indices := range set {
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
t.Fatal(err)
}
}

return c
}

@@ -53,6 +53,7 @@ var (
GetPayloadMethodV4,
}
fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
}
)
@@ -79,6 +80,8 @@ const (
GetPayloadMethodV3 = "engine_getPayloadV3"
// GetPayloadMethodV4 is the get payload method added for electra
GetPayloadMethodV4 = "engine_getPayloadV4"
// GetPayloadMethodV5 is the get payload method added for fulu
GetPayloadMethodV5 = "engine_getPayloadV5"
// BlockByHashMethod request string for JSON-RPC.
BlockByHashMethod = "eth_getBlockByHash"
// BlockByNumberMethod request string for JSON-RPC.
@@ -271,6 +274,9 @@ func (s *Service) ForkchoiceUpdated(

func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
pe := slots.ToEpoch(slot)
if pe >= params.BeaconConfig().FuluForkEpoch {
return GetPayloadMethodV5, &pb.ExecutionBundleElectra{}
}
if pe >= params.BeaconConfig().ElectraForkEpoch {
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
}
@@ -303,7 +309,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
}
res, err := blocks.NewGetPayloadResponse(result)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "new get payload response")
}
return res, nil
}
@@ -663,26 +669,26 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, block inter
}

// Fetch all blobsAndCellsProofs from EL
blobsAndCellsProofs, err := s.GetBlobsV2(ctx, kzgHashes)
blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
}

var cellsAndProofs []kzg.CellsAndProofs
for _, blobAndCellProofs := range blobsAndCellsProofs {
if blobAndCellProofs == nil {
for _, blobAndProof := range blobAndProofV2s {
if blobAndProof == nil {
return nil, wrapWithBlockRoot(errors.New("unable to reconstruct data column sidecars, did not get all blobs from EL"), blockRoot, "")
}

var blob kzg.Blob
copy(blob[:], blobAndCellProofs.Blob)
copy(blob[:], blobAndProof.Blob)
cells, err := kzg.ComputeCells(&blob)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "could not compute cells")
}

proofs := make([]kzg.Proof, len(blobAndCellProofs.CellProofs))
for i, proof := range blobAndCellProofs.CellProofs {
proofs := make([]kzg.Proof, len(blobAndProof.KzgProofs))
for i, proof := range blobAndProof.KzgProofs {
proofs[i] = kzg.Proof(proof)
}
cellsAndProofs = append(cellsAndProofs, kzg.CellsAndProofs{

@@ -318,11 +318,11 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, uint64(2), g)

commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
require.DeepEqual(t, commitments, resp.BlobsBundle.GetKzgCommitments())
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
require.DeepEqual(t, proofs, resp.BlobsBundle.GetProofs())
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
require.DeepEqual(t, blobs, resp.BlobsBundle.GetBlobs())
})
t.Run(GetPayloadMethodV4, func(t *testing.T) {
payloadId := [8]byte{1}
@@ -373,11 +373,11 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, uint64(2), g)

commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
require.DeepEqual(t, commitments, resp.BlobsBundle.GetKzgCommitments())
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
require.DeepEqual(t, proofs, resp.BlobsBundle.GetProofs())
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
require.DeepEqual(t, blobs, resp.BlobsBundle.GetBlobs())
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
@@ -2502,6 +2502,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
require.NoError(t, err)

ctx := context.Background()

t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "get blobs V2 for block", err)
@@ -2578,18 +2579,19 @@ func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.

require.Equal(t, len(blobMasks), numBlobs)

blobAndCellProofs := make([]*pb.BlobAndCellProofJson, numBlobs)
blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
for i := range blobAndCellProofs {
if !blobMasks[i] {
continue
}

blobAndCellProofs[i] = &pb.BlobAndCellProofJson{
Blob: []byte("0xblob"),
CellProofs: []hexutil.Bytes{},
blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
Blob: []byte("0xblob"),
KzgProofs: []hexutil.Bytes{},
}
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
blobAndCellProofs[i].CellProofs = append(blobAndCellProofs[i].CellProofs, []byte(fmt.Sprintf("0xproof%d", j)))
cellProof := make([]byte, 48)
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
}
}


@@ -87,43 +87,45 @@ type serviceFlagOpts struct {
// full PoS node. It handles the lifecycle of the entire system and registers
// services to a service registry.
type BeaconNode struct {
cliCtx *cli.Context
ctx context.Context
cancel context.CancelFunc
services *runtime.ServiceRegistry
lock sync.RWMutex
stop chan struct{} // Channel to wait for termination notifications.
db db.Database
slasherDB db.SlasherDatabase
attestationCache *cache.AttestationCache
attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
custodyInfo *peerdas.CustodyInfo
cliCtx *cli.Context
ctx context.Context
cancel context.CancelFunc
services *runtime.ServiceRegistry
lock sync.RWMutex
stop chan struct{} // Channel to wait for termination notifications.
db db.Database
slasherDB db.SlasherDatabase
attestationCache *cache.AttestationCache
attestationPool attestations.Pool
exitPool voluntaryexits.PoolManager
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
blsToExecPool blstoexec.PoolManager
depositCache cache.DepositCache
trackedValidatorsCache *cache.TrackedValidatorsCache
payloadIDCache *cache.PayloadIDCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
BackfillOpts []backfill.ServiceOption
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
verifyInitWaiter *verification.InitializerWaiter
syncChecker *initialsync.SyncChecker
custodyInfo *peerdas.CustodyInfo
}

// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -189,6 +191,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BlobStorage = blobs
|
||||
}
|
||||
|
||||
if beacon.DataColumnStorage == nil {
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new data column storage")
|
||||
}
|
||||
|
||||
beacon.DataColumnStorage = dataColumnStorage
|
||||
}
|
||||
|
||||
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not start modules")
|
||||
@@ -279,7 +290,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, errors.Wrap(err, "could not start DB")
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
@@ -491,6 +504,10 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
|
||||
return nil, errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
if err := b.DataColumnStorage.Clear(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear data column storage")
|
||||
}
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new database")
|
||||
@@ -776,6 +793,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
blockchain.WithDataColumnStorage(b.DataColumnStorage),
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
@@ -862,6 +880,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
@@ -889,6 +908,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
@@ -1012,6 +1032,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
})
|
||||
@@ -1151,6 +1172,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
// TODO: Add backfill for data column storage
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
|
||||
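The wiring above relies on Prysm's functional-option pattern: `WithDataColumnStorage` injects a backend, and `New` falls back to a freshly constructed one when none was provided. A minimal, self-contained sketch of that pattern, with `Node`, `Storage`, and `WithStorage` as illustrative stand-ins for the beacon node types, not Prysm's actual API:

```go
package main

import "fmt"

// Storage and Node stand in for filesystem.DataColumnStorage and BeaconNode.
type Storage struct{ path string }

type Node struct{ storage *Storage }

// Option mutates a Node during construction and may fail.
type Option func(*Node) error

// WithStorage injects a pre-built backend, as WithDataColumnStorage does above.
func WithStorage(s *Storage) Option {
	return func(n *Node) error {
		n.storage = s
		return nil
	}
}

// NewNode applies the options first, then defaults the backend when none was
// injected, mirroring the nil check in New above.
func NewNode(opts ...Option) (*Node, error) {
	n := &Node{}
	for _, opt := range opts {
		if err := opt(n); err != nil {
			return nil, err
		}
	}
	if n.storage == nil {
		n.storage = &Storage{path: "/default"}
	}
	return n, nil
}

func main() {
	n, _ := NewNode(WithStorage(&Storage{path: "/tmp/columns"}))
	fmt.Println(n.storage.path) // /tmp/columns
}
```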
@@ -54,7 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
ctx, cancel := newCliContextWithCancel(&app, set)

node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
require.NoError(t, err)

node.Close()
@@ -75,7 +75,7 @@ func TestNodeStart_Ok(t *testing.T) {
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),
WithExecutionChainOptions([]execution.Option{}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
require.NoError(t, err)
node.services = &runtime.ServiceRegistry{}
go func() {
@@ -99,7 +99,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),
WithExecutionChainOptions([]execution.Option{}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
require.NoError(t, err)
go func() {
node.Start()
@@ -130,7 +130,7 @@ func TestClearDB(t *testing.T) {
context, cancel := newCliContextWithCancel(&app, set)
options := []Option{
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
_, err = New(context, cancel, options...)
require.NoError(t, err)

@@ -50,3 +50,20 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
return nil
}
}

// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode.
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
bn.DataColumnStorage = bs
return nil
}
}

// WithDataColumnStorageOptions appends one or more filesystem.DataColumnStorageOption to the beacon node,
// to be used when initializing data column storage.
func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Option {
return func(bn *BeaconNode) error {
bn.DataColumnStorageOptions = append(bn.DataColumnStorageOptions, opt...)
return nil
}
}

@@ -542,10 +542,27 @@ func TestService_BroadcastDataColumn(t *testing.T) {
}),
}

b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
var (
comts [][]byte
blobs []kzg.Blob
)
for i := int64(0); i < 6; i++ {
blob := kzg.Blob(util.GetRandBlob(i))
commitment, err := kzg.BlobToKZGCommitment(&blob)
require.NoError(t, err)
comts = append(comts, commitment[:])
blobs = append(blobs, blob)
}

b := util.NewBeaconBlockFulu()
b.Block.Body.BlobKzgCommitments = comts
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
blobs := make([]kzg.Blob, 6)
sidecars, err := peerdas.DataColumnSidecars(b, blobs)
for i := range blobs {
blobs[i] = kzg.Blob(util.GetRandBlob(int64(i)))
}
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
sidecars, err := peerdas.DataColumnSidecars(sb, cellsAndProofs)
require.NoError(t, err)

sidecar := sidecars[0]

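The rewritten test builds its fixtures in a fixed order: random blobs, their KZG commitments embedded in a Fulu block body, then cells and proofs regrouped into column sidecars. The helper below restates that flow in one place; every call it makes appears in the diff above, but it assumes the surrounding test file's imports and should be read as a sketch, not a drop-in utility.

```go
// buildFuluSidecars condenses the construction flow used in the test above.
func buildFuluSidecars(t *testing.T, blobCount int64) []*ethpb.DataColumnSidecar {
	var (
		comts [][]byte
		blobs []kzg.Blob
	)
	for i := int64(0); i < blobCount; i++ {
		blob := kzg.Blob(util.GetRandBlob(i))
		commitment, err := kzg.BlobToKZGCommitment(&blob)
		require.NoError(t, err)
		comts = append(comts, commitment[:])
		blobs = append(blobs, blob)
	}

	// The commitments must live in the block body so the sidecars'
	// inclusion proofs verify against the block.
	b := util.NewBeaconBlockFulu()
	b.Block.Body.BlobKzgCommitments = comts
	sb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)

	// Cells and proofs are computed per blob, then regrouped per column.
	cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
	sidecars, err := peerdas.DataColumnSidecars(sb, cellsAndProofs)
	require.NoError(t, err)
	return sidecars
}
```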
@@ -187,7 +187,8 @@ func (s *Service) RefreshPersistentSubnets() {
// Compare current epoch with Altair fork epoch
altairForkEpoch := params.BeaconConfig().AltairForkEpoch

if currentEpoch < altairForkEpoch {
// We add `1` to the current epoch because we want to prepare one epoch before the Altair fork.
if currentEpoch+1 < altairForkEpoch {
// Phase 0 behaviour.
if isBitVUpToDate {
// Return early if bitfield hasn't changed.
@@ -225,7 +226,8 @@ func (s *Service) RefreshPersistentSubnets() {
// Compare current epoch with the Fulu fork epoch.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch

if currentEpoch < fuluForkEpoch {
// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
if currentEpoch+1 < fuluForkEpoch {
// Altair behaviour.
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
// Nothing to do, return early.

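A quick worked example of the off-by-one this hunk fixes, with an illustrative fork epoch of 100: the pre-fork branch should stop being taken at epoch 99, one epoch early, which is exactly what comparing `currentEpoch+1` achieves.

```go
package main

import "fmt"

func main() {
	const fuluForkEpoch = uint64(100)
	for _, currentEpoch := range []uint64{98, 99, 100} {
		// Same comparison as the patched code: prepare one epoch ahead.
		preFork := currentEpoch+1 < fuluForkEpoch
		fmt.Printf("epoch %d: pre-fork behaviour = %v\n", currentEpoch, preFork)
	}
	// Epoch 98 keeps the old behaviour; epochs 99 and 100 do not.
}
```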
@@ -284,7 +284,7 @@ func (d *DataColumnSidecarsByRootReq) Less(i, j int) bool {
return rootCmp < 0
}

return (*d)[i].ColumnIndex < (*d)[j].ColumnIndex
return (*d)[i].Index < (*d)[j].Index
}

// Swap implements sort.Interface. It swaps the elements with indexes i and j.

@@ -203,8 +203,8 @@ func generateDataColumnIdentifiers(n int) []*eth.DataColumnIdentifier {
r := make([]*eth.DataColumnIdentifier, n)
for i := 0; i < n; i++ {
r[i] = &eth.DataColumnIdentifier{
BlockRoot: bytesutil.PadTo([]byte{byte(i)}, 32),
ColumnIndex: uint64(i),
BlockRoot: bytesutil.PadTo([]byte{byte(i)}, 32),
Index: uint64(i),
}
}
return r
@@ -292,24 +292,24 @@ func TestDataColumnSidecarsByRootReq_MarshalUnmarshal(t *testing.T) {
func TestDataColumnSidecarsByRootReq_Sort(t *testing.T) {
ids := []*eth.DataColumnIdentifier{
{
BlockRoot: bytesutil.PadTo([]byte{3}, 32),
ColumnIndex: 0,
BlockRoot: bytesutil.PadTo([]byte{3}, 32),
Index: 0,
},
{
BlockRoot: bytesutil.PadTo([]byte{2}, 32),
ColumnIndex: 2,
BlockRoot: bytesutil.PadTo([]byte{2}, 32),
Index: 2,
},
{
BlockRoot: bytesutil.PadTo([]byte{2}, 32),
ColumnIndex: 1,
BlockRoot: bytesutil.PadTo([]byte{2}, 32),
Index: 1,
},
{
BlockRoot: bytesutil.PadTo([]byte{1}, 32),
ColumnIndex: 2,
BlockRoot: bytesutil.PadTo([]byte{1}, 32),
Index: 2,
},
{
BlockRoot: bytesutil.PadTo([]byte{0}, 32),
ColumnIndex: 3,
BlockRoot: bytesutil.PadTo([]byte{0}, 32),
Index: 3,
},
}
req := DataColumnSidecarsByRootReq(ids)
@@ -321,8 +321,8 @@ func TestDataColumnSidecarsByRootReq_Sort(t *testing.T) {

ids = []*eth.DataColumnIdentifier{
{
BlockRoot: bytesutil.PadTo([]byte{0}, 32),
ColumnIndex: 3,
BlockRoot: bytesutil.PadTo([]byte{0}, 32),
Index: 3,
},
}
req = DataColumnSidecarsByRootReq(ids)

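Assuming `Len` and `Swap` are defined with the same pointer receiver as `Less`, a caller sorts a request as below; the identifier values are illustrative, and the ordering matches what the test asserts: block root first, then the renamed `Index` field.

```go
// Hypothetical usage of the comparator above (assumes the test file's imports).
ids := []*eth.DataColumnIdentifier{
	{BlockRoot: bytesutil.PadTo([]byte{2}, 32), Index: 1},
	{BlockRoot: bytesutil.PadTo([]byte{1}, 32), Index: 2},
	{BlockRoot: bytesutil.PadTo([]byte{2}, 32), Index: 0},
}
req := types.DataColumnSidecarsByRootReq(ids)
sort.Sort(&req)
// Resulting order: (root 0x01…, 2), (root 0x02…, 0), (root 0x02…, 1).
```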
@@ -198,7 +198,7 @@ func TestGetSpec(t *testing.T) {
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)

assert.Equal(t, 173, len(data))
assert.Equal(t, 174, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -571,6 +571,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "100", v)
case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
assert.Equal(t, "101", v)
case "MAX_BLOBS_PER_BLOCK_FULU":
assert.Equal(t, "12", v)
default:
t.Errorf("Incorrect key: %s", k)
}

@@ -56,6 +56,7 @@ type BeaconDbBlocker struct {
ChainInfoFetcher blockchain.ChainInfoFetcher
GenesisTimeFetcher blockchain.TimeFetcher
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
}

// Block returns the beacon block for a given identifier. The identifier can be one of:
@@ -188,31 +189,42 @@ func (p *BeaconDbBlocker) blobsFromStoredBlobs(
// This function assumes that all the non-extended data columns are available in the store.
func (p *BeaconDbBlocker) blobsFromNonExtendedStoredDataColumns(
root [fieldparams.RootLength]byte,
indices map[uint64]bool,
blobIndices map[uint64]bool,
) ([]*blocks.VerifiedROBlob, *core.RpcError) {
nonExtendedColumnsCount := uint64(fieldparams.NumberOfColumns / 2)

// Load the data columns corresponding to the non-extended blobs.
storedDataColumnsSidecar := make([]*ethpb.DataColumnSidecar, 0, nonExtendedColumnsCount)
dataColumnIndices := make([]uint64, 0, nonExtendedColumnsCount)
for index := range nonExtendedColumnsCount {
verifiedRODataColumn, err := p.BlobStorage.GetColumn(root, index)
if err != nil {
log.WithFields(log.Fields{
"blockRoot": hexutil.Encode(root[:]),
"column": index,
}).Error(errors.Wrapf(err, "could not retrieve column %d for block root %#x.", index, root))
dataColumnIndices = append(dataColumnIndices, index)
}

return nil, &core.RpcError{
Err: fmt.Errorf("could not retrieve column %d for block root %#x", index, root),
Reason: core.Internal,
}
verifiedRODataColumns, err := p.DataColumnStorage.Get(root, dataColumnIndices)
if err != nil {
log.WithField("blockRoot", hexutil.Encode(root[:])).Error(errors.Wrapf(err, "missing data columns for block root %#x", root))

return nil, &core.RpcError{
Err: fmt.Errorf("could not retrieve data column sidecars for block root %#x", root),
Reason: core.Internal,
}
}

if uint64(len(verifiedRODataColumns)) != nonExtendedColumnsCount {
log.WithField("blockRoot", hexutil.Encode(root[:])).Error("not enough data columns returned to extract blobs")

return nil, &core.RpcError{
Err: fmt.Errorf("not enough data columns returned to extract blobs for block root %#x", root),
Reason: core.Internal,
}
}

storedDataColumnsSidecar := make([]*ethpb.DataColumnSidecar, 0, nonExtendedColumnsCount)
for _, verifiedRODataColumn := range verifiedRODataColumns {
storedDataColumnsSidecar = append(storedDataColumnsSidecar, verifiedRODataColumn.DataColumnSidecar)
}

// Get verified RO blobs from the data columns.
verifiedROBlobs, err := peerdas.Blobs(indices, storedDataColumnsSidecar)
verifiedROBlobs, err := peerdas.Blobs(blobIndices, storedDataColumnsSidecar)
if err != nil {
log.WithField("blockRoot", hexutil.Encode(root[:])).Error(errors.Wrap(err, "could not compute blobs from data columns"))
return nil, &core.RpcError{Err: errors.Wrap(err, "could not compute blobs from data columns"), Reason: core.Internal}
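Why the first `NumberOfColumns/2` columns are enough: under the PeerDAS layout assumed here, column `i` carries cell `i` of every blob in the block, and only the first half of each extended row is original (un-extended) data, so concatenating row `j` across those columns reassembles blob `j`. The fragment below is a self-contained illustration with made-up sizes, not Prysm's implementation.

```go
// Illustrative sizes; the real values come from the beacon chain config.
const (
	numberOfColumns = 128
	bytesPerCell    = 2048
)

// blobFromNonExtendedColumns reassembles blob blobIndex from the first half
// of the columns. columns[i][j] is the cell of blob j held by column i.
func blobFromNonExtendedColumns(columns [][][]byte, blobIndex int) []byte {
	half := numberOfColumns / 2
	blob := make([]byte, 0, half*bytesPerCell)
	for i := 0; i < half; i++ {
		blob = append(blob, columns[i][blobIndex]...)
	}
	return blob
}
```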
@@ -224,28 +236,37 @@ func (p *BeaconDbBlocker) blobsFromNonExtendedStoredDataColumns(
// blobsFromReconstructedDataColumns retrieves data columns from the store, reconstructs the whole matrix, and returns the verified RO blobs.
func (p *BeaconDbBlocker) blobsFromReconstructedDataColumns(
root [fieldparams.RootLength]byte,
indices map[uint64]bool,
storedDataColumnsIndices map[uint64]bool,
blobIndices map[uint64]bool,
) ([]*blocks.VerifiedROBlob, *core.RpcError) {
// Load all the data columns we have in the store.
// Load all possible data columns for this block root.
// Theoretically, we could only retrieve the minimum number of columns needed to reconstruct the missing ones,
// but here we make the assumption that the cost of loading all the columns from the store is negligible
// compared to the cost of reconstructing them.
storedDataColumnsSidecar := make([]*ethpb.DataColumnSidecar, 0, len(storedDataColumnsIndices))
for index := range storedDataColumnsIndices {
verifiedRODataColumn, err := p.BlobStorage.GetColumn(root, index)
if err != nil {
log.WithFields(log.Fields{
"blockRoot": hexutil.Encode(root[:]),
"column": index,
}).Error(errors.Wrapf(err, "could not retrieve column %d for block root %#x.", index, root))
numberOfColumns := params.BeaconConfig().NumberOfColumns

return nil, &core.RpcError{
Err: fmt.Errorf("could not retrieve column %d for block root %#x", index, root),
Reason: core.Internal,
}
verifiedRODataColumnSidecars, err := p.DataColumnStorage.Get(root, nil)
if err != nil {
log.WithFields(log.Fields{
"blockRoot": hexutil.Encode(root[:]),
}).Error(errors.Wrapf(err, "get data column sidecars"))

return nil, &core.RpcError{
Err: fmt.Errorf("could not retrieve data column sidecars for block root %#x", root),
Reason: core.Internal,
}
}

if uint64(len(verifiedRODataColumnSidecars)) < numberOfColumns/2 {
log.WithField("blockRoot", hexutil.Encode(root[:])).Error("not enough data columns returned to reconstruct blobs")

return nil, &core.RpcError{
Err: fmt.Errorf("not enough data columns returned to reconstruct blobs for block root %#x", root),
Reason: core.Internal,
}
}

storedDataColumnsSidecar := make([]*ethpb.DataColumnSidecar, 0, len(verifiedRODataColumnSidecars))
for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
storedDataColumnsSidecar = append(storedDataColumnsSidecar, verifiedRODataColumn.DataColumnSidecar)
}

@@ -266,14 +287,13 @@ func (p *BeaconDbBlocker) blobsFromReconstructedDataColumns(
firstDataColumnSidecar.KzgCommitmentsInclusionProof,
recoveredCellsAndProofs,
)

if err != nil {
log.WithField("blockRoot", hexutil.Encode(root[:])).Error(errors.Wrap(err, "could not reconstruct data columns sidecars"))
return nil, &core.RpcError{Err: errors.Wrap(err, "could not reconstruct data columns sidecars"), Reason: core.Internal}
}

// Get verified RO blobs from the data columns.
verifiedROBlobs, err := peerdas.Blobs(indices, reconstructedDataColumnSidecars)
verifiedROBlobs, err := peerdas.Blobs(blobIndices, reconstructedDataColumnSidecars)
if err != nil {
log.WithField("blockRoot", hexutil.Encode(root[:])).Error(errors.Wrap(err, "could not compute blobs from data columns"))
return nil, &core.RpcError{Err: errors.Wrap(err, "could not compute blobs from data columns"), Reason: core.Internal}
@@ -295,7 +315,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(indices map[uint64]bool, ro
root := bytesutil.ToBytes32(rootBytes)

// Retrieve the data column indices we actually store.
summary := p.BlobStorage.Summary(root)
summary := p.DataColumnStorage.Summary(root)

storedDataColumnsIndices := make(map[uint64]bool, numberOfColumns)
for i := range numberOfColumns {
@@ -334,7 +354,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(indices map[uint64]bool, ro
}

// Some non-extended data columns are missing, we need to reconstruct them.
return p.blobsFromReconstructedDataColumns(root, indices, storedDataColumnsIndices)
return p.blobsFromReconstructedDataColumns(root, indices)
}

// Blobs returns the blobs for a given block identifier and blob indices. The identifier can be one of:

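Taken together, `blobsFromStoredDataColumns` and the two helpers above amount to a three-way decision on the storage summary. The hypothetical function below summarizes that logic; the parameter names and string results are illustrative only, and the one-half threshold reflects the erasure-coding assumption stated in the code.

```go
// chooseBlobPath sketches the dispatch performed by blobsFromStoredDataColumns.
func chooseBlobPath(storedCount, numberOfColumns uint64, allNonExtendedStored bool) string {
	switch {
	case allNonExtendedStored:
		// Every column in the first half is on disk: slice blobs out directly.
		return "non-extended fast path"
	case storedCount >= numberOfColumns/2:
		// At least half the columns survive: reconstruct the matrix, then extract.
		return "reconstruct then extract"
	default:
		// Below the reconstruction threshold: blobs cannot be served locally.
		return "unrecoverable"
	}
}
```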
@@ -348,42 +348,46 @@ func TestBlobsFromStoredDataColumns(t *testing.T) {
}

// Convert blobs to data columns.
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.NoError(t, err)

// Create verified RO data columns.
verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, fieldparams.NumberOfColumns)
verifiedRoDataColumns := make([]blocks.VerifiedRODataColumn, 0, fieldparams.NumberOfColumns)
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
require.NoError(t, err)

verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
verifiedRoDataColumns = append(verifiedRoDataColumns, verifiedRoDataColumn)
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Define a blob storage.
blobStorage := filesystem.NewEphemeralBlobStorage(t)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

// Save the data columns in the store.
verifiedRoDataColumnsToSave := make([]blocks.VerifiedRODataColumn, 0, len(tc.storedColumnsIndice))
for _, columnIndex := range tc.storedColumnsIndice {
verifiedRoDataColumn := verifiedRoDataColumns[columnIndex]
err := blobStorage.SaveDataColumn(*verifiedRoDataColumn)
require.NoError(t, err)
verifiedRoDataColumnsToSave = append(verifiedRoDataColumnsToSave, verifiedRoDataColumn)
}

err := dataColumnStorage.Save(verifiedRoDataColumnsToSave)
require.NoError(t, err)

// Define the blocker.
blocker := &BeaconDbBlocker{
BlobStorage: blobStorage,
DataColumnStorage: dataColumnStorage,
}

// Get the blobs from the data columns.
actual, err := blocker.blobsFromStoredDataColumns(blobsIndex, blockRoot[:])
actual, rpcErr := blocker.blobsFromStoredDataColumns(blobsIndex, blockRoot[:])
if tc.isError {
require.Equal(t, tc.errorReason, err.Reason)
require.Equal(t, tc.errorReason, rpcErr.Reason)
} else {
require.Equal(t, nilError, err)
require.Equal(t, nilError, rpcErr)
expected := verifiedRoBlobs
require.DeepSSZEqual(t, expected, actual)
}

@@ -13,7 +13,6 @@ import (
"github.com/pkg/errors"
builderapi "github.com/prysmaticlabs/prysm/v5/api/client/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -370,9 +369,9 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
}

if isPeerDASEnabled {
dataColumnSideCars, err := unblindDataColumnsSidecars(copiedBlock, bundle)
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, bundle.Blobs, bundle.Proofs)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "unblind data columns sidecars")
return nil, nil, nil, errors.Wrap(err, "construct data column sidecars")
}

return copiedBlock, nil, dataColumnSideCars, nil
@@ -397,19 +396,9 @@ func (vs *Server) handleUnblindedBlock(
}

if isPeerDASEnabled {
// Convert blobs from slices to array.
blobs := make([]kzg.Blob, 0, len(rawBlobs))
for _, blob := range rawBlobs {
if len(blob) != kzg.BytesPerBlob {
return nil, nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", kzg.BytesPerBlob, len(blob))
}

blobs = append(blobs, kzg.Blob(blob))
}

dataColumnSideCars, err := peerdas.DataColumnSidecars(block, blobs)
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "data column sidecars")
return nil, nil, errors.Wrap(err, "construct data column sidecars")
}

return nil, dataColumnSideCars, nil
@@ -476,48 +465,59 @@ func (vs *Server) broadcastAndReceiveDataColumns(
root [fieldparams.RootLength]byte,
slot primitives.Slot,
) error {
eg, _ := errgroup.WithContext(ctx)
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))

eg, _ := errgroup.WithContext(ctx)
for _, sd := range sidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
if err != nil {
return errors.Wrap(err, "new read-only data column with root")
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)

// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
sidecar := sd

eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := sidecar.ColumnIndex % params.BeaconConfig().DataColumnSidecarSubnetCount

if sidecar.ColumnIndex < dataColumnsWithholdCount {
if sidecar.Index < dataColumnsWithholdCount {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"dataColumnIndex": sidecar.ColumnIndex,
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"index": sidecar.Index,
}).Warning("Withholding data column")
} else {
if err := vs.P2P.BroadcastDataColumn(ctx, root, subnet, sidecar); err != nil {
return errors.Wrap(err, "broadcast data column")
}

return nil
}

roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
if err != nil {
return errors.Wrap(err, "new read-only data column with root")
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)

if err := vs.P2P.BroadcastDataColumn(ctx, root, subnet, sidecar); err != nil {
return errors.Wrap(err, "broadcast data column")
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
if err := vs.DataColumnReceiver.ReceiveDataColumn(verifiedRODataColumn); err != nil {
return errors.Wrap(err, "receive data column")
}

vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn},
})
return nil
})
}
return eg.Wait()

if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}

if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
}

for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}
return nil
}

// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.

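The restructured function separates gossip from persistence: sidecars are wrapped and verified up front, broadcast concurrently unless withheld by the `DataColumnsWithholdCount` testing feature, and only handed to the receiver as a single batch after every goroutine finishes. A reduced, runnable sketch of that control flow, with stand-in types and an assumed modulo subnet mapping:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// Sidecar stands in for the data column sidecar type.
type Sidecar struct{ Index uint64 }

func gossip(_ context.Context, subnet uint64, s *Sidecar) error {
	fmt.Printf("column %d -> subnet %d\n", s.Index, subnet)
	return nil
}

func broadcastColumns(ctx context.Context, sidecars []*Sidecar, withholdCount, subnetCount uint64) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, sd := range sidecars {
		sidecar := sd // per-iteration copy, as in the loop above (pre-Go 1.22 semantics)
		eg.Go(func() error {
			if sidecar.Index < withholdCount {
				return nil // withheld: never gossiped
			}
			return gossip(ctx, sidecar.Index%subnetCount, sidecar)
		})
	}
	return eg.Wait() // receive/notify only after all broadcasts settle
}

func main() {
	cols := []*Sidecar{{Index: 0}, {Index: 1}, {Index: 2}, {Index: 3}}
	_ = broadcastColumns(context.Background(), cols, 1, 2)
}
```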
@@ -4,8 +4,6 @@ import (
"bytes"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -70,29 +68,3 @@ func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.B
}
return sidecars, nil
}

// TODO: Add tests
func unblindDataColumnsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) ([]*ethpb.DataColumnSidecar, error) {
// Check if the block is at least a Deneb block.
if block.Version() < version.Deneb {
return nil, nil
}

// Convert blobs from slices to array.
blobs := make([]kzg.Blob, 0, len(bundle.Blobs))
for _, blob := range bundle.Blobs {
if len(blob) != kzg.BytesPerBlob {
return nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", kzg.BytesPerBlob, len(blob))
}

blobs = append(blobs, kzg.Blob(blob))
}

// Retrieve data columns from blobs.
dataColumnSidecars, err := peerdas.DataColumnSidecars(block, blobs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars")
}

return dataColumnSidecars, nil
}

@@ -120,6 +120,7 @@ type Config struct {
Router *http.ServeMux
ClockWaiter startup.ClockWaiter
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
}
@@ -195,6 +196,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
DataColumnStorage: s.cfg.DataColumnStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
coreService := &core.Service{

@@ -164,7 +164,6 @@ go_test(
"block_batcher_test.go",
"broadcast_bls_changes_test.go",
"context_test.go",
"data_columns_reconstruct_test.go",
"data_columns_sampling_test.go",
"data_columns_test.go",
"decode_pubsub_test.go",

@@ -41,7 +41,6 @@ go_library(
"//runtime:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

@@ -90,7 +90,10 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
if err := v.SidecarKzgProofVerified(); err != nil {
return err
}
if err := bs.store.Persist(bs.current, rb); err != nil {

sc := blocks.NewSidecarFromBlobSidecar(rb)

if err := bs.store.Persist(bs.current, sc); err != nil {
return err
}

@@ -4,7 +4,6 @@ import (
"context"
"sync"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
@@ -89,11 +88,8 @@ func (s *Store) fillBack(ctx context.Context, current primitives.Slot, blocks []
status.LowParentRoot, highest.Root(), status.LowSlot, highest.Block().Slot())
}

// TODO: Use the real node ID when backfill is implemented for data columns.
emptyNodeID := enode.ID{}

for i := range blocks {
if err := store.IsDataAvailable(ctx, emptyNodeID, current, blocks[i]); err != nil {
if err := store.IsDataAvailable(ctx, current, blocks[i]); err != nil {
return nil, err
}
}

@@ -85,17 +85,17 @@ func RequestDataColumnSidecarsByRoot(

// Mark columns as successful
for _, sidecar := range peerSidecars {
colIndex := sidecar.ColumnIndex
successfulColumns[colIndex] = true
index := sidecar.Index
successfulColumns[index] = true
}

for _, colIndex := range dataColumns {
if !successfulColumns[colIndex] {
for _, index := range dataColumns {
if !successfulColumns[index] {
// Remove this peer if any requested column wasn't successful
delete(dataColumnsByAdmissiblePeer, peer)
log.WithFields(logrus.Fields{
"peer": peer.String(),
"missingColumn": colIndex,
"missingColumn": index,
}).Debug("Peer failed to return requested data column")
break
}
@@ -150,12 +150,15 @@ func RequestDataColumnSidecarsByRoot(
// NOTE: During the initial sync, LazilyPersistentStoreColumn caches sidecars
// and saves them to disk within IsDataAvailable. SaveDataColumns is intended
// for use when no caching is done (e.g. in the pending blocks queue).
func SaveDataColumns(sidecars []blocks.RODataColumn, blobStorage *filesystem.BlobStorage) error {
for i := range sidecars {
verfiedCol := blocks.NewVerifiedRODataColumn(sidecars[i])
if err := blobStorage.SaveDataColumn(verfiedCol); err != nil {
return err
}
func SaveDataColumns(sidecars []blocks.RODataColumn, dataColumnStorage *filesystem.DataColumnStorage) error {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
for _, sidecar := range sidecars {
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(sidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}

if err := dataColumnStorage.Save(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

return nil
@@ -168,7 +171,7 @@ func FindMissingDataColumns(
block interfaces.ReadOnlySignedBeaconBlock,
nodeID enode.ID,
custodyGroupCount uint64,
blobStorage *filesystem.BlobStorage,
dataColumnStorage *filesystem.DataColumnStorage,
) (map[uint64]bool, error) {
// Blocks before Fulu have no data columns.
if block.Version() < version.Fulu {
@@ -188,11 +191,11 @@ func FindMissingDataColumns(

// Retrieve the columns we store for the root.
numberOfColumns := params.BeaconConfig().NumberOfColumns
summary := blobStorage.Summary(root)
summary := dataColumnStorage.Summary(root)

storedColumns := make(map[uint64]bool, numberOfColumns)
for i := range numberOfColumns {
if summary.HasDataColumnIndex(i) {
if summary.HasIndex(i) {
storedColumns[i] = true
}
}
@@ -223,8 +226,8 @@ func RequestsForDataColumnsByRoot(
req := make(types.DataColumnSidecarsByRootReq, 0, len(missingColumns))
for _, column := range missingColumns {
req = append(req, &eth.DataColumnIdentifier{
BlockRoot: root[:],
ColumnIndex: column,
BlockRoot: root[:],
Index: column,
})
}

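With the summary API, `FindMissingDataColumns` reduces to a set difference between the node's custody columns and what is on disk. A minimal sketch, assuming only the `HasIndex` method used above:

```go
// summaryLike is a stand-in for the data column storage summary.
type summaryLike interface{ HasIndex(uint64) bool }

// missingColumns returns the custody columns absent from the summary.
func missingColumns(custody map[uint64]bool, summary summaryLike) map[uint64]bool {
	missing := make(map[uint64]bool, len(custody))
	for column := range custody {
		if !summary.HasIndex(column) {
			missing[column] = true
		}
	}
	return missing
}
```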
@@ -6,12 +6,10 @@ import (
"slices"
"time"

"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -21,16 +19,13 @@ import (
const broadCastMissingDataColumnsTimeIntoSlot = 3 * time.Second

func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColumn blocks.VerifiedRODataColumn) error {
// Get the block root.
// Get the block root and the slot.
blockRoot := verifiedRODataColumn.BlockRoot()
slot := verifiedRODataColumn.Slot()

// Get the columns we store.
storedDataColumns, err := s.storedDataColumns(blockRoot)
if err != nil {
return errors.Wrap(err, "stored data columns")
}

storedColumnsCount := uint64(len(storedDataColumns))
storedDataColumns := s.cfg.dataColumnStorage.Summary(blockRoot)
storedColumnsCount := storedDataColumns.Count()
numberOfColumns := params.BeaconConfig().NumberOfColumns

// If less than half of the columns are stored, reconstruction is not possible.
@@ -67,14 +62,14 @@ func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColu
return errors.Wrap(err, "peer info")
}

// Load the data columns sidecars.
dataColumnSideCars := make([]*ethpb.DataColumnSidecar, 0, storedColumnsCount)
for index := range storedDataColumns {
verifiedRODataColumn, err := s.cfg.blobStorage.GetColumn(blockRoot, index)
if err != nil {
return errors.Wrap(err, "get column")
}
// Load all the possible data column sidecars, to minimize reconstruction time.
verifiedRODataColumnSidecars, err := s.cfg.dataColumnStorage.Get(blockRoot, nil)
if err != nil {
return errors.Wrap(err, "get data column sidecars")
}

dataColumnSideCars := make([]*ethpb.DataColumnSidecar, 0, storedColumnsCount)
for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
dataColumnSideCars = append(dataColumnSideCars, verifiedRODataColumn.DataColumnSidecar)
}

@@ -95,9 +90,10 @@ func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColu
return errors.Wrap(err, "data column sidecars")
}

// Save the data columns sidecars in the database.
// Build verified read-only data columns to save.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(localNodeInfo.CustodyColumns))
for _, dataColumnSidecar := range dataColumnSidecars {
shouldSave := localNodeInfo.CustodyColumns[dataColumnSidecar.ColumnIndex]
shouldSave := localNodeInfo.CustodyColumns[dataColumnSidecar.Index]
if !shouldSave {
// We do not custody this column, so we do not need to save it.
continue
@@ -109,35 +105,41 @@ func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColu
}

verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
if err := s.cfg.blobStorage.SaveDataColumn(verifiedRoDataColumn); err != nil {
return errors.Wrap(err, "save column")
}

// Mark the data column as stored (but not received).
if err := s.setStoredDataColumn(blockRoot, dataColumnSidecar.ColumnIndex); err != nil {
return errors.Wrap(err, "set stored data column")
}
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRoDataColumn)
}

log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Debug("Data columns successfully reconstructed from database and saved")
// Save the data column sidecars in the database.
if err := s.cfg.dataColumnStorage.Save(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

// Schedule the broadcast.
if err := s.scheduleReconstructedDataColumnsBroadcast(ctx, blockRoot, verifiedRODataColumn); err != nil {
if err := s.scheduleReconstructedDataColumnsBroadcast(ctx, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "schedule reconstructed data columns broadcast")
}

log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"slot": slot,
"fromColumnsCount": storedColumnsCount,
}).Debug("Data columns reconstructed and saved")

return nil
}

func (s *Service) scheduleReconstructedDataColumnsBroadcast(
ctx context.Context,
blockRoot [fieldparams.RootLength]byte,
dataColumn blocks.VerifiedRODataColumn,
dataColumnSidecar blocks.VerifiedRODataColumn,
) error {
log := log.WithField("root", fmt.Sprintf("%x", blockRoot))
// Extract the block root, the proposer index and the slot from the data column sidecar.
root := dataColumnSidecar.BlockRoot()
proposerIndex := dataColumnSidecar.ProposerIndex()
slot := dataColumnSidecar.Slot()

// Retrieve the slot of the block.
slot := dataColumn.Slot()
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%x", root),
"slot": slot,
})

// Get the time corresponding to the start of the slot.
genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
@@ -156,18 +158,6 @@ func (s *Service) scheduleReconstructedDataColumnsBroadcast(
s.dataColumsnReconstructionLock.Lock()
defer s.dataColumsnReconstructionLock.Unlock()

// Get the data columns received via gossip.
receivedDataColumns, err := s.receivedDataColumns(blockRoot)
if err != nil {
log.WithError(err).Error("Received data columns")
return
}

if receivedDataColumns == nil {
log.Error("No received data columns")
return
}

// Get the node ID.
nodeID := s.cfg.p2p.NodeID()

@@ -186,17 +176,13 @@ func (s *Service) scheduleReconstructedDataColumnsBroadcast(
}

// Get the data columns we actually store.
storedDataColumns, err := s.storedDataColumns(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%x", blockRoot)).WithError(err).Error("Columns indices")
return
}
summary := s.cfg.dataColumnStorage.Summary(root)

// Compute the missing data columns (data columns we should custody but have not received via gossip).
missingColumns := make(map[uint64]bool, len(localNodeInfo.CustodyColumns))
missingColumns := make([]uint64, 0, len(localNodeInfo.CustodyColumns))
for column := range localNodeInfo.CustodyColumns {
if ok := receivedDataColumns[column]; !ok {
missingColumns[column] = true
if !s.hasSeenDataColumnIndex(slot, proposerIndex, column) {
missingColumns = append(missingColumns, column)
}
}

@@ -206,161 +192,38 @@ func (s *Service) scheduleReconstructedDataColumnsBroadcast(
return
}

for column := range missingColumns {
if ok := storedDataColumns[column]; !ok {
for _, column := range missingColumns {
if !summary.HasIndex(column) {
// This column was not received nor reconstructed. This should not happen.
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%x", blockRoot),
"slot": slot,
"column": column,
}).Error("Data column not received nor reconstructed")
continue
log.WithField("column", column).Error("Data column not received nor reconstructed")
}
}

// Get the non-received but reconstructed data column.
verifiedRODataColumn, err := s.cfg.blobStorage.GetColumn(blockRoot, column)
if err != nil {
log.WithError(err).Error("Get column")
continue
}
// Get the non-received but reconstructed data columns.
verifiedRODataColumnSidecars, err := s.cfg.dataColumnStorage.Get(root, missingColumns)
if err != nil {
log.WithError(err).Error("get data column sidecars")
return
}

for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
// Compute the subnet for this column.
subnet := column % params.BeaconConfig().DataColumnSidecarSubnetCount
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)

// Broadcast the missing data column.
if err := s.cfg.p2p.BroadcastDataColumn(ctx, blockRoot, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
if err := s.cfg.p2p.BroadcastDataColumn(ctx, root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
log.WithError(err).Error("Broadcast data column")
}
}

// Get the missing data columns in sorted form.
missingColumnsList := make([]uint64, 0, len(missingColumns))
for column := range missingColumns {
missingColumnsList = append(missingColumnsList, column)
}

// Sort the missing data columns.
slices.Sort[[]uint64](missingColumnsList)
slices.Sort[[]uint64](missingColumns)

log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%x", blockRoot),
"slot": slot,
"timeIntoSlot": broadCastMissingDataColumnsTimeIntoSlot,
"columns": missingColumnsList,
"columns": missingColumns,
}).Debug("Start broadcasting reconstructed data columns not seen via gossip")
})

return nil
}

// setReceivedDataColumn marks the data column for a given root as received.
func (s *Service) setReceivedDataColumn(root [fieldparams.RootLength]byte, columnIndex uint64) error {
s.receivedDataColumnsFromRootLock.Lock()
defer s.receivedDataColumnsFromRootLock.Unlock()

if err := setDataColumnCache(s.receivedDataColumnsFromRoot, root, columnIndex); err != nil {
return errors.Wrap(err, "set data column cache")
}

return nil
}

// receivedDataColumns returns the received data columns for a given root.
func (s *Service) receivedDataColumns(root [fieldparams.RootLength]byte) (map[uint64]bool, error) {
dataColumns, err := dataColumnsCache(s.receivedDataColumnsFromRoot, root)
if err != nil {
return nil, errors.Wrap(err, "data columns cache")
}

return dataColumns, nil
}

// setStoredDataColumn marks the data column for a given root as stored.
func (s *Service) setStoredDataColumn(root [fieldparams.RootLength]byte, columnIndex uint64) error {
s.storedDataColumnsFromRootLock.Lock()
defer s.storedDataColumnsFromRootLock.Unlock()

if err := setDataColumnCache(s.storedDataColumnsFromRoot, root, columnIndex); err != nil {
return errors.Wrap(err, "set data column cache")
}

return nil
}

// storedDataColumns returns the stored data columns for a given root.
func (s *Service) storedDataColumns(root [fieldparams.RootLength]byte) (map[uint64]bool, error) {
dataColumns, err := dataColumnsCache(s.storedDataColumnsFromRoot, root)
if err != nil {
return nil, errors.Wrap(err, "data columns cache")
}

return dataColumns, nil
}

// setDataColumnCache sets the data column for a given root in columnsCache.
// The caller should hold the lock for the cache.
func setDataColumnCache(columnsCache *cache.Cache, root [fieldparams.RootLength]byte, columnIndex uint64) error {
if columnIndex >= fieldparams.NumberOfColumns {
return errors.Errorf("column index out of bounds: got %d, expected < %d", columnIndex, fieldparams.NumberOfColumns)
}

rootString := fmt.Sprintf("%#x", root)

// Get all the data columns for this root.
items, ok := columnsCache.Get(rootString)
if !ok {
var columns [fieldparams.NumberOfColumns]bool
columns[columnIndex] = true
columnsCache.Set(rootString, columns, cache.DefaultExpiration)

return nil
}

// Cast the array.
columns, ok := items.([fieldparams.NumberOfColumns]bool)
if !ok {
return errors.New("cannot cast data columns from cache")
}

// Add the data column to the data columns.
columns[columnIndex] = true

// Update the data columns in the cache.
columnsCache.Set(rootString, columns, cache.DefaultExpiration)

return nil
}

// dataColumnsCache returns the data columns for a given root in columnsCache.
func dataColumnsCache(columnsCache *cache.Cache, root [fieldparams.RootLength]byte) (map[uint64]bool, error) {
rootString := fmt.Sprintf("%#x", root)

// Get all the data columns for this root.
items, ok := columnsCache.Get(rootString)
if !ok {
return nil, nil
}

// Cast the array.
dataColumns, ok := items.([fieldparams.NumberOfColumns]bool)
if !ok {
return nil, errors.New("cannot cast data columns from cache")
}

// Convert to map.
result := columnsArrayToMap(dataColumns)

return result, nil
}

// columnsArrayToMap converts an array of columns to a map of columns.
func columnsArrayToMap(columnsArray [fieldparams.NumberOfColumns]bool) map[uint64]bool {
columnsMap := make(map[uint64]bool)

for i, v := range columnsArray {
if v {
columnsMap[uint64(i)] = v
}
}

return columnsMap
}

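For reference, the helpers deleted above implemented a per-root column bitmap on top of `go-cache`: because cached values are snapshots stored by value, the boolean array had to be read out, mutated, and written back on every update. The runnable sketch below reproduces that pattern with an illustrative column count.

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

const numberOfColumns = 128 // illustrative; Prysm uses fieldparams.NumberOfColumns

// markColumn sets one bit of the per-root column bitmap, copying the array
// out of the cache and storing the updated copy back.
func markColumn(c *cache.Cache, root string, index uint64) error {
	var columns [numberOfColumns]bool
	if items, ok := c.Get(root); ok {
		columns, ok = items.([numberOfColumns]bool)
		if !ok {
			return fmt.Errorf("cannot cast data columns from cache")
		}
	}
	columns[index] = true
	c.Set(root, columns, cache.DefaultExpiration)
	return nil
}

func main() {
	c := cache.New(time.Minute, 2*time.Minute)
	_ = markColumn(c, "0x01", 3)
	_ = markColumn(c, "0x01", 7)
	items, _ := c.Get("0x01")
	columns := items.([numberOfColumns]bool)
	fmt.Println(columns[3], columns[7]) // true true
}
```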
@@ -1,87 +0,0 @@
package sync

import (
"testing"
"time"

"github.com/patrickmn/go-cache"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestDataColumnsCache(t *testing.T) {
var (
root1 [fieldparams.RootLength]byte
root2 [fieldparams.RootLength]byte
)

root1[0] = 1
root2[0] = 2

columnsCache := cache.New(1*time.Minute, 2*time.Minute)

// Retrieve a non-existent entry
res, err := dataColumnsCache(columnsCache, root1)
require.NoError(t, err)
require.Equal(t, 0, len(res))

res, err = dataColumnsCache(columnsCache, root2)
require.NoError(t, err)
require.Equal(t, 0, len(res))

// Set an entry in an empty cache for this root
err = setDataColumnCache(columnsCache, root1, 1)
require.NoError(t, err)

err = setDataColumnCache(columnsCache, root2, 2)
require.NoError(t, err)

// Retrieve the entry
res, err = dataColumnsCache(columnsCache, root1)
require.NoError(t, err)
require.Equal(t, 1, len(res))
require.Equal(t, true, res[1])

res, err = dataColumnsCache(columnsCache, root2)
require.NoError(t, err)
require.Equal(t, 1, len(res))
require.Equal(t, true, res[2])

// Set a new entry in the cache
err = setDataColumnCache(columnsCache, root1, 11)
require.NoError(t, err)

err = setDataColumnCache(columnsCache, root2, 22)
require.NoError(t, err)

// Retrieve the entries
res, err = dataColumnsCache(columnsCache, root1)
require.NoError(t, err)
require.Equal(t, 2, len(res))
require.Equal(t, true, res[1])
require.Equal(t, true, res[11])

res, err = dataColumnsCache(columnsCache, root2)
require.NoError(t, err)
require.Equal(t, 2, len(res))
require.Equal(t, true, res[2])
require.Equal(t, true, res[22])
}

func TestColumnsArrayToMap(t *testing.T) {
var input [fieldparams.NumberOfColumns]bool
input[0] = true
input[7] = true
input[14] = true
input[125] = true

expected := map[uint64]bool{0: true, 7: true, 14: true, 125: true}

actual := columnsArrayToMap(input)

require.Equal(t, len(expected), len(actual))

for k, v := range expected {
require.Equal(t, v, actual[k])
}
}
@@ -464,8 +464,8 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
req := make(types.DataColumnSidecarsByRootReq, 0)
for col := range requestedColumns {
req = append(req, &eth.DataColumnIdentifier{
BlockRoot: blockProcessedData.BlockRoot[:],
ColumnIndex: col,
BlockRoot: blockProcessedData.BlockRoot[:],
Index: col,
})
}

@@ -479,7 +479,7 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
// TODO: Once peer sampling is used, we should verify all sampled data columns in a single batch instead of looping over columns.
for _, roDataColumn := range roDataColumns {
if verifyColumn(roDataColumn, blockProcessedData, pid, requestedColumns, d.columnVerifier) {
retrievedColumns[roDataColumn.ColumnIndex] = true
retrievedColumns[roDataColumn.Index] = true
}
}

@@ -578,7 +578,7 @@ func verifyColumn(
requestedColumns map[uint64]bool,
dataColumnsVerifier verification.NewDataColumnsVerifier,
) bool {
retrievedColumn := roDataColumn.ColumnIndex
retrievedColumn := roDataColumn.Index

// Filter out columns with incorrect root.
columnRoot := roDataColumn.BlockRoot()

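Before the batched KZG check, `verifyColumn` filters responses on cheap criteria first: the sidecar must belong to the block being sampled and must answer a column that was actually requested. A reduced sketch of that gate, with stand-in types:

```go
// acceptColumn mirrors the cheap pre-checks in verifyColumn; a column that
// passes is still subject to KZG verification afterwards.
func acceptColumn(gotRoot, wantRoot [32]byte, index uint64, requested map[uint64]bool) bool {
	if gotRoot != wantRoot {
		return false // response for a different block
	}
	if !requested[index] {
		return false // unsolicited column index
	}
	return true
}
```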
@@ -95,12 +95,12 @@ func createAndConnectPeer(

for _, identifier := range *req {
// Filter out the columns not to respond.
if columnsNotToRespond[identifier.ColumnIndex] {
if columnsNotToRespond[identifier.Index] {
continue
}

// Create the response.
resp := dataColumnSidecars[identifier.ColumnIndex]
resp := dataColumnSidecars[identifier.Index]

// Send the response.
err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), resp)
@@ -182,7 +182,8 @@ func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTes
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
require.NoError(t, err)

dataColumnSidecars, err := peerdas.DataColumnSidecars(sBlock, blobs)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnSidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
require.NoError(t, err)

blockRoot, err := dataColumnSidecars[0].GetSignedBlockHeader().Header.HashTreeRoot()

@@ -414,7 +414,8 @@ func TestRequestDataColumnSidecarsByRoot(t *testing.T) {
signedBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
require.NoError(t, err)

dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBlock, blobs)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBlock, cellsAndProofs)
require.NoError(t, err)

// Calculate block root
@@ -613,11 +614,11 @@ func TestRequestDataColumnSidecarsByRoot(t *testing.T) {
return expectedColumns[i] < expectedColumns[j]
})
sort.Slice(responseCols, func(i, j int) bool {
return responseCols[i].DataColumnSidecar.ColumnIndex < responseCols[j].DataColumnSidecar.ColumnIndex
return responseCols[i].DataColumnSidecar.Index < responseCols[j].DataColumnSidecar.Index
})

for i := range responseCols {
require.Equal(t, expectedColumns[i], responseCols[i].DataColumnSidecar.ColumnIndex)
require.Equal(t, expectedColumns[i], responseCols[i].DataColumnSidecar.Index)
}

// Verify peer request optimization
@@ -670,7 +671,7 @@ func createAndConnectCustodyPeer(t *testing.T, setup peerSetup, dataColumnSideca
if tracker != nil {
requestedColumns := make([]uint64, 0, len(*req))
for _, identifier := range *req {
requestedColumns = append(requestedColumns, identifier.ColumnIndex)
requestedColumns = append(requestedColumns, identifier.Index)
}
tracker.trackRequest(setup.offset, requestedColumns)
}
@@ -692,14 +693,14 @@ func createAndConnectCustodyPeer(t *testing.T, setup peerSetup, dataColumnSideca

for _, identifier := range *req {
// Check if this column should be skipped using direct map lookup
if skipColumns[identifier.ColumnIndex] {
if skipColumns[identifier.Index] {
continue
}

if !peerInfo.CustodyColumns[identifier.ColumnIndex] {
if !peerInfo.CustodyColumns[identifier.Index] {
continue
}
col := dataColumnSidecars[identifier.ColumnIndex]
col := dataColumnSidecars[identifier.Index]
if err := WriteDataColumnSidecarChunk(stream, chainService, peerP2P.Encoding(), col); err != nil {
log.WithError(err).Error("Failed to write data column sidecar chunk")
closeStream(stream, log)

@@ -48,7 +48,6 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_paulbellamy_ratecounter//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -81,6 +81,7 @@ type blocksFetcherConfig struct {
|
||||
peerFilterCapacityWeight float64
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
@@ -100,6 +101,7 @@ type blocksFetcher struct {
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
blocksPerPeriod uint64
|
||||
@@ -162,6 +164,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
bv: cfg.bv,
|
||||
cv: cfg.cv,
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
@@ -824,11 +827,11 @@ func (f *blocksFetcher) missingColumnsFromRoot(
|
||||
root := roblock.Root()
|
||||
|
||||
// Retrieve the summary for the root.
|
||||
summary := f.bs.Summary(root)
|
||||
summary := f.dcs.Summary(root)
|
||||
|
||||
// Compute the set of missing columns.
|
||||
for column := range custodyColumns {
|
||||
if !summary.HasDataColumnIndex(column) {
|
||||
if !summary.HasIndex(column) {
|
||||
if _, ok := missingColumnsByRoot[root]; !ok {
|
||||
missingColumnsByRoot[root] = make(map[uint64]bool)
|
||||
}
|
||||
@@ -1086,7 +1089,7 @@ func (f *blocksFetcher) fetchDataColumnsFromPeers(
func sortBwbsByColumnIndex(bwbs []blocks.BlockWithROBlobs) {
for _, bwb := range bwbs {
sort.Slice(bwb.Columns, func(i, j int) bool {
return bwb.Columns[i].ColumnIndex < bwb.Columns[j].ColumnIndex
return bwb.Columns[i].Index < bwb.Columns[j].Index
})
}
}
@@ -1277,7 +1280,7 @@ func (f *blocksFetcher) processDataColumns(
bwbs[index].Columns = append(bwbs[index].Columns, dataColumn)
}
// Remove the column from the missing columns.
delete(missingColumnsByRoot[blockRoot], dataColumn.ColumnIndex)
delete(missingColumnsByRoot[blockRoot], dataColumn.Index)
if len(missingColumnsByRoot[blockRoot]) == 0 {
delete(missingColumnsByRoot, blockRoot)
}

@@ -1106,7 +1106,7 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk, 0)
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
@@ -1123,7 +1123,7 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk, 0)
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
@@ -1140,7 +1140,7 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk, 0)
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
@@ -1159,7 +1159,7 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("1")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk, 0)
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
@@ -1264,7 +1264,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
r1: {0, 1},
r7: {0, 1, 2, 3, 4, 5},
}
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk, 0)
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
require.NoError(t, err)
require.Equal(t, 6, len(bwb[i1].Blobs))
@@ -1435,11 +1435,11 @@ func createAndConnectPeer(
dataColumn := dataColumnsSidecar[responseParams.columnIndex]

// Alter the data column if needed.
initialValue0, initialValue1 := dataColumn.DataColumn[0][0], dataColumn.DataColumn[0][1]
initialValue0, initialValue1 := dataColumn.Column[0][0], dataColumn.Column[0][1]

if responseParams.alterate {
dataColumn.DataColumn[0][0] = 0
dataColumn.DataColumn[0][1] = 0
dataColumn.Column[0][0] = 0
dataColumn.Column[0][1] = 0
}

// Send the response.
@@ -1448,8 +1448,8 @@ func createAndConnectPeer(

if responseParams.alterate {
// Restore the data column.
dataColumn.DataColumn[0][0] = initialValue0
dataColumn.DataColumn[0][1] = initialValue1
dataColumn.Column[0][0] = initialValue0
dataColumn.Column[0][1] = initialValue1
}
}

@@ -1771,7 +1771,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {

// What data columns do we store for the block in the same position in blocksParams.
// len(storedDataColumns) has to be the same as len(blocksParams).
storedDataColumns []map[int]bool
storedDataColumns []map[uint64]bool

// Each item in the list represents a peer.
// We can specify what the peer will respond to each data column by range request.
@@ -1839,7 +1839,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
{slot: 32, hasBlobs: false},
{slot: 33, hasBlobs: true},
},
storedDataColumns: []map[int]bool{
storedDataColumns: []map[uint64]bool{
nil,
nil,
nil,
@@ -1866,7 +1866,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
{slot: 38, hasBlobs: true},
{slot: 39, hasBlobs: false},
},
storedDataColumns: []map[int]bool{
storedDataColumns: []map[uint64]bool{
nil, // Slot 25
nil, // Slot 27
nil, // Slot 32
@@ -1967,7 +1967,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
{slot: 35, hasBlobs: false},
{slot: 36, hasBlobs: true},
},
storedDataColumns: []map[int]bool{
storedDataColumns: []map[uint64]bool{
{6: true, 38: true}, // Slot 33
{6: true, 38: true}, // Slot 34
nil, // Slot 35
@@ -2017,7 +2017,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
blocksParams: []blockParams{
{slot: 38, hasBlobs: true},
},
storedDataColumns: []map[int]bool{{38: true, 102: true}},
storedDataColumns: []map[uint64]bool{{38: true, 102: true}},
peersParams: []peerParams{
{
cgc: 128,
@@ -2048,7 +2048,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
fuluForkEpoch: 1,
currentSlot: 40,
blocksParams: []blockParams{{slot: 38, hasBlobs: true}},
storedDataColumns: []map[int]bool{{38: true, 102: true}},
storedDataColumns: []map[uint64]bool{{38: true, 102: true}},
peersParams: []peerParams{
{
cgc: 128,
@@ -2076,7 +2076,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
fuluForkEpoch: 1,
currentSlot: 40,
blocksParams: []blockParams{{slot: 38, hasBlobs: true}},
storedDataColumns: []map[int]bool{{38: true, 102: true}},
storedDataColumns: []map[uint64]bool{{38: true, 102: true}},
peersParams: []peerParams{
{
cgc: 128,
@@ -2101,7 +2101,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
{slot: 32, hasBlobs: true}, {slot: 33, hasBlobs: true}, {slot: 34, hasBlobs: true}, {slot: 35, hasBlobs: true}, // 4
{slot: 36, hasBlobs: true}, {slot: 37, hasBlobs: true}, // 6
},
storedDataColumns: []map[int]bool{
storedDataColumns: []map[uint64]bool{
nil, nil, nil, nil, // 4
nil, nil, // 6

@@ -2187,7 +2187,8 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
require.NoError(t, err)

pbDataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
pbDataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.NoError(t, err)

dataColumnsSidecarFromSlot[blockParams.slot] = pbDataColumnsSidecar
@@ -2216,11 +2217,11 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
params.BeaconConfig().FuluForkEpoch = tc.fuluForkEpoch

// Save the blocks in the store.
storage := make(map[[fieldparams.RootLength]byte][]int)
storage := make(map[[fieldparams.RootLength]byte][]uint64)
for index, columns := range tc.storedDataColumns {
root := roBlocks[index].Root()

columnsSlice := make([]int, 0, len(columns))
columnsSlice := make([]uint64, 0, len(columns))
for column := range columns {
columnsSlice = append(columnsSlice, column)
}
@@ -2228,7 +2229,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
storage[root] = columnsSlice
}

blobStorageSummarizer := filesystem.NewMockBlobStorageSummarizer(t, storage, tc.fuluForkEpoch)
dataColumnStorageSummarizer := filesystem.NewMockDataColumnStorageSummarizer(t, storage)

// Create a chain and a clock.
chain, clock := defaultMockChain(t, tc.currentSlot)
@@ -2274,7 +2275,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
clock: clock,
ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Fulu},
p2p: p2pSvc,
bs: blobStorageSummarizer,
dcs: dataColumnStorageSummarizer,
cv: newDataColumnsVerifierFromInitializer(ini),
custodyInfo: &peerdas.CustodyInfo{},
})

@@ -73,6 +73,7 @@ type blocksQueueConfig struct {
db db.ReadOnlyDatabase
mode syncMode
bs filesystem.BlobStorageSummarizer
dcs filesystem.DataColumnStorageSummarizer
bv verification.NewBlobVerifier
cv verification.NewDataColumnsVerifier
custodyInfo *peerdas.CustodyInfo
@@ -118,6 +119,7 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
db: cfg.db,
clock: cfg.clock,
bs: cfg.bs,
dcs: cfg.dcs,
bv: cfg.bv,
cv: cfg.cv,
custodyInfo: cfg.custodyInfo,

@@ -81,6 +81,7 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
highestExpectedSlot: highestSlot,
mode: mode,
bs: s.cfg.BlobStorage,
dcs: s.cfg.DataColumnStorage,
cv: s.newDataColumnsVerifier,
custodyInfo: s.cfg.CustodyInfo,
}
@@ -187,6 +188,7 @@ func (s *Service) processFetchedDataRegSync(

blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStore := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)
lazilyPersistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, s.cfg.P2P.NodeID(), s.cfg.CustodyInfo)

log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())

@@ -198,7 +200,9 @@ func (s *Service) processFetchedDataRegSync(
for _, b := range preFuluBwbs {
log := logPre.WithFields(syncFields(b.Block))

if err := lazilyPersistentStore.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
sidecars := blocks.NewSidecarsFromBlobSidecars(b.Blobs)

if err := lazilyPersistentStore.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
log.WithError(err).Warning("Batch failure due to BlobSidecar issues")
return
}
@@ -222,12 +226,12 @@ func (s *Service) processFetchedDataRegSync(
logPost = log.WithField("firstUnprocessed", postFuluBwbs[0].Block.Block().Slot())
}

lazilyPersistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, s.cfg.CustodyInfo)

for _, b := range postFuluBwbs {
log := logPost.WithFields(syncFields(b.Block))

if err := lazilyPersistentStoreColumn.PersistColumns(s.clock.CurrentSlot(), b.Columns...); err != nil {
sidecars := blocks.NewSidecarsFromDataColumnSidecars(b.Columns)

if err := lazilyPersistentStoreColumn.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
log.WithError(err).Warning("Batch failure due to DataColumnSidecar issues")
return
}
@@ -387,7 +391,9 @@ func (s *Service) processPreFuluBatchedBlocks(
continue
}

if err := persistentStore.Persist(s.clock.CurrentSlot(), bwb.Blobs...); err != nil {
sidecars := blocks.NewSidecarsFromBlobSidecars(bwb.Blobs)

if err := persistentStore.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
return errors.Wrap(err, "persisting blobs")
}
}
@@ -412,14 +418,16 @@ func (s *Service) processPostFuluBatchedBlocks(
return nil
}

persistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, s.cfg.CustodyInfo)
persistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, s.cfg.P2P.NodeID(), s.cfg.CustodyInfo)
s.logBatchSyncStatus(genesis, firstBlock, bwbCount)
for _, bwb := range bwbs {
if len(bwb.Columns) == 0 {
continue
}

if err := persistentStoreColumn.PersistColumns(s.clock.CurrentSlot(), bwb.Columns...); err != nil {
sidecars := blocks.NewSidecarsFromDataColumnSidecars(bwb.Columns)

if err := persistentStoreColumn.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
return errors.Wrap(err, "persisting columns")
}
}

@@ -8,7 +8,6 @@ import (
"fmt"
"time"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
"github.com/pkg/errors"
@@ -57,6 +56,7 @@ type Config struct {
ClockWaiter startup.ClockWaiter
InitialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
CustodyInfo *peerdas.CustodyInfo
}

@@ -344,10 +344,13 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
}
shufflePeers(pids)
for i := range pids {
sidecars, err := sync.SendBlobSidecarByRoot(s.ctx, s.clock, s.cfg.P2P, pids[i], s.ctxMap, &req, rob.Block().Slot())
blobSidecars, err := sync.SendBlobSidecarByRoot(s.ctx, s.clock, s.cfg.P2P, pids[i], s.ctxMap, &req, rob.Block().Slot())
if err != nil {
continue
}

sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

if len(sidecars) != len(req) {
continue
}
@@ -358,9 +361,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
return err
}

// node ID is not used for checking blobs data availability.
emptyNodeID := enode.ID{}
if err := avs.IsDataAvailable(s.ctx, emptyNodeID, current, rob); err != nil {
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
continue
}
@@ -395,7 +396,7 @@ func (s *Service) fetchOriginColumns(pids []peer.ID) error {
rob,
s.cfg.P2P.NodeID(),
custodyGroupCount,
s.cfg.BlobStorage,
s.cfg.DataColumnStorage,
)
if err != nil {
return err
@@ -404,31 +405,22 @@ func (s *Service) fetchOriginColumns(pids []peer.ID) error {
log.WithField("root", fmt.Sprintf("%#x", r)).Debug("All columns for checkpoint block are present")
return nil
}
sidecars, err := sync.RequestDataColumnSidecarsByRoot(
s.ctx,
missingColumns,
rob,
r,
pids,
s.clock,
s.cfg.P2P,
s.ctxMap,
s.newDataColumnsVerifier,
)
dataColumnSidecars, err := sync.RequestDataColumnSidecarsByRoot(s.ctx, missingColumns, rob, r, pids, s.clock, s.cfg.P2P, s.ctxMap, s.newDataColumnsVerifier)
if err != nil {
return err
return errors.Wrap(err, "request data column sidecars by root")
}

sidecars := blocks.NewSidecarsFromDataColumnSidecars(dataColumnSidecars)

// FIXME: It's not clear that the caching layer is doing anything here or in
// fetchOriginBlobs, which is presumably where this logic was derived from.
avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, s.cfg.CustodyInfo)
avs := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, s.cfg.P2P.NodeID(), s.cfg.CustodyInfo)
current := s.clock.CurrentSlot()
if err := avs.PersistColumns(current, sidecars...); err != nil {
if err := avs.Persist(current, sidecars...); err != nil {
return err
}

nodeID := s.cfg.P2P.NodeID()
if err := avs.IsDataAvailable(s.ctx, nodeID, current, rob); err != nil {
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
return fmt.Errorf("couldn't assemble the required columns from peers for checkpoint sync block %#x", r)
}

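
Illustration only: the persist-then-verify flow used for the checkpoint block above, condensed. AvailabilityChecker is a hypothetical interface capturing the two calls this diff makes on the lazily persistent column store, and the blocks.Sidecar element type is likewise an assumption of this sketch.

package main

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// AvailabilityChecker is a hypothetical interface over the store used above.
type AvailabilityChecker interface {
	Persist(current primitives.Slot, sidecars ...blocks.Sidecar) error
	IsDataAvailable(ctx context.Context, current primitives.Slot, block blocks.ROBlock) error
}

// persistAndCheck stores fetched sidecars, then confirms the block's data is available.
func persistAndCheck(ctx context.Context, avs AvailabilityChecker, current primitives.Slot, block blocks.ROBlock, sidecars []blocks.Sidecar) error {
	if err := avs.Persist(current, sidecars...); err != nil {
		return err
	}
	return avs.IsDataAvailable(ctx, current, block)
}
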
@@ -89,6 +89,13 @@ var (
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
},
)
rpcDataColumnsByRangeResponseLatency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "rpc_data_columns_by_range_response_latency_milliseconds",
Help: "Captures total time to respond to rpc DataColumnsByRange requests in a milliseconds distribution",
Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
},
)
arrivalBlockPropagationHistogram = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "block_arrival_latency_milliseconds",

@@ -173,6 +173,14 @@ func WithBlobStorage(b *filesystem.BlobStorage) Option {
}
}

// WithDataColumnStorage gives the sync package direct access to DataColumnStorage.
func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
return func(s *Service) error {
s.cfg.dataColumnStorage = b
return nil
}
}

// WithVerifierWaiter gives the sync package direct access to the verifier waiter.
func WithVerifierWaiter(v *verification.InitializerWaiter) Option {
return func(s *Service) error {

@@ -213,7 +213,7 @@ func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.Rea
b,
s.cfg.p2p.NodeID(),
s.cfg.custodyInfo.CustodyGroupSamplingSize(peerdas.Target),
s.cfg.blobStorage,
s.cfg.dataColumnStorage,
)

if err != nil {

@@ -83,7 +83,7 @@ func (s *Service) sendBeaconBlocksRequest(
blk,
s.cfg.p2p.NodeID(),
s.cfg.custodyInfo.CustodyGroupSamplingSize(peerdas.Actual),
s.cfg.blobStorage,
s.cfg.dataColumnStorage,
)
if err != nil {
return errors.Wrap(err, "find missing data columns")
@@ -242,7 +242,7 @@ func (s *Service) requestAndSaveDataColumnSidecars(
return errors.Wrap(err, "request data column sidecars")
}

if err := SaveDataColumns(sidecars, s.cfg.blobStorage); err != nil {
if err := SaveDataColumns(sidecars, s.cfg.dataColumnStorage); err != nil {
return errors.Wrap(err, "save data column")
}

@@ -31,32 +31,13 @@ func (s *Service) streamDataColumnBatch(ctx context.Context, batch blockBatch, w
// Get the block blockRoot.
blockRoot := block.Root()

// Retrieve stored data columns indices for this block root.
summary := s.cfg.blobStorage.Summary(blockRoot)
numberOfColumns := params.BeaconConfig().NumberOfColumns

storedDataColumnsIndices := make(map[uint64]bool, numberOfColumns)
for i := range numberOfColumns {
if summary.HasDataColumnIndex(i) {
storedDataColumnsIndices[i] = true
}
verifiedRODataColumns, err := s.cfg.dataColumnStorage.Get(blockRoot, wantedDataColumnIndices)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
return wQuota, errors.Wrapf(err, "get data column sidecars: block root %#x", blockRoot)
}

for _, dataColumnIndex := range wantedDataColumnIndices {
isDataColumnStored := storedDataColumnsIndices[dataColumnIndex]

// Skip if the data column is not stored.
if !isDataColumnStored {
continue
}

// We won't check for file not found since the .Indices method should normally prevent that from happening.
verifiedRODataColumn, err := s.cfg.blobStorage.GetColumn(blockRoot, dataColumnIndex)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
return wQuota, errors.Wrapf(err, "could not retrieve data column sidecar: index %d, block root %#x", dataColumnIndex, blockRoot)
}

for _, verifiedRODataColumn := range verifiedRODataColumns {
SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
@@ -153,11 +134,11 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
batchStart := time.Now()
wQuota, err = s.streamDataColumnBatch(ctx, batch, wQuota, wantedColumns, stream)
rpcBlobsByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
rpcDataColumnsByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
if err != nil {
return err
}
// once we have written MAX_REQUEST_BLOB_SIDECARS, we're done serving the request
// once the quota is reached, we're done serving the request
if wQuota == 0 {
break
}
@@ -178,11 +159,10 @@ func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg i
return nil
}

// Set the count limit to the number of blobs in a batch.
// Set the count limit to the number of data columns in a batch.
func columnBatchLimit() uint64 {
// TODO: Do something correct
return math.MaxUint64
// return uint64(flags.Get().BlockBatchLimit) / fieldparams.MaxBlobsPerBlock
}

// TODO: Generalize between data columns and blobs, while the validation parameters used are different they

@@ -11,14 +11,11 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -60,24 +57,21 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
return errors.Wrap(err, "validate data columns by root request")
}

// Sort the identifiers so that requests for the same blob root will be adjacent, minimizing db lookups.
// Sort the identifiers so that requests for the same data columns root will be adjacent, minimizing db lookups.
sort.Sort(&requestedColumnIdents)

numberOfColumns := params.BeaconConfig().NumberOfColumns

requestedColumnsByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
requestedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]uint64)
for _, columnIdent := range requestedColumnIdents {
var root [fieldparams.RootLength]byte
copy(root[:], columnIdent.BlockRoot)
requestedColumnsByRoot[root] = append(requestedColumnsByRoot[root], columnIdent.Index)
}

columnIndex := columnIdent.ColumnIndex

if _, ok := requestedColumnsByRoot[root]; !ok {
requestedColumnsByRoot[root] = map[uint64]bool{columnIndex: true}
continue
}

requestedColumnsByRoot[root][columnIndex] = true
// Sort by column index for each root.
for _, columns := range requestedColumnsByRoot {
slices.Sort[[]uint64](columns)
}

requestedColumnsByRootLog := make(map[string]interface{})
|
||||
rootStr := fmt.Sprintf("%#x", root)
|
||||
requestedColumnsByRootLog[rootStr] = "all"
|
||||
if uint64(len(columns)) != numberOfColumns {
|
||||
requestedColumnsByRootLog[rootStr] = uint64MapToSortedSlice(columns)
|
||||
requestedColumnsByRootLog[rootStr] = columns
|
||||
}
|
||||
}
|
||||
|
||||
@@ -99,75 +93,61 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
|
||||
cs := s.cfg.clock.CurrentSlot()
|
||||
minReqSlot, err := DataColumnsRPCMinValidSlot(cs)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
|
||||
return errors.Wrapf(err, "unexpected error computing min valid data columns request slot, currentSlot=%d", cs)
|
||||
}
|
||||
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peer": remotePeer,
|
||||
"peer": stream.Conn().RemotePeer(),
|
||||
"columns": requestedColumnsByRootLog,
|
||||
})
|
||||
|
||||
log.Debug("Serving data column sidecar by root request")
|
||||
|
||||
// Subscribe to the data column feed.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
subscription := s.cfg.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
for i := range requestedColumnIdents {
|
||||
count := 0
|
||||
for root, columns := range requestedColumnsByRoot {
|
||||
if err := ctx.Err(); err != nil {
|
||||
closeStream(stream, log)
|
||||
return errors.Wrap(err, "context error")
|
||||
}
|
||||
|
||||
// Throttle request processing to no more than batchSize/sec.
|
||||
if ticker != nil && i != 0 && i%batchSize == 0 {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.Debug("Throttling data column sidecar request")
|
||||
case <-ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return nil
|
||||
// TODO: Find a more efficient way to throttle requests...
|
||||
for range columns {
|
||||
if ticker != nil && count != 0 && count%batchSize == 0 {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.Debug("Throttling data column sidecar request")
|
||||
case <-ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
count++
|
||||
}
|
||||
|
||||
s.rateLimiter.add(stream, 1)
|
||||
requestedRoot, requestedIndex := bytesutil.ToBytes32(requestedColumnIdents[i].BlockRoot), requestedColumnIdents[i].ColumnIndex
|
||||
s.rateLimiter.add(stream, int64(len(columns)))
|
||||
|
||||
// TODO: Differentiate between blobs and columns for our storage engine
|
||||
// Retrieve the data column from the database.
|
||||
verifiedRODataColumn, err := s.cfg.blobStorage.GetColumn(requestedRoot, requestedIndex)
|
||||
|
||||
if err != nil && !db.IsNotFound(err) {
|
||||
verifiedRODataColumns, err := s.cfg.dataColumnStorage.Get(root, columns)
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
return errors.Wrap(err, "get column")
|
||||
return errors.Wrap(err, "get data column sidecars")
|
||||
}
|
||||
|
||||
// If the data column is not found in the db, just skip it.
|
||||
if err != nil && db.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
if verifiedRODataColumn.SignedBlockHeader.Header.Slot < minReqSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
// If any root in the request content references a block earlier than minimum_request_epoch,
|
||||
// peers MAY respond with error code 3: ResourceUnavailable or not include the data column in the response.
|
||||
// note: we are deviating from the spec to allow requests for data column that are before minimum_request_epoch,
|
||||
// up to the beginning of the retention period.
|
||||
if verifiedRODataColumn.SignedBlockHeader.Header.Slot < minReqSlot {
|
||||
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrDataColumnLTMinRequest.Error(), stream)
|
||||
log.WithError(types.ErrDataColumnLTMinRequest).
|
||||
Debugf("requested data column for block %#x before minimum_request_epoch", requestedColumnIdents[i].BlockRoot)
|
||||
return types.ErrDataColumnLTMinRequest
|
||||
}
|
||||
|
||||
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, chunkErr)
|
||||
return chunkErr
|
||||
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), verifiedRODataColumn.DataColumnSidecar); chunkErr != nil {
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, chunkErr)
|
||||
return chunkErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
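
Illustration only: the ticker-based throttle embedded in the serving loop above, pulled out as a helper; after every batchSize processed sidecars it blocks until the next tick, or aborts when the context is cancelled. The function boundary and return convention are this sketch's own.

package main

import (
	"context"
	"time"
)

// throttle blocks once per batchSize processed items when a ticker is configured.
func throttle(ctx context.Context, ticker *time.Ticker, count, batchSize int) error {
	if ticker == nil || count == 0 || count%batchSize != 0 {
		return nil
	}
	select {
	case <-ticker.C: // pace request processing to roughly batchSize per tick
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
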
@@ -26,9 +26,6 @@ import (
"github.com/sirupsen/logrus"
)

var errBlobChunkedReadFailure = errors.New("failed to read stream of chunk-encoded blobs")
var errBlobUnmarshal = errors.New("Could not unmarshal chunk-encoded blob")

// Any error from the following declaration block should result in peer downscoring.
var (
// ErrInvalidFetchedData is used to signal that an error occurred which should result in peer downscoring.
@@ -41,6 +38,9 @@ var (
errBlobResponseOutOfBounds = errors.Wrap(ErrInvalidFetchedData, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
errChunkResponseBlockMismatch = errors.Wrap(ErrInvalidFetchedData, "blob block details do not match")
errChunkResponseParentMismatch = errors.Wrap(ErrInvalidFetchedData, "parent root for response element doesn't match previous element root")
errBlobChunkedReadFailure = errors.New("failed to read stream of chunk-encoded blobs")
errBlobUnmarshal = errors.New("Could not unmarshal chunk-encoded blob")
errDataColumnChunkedReadFailure = errors.New("failed to read stream of chunk-encoded data columns")
)

// BeaconBlockProcessor defines a block processing function, which allows to start utilizing
@@ -317,7 +317,7 @@ func dataColumnIndexValidatorFromRangeReq(req *ethpb.DataColumnSidecarsByRangeRe
}

return func(sc blocks.RODataColumn) bool {
columnIndex := sc.ColumnIndex
columnIndex := sc.Index

valid := columnIds[columnIndex]

@@ -441,7 +441,7 @@ func readChunkedDataColumnSideCar(
}

if statusCode != 0 {
return nil, errors.Wrap(errBlobChunkedReadFailure, errMessage)
return nil, errors.Wrap(errDataColumnChunkedReadFailure, errMessage)
}
// Retrieve the fork digest.
ctxBytes, err := readContextFromStream(stream)
@@ -599,7 +599,7 @@ func dataColumnValidatorFromRootReq(req *p2ptypes.DataColumnSidecarsByRootReq) D
columnsIndexFromRoot[blockRoot] = make(map[uint64]bool)
}

columnsIndexFromRoot[blockRoot][sc.ColumnIndex] = true
columnsIndexFromRoot[blockRoot][sc.Index] = true
}

return func(sc blocks.RODataColumn) bool {
@@ -611,7 +611,7 @@ func dataColumnValidatorFromRootReq(req *p2ptypes.DataColumnSidecarsByRootReq) D
return false
}

if !columnsIndex[sc.ColumnIndex] {
if !columnsIndex[sc.Index] {
columnsIndexSlice := make([]uint64, 0, len(columnsIndex))

for index := range columnsIndex {
@@ -624,7 +624,7 @@ func dataColumnValidatorFromRootReq(req *p2ptypes.DataColumnSidecarsByRootReq) D

log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"column": sc.ColumnIndex,
"column": sc.Index,
"requestedColumns": columnsIndexSlice,
}).Debug("Data column sidecar column index not requested")

@@ -103,6 +103,7 @@ type config struct {
clock *startup.Clock
stateNotifier statefeed.Notifier
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
custodyInfo *peerdas.CustodyInfo
}

@@ -142,7 +143,6 @@ type Service struct {
seenBlockCache *lru.Cache
seenBlobLock sync.RWMutex
seenBlobCache *lru.Cache
seenDataColumnLock sync.RWMutex
seenDataColumnCache *lru.Cache
seenAggregatedAttestationLock sync.RWMutex
seenAggregatedAttestationCache *lru.Cache
@@ -171,32 +171,21 @@ type Service struct {
availableBlocker coverage.AvailableBlocker
trackedValidatorsCache *cache.TrackedValidatorsCache
dataColumsnReconstructionLock sync.Mutex
receivedDataColumnsFromRoot *gcache.Cache
receivedDataColumnsFromRootLock sync.RWMutex
storedDataColumnsFromRoot *gcache.Cache
storedDataColumnsFromRootLock sync.RWMutex
ctxMap ContextByteVersions
}

// NewService initializes new regular sync service.
func NewService(ctx context.Context, opts ...Option) *Service {
const (
dataColumnCacheExpiration = 1 * time.Minute
dataColumnCacheCleanupInterval = 2 * time.Minute
)

ctx, cancel := context.WithCancel(ctx)
r := &Service{
ctx: ctx,
cancel: cancel,
chainStarted: abool.New(),
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
signatureChan: make(chan *signatureVerifier, verifierLimit),
receivedDataColumnsFromRoot: gcache.New(dataColumnCacheExpiration, dataColumnCacheCleanupInterval),
storedDataColumnsFromRoot: gcache.New(dataColumnCacheExpiration, dataColumnCacheCleanupInterval),
ctx: ctx,
cancel: cancel,
chainStarted: abool.New(),
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
slotToPendingBlocks: gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */),
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
signatureChan: make(chan *signatureVerifier, verifierLimit),
}

for _, opt := range opts {

@@ -95,11 +95,11 @@ func (s *Service) reconstructAndBroadcastBlobsInDataColumn(ctx context.Context,
return
}

if s.cfg.blobStorage == nil {
log.Warn("Blob storage is not enabled, skip saving data column, but continue to reconstruct and broadcast blobs")
if s.cfg.dataColumnStorage == nil {
log.Warning("Data column storage is not enabled, skip saving data column, but continue to reconstruct and broadcast data column")
}

// when this function is called, it's from the time when the block is received, so in almost all situations we need to get the data column from EL instead of the blob storage.
// When this function is called, it's from the time when the block is received, so in almost all situations we need to get the data column from EL instead of the blob storage.
sidecars, err := s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, roSignedBlock, blockRoot)
if err != nil {
log.WithError(err).Debug("Cannot reconstruct data column sidecars after receiving the block")
@@ -109,8 +109,8 @@ func (s *Service) reconstructAndBroadcastBlobsInDataColumn(ctx context.Context,
nodeID := s.cfg.p2p.NodeID()
s.cfg.custodyInfo.Mut.RLock()
defer s.cfg.custodyInfo.Mut.RUnlock()
samplingSize := s.cfg.custodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
info, _, err := peerdas.Info(nodeID, samplingSize)
groupCount := s.cfg.custodyInfo.ActualGroupCount()
info, _, err := peerdas.Info(nodeID, groupCount)
if err != nil {
log.WithError(err).Error("Failed to get peer info")
return
@@ -118,12 +118,12 @@ func (s *Service) reconstructAndBroadcastBlobsInDataColumn(ctx context.Context,

// Broadcast data column and then save to db (if needs to be in custody)
for _, sidecar := range sidecars {
if !info.CustodyColumns[sidecar.ColumnIndex] {
if !info.CustodyColumns[sidecar.Index] {
continue
}

// first broadcast the data column
if err := s.cfg.p2p.BroadcastDataColumn(ctx, blockRoot, sidecar.ColumnIndex, sidecar.DataColumnSidecar); err != nil {
if err := s.cfg.p2p.BroadcastDataColumn(ctx, blockRoot, sidecar.Index, sidecar.DataColumnSidecar); err != nil {
log.WithFields(dataColumnFields(sidecar.RODataColumn)).WithError(err).Error("Failed to broadcast data column")
}

@@ -5,7 +5,6 @@ import (
"testing"
"time"

gcache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
@@ -232,7 +231,7 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
},
Signature: []byte("signature"),
},
ColumnIndex: uint64(i),
Index: uint64(i),
})
require.NoError(t, err)
allColumns[i] = blocks.VerifiedRODataColumn{RODataColumn: rod}
@@ -253,7 +252,7 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
name: "Constructed 128 data columns with all blobs",
blobCount: 1,
dataColumnSidecars: allColumns,
expectedDataColumnCount: 8, // default is 8
expectedDataColumnCount: 4, // default is 4
},
}

@@ -271,9 +270,7 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
operationNotifier: &chainMock.MockOperationNotifier{},
custodyInfo: &peerdas.CustodyInfo{},
},
seenDataColumnCache: lruwrpr.New(1),
receivedDataColumnsFromRoot: gcache.New(1*time.Minute, 2*time.Minute),
storedDataColumnsFromRoot: gcache.New(1*time.Minute, 2*time.Minute),
seenDataColumnCache: lruwrpr.New(1),
}

kzgCommitments := make([][]byte, 0, tt.blobCount)

@@ -32,8 +32,7 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
func (s *Service) receiveDataColumn(ctx context.Context, dc blocks.VerifiedRODataColumn) error {
slot := dc.SignedBlockHeader.Header.Slot
proposerIndex := dc.SignedBlockHeader.Header.ProposerIndex
columnIndex := dc.ColumnIndex
blockRoot := dc.BlockRoot()
columnIndex := dc.Index

s.setSeenDataColumnIndex(slot, proposerIndex, columnIndex)

@@ -41,15 +40,6 @@ func (s *Service) receiveDataColumn(ctx context.Context, dc blocks.VerifiedRODat
return errors.Wrap(err, "receive data column")
}

// Mark the data column as both received and stored.
if err := s.setReceivedDataColumn(blockRoot, columnIndex); err != nil {
return errors.Wrap(err, "set received data column")
}

if err := s.setStoredDataColumn(blockRoot, columnIndex); err != nil {
return errors.Wrap(err, "set stored data column")
}

s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.DataColumnSidecarReceived,
Data: &opfeed.DataColumnSidecarReceivedData{

@@ -174,7 +174,7 @@ func dataColumnFields(b blocks.RODataColumn) logrus.Fields {
"slot": b.Slot(),
"proposerIndex": b.ProposerIndex(),
"blockRoot": fmt.Sprintf("%#x", b.BlockRoot()),
"columnIndex": b.ColumnIndex,
"columnIndex": b.Index,
}
}


@@ -8,10 +8,10 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
@@ -69,7 +69,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
if dataColumnsIgnoreSlotMultiple != 0 && blockSlot%dataColumnsIgnoreSlotMultiple == 0 {
log.WithFields(logrus.Fields{
"slot": blockSlot,
"columnIndex": roDataColumn.ColumnIndex,
"columnIndex": roDataColumn.Index,
"blockRoot": fmt.Sprintf("%#x", roDataColumn.BlockRoot()),
}).Warning("Voluntary ignore data column sidecar gossip")

@@ -85,7 +85,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
}

// [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id.
want := fmt.Sprintf("data_column_sidecar_%d", computeSubnetForColumnSidecar(roDataColumn.ColumnIndex))
want := fmt.Sprintf("data_column_sidecar_%d", peerdas.ComputeSubnetForDataColumnSidecar(roDataColumn.Index))
if !strings.Contains(*msg.Topic, want) {
log.Debug("Column Sidecar index does not match topic")
return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic)
@@ -96,7 +96,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
}

// [IGNORE] The sidecar is the first sidecar for the tuple (block_header.slot, block_header.proposer_index, sidecar.index) with valid header signature, sidecar inclusion proof, and kzg proof.
if s.hasSeenDataColumnIndex(roDataColumn.Slot(), roDataColumn.ProposerIndex(), roDataColumn.DataColumnSidecar.ColumnIndex) {
if s.hasSeenDataColumnIndex(roDataColumn.Slot(), roDataColumn.ProposerIndex(), roDataColumn.DataColumnSidecar.Index) {
return pubsub.ValidationIgnore, nil
}

@@ -189,8 +189,6 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs

// Returns true if the column with the same slot, proposer index, and column index has been seen before.
func (s *Service) hasSeenDataColumnIndex(slot primitives.Slot, proposerIndex primitives.ValidatorIndex, index uint64) bool {
s.seenDataColumnLock.RLock()
defer s.seenDataColumnLock.RUnlock()
b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(proposerIndex))...)
b = append(b, bytesutil.Bytes32(index)...)
_, seen := s.seenDataColumnCache.Get(string(b))
@@ -199,14 +197,7 @@ func (s *Service) hasSeenDataColumnIndex(slot primitives.Slot, proposerIndex pri

// Sets the data column with the same slot, proposer index, and data column index as seen.
func (s *Service) setSeenDataColumnIndex(slot primitives.Slot, proposerIndex primitives.ValidatorIndex, index uint64) {
s.seenDataColumnLock.Lock()
defer s.seenDataColumnLock.Unlock()

b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(proposerIndex))...)
b = append(b, bytesutil.Bytes32(index)...)
s.seenDataColumnCache.Add(string(b), true)
}

func computeSubnetForColumnSidecar(colIdx uint64) uint64 {
return colIdx % params.BeaconConfig().DataColumnSidecarSubnetCount
}

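
Illustration only: the subnet mapping that peerdas.ComputeSubnetForDataColumnSidecar takes over from the deleted local helper is a plain modulo over the sidecar subnet count; for example, with 128 subnets, column index 130 lands on subnet 2. The constant below is this sketch's own illustrative value.

package main

import "fmt"

// subnetForColumnSidecar spreads columns round-robin across the sidecar subnets.
func subnetForColumnSidecar(columnIndex, subnetCount uint64) uint64 {
	return columnIndex % subnetCount
}

func main() {
	const dataColumnSidecarSubnetCount = 128 // illustrative value
	fmt.Println(subnetForColumnSidecar(130, dataColumnSidecarSubnetCount)) // prints 2
}
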
@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["blob.go"],
srcs = [
"blob.go",
"data_columns.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify",
visibility = ["//visibility:public"],
deps = [

@@ -1,13 +1,9 @@
package verify

import (
"reflect"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -53,69 +49,3 @@ func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
}
return nil
}

type WrappedBlockDataColumn struct {
ROBlock interfaces.ReadOnlyBeaconBlock
RODataColumn blocks.RODataColumn
}

func DataColumnsAlignWithBlock(
wrappedBlockDataColumns []WrappedBlockDataColumn,
dataColumnsVerifier verification.NewDataColumnsVerifier,
) error {
for _, wrappedBlockDataColumn := range wrappedBlockDataColumns {
dataColumn := wrappedBlockDataColumn.RODataColumn
block := wrappedBlockDataColumn.ROBlock

// Extract the block root from the data column.
blockRoot := dataColumn.BlockRoot()

// Retrieve the KZG commitments from the block.
blockKZGCommitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}

// Retrieve the KZG commitments from the data column.
dataColumnKZGCommitments := dataColumn.KzgCommitments

// Verify the commitments in the block match the commitments in the data column.
if !reflect.DeepEqual(blockKZGCommitments, dataColumnKZGCommitments) {
// Retrieve the data column slot.
dataColumnSlot := dataColumn.Slot()

return errors.Wrapf(
ErrMismatchedColumnCommitments,
"data column commitments `%#v` != block commitments `%#v` for block root %#x at slot %d",
dataColumnKZGCommitments,
blockKZGCommitments,
blockRoot,
dataColumnSlot,
)
}
}

dataColumns := make([]blocks.RODataColumn, 0, len(wrappedBlockDataColumns))
for _, wrappedBlockDataColumn := range wrappedBlockDataColumns {
dataColumn := wrappedBlockDataColumn.RODataColumn
dataColumns = append(dataColumns, dataColumn)
}

// Verify that the data column indices are in bounds.
verifier := dataColumnsVerifier(dataColumns, verification.InitsyncColumnSidecarRequirements)
if err := verifier.DataColumnsIndexInBounds(); err != nil {
return errors.Wrap(err, "data column index in bounds")
}

// Verify the KZG inclusion proof.
if err := verifier.SidecarInclusionProven(); err != nil {
return errors.Wrap(err, "inclusion proof verification")
}

// Verify the KZG proof.
if err := verifier.SidecarKzgProofVerified(); err != nil {
return errors.Wrap(err, "KZG proof verification")
}

return nil
}

beacon-chain/sync/verify/data_columns.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package verify

import (
"reflect"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

type WrappedBlockDataColumn struct {
ROBlock interfaces.ReadOnlyBeaconBlock
RODataColumn blocks.RODataColumn
}

func DataColumnsAlignWithBlock(
wrappedBlockDataColumns []WrappedBlockDataColumn,
dataColumnsVerifier verification.NewDataColumnsVerifier,
) error {
for _, wrappedBlockDataColumn := range wrappedBlockDataColumns {
dataColumn := wrappedBlockDataColumn.RODataColumn
block := wrappedBlockDataColumn.ROBlock

// Extract the block root from the data column.
blockRoot := dataColumn.BlockRoot()

// Retrieve the KZG commitments from the block.
blockKZGCommitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}

// Retrieve the KZG commitments from the data column.
dataColumnKZGCommitments := dataColumn.KzgCommitments

// Verify the commitments in the block match the commitments in the data column.
if !reflect.DeepEqual(blockKZGCommitments, dataColumnKZGCommitments) {
// Retrieve the data column slot.
dataColumnSlot := dataColumn.Slot()

return errors.Wrapf(
ErrMismatchedColumnCommitments,
"data column commitments `%#v` != block commitments `%#v` for block root %#x at slot %d",
dataColumnKZGCommitments,
blockKZGCommitments,
blockRoot,
dataColumnSlot,
)
}
}

dataColumns := make([]blocks.RODataColumn, 0, len(wrappedBlockDataColumns))
for _, wrappedBlockDataColumn := range wrappedBlockDataColumns {
dataColumn := wrappedBlockDataColumn.RODataColumn
dataColumns = append(dataColumns, dataColumn)
}

// Verify that the data column indices are in bounds.
verifier := dataColumnsVerifier(dataColumns, verification.InitsyncColumnSidecarRequirements)
if err := verifier.DataColumnsIndexInBounds(); err != nil {
return errors.Wrap(err, "data column index in bounds")
}

// Verify the KZG inclusion proof.
if err := verifier.SidecarInclusionProven(); err != nil {
return errors.Wrap(err, "inclusion proof verification")
}

// Verify the KZG proof.
if err := verifier.SidecarKzgProofVerified(); err != nil {
return errors.Wrap(err, "KZG proof verification")
}

return nil
}
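
Illustration only: a hedged usage sketch of the new DataColumnsAlignWithBlock helper, pairing each sidecar with its block before the batch verifier runs. The wrapper function and the source of newVerifier are this sketch's assumptions; the types and calls are taken from the new file above.

package main

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

// checkColumnsAlign pairs sidecars with their block and runs the alignment checks.
func checkColumnsAlign(block interfaces.ReadOnlyBeaconBlock, columns []blocks.RODataColumn, newVerifier verification.NewDataColumnsVerifier) error {
	wrapped := make([]verify.WrappedBlockDataColumn, 0, len(columns))
	for _, column := range columns {
		wrapped = append(wrapped, verify.WrappedBlockDataColumn{ROBlock: block, RODataColumn: column})
	}
	return verify.DataColumnsAlignWithBlock(wrapped, newVerifier)
}
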
@@ -39,6 +39,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

@@ -121,7 +121,7 @@ func (dv *RODataColumnsVerifier) DataColumnsIndexInBounds() (err error) {
defer dv.recordResult(RequireDataColumnIndexInBounds, &err)

for _, dataColumn := range dv.dataColumns {
if dataColumn.ColumnIndex >= fieldparams.NumberOfColumns {
if dataColumn.Index >= fieldparams.NumberOfColumns {
fields := logging.DataColumnFields(dataColumn)
log.WithFields(fields).Debug("Sidecar index >= NUMBER_OF_COLUMNS")
return columnErrBuilder(ErrColumnIndexInvalid)

@@ -29,7 +29,8 @@ func GenerateTestDataColumns(t *testing.T, parent [fieldparams.RootLength]byte,
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
}

dataColumnSidecars, err := peerdas.DataColumnSidecars(roBlock, blobs)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnSidecars, err := peerdas.DataColumnSidecars(roBlock, cellsAndProofs)
require.NoError(t, err)

roDataColumns := make([]blocks.RODataColumn, 0, len(dataColumnSidecars))
@@ -72,7 +73,7 @@ func TestDataColumnsIndexInBounds(t *testing.T) {

columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
for _, column := range columns {
column.ColumnIndex = tc.columnsIndex
column.Index = tc.columnsIndex
}

verifier := initializer.NewDataColumnsVerifier(columns, GossipColumnSidecarRequirements)

@@ -3,7 +3,24 @@ package verification
import (
"testing"

ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

type (
DataColumnParams struct {
Slot primitives.Slot
ColumnIndex uint64
KzgCommitments [][]byte
DataColumn []byte // A whole data cell will be filled with the content of one item of this slice.
}

DataColumnsParamsByRoot map[[fieldparams.RootLength]byte][]DataColumnParams
)

// FakeVerifyForTest can be used by tests that need a VerifiedROBlob but don't want to do all the
@@ -45,3 +62,63 @@ func FakeVerifyDataColumnSliceForTest(t *testing.T, b []blocks.RODataColumn) []b
}
return vcs
}

func CreateTestVerifiedRoDataColumnSidecars(t *testing.T, dataColumnParamsByBlockRoot DataColumnsParamsByRoot) ([]blocks.RODataColumn, []blocks.VerifiedRODataColumn) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)

count := 0
for _, indices := range dataColumnParamsByBlockRoot {
count += len(indices)
}

verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, count)
rodataColumnSidecars := make([]blocks.RODataColumn, 0, count)
for blockRoot, params := range dataColumnParamsByBlockRoot {
for _, param := range params {
dataColumn := make([][]byte, 0, len(param.DataColumn))
for _, value := range param.DataColumn {
cell := make([]byte, ckzg4844.BytesPerCell)
for i := range ckzg4844.BytesPerCell {
cell[i] = value
}
dataColumn = append(dataColumn, cell)
}

kzgCommitmentsInclusionProof := make([][]byte, 4)
for i := range kzgCommitmentsInclusionProof {
kzgCommitmentsInclusionProof[i] = make([]byte, 32)
}

dataColumnSidecar := &ethpb.DataColumnSidecar{
Index: param.ColumnIndex,
KzgCommitments: param.KzgCommitments,
Column: dataColumn,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: param.Slot,
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
}

roDataColumnSidecar, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
t.Fatal(err)
}

rodataColumnSidecars = append(rodataColumnSidecars, roDataColumnSidecar)

verifiedRoDataColumnSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumnSidecar)
}
}

return rodataColumnSidecars, verifiedRoDataColumnSidecars
}

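
Illustration only: building two fake verified sidecars for one block root with the new CreateTestVerifiedRoDataColumnSidecars helper. The root and cell byte values are arbitrary, the wrapper test function is the sketch's own, and it assumes the code sits in the verification package with the bytesutil helper imported.

func TestCreateSidecarsSketch(t *testing.T) {
	byRoot := DataColumnsParamsByRoot{
		bytesutil.ToBytes32([]byte("root")): {
			{Slot: 1, ColumnIndex: 0, DataColumn: []byte{0xaa}},
			{Slot: 1, ColumnIndex: 1, DataColumn: []byte{0xbb}},
		},
	}
	roSidecars, verifiedSidecars := CreateTestVerifiedRoDataColumnSidecars(t, byRoot)
	if len(roSidecars) != 2 || len(verifiedSidecars) != 2 {
		t.Fatalf("expected 2 sidecars, got %d and %d", len(roSidecars), len(verifiedSidecars))
	}
}
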
@@ -1,6 +1,9 @@
package verification

import (
	"io"

	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/spf13/afero"
@@ -22,18 +25,32 @@ func VerifiedROBlobFromDisk(fs afero.Fs, root [32]byte, path string) (blocks.Ver
	return blocks.NewVerifiedROBlob(ro), nil
}

func VerifiedRODataColumnFromDisk(fs afero.Fs, root [32]byte, path string) (blocks.VerifiedRODataColumn, error) {
	encoded, err := afero.ReadFile(fs, path)
func VerifiedRODataColumnFromDisk(file afero.File, root [fieldparams.RootLength]byte, sszEncodedDataColumnSidecarSize uint32) (blocks.VerifiedRODataColumn, error) {
	// Read the SSZ encoded data column sidecar from the file.
	sszEncodedDataColumnSidecar := make([]byte, sszEncodedDataColumnSidecarSize)
	count, err := file.Read(sszEncodedDataColumnSidecar)
	if err != nil {
		return VerifiedRODataColumnError(err)
	}
	s := &ethpb.DataColumnSidecar{}
	if err := s.UnmarshalSSZ(encoded); err != nil {
	if uint32(count) != sszEncodedDataColumnSidecarSize {
		// A short read leaves err nil, so return an explicit error instead.
		return VerifiedRODataColumnError(io.ErrUnexpectedEOF)
	}
	ro, err := blocks.NewRODataColumnWithRoot(s, root)

	// Unmarshal the SSZ encoded data column sidecar.
	dataColumnSidecar := &ethpb.DataColumnSidecar{}
	if err := dataColumnSidecar.UnmarshalSSZ(sszEncodedDataColumnSidecar); err != nil {
		return VerifiedRODataColumnError(err)
	}

	// Create a RO data column.
	roDataColumnSidecar, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, root)
	if err != nil {
		return VerifiedRODataColumnError(err)
	}
	return blocks.NewVerifiedRODataColumn(ro), nil

	// Create a verified RO data column.
	verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)

	return verifiedRODataColumn, nil
}
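
A minimal caller sketch under stated assumptions: the sidecar was previously written to the file with MarshalSSZ, the file contains exactly that one sidecar, and every name below except VerifiedRODataColumnFromDisk (the helper name, the sidecar value, the block root, the file name) is illustrative rather than taken from this diff. It uses the same imports as the file above plus afero's in-memory filesystem.

// loadVerifiedColumn is a hypothetical helper showing the call pattern.
func loadVerifiedColumn(sidecar *ethpb.DataColumnSidecar, blockRoot [fieldparams.RootLength]byte) (blocks.VerifiedRODataColumn, error) {
	fs := afero.NewMemMapFs()
	encoded, err := sidecar.MarshalSSZ()
	if err != nil {
		return blocks.VerifiedRODataColumn{}, err
	}
	if err := afero.WriteFile(fs, "column.ssz", encoded, 0o600); err != nil {
		return blocks.VerifiedRODataColumn{}, err
	}
	file, err := fs.Open("column.ssz")
	if err != nil {
		return blocks.VerifiedRODataColumn{}, err
	}
	defer file.Close()
	// The caller must know the exact encoded size up front.
	return VerifiedRODataColumnFromDisk(file, blockRoot, uint32(len(encoded)))
}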
@@ -149,6 +149,7 @@ var appFlags = []cli.Flag{
	storage.BlobStoragePathFlag,
	storage.BlobRetentionEpochFlag,
	storage.BlobStorageLayout,
	storage.DataColumnStoragePathFlag,
	bflags.EnableExperimentalBackfill,
	bflags.BackfillBatchSize,
	bflags.BackfillWorkerCount,
@@ -14,7 +14,6 @@ import (
)

var (
	// BlobStoragePathFlag defines the location on disk where blob sidecars are stored.
	BlobStoragePathFlag = &cli.PathFlag{
		Name:  "blob-path",
		Usage: "Location for blob storage. Default location will be a 'blobs' directory next to the beacon db.",
@@ -30,6 +29,10 @@ var (
		Usage: layoutFlagUsage(),
		Value: filesystem.LayoutNameFlat,
	}
	DataColumnStoragePathFlag = &cli.PathFlag{
		Name:  "data-column-path",
		Usage: "Location for data column storage. Default location will be a 'data-columns' directory next to the beacon db.",
	}
)

func layoutOptions() string {
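
For operators, a hedged invocation example: the two storage flag names come from the definitions above, while the binary name and paths are illustrative. Omitting --data-column-path stores sidecars in a 'data-columns' directory next to the beacon db, mirroring the blob storage default.

./beacon-chain --datadir=/data/prysm --blob-path=/nvme/blobs --data-column-path=/nvme/data-columns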
@@ -58,11 +61,17 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
	if err != nil {
		return nil, err
	}
	opts := []node.Option{node.WithBlobStorageOptions(
		filesystem.WithBlobRetentionEpochs(e),
		filesystem.WithBasePath(blobStoragePath(c)),
		filesystem.WithLayout(c.String(BlobStorageLayout.Name)), // This is validated in the Action func for BlobStorageLayout.
	)}
	opts := []node.Option{
		node.WithBlobStorageOptions(
			filesystem.WithBlobRetentionEpochs(e),
			filesystem.WithBasePath(blobStoragePath(c)),
			filesystem.WithLayout(c.String(BlobStorageLayout.Name)), // This is validated in the Action func for BlobStorageLayout.
		),
		node.WithDataColumnStorageOptions(
			filesystem.WithDataColumnRetentionEpochs(e),
			filesystem.WithDataColumnBasePath(dataColumnStoragePath(c)),
		),
	}
	return opts, nil
}
@@ -75,6 +84,15 @@ func blobStoragePath(c *cli.Context) string {
	return blobsPath
}

func dataColumnStoragePath(c *cli.Context) string {
	dataColumnsPath := c.Path(DataColumnStoragePathFlag.Name)
	if dataColumnsPath == "" {
		// append a "data-columns" subdir to the end of the data dir path
		dataColumnsPath = path.Join(c.String(cmd.DataDirFlag.Name), "data-columns")
	}
	return dataColumnsPath
}
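
A small sketch of that fallback behavior, assuming urfave/cli v2 semantics for an unset PathFlag; the flag-set construction below is illustrative and not part of the diff (imports: "flag", "testing", "github.com/urfave/cli/v2").

func TestDataColumnStoragePathDefault(t *testing.T) {
	// With no --data-column-path given, the helper falls back to <datadir>/data-columns.
	set := flag.NewFlagSet("test", flag.ContinueOnError)
	set.String(cmd.DataDirFlag.Name, "/data/prysm", "")
	ctx := cli.NewContext(&cli.App{}, set, nil)
	if got := dataColumnStoragePath(ctx); got != "/data/prysm/data-columns" {
		t.Fatalf("unexpected default path: %s", got)
	}
}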

var errInvalidBlobRetentionEpochs = errors.New("value is smaller than spec minimum")

// blobRetentionEpoch returns the spec default MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
Some files were not shown because too many files have changed in this diff.