Implement PeerDAS sync (#15564)
* PeerDAS: Implement sync
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Satyajit's comment.
* Partially fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Add tests for `sendDataColumnSidecarsRequest`.
* Fix Satyajit's comment.
* Implement `TestSendDataColumnSidecarsRequest`.
* Implement `TestFetchDataColumnSidecarsFromPeers`.
* Implement `TestUpdateResults`.
* Implement `TestSelectPeers`.
* Implement `TestCategorizeIndices`.
* Fix James' comment.
* Fix James's comment.
* Fix James' commit.
* Fix James' comment.
* Fix James' comment.
* Fix flakiness in `TestSelectPeers`.
* Update cmd/beacon-chain/flags/config.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Fix Preston's comment.
* Fix James's comment.
* Implement `TestFetchDataColumnSidecars`.
* Revert "Fix Potuz's comment." This reverts commit c45230b455.
* Fix Potuz's comment.
* Revert "Fix James' comment." This reverts commit a3f919205a.
* Fix James' comment.
* Fix Preston's comment.
* Fix James' comment.
* `selectPeers`: Avoid map with key but empty value.
* Fix typo.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix James' comment.
* Add DataColumnStorage and SubscribeAllDataSubnets flag.
* Add extra flags
* Fix Potuz's and Preston's comment.
* Add rate limiter check.

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
@@ -12,6 +12,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
 	blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
@@ -53,22 +54,24 @@ type Config struct {
 	ClockWaiter         startup.ClockWaiter
 	InitialSyncComplete chan struct{}
 	BlobStorage         *filesystem.BlobStorage
+	DataColumnStorage   *filesystem.DataColumnStorage
 }
 
 // Service service.
 type Service struct {
-	cfg             *Config
-	ctx             context.Context
-	cancel          context.CancelFunc
-	synced          *abool.AtomicBool
-	chainStarted    *abool.AtomicBool
-	counter         *ratecounter.RateCounter
-	genesisChan     chan time.Time
-	clock           *startup.Clock
-	verifierWaiter  *verification.InitializerWaiter
-	newBlobVerifier verification.NewBlobVerifier
-	ctxMap          sync.ContextByteVersions
-	genesisTime     time.Time
+	cfg                    *Config
+	ctx                    context.Context
+	cancel                 context.CancelFunc
+	synced                 *abool.AtomicBool
+	chainStarted           *abool.AtomicBool
+	counter                *ratecounter.RateCounter
+	genesisChan            chan time.Time
+	clock                  *startup.Clock
+	verifierWaiter         *verification.InitializerWaiter
+	newBlobVerifier        verification.NewBlobVerifier
+	newDataColumnsVerifier verification.NewDataColumnsVerifier
+	ctxMap                 sync.ContextByteVersions
+	genesisTime            time.Time
 }
 
 // Option is a functional option for the initial-sync Service.
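
Config gains DataColumnStorage alongside BlobStorage, and Service gains a newDataColumnsVerifier constructor, both mirroring the existing blob plumbing. Per the comment above, Config is populated through the service's functional options. A minimal sketch of how the new field could be wired in the same package, assuming a hypothetical WithDataColumnStorage helper and an Option signature of func(*Service) — neither name nor signature is confirmed by this diff:

// WithDataColumnStorage is a hypothetical functional option that injects the
// data column store into the initial-sync service configuration. The Option
// signature func(*Service) is assumed here for illustration.
func WithDataColumnStorage(store *filesystem.DataColumnStorage) Option {
	return func(s *Service) {
		s.cfg.DataColumnStorage = store
	}
}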
@@ -149,6 +152,7 @@ func (s *Service) Start() {
 		return
 	}
 	s.newBlobVerifier = newBlobVerifierFromInitializer(v)
+	s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
 
 	gt := clock.GenesisTime()
 	if gt.IsZero() {
@@ -175,19 +179,22 @@ func (s *Service) Start() {
 	}
 	s.chainStarted.Set()
 	log.Info("Starting initial chain sync...")
+
 	// Are we already in sync, or close to it?
 	if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
 		log.Info("Already synced to the current chain head")
 		s.markSynced()
 		return
 	}
+
 	peers, err := s.waitForMinimumPeers()
 	if err != nil {
 		log.WithError(err).Error("Error waiting for minimum number of peers")
 		return
 	}
-	if err := s.fetchOriginBlobs(peers); err != nil {
-		log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
+
+	if err := s.fetchOriginSidecars(peers); err != nil {
+		log.WithError(err).Error("Error fetching origin sidecars")
 		return
 	}
 	if err := s.roundRobinSync(); err != nil {
@@ -200,6 +207,48 @@ func (s *Service) Start() {
 	s.markSynced()
 }
 
+// fetchOriginSidecars fetches origin sidecars
+func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
+	blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
+	if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
+		return nil
+	}
+
+	block, err := s.cfg.DB.Block(s.ctx, blockRoot)
+	if err != nil {
+		return errors.Wrap(err, "block")
+	}
+
+	currentSlot, blockSlot := s.clock.CurrentSlot(), block.Block().Slot()
+	currentEpoch, blockEpoch := slots.ToEpoch(currentSlot), slots.ToEpoch(blockSlot)
+
+	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
+		return nil
+	}
+
+	roBlock, err := blocks.NewROBlockWithRoot(block, blockRoot)
+	if err != nil {
+		return errors.Wrap(err, "new ro block with root")
+	}
+
+	blockVersion := roBlock.Version()
+
+	if blockVersion >= version.Fulu {
+		if err := s.fetchOriginColumns(peers, roBlock); err != nil {
+			return errors.Wrap(err, "fetch origin columns")
+		}
+		return nil
+	}
+
+	if blockVersion >= version.Deneb {
+		if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
+			return errors.Wrap(err, "fetch origin blobs")
+		}
+	}
+
+	return nil
+}
+
 // Stop initial sync.
 func (s *Service) Stop() error {
 	s.cancel()
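
fetchOriginSidecars encodes the per-fork data-availability rules for the checkpoint-sync origin block: blocks from Fulu onward carry their data as PeerDAS column sidecars, blocks from Deneb up to Fulu carry blob sidecars, earlier blocks have nothing to backfill, and anything outside the DA window is skipped entirely. A self-contained sketch of the same dispatch using Prysm's runtime/version constants — the helper name originSidecarKind is illustrative, not part of the commit:

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/runtime/version"
)

// originSidecarKind mirrors the dispatch in fetchOriginSidecars: data columns
// for Fulu and later, blobs from Deneb up to Fulu, nothing before Deneb.
func originSidecarKind(v int) string {
	switch {
	case v >= version.Fulu:
		return "data column sidecars"
	case v >= version.Deneb:
		return "blob sidecars"
	default:
		return "none"
	}
}

func main() {
	fmt.Println(originSidecarKind(version.Fulu))    // data column sidecars
	fmt.Println(originSidecarKind(version.Electra)) // blob sidecars
	fmt.Println(originSidecarKind(version.Phase0))  // none
}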
@@ -304,23 +353,9 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
 	return req, nil
 }
 
-func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
-	r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
-	if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
-		return nil
-	}
-	blk, err := s.cfg.DB.Block(s.ctx, r)
-	if err != nil {
-		log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
-		return err
-	}
-	if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
-		return nil
-	}
-	rob, err := blocks.NewROBlockWithRoot(blk, r)
-	if err != nil {
-		return err
-	}
+func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
+	r := rob.Root()
+
 	req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
 	if err != nil {
 		return err
@@ -335,16 +370,17 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
 		if err != nil {
 			continue
 		}
 		if len(blobSidecars) != len(req) {
 			continue
 		}
 		bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
 		avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
 		current := s.clock.CurrentSlot()
-		if err := avs.Persist(current, blobSidecars...); err != nil {
+		sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
+		if err := avs.Persist(current, sidecars...); err != nil {
 			return err
 		}
 
 		if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
 			log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
 			continue
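
The behavioral change in this hunk is small but deliberate: raw blob sidecars are first wrapped via blocks.NewSidecarsFromBlobSidecars, apparently so that avs.Persist receives a generic sidecar type shared with data columns rather than a blob-specific slice. A toy illustration of that adapter shape, with stand-in types that are not Prysm's actual definitions:

// Stand-in types: one interface both sidecar flavors can satisfy, so a
// single Persist path can handle blobs and data columns alike.
type sidecar interface{ Index() uint64 }

type blobSidecar struct{ index uint64 }

func (b blobSidecar) Index() uint64 { return b.index }

// newSidecarsFromBlobSidecars widens a concrete slice into the shared
// interface slice, sketching the role blocks.NewSidecarsFromBlobSidecars
// appears to play for Prysm's real types.
func newSidecarsFromBlobSidecars(bs []blobSidecar) []sidecar {
	out := make([]sidecar, 0, len(bs))
	for _, b := range bs {
		out = append(out, b)
	}
	return out
}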
@@ -355,6 +391,67 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
 		return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
 	}
 
+func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
+	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
+
+	// Return early if the origin block has no blob commitments.
+	commitments, err := roBlock.Block().Body().BlobKzgCommitments()
+	if err != nil {
+		return errors.Wrap(err, "fetch blob commitments")
+	}
+
+	if len(commitments) == 0 {
+		return nil
+	}
+
+	// Compute the columns to request.
+	custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
+	if err != nil {
+		return errors.Wrap(err, "custody group count")
+	}
+
+	samplingSize := max(custodyGroupCount, samplesPerSlot)
+	info, _, err := peerdas.Info(s.cfg.P2P.NodeID(), samplingSize)
+	if err != nil {
+		return errors.Wrap(err, "fetch peer info")
+	}
+
+	// Fetch origin data column sidecars.
+	root := roBlock.Root()
+
+	params := sync.DataColumnSidecarsParams{
+		Ctx:         s.ctx,
+		Tor:         s.clock,
+		P2P:         s.cfg.P2P,
+		CtxMap:      s.ctxMap,
+		Storage:     s.cfg.DataColumnStorage,
+		NewVerifier: s.newDataColumnsVerifier,
+	}
+
+	verifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
+	if err != nil {
+		return errors.Wrap(err, "fetch data column sidecars")
+	}
+
+	// Save origin data columns to disk.
+	verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
+	if !ok {
+		return fmt.Errorf("cannot extract origin data column sidecars for block root %#x - should never happen", root)
+	}
+
+	if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
+		return errors.Wrap(err, "save data column sidecars")
+	}
+
+	log.WithFields(logrus.Fields{
+		"blockRoot":   fmt.Sprintf("%#x", roBlock.Root()),
+		"blobCount":   len(commitments),
+		"columnCount": len(verifiedRoDataColumnsSidecars),
+	}).Info("Successfully downloaded data columns for checkpoint sync block")
+
+	return nil
+}
+
 func shufflePeers(pids []peer.ID) {
 	rg := rand.NewGenerator()
 	rg.Shuffle(len(pids), func(i, j int) {
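
fetchOriginColumns sizes its sampling request as max(custodyGroupCount, SamplesPerSlot): a node that custodies fewer groups than the per-slot sample count must still request at least SamplesPerSlot columns, while a node custodying more simply requests its full custody. A tiny worked example of that computation — the numbers are illustrative, not taken from any real config:

package main

import "fmt"

func main() {
	// Illustrative values only: a node custodying 4 groups on a network
	// requiring 8 samples per slot still samples 8 columns; a supernode
	// custodying 128 groups samples all 128.
	samplesPerSlot := uint64(8)

	for _, custodyGroupCount := range []uint64{4, 128} {
		samplingSize := max(custodyGroupCount, samplesPerSlot)
		fmt.Println(samplingSize) // prints 8, then 128
	}
}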
@@ -367,3 +464,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
 		return ini.NewBlobVerifier(b, reqs)
 	}
 }
+
+func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
+	return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
+		return ini.NewDataColumnsVerifier(roDataColumns, reqs)
+	}
+}
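
newDataColumnsVerifierFromInitializer repeats the closure-factory shape of its blob counterpart: the verification.Initializer is resolved once in Start (after the InitializerWaiter completes) and captured, leaving the service with only a cheap per-batch constructor. The pattern in isolation, with illustrative stand-in types rather than Prysm's:

// Closure-factory pattern: capture shared state once, hand out a lightweight
// per-call constructor. Types here are illustrative stand-ins.
type initializer struct{ sharedState string }

type columnsVerifier struct {
	ini   *initializer
	items int
}

type newColumnsVerifier func(items int) columnsVerifier

func verifierFactoryFromInitializer(ini *initializer) newColumnsVerifier {
	return func(items int) columnsVerifier {
		return columnsVerifier{ini: ini, items: items}
	}
}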