Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: fix-ethspe...backfill-i (27 commits)

Commits (SHA1): 2049422a9d, 8c7f4d8697, c941e47b7e, 54991bbc52, 7e32bbc199, a1be3d68cd, 88af3f90a5, 600169a53b, a5e4fccb47, e589588f47, 41884d8d9d, db074cbf12, 360e89767f, 238d5c07df, 2292d955a3, 76bc30e8ba, a5c7c6da06, 4b09dd4aa5, 1dab5a9f8a, d681232fe6, 1d24f89c96, 967193e6a2, 9e40551852, a8cab58f7e, df86f57507, 9b0a3e9632, 5e079aa62c
@@ -290,3 +290,9 @@ type GetProposerLookaheadResponse struct {
 	Finalized bool     `json:"finalized"`
 	Data      []string `json:"data"` // validator indexes
 }
+
+type GetBlobsResponse struct {
+	ExecutionOptimistic bool     `json:"execution_optimistic"`
+	Finalized           bool     `json:"finalized"`
+	Data                []string `json:"data"` // blobs
+}
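The new response type above is a plain JSON container. As a quick illustration (not Prysm code; the sample payload is invented), this is how the struct tags map onto a response body:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the GetBlobsResponse struct added in the diff above.
type GetBlobsResponse struct {
	ExecutionOptimistic bool     `json:"execution_optimistic"`
	Finalized           bool     `json:"finalized"`
	Data                []string `json:"data"` // blobs
}

func main() {
	// Hypothetical payload, only to show how the json tags map fields.
	payload := []byte(`{"execution_optimistic":false,"finalized":true,"data":["0xaa","0xbb"]}`)

	var resp GetBlobsResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Finalized, len(resp.Data))
}
```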
@@ -27,7 +27,7 @@ go_library(
         "receive_block.go",
         "receive_data_column.go",
         "service.go",
-        "setup_forchoice.go",
+        "setup_forkchoice.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
     ],
@@ -50,7 +50,6 @@ go_library(
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
@@ -63,6 +62,7 @@ go_library(
         "//beacon-chain/forkchoice:go_default_library",
         "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
         "//beacon-chain/forkchoice/types:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
         "//beacon-chain/operations/attestations:go_default_library",
         "//beacon-chain/operations/blstoexec:go_default_library",
         "//beacon-chain/operations/slashings:go_default_library",
@@ -148,7 +148,6 @@ go_test(
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -161,6 +160,7 @@ go_test(
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
+        "//beacon-chain/light-client:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/attestations/kv:go_default_library",
        "//beacon-chain/operations/blstoexec:go_default_library",
@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
 type Blob [BytesPerBlob]byte

 // BytesPerCell is the number of bytes in a single cell.
-const BytesPerCell = ckzg4844.BytesPerCell
+const (
+    BytesPerCell  = ckzg4844.BytesPerCell
+    BytesPerProof = ckzg4844.BytesPerProof
+)

 // Cell represents a chunk of an encoded Blob.
 type Cell [BytesPerCell]byte
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
 type Commitment [48]byte

 // Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
-type Proof [48]byte
+type Proof [BytesPerProof]byte

 // Bytes48 is a 48-byte array.
 type Bytes48 = ckzg4844.Bytes48
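The two hunks above replace a hard-coded array length with a size constant taken from the KZG bindings. A minimal standalone sketch of the pattern (the literal sizes here are illustrative stand-ins for the ckzg4844 constants, not the real bindings):

```go
package kzg

// Illustrative stand-ins for the ckzg4844 constants referenced in the diff;
// the real values come from the c-kzg-4844 bindings.
const (
	bytesPerCell  = 2048
	bytesPerProof = 48
)

// Array lengths are derived from the constants, so a single definition keeps
// Cell and Proof in sync with whatever the underlying library declares.
type (
	Cell  [bytesPerCell]byte
	Proof [bytesPerProof]byte
)
```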
@@ -6,11 +6,11 @@ import (
     "github.com/OffchainLabs/prysm/v6/async/event"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
     statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
+    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -35,7 +35,7 @@ func WithMaxGoroutines(x int) Option {
 // WithLCStore for light client store access.
 func WithLCStore() Option {
     return func(s *Service) error {
-        s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+        s.lcStore = lightclient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)
         return nil
     }
 }
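WithLCStore is one of the service's functional options; only the constructor's argument order changes here. For readers unfamiliar with the idiom, a minimal self-contained sketch of the pattern it follows (simplified types, not Prysm's):

```go
package main

import "fmt"

// Minimal sketch of the functional-option pattern that WithLCStore follows:
// each Option mutates the Service during construction and may fail.
type Service struct {
	maxGoroutines int
	lcStoreOn     bool
}

type Option func(*Service) error

func WithMaxGoroutines(x int) Option {
	return func(s *Service) error {
		s.maxGoroutines = x
		return nil
	}
}

func WithLCStore() Option {
	return func(s *Service) error {
		s.lcStoreOn = true // the real option wires up the light client store
		return nil
	}
}

func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := NewService(WithMaxGoroutines(8), WithLCStore())
	fmt.Println(s, err)
}
```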
@@ -247,7 +247,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
         args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
             JustifiedCheckpoint: jCheckpoints[i],
             FinalizedCheckpoint: fCheckpoints[i]}
-        pendingNodes[len(blks)-i-1] = args
+        pendingNodes[i] = args
         if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
             tracing.AnnotateError(span, err)
             return err
@@ -284,14 +284,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
     if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
         return err
     }
-    // Insert all nodes but the last one to forkchoice
+    // Insert all nodes to forkchoice
     if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
         return errors.Wrap(err, "could not insert batch to forkchoice")
     }
-    // Insert the last block to forkchoice
-    if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
-        return errors.Wrap(err, "could not insert last block in batch to forkchoice")
-    }
     // Set their optimistic status
     if isValidPayload {
         if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
@@ -664,14 +660,14 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(
     ctx context.Context,
-    root [fieldparams.RootLength]byte,
-    signedBlock interfaces.ReadOnlySignedBeaconBlock,
+    roBlock consensusblocks.ROBlock,
 ) error {
-    block := signedBlock.Block()
+    block := roBlock.Block()
     if block == nil {
         return errors.New("invalid nil beacon block")
     }

+    root := roBlock.Root()
     blockVersion := block.Version()
     if blockVersion >= version.Fulu {
         return s.areDataColumnsAvailable(ctx, root, block)
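The signature change above passes one read-only block value that already carries its root, instead of a separate root plus signed block. A simplified sketch of that wrapper idea (stand-in types, not Prysm's ROBlock):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Sketch: bundle a block with its precomputed root so callers pass one value
// and never recompute or mismatch the root.
type Block struct {
	Slot uint64
	Body []byte
}

type ROBlock struct {
	block Block
	root  [32]byte
}

func NewROBlockWithRoot(b Block, root [32]byte) ROBlock {
	return ROBlock{block: b, root: root}
}

func (r ROBlock) Block() Block   { return r.block }
func (r ROBlock) Root() [32]byte { return r.root }

func main() {
	b := Block{Slot: 42, Body: []byte("payload")}
	rob := NewROBlockWithRoot(b, sha256.Sum256(b.Body)) // simplified "root"
	fmt.Printf("slot=%d root=%x\n", rob.Block().Slot, rob.Root())
}
```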
@@ -691,8 +687,6 @@ func (s *Service) areDataColumnsAvailable(
     root [fieldparams.RootLength]byte,
     block interfaces.ReadOnlyBeaconBlock,
 ) error {
-    samplesPerSlot := params.BeaconConfig().SamplesPerSlot
-
     // We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
     blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
     blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
@@ -726,6 +720,7 @@ func (s *Service) areDataColumnsAvailable(

     // Compute the sampling size.
     // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
+    samplesPerSlot := params.BeaconConfig().SamplesPerSlot
     samplingSize := max(samplesPerSlot, custodyGroupCount)

     // Get the peer info for the node.
@@ -3,14 +3,12 @@ package blockchain
 import (
     "context"
     "fmt"
-    "strings"
+    "slices"
     "time"

-    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
-
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
     statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
     doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
     forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
@@ -133,35 +131,26 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
     })
 }

 // processLightClientUpdates saves the light client data in lcStore, when feature flag is enabled.
 func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
-    if err := s.processLightClientUpdate(cfg); err != nil {
-        log.WithError(err).Error("Failed to process light client update")
-    }
-    if err := s.processLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
-        log.WithError(err).Error("Failed to process light client optimistic update")
-    }
-    if err := s.processLightClientFinalityUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
-        log.WithError(err).Error("Failed to process light client finality update")
-    }
-}
-
-// processLightClientUpdate saves the light client update for this block
-// if it's better than the already saved one, when feature flag is enabled.
-func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) error {
     attestedRoot := cfg.roblock.Block().ParentRoot()
     attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
     if err != nil {
-        return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
+        log.WithError(err).Error("processLightClientUpdates: Could not get attested block")
+        return
     }
     if attestedBlock == nil || attestedBlock.IsNil() {
-        return errors.New("attested block is nil")
+        log.Error("processLightClientUpdates: Could not get attested block")
+        return
     }
     attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
     if err != nil {
-        return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
+        log.WithError(err).Error("processLightClientUpdates: Could not get attested state")
+        return
     }
     if attestedState == nil || attestedState.IsNil() {
-        return errors.New("attested state is nil")
+        log.Error("processLightClientUpdates: Could not get attested state")
+        return
     }

     finalizedRoot := attestedState.FinalizedCheckpoint().Root
@@ -169,98 +158,17 @@ func (s *Service) processLightClientUpdate(cfg *postBlockProcessConfig) {
     if err != nil {
         if errors.Is(err, errBlockNotFoundInCacheOrDB) {
             log.Debugf("Skipping saving light client update because finalized block is nil for root %#x", finalizedRoot)
-            return nil
+            return
         }
-        return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
+        log.WithError(err).Error("processLightClientUpdates: Could not get finalized block")
+        return
     }

-    update, err := lightclient.NewLightClientUpdateFromBeaconState(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock)
+    err = s.lcStore.SaveLCData(cfg.ctx, cfg.postState, cfg.roblock, attestedState, attestedBlock, finalizedBlock, s.headRoot())
     if err != nil {
-        return errors.Wrapf(err, "could not create light client update")
+        log.WithError(err).Error("processLightClientUpdates: Could not save light client data")
     }

-    period := slots.SyncCommitteePeriod(slots.ToEpoch(attestedState.Slot()))
-
-    return s.lcStore.SaveLightClientUpdate(cfg.ctx, period, update)
-}
-
-func (s *Service) processLightClientFinalityUpdate(
-    ctx context.Context,
-    signed interfaces.ReadOnlySignedBeaconBlock,
-    postState state.BeaconState,
-) error {
-    attestedRoot := signed.Block().ParentRoot()
-    attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
-    if err != nil {
-        return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
-    }
-    attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
-    if err != nil {
-        return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
-    }
-
-    finalizedCheckpoint := attestedState.FinalizedCheckpoint()
-
-    if finalizedCheckpoint == nil {
-        return nil
-    }
-
-    finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
-    finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
-    if err != nil {
-        if errors.Is(err, errBlockNotFoundInCacheOrDB) {
-            log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
-            return nil
-        }
-        return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
-    }
-
-    newUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock, finalizedBlock)
-
-    if err != nil {
-        return errors.Wrap(err, "could not create light client finality update")
-    }
-
-    if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
-        log.Debug("Skip saving light client finality update: current update is better")
-        return nil
-    }
-
-    s.lcStore.SetLastFinalityUpdate(newUpdate, true)
-
-    return nil
-}
-
-func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
-    postState state.BeaconState) error {
-    attestedRoot := signed.Block().ParentRoot()
-    attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
-    if err != nil {
-        return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
-    }
-    attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
-    if err != nil {
-        return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
-    }
-
-    newUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, postState, signed, attestedState, attestedBlock)
-
-    if err != nil {
-        if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
-            log.WithError(err).Debug("Skipping processing light client optimistic update")
-            return nil
-        }
-        return errors.Wrap(err, "could not create light client optimistic update")
-    }
-
-    if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
-        log.Debug("Skip saving light client optimistic update: current update is better")
-        return nil
-    }
-
-    s.lcStore.SetLastOptimisticUpdate(newUpdate, true)
-
-    return nil
+    log.Debug("Processed light client updates")
 }

 // updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
@@ -469,15 +377,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
     if err != nil {
         return err
     }
-    // The first block can have a bogus root since the block is not inserted in forkchoice
-    roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
-    if err != nil {
-        return err
-    }
-    pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
-        JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
+    root := signed.Block().ParentRoot()
     // As long as parent node is not in fork choice store, and parent node is in DB.
-    root := roblock.Block().ParentRoot()
     for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
         b, err := s.getBlock(ctx, root)
         if err != nil {
@@ -496,12 +397,13 @@
             FinalizedCheckpoint: fCheckpoint}
         pendingNodes = append(pendingNodes, args)
     }
-    if len(pendingNodes) == 1 {
+    if len(pendingNodes) == 0 {
         return nil
     }
     if root != s.ensureRootNotZeros(finalized.Root) && !s.cfg.ForkChoiceStore.HasNode(root) {
         return ErrNotDescendantOfFinalized
     }
+    slices.Reverse(pendingNodes)
     return s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes)
 }
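The hunk above collects nodes while walking from child to parent and then reverses the slice once before inserting the chain, instead of computing reversed indexes up front. A tiny standalone illustration of the stdlib call it relies on (Go 1.21+, simplified data):

```go
package main

import (
	"fmt"
	"slices"
)

// Collect in child-to-parent order, reverse once, then hand the chain over in
// parent-to-child order.
func main() {
	pendingNodes := []string{"block-3", "block-2", "block-1"} // child -> parent
	slices.Reverse(pendingNodes)                              // parent -> child
	fmt.Println(pendingNodes)                                 // [block-1 block-2 block-3]
}
```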
@@ -12,7 +12,6 @@ import (
     "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -24,6 +23,7 @@ import (
     mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
     doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
     forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
+    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations/kv"
     mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -2795,6 +2795,11 @@ func TestProcessLightClientUpdate(t *testing.T) {
     s, tr := minimalTestService(t, WithLCStore())
     ctx := tr.ctx

+    headState, err := util.NewBeaconState()
+    require.NoError(t, err)
+    require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
+    require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
+
     for testVersion := version.Altair; testVersion <= version.Electra; testVersion++ {
         t.Run(version.String(testVersion), func(t *testing.T) {
             l := util.NewTestLightClient(t, testVersion)
@@ -2817,6 +2822,8 @@
             require.NoError(t, err)
             err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
             require.NoError(t, err)
+            err = s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, currentBlockRoot)
+            require.NoError(t, err)

             err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
             require.NoError(t, err)
@@ -2831,10 +2838,9 @@
             period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

             t.Run("no old update", func(t *testing.T) {
-                require.NoError(t, s.processLightClientUpdate(cfg))
-
+                s.processLightClientUpdates(cfg)
                 // Check that the light client update is saved
-                u, err := s.lcStore.LightClientUpdate(ctx, period)
+                u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
                 require.NoError(t, err)
                 require.NotNil(t, u)
                 attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2848,12 +2854,12 @@
                 oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(l.AttestedBlock)
                 require.NoError(t, err)

-                err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
+                err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
                 require.NoError(t, err)

-                require.NoError(t, s.processLightClientUpdate(cfg))
+                s.processLightClientUpdates(cfg)

-                u, err := s.lcStore.LightClientUpdate(ctx, period)
+                u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
                 require.NoError(t, err)
                 require.NotNil(t, u)
                 attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
@@ -2877,12 +2883,12 @@
                     SyncCommitteeSignature: make([]byte, 96),
                 })

-                err = s.lcStore.SaveLightClientUpdate(ctx, period, oldUpdate)
+                err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
                 require.NoError(t, err)

-                require.NoError(t, s.processLightClientUpdate(cfg))
+                s.processLightClientUpdates(cfg)

-                u, err := s.lcStore.LightClientUpdate(ctx, period)
+                u, err := s.lcStore.LightClientUpdate(ctx, period, l.Block)
                 require.NoError(t, err)
                 require.NotNil(t, u)
                 require.DeepEqual(t, oldUpdate, u)
@@ -2952,14 +2958,18 @@ func TestIsDataAvailable(t *testing.T) {
         params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
         ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-        err := service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NoError(t, err)
     })

     t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
         ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})

-        err := service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NoError(t, err)
     })
@@ -2977,7 +2987,9 @@

         ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-        err := service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NoError(t, err)
     })
@@ -2989,7 +3001,9 @@

         ctx, _, service, root, signed := testIsAvailableSetup(t, params)

-        err := service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NoError(t, err)
     })
@@ -3037,7 +3051,9 @@
         ctx, cancel := context.WithTimeout(ctx, time.Second*2)
         defer cancel()

-        err = service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NoError(t, err)
     })
@@ -3099,7 +3115,9 @@
         ctx, cancel := context.WithTimeout(ctx, time.Second*2)
         defer cancel()

-        err = service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NoError(t, err)
     })
@@ -3118,7 +3136,9 @@
             cancel()
         }()

-        err := service.isDataAvailable(ctx, root, signed)
+        roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
+        require.NoError(t, err)
+        err = service.isDataAvailable(ctx, roBlock)
         require.NotNil(t, err)
     })
 }
@@ -3190,6 +3210,11 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
     s.cfg.P2P = &mockp2p.FakeP2P{}
     ctx := tr.ctx

+    headState, err := util.NewBeaconState()
+    require.NoError(t, err)
+    require.NoError(t, s.cfg.BeaconDB.SaveState(ctx, headState, [32]byte{1, 2}))
+    require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
+
     testCases := []struct {
         name       string
         oldOptions []util.LightClientOption
@@ -3205,7 +3230,7 @@
         {
             name:          "Same age",
             oldOptions:    []util.LightClientOption{},
-            newOptions:    []util.LightClientOption{util.WithSupermajority()}, // supermajority does not matter here and is only added to result in two different updates
+            newOptions:    []util.LightClientOption{util.WithSupermajority(0)}, // supermajority does not matter here and is only added to result in two different updates
             expectReplace: false,
         },
         {
@@ -3249,14 +3274,14 @@

         t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
             s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-            s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+            s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)

             var oldActualUpdate interfaces.LightClientOptimisticUpdate
             var err error
             if tc.oldOptions != nil {
                 // config for old update
                 lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
-                require.NoError(t, s.processLightClientOptimisticUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
+                s.processLightClientUpdates(cfgOld)

                 oldActualUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(lOld.Ctx, lOld.State, lOld.Block, lOld.AttestedState, lOld.AttestedBlock)
                 require.NoError(t, err)
@@ -3270,7 +3295,7 @@

             // config for new update
             lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
-            require.NoError(t, s.processLightClientOptimisticUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
+            s.processLightClientUpdates(cfgNew)

             newActualUpdate, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lNew.Ctx, lNew.State, lNew.Block, lNew.AttestedState, lNew.AttestedBlock)
             require.NoError(t, err)
@@ -3311,6 +3336,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
     s, tr := minimalTestService(t)
     s.cfg.P2P = &mockp2p.FakeP2P{}
     ctx := tr.ctx
+    s.head = &head{}

     testCases := []struct {
         name       string
@@ -3389,15 +3415,18 @@

         t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
             s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-            s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
+            s.lcStore = lightClient.NewLightClientStore(s.cfg.P2P, s.cfg.StateNotifier.StateFeed(), s.cfg.BeaconDB)

             var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
             var err error

             if tc.oldOptions != nil {
                 // config for old update
                 lOld, cfgOld := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.oldOptions...)
-                require.NoError(t, s.processLightClientFinalityUpdate(cfgOld.ctx, cfgOld.roblock, cfgOld.postState))
+                blkRoot, err := lOld.Block.Block().HashTreeRoot()
+                require.NoError(t, err)
+                s.head.block = lOld.Block
+                s.head.root = blkRoot
+                s.processLightClientUpdates(cfgOld)

                 // check that the old update is saved
                 actualOldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgOld.postState, cfgOld.roblock, lOld.AttestedState, lOld.AttestedBlock, lOld.FinalizedBlock)
@@ -3408,7 +3437,11 @@

             // config for new update
             lNew, cfgNew := setupLightClientTestRequirements(ctx, t, s, testVersion, tc.newOptions...)
-            require.NoError(t, s.processLightClientFinalityUpdate(cfgNew.ctx, cfgNew.roblock, cfgNew.postState))
+            blkRoot, err := lNew.Block.Block().HashTreeRoot()
+            require.NoError(t, err)
+            s.head.block = lNew.Block
+            s.head.root = blkRoot
+            s.processLightClientUpdates(cfgNew)

             // check that the actual old update and the actual new update are different
             actualNewUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, cfgNew.postState, cfgNew.roblock, lNew.AttestedState, lNew.AttestedBlock, lNew.FinalizedBlock)
@@ -16,7 +16,6 @@ import (
     "github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
     "github.com/OffchainLabs/prysm/v6/config/features"
-    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
     "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
     "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -93,7 +92,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

     blockCopy, err := block.Copy()
     if err != nil {
-        return err
+        return errors.Wrap(err, "block copy")
     }

     preState, err := s.getBlockPreState(ctx, blockCopy.Block())
@@ -104,17 +103,17 @@
     currentCheckpoints := s.saveCurrentCheckpoints(preState)
     roblock, err := blocks.NewROBlockWithRoot(blockCopy, blockRoot)
     if err != nil {
-        return err
+        return errors.Wrap(err, "new ro block with root")
     }

     postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
     if err != nil {
-        return err
+        return errors.Wrap(err, "validator execution and consensus")
     }

-    daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
+    daWaitedTime, err := s.handleDA(ctx, avs, roblock)
     if err != nil {
-        return err
+        return errors.Wrap(err, "handle da")
     }

     // Defragment the state before continuing block processing.
@@ -135,10 +134,10 @@
     if err := s.postBlockProcess(args); err != nil {
         err := errors.Wrap(err, "could not process block")
         tracing.AnnotateError(span, err)
-        return err
+        return errors.Wrap(err, "post block process")
     }
     if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
-        return err
+        return errors.Wrap(err, "update checkpoints")
     }
     // If slasher is configured, forward the attestations in the block via an event feed for processing.
     if s.slasherEnabled {
@@ -152,12 +151,12 @@

     // Have we been finalizing? Should we start saving hot states to db?
     if err := s.checkSaveHotStateDB(ctx); err != nil {
-        return err
+        return errors.Wrap(err, "check save hot state db")
     }

     // We apply the same heuristic to some of our more important caches.
     if err := s.handleCaches(); err != nil {
-        return err
+        return errors.Wrap(err, "handle caches")
     }
     s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
     return nil
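The hunks above replace bare `return err` statements with wrapped errors so a failure in ReceiveBlock names the step that produced it. A minimal standalone sketch of the pattern using the same github.com/pkg/errors package (stand-in functions, not Prysm's):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func handleCaches() error {
	return errors.New("cache full") // stand-in failure
}

// Each return site annotates the error with where it failed, so a single log
// line points at the failing step.
func receive() error {
	if err := handleCaches(); err != nil {
		return errors.Wrap(err, "handle caches")
	}
	return nil
}

func main() {
	fmt.Println(receive()) // handle caches: cache full
}
```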
@@ -240,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
     return postState, isValidPayload, nil
 }

-func (s *Service) handleDA(
-    ctx context.Context,
-    block interfaces.SignedBeaconBlock,
-    blockRoot [fieldparams.RootLength]byte,
-    avs das.AvailabilityStore,
-) (elapsed time.Duration, err error) {
-    defer func(start time.Time) {
-        elapsed = time.Since(start)
-
-        if err == nil {
-            dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
-        }
-    }(time.Now())
-
-    if avs == nil {
-        if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
-            return
-        }
-
-        return
+func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
+    var err error
+    start := time.Now()
+    if avs != nil {
+        err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
+    } else {
+        err = s.isDataAvailable(ctx, block)
     }

-    var rob blocks.ROBlock
-    rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
+    elapsed := time.Since(start)
     if err != nil {
-        return
+        dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
     }

-    err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
-
-    return
+    return elapsed, err
 }

 func (s *Service) reportPostBlockProcessing(
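The rewrite above drops the named-return plus deferred-closure timing in favor of an explicit start/elapsed measurement. A small self-contained sketch of that structure (stand-in check and metric sink, not Prysm code):

```go
package main

import (
	"fmt"
	"time"
)

// observe is a stand-in for the Prometheus histogram used in the diff.
func observe(ms float64) { fmt.Printf("waited %.0f ms\n", ms) }

func checkAvailability() error {
	time.Sleep(10 * time.Millisecond) // stand-in for the availability check
	return nil
}

// Measure elapsed time explicitly with time.Since instead of a deferred
// closure over named return values, then hand the duration back to the caller.
func handleDA() (time.Duration, error) {
	start := time.Now()
	err := checkAvailability()
	elapsed := time.Since(start)
	observe(float64(elapsed.Milliseconds())) // the real code gates this on the error value
	return elapsed, err
}

func main() {
	elapsed, err := handleDA()
	fmt.Println(elapsed, err)
}
```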
@@ -320,13 +301,28 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
     if features.Get().EnableLightClient {
         // Save a light client bootstrap for the finalized checkpoint
         go func() {
-            err := s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root)
+            st, err := s.cfg.StateGen.StateByRoot(ctx, finalized.Root)
+            if err != nil {
+                log.WithError(err).Error("Could not retrieve state for finalized root to save light client bootstrap")
+                return
+            }
+            err = s.lcStore.SaveLightClientBootstrap(s.ctx, finalized.Root, st)
             if err != nil {
                 log.WithError(err).Error("Could not save light client bootstrap by block root")
             } else {
                 log.Debugf("Saved light client bootstrap for finalized root %#x", finalized.Root)
             }
         }()
+
+        // Clean up the light client store caches
+        go func() {
+            err := s.lcStore.MigrateToCold(s.ctx, finalized.Root)
+            if err != nil {
+                log.WithError(err).Error("Could not migrate light client store to cold storage")
+            } else {
+                log.Debugf("Migrated light client store to cold storage for finalized root %#x", finalized.Root)
+            }
+        }()
     }
 }
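Both post-finalization tasks above run in fire-and-forget goroutines that log failures rather than returning them. A simplified standalone sketch of that shape (the WaitGroup only keeps the example process alive; the real code does not wait):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	tasks := map[string]func() error{
		"save bootstrap":  func() error { return nil },
		"migrate to cold": func() error { return fmt.Errorf("disk full") }, // stand-in failure
	}
	for name, task := range tasks {
		wg.Add(1)
		go func(name string, task func() error) {
			defer wg.Done()
			// Background task: log the error and move on instead of propagating it.
			if err := task(); err != nil {
				fmt.Printf("%s failed: %v\n", name, err)
				return
			}
			fmt.Printf("%s done\n", name)
		}(name, task)
	}
	wg.Wait()
}
```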
@@ -9,9 +9,9 @@ import (
     "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
     statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/das"
     forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
+    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/voluntaryexits"
     "github.com/OffchainLabs/prysm/v6/config/features"
     fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -192,7 +192,9 @@ func TestHandleDA(t *testing.T) {
     require.NoError(t, err)

     s, _ := minimalTestService(t)
-    elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
+    block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
+    require.NoError(t, err)
+    elapsed, err := s.handleDA(t.Context(), nil, block)
     require.NoError(t, err)
     require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
 }
@@ -594,11 +596,7 @@ func TestProcessLightClientBootstrap(t *testing.T) {

     require.NoError(t, s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: cp.Epoch, Root: [32]byte(cp.Root)}))

-    sss, err := s.cfg.BeaconDB.State(ctx, finalizedBlockRoot)
-    require.NoError(t, err)
-    require.NotNil(t, sss)
-
-    s.executePostFinalizationTasks(s.ctx, l.FinalizedState)
+    s.executePostFinalizationTasks(s.ctx, l.AttestedState)

     // wait for the goroutine to finish processing
     time.Sleep(1 * time.Second)
@@ -14,13 +14,13 @@ import (
     "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
     statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
-    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
     coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
     f "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
+    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -4,6 +4,7 @@ import (
     "bytes"
     "context"
     "fmt"
+    "slices"

     forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -112,6 +113,7 @@ func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.Read
             return nil, errors.New("head block is not a descendant of the finalized checkpoint")
         }
     }
+    slices.Reverse(chain)
     return chain, nil
 }
@@ -124,5 +124,5 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
     require.NotEqual(t, fRoot, root)
     require.Equal(t, root, service.startupHeadRoot())
     require.NoError(t, service.setupForkchoiceTree(st))
-    require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
+    require.Equal(t, 3, service.cfg.ForkChoiceStore.NodeCount())
 }
@@ -11,21 +11,21 @@ import (
     "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
     statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/db"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
     testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
     mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
     doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
+    lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
     p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
     fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
     "github.com/OffchainLabs/prysm/v6/config/params"
     "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
     "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
     ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
     return nil
 }

-func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
     mb.broadcastCalled = true
     return nil
 }
@@ -723,7 +723,8 @@ func (c *ChainService) ReceiveDataColumn(dc blocks.VerifiedRODataColumn) error {
 }

 // ReceiveDataColumns implements the same method in chain service
-func (*ChainService) ReceiveDataColumns(_ []blocks.VerifiedRODataColumn) error {
+func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) error {
+    c.DataColumns = append(c.DataColumns, dcs...)
     return nil
 }
@@ -1,202 +0,0 @@
-package light_client
-
-import (
-    "context"
-    "sync"
-
-    "github.com/OffchainLabs/prysm/v6/async/event"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
-    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
-    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
-    "github.com/pkg/errors"
-    log "github.com/sirupsen/logrus"
-)
-
-var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")
-
-type Store struct {
-    mu sync.RWMutex
-
-    beaconDB             iface.HeadAccessDatabase
-    lastFinalityUpdate   interfaces.LightClientFinalityUpdate   // tracks the best finality update seen so far
-    lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
-    p2p                  p2p.Accessor
-    stateFeed            event.SubscriberSender
-}
-
-func NewLightClientStore(db iface.HeadAccessDatabase, p p2p.Accessor, e event.SubscriberSender) *Store {
-    return &Store{
-        beaconDB:  db,
-        p2p:       p,
-        stateFeed: e,
-    }
-}
-
-func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-
-    // Fetch the light client bootstrap from the database
-    bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
-    if err != nil {
-        return nil, err
-    }
-    if bootstrap == nil { // not found
-        return nil, ErrLightClientBootstrapNotFound
-    }
-
-    return bootstrap, nil
-}
-
-func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte) error {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    blk, err := s.beaconDB.Block(ctx, blockRoot)
-    if err != nil {
-        return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
-    }
-    if blk == nil {
-        return errors.Errorf("failed to fetch block for root %x", blockRoot)
-    }
-
-    state, err := s.beaconDB.State(ctx, blockRoot)
-    if err != nil {
-        return errors.Wrapf(err, "failed to fetch state for block root %x", blockRoot)
-    }
-    if state == nil {
-        return errors.Errorf("failed to fetch state for block root %x", blockRoot)
-    }
-
-    bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
-    if err != nil {
-        return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
-    }
-
-    // Save the light client bootstrap to the database
-    if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
-        return err
-    }
-    return nil
-}
-
-func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) ([]interfaces.LightClientUpdate, error) {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-
-    // Fetch the light client updatesMap from the database
-    updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
-    if err != nil {
-        return nil, err
-    }
-
-    var updates []interfaces.LightClientUpdate
-    for i := startPeriod; i <= endPeriod; i++ {
-        update, ok := updatesMap[i]
-        if !ok {
-            // Only return the first contiguous range of updates
-            break
-        }
-        updates = append(updates, update)
-    }
-
-    return updates, nil
-}
-
-func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error) {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-
-    // Fetch the light client update for the given period from the database
-    update, err := s.beaconDB.LightClientUpdate(ctx, period)
-    if err != nil {
-        return nil, err
-    }
-
-    return update, nil
-}
-
-func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update interfaces.LightClientUpdate) error {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    oldUpdate, err := s.beaconDB.LightClientUpdate(ctx, period)
-    if err != nil {
-        return errors.Wrapf(err, "could not get current light client update")
-    }
-
-    if oldUpdate == nil {
-        if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
-            return errors.Wrapf(err, "could not save light client update")
-        }
-        log.WithField("period", period).Debug("Saved new light client update")
-        return nil
-    }
-
-    isNewUpdateBetter, err := IsBetterUpdate(update, oldUpdate)
-    if err != nil {
-        return errors.Wrapf(err, "could not compare light client updates")
-    }
-
-    if isNewUpdateBetter {
-        if err := s.beaconDB.SaveLightClientUpdate(ctx, period, update); err != nil {
-            return errors.Wrapf(err, "could not save light client update")
-        }
-        log.WithField("period", period).Debug("Saved new light client update")
-        return nil
-    }
-    log.WithField("period", period).Debug("New light client update is not better than the current one, skipping save")
-    return nil
-}
-
-func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
-        if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
-            log.WithError(err).Error("Could not broadcast light client finality update")
-        }
-    }
-
-    s.lastFinalityUpdate = update
-    log.Debug("Saved new light client finality update")
-
-    s.stateFeed.Send(&feed.Event{
-        Type: statefeed.LightClientFinalityUpdate,
-        Data: update,
-    })
-}
-
-func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-    return s.lastFinalityUpdate
-}
-
-func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    if broadcast {
-        if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
-            log.WithError(err).Error("Could not broadcast light client optimistic update")
-        }
-    }
-
-    s.lastOptimisticUpdate = update
-    log.Debug("Saved new light client optimistic update")
-
-    s.stateFeed.Send(&feed.Event{
-        Type: statefeed.LightClientOptimisticUpdate,
-        Data: update,
-    })
-}
-
-func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-    return s.lastOptimisticUpdate
-}
@@ -1,165 +0,0 @@
|
||||
package light_client_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestLightClientStore(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Initialize the light client store
|
||||
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), &p2pTesting.FakeP2P{}, new(event.Feed))
|
||||
|
||||
// Create test light client updates for Capella and Deneb
|
||||
lCapella := util.NewTestLightClient(t, version.Capella)
|
||||
opUpdateCapella, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
|
||||
finUpdateCapella, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")
|
||||
|
||||
lDeneb := util.NewTestLightClient(t, version.Deneb)
|
||||
opUpdateDeneb, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
|
||||
finUpdateDeneb, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")
|
||||
|
||||
// Initially the store should have nil values for both updates
|
||||
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get finality with Capella update. Optimistic update should be nil
|
||||
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get optimistic with Capella update. Finality update should be Capella
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
|
||||
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
|
||||
// Set and get finality and optimistic with Deneb update
|
||||
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
|
||||
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
}
|
||||
|
||||
func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), p2p, new(event.Feed))
|
||||
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
|
||||
// update0 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
|
||||
update1, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
|
||||
// update1 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
|
||||
update2, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
|
||||
// update2 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority())
|
||||
update3, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
|
||||
// update3 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority())
|
||||
update4, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
|
||||
// update4 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
|
||||
update5, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
|
||||
// update5 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
|
||||
update6, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
|
||||
// update6 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
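Read together, the assertions above pin down a broadcast gate that can be summarised roughly as follows. This is an illustrative condensation only; the FinalityUpdate struct and isValidForBroadcast helper are hypothetical, and the real logic lives in lightClient.IsFinalityUpdateValidForBroadcast and may differ in detail.

// FinalityUpdate is a hypothetical, simplified view of a light client finality update.
type FinalityUpdate struct {
	FinalizedSlot    uint64
	HasSupermajority bool
}

// isValidForBroadcast sketches the rule the test exercises: broadcast when there is no
// previous update, when the finalized slot advances, or when supermajority participation
// is reached for the first time at the current finalized slot.
func isValidForBroadcast(next, prev *FinalityUpdate) bool {
	if prev == nil {
		return true
	}
	if next.FinalizedSlot > prev.FinalizedSlot {
		return true
	}
	return next.HasSupermajority && !prev.HasSupermajority
}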
|
||||
@@ -19,11 +19,11 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
|
||||
@@ -4,15 +4,10 @@ import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
@@ -20,12 +15,9 @@ import (
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
ErrCustodyGroupTooLarge = errors.New("custody group too large")
|
||||
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
|
||||
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
|
||||
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
|
||||
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
|
||||
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
|
||||
ErrCustodyGroupTooLarge = errors.New("custody group too large")
|
||||
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
|
||||
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
|
||||
|
||||
// maxUint256 is the maximum value of an uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
@@ -117,44 +109,6 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
return columns, nil
|
||||
}
|
||||
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
|
||||
// The returned value contains pointers to function parameters.
|
||||
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
|
||||
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
if len(blobKzgCommitments) != len(cellsAndProofs) {
|
||||
return nil, ErrSizeMismatch
|
||||
}
|
||||
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof KZG commitments")
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars")
|
||||
}
|
||||
|
||||
return dataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// ComputeCustodyGroupForColumn computes the custody group for a given column.
|
||||
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
|
||||
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
@@ -194,72 +148,3 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
|
||||
return columns, nil
|
||||
}
|
||||
|
||||
// dataColumnsSidecars computes the data column sidecars from the signed block header, the blob KZG commitments,
// the KZG commitment inclusion proofs, and the cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters input parameters afterwards, the returned value will be modified as well.)
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
|
||||
func dataColumnsSidecars(
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
blobKzgCommitments [][]byte,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
start := time.Now()
|
||||
if len(blobKzgCommitments) != len(cellsAndProofs) {
|
||||
return nil, ErrSizeMismatch
|
||||
}
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
blobsCount := len(cellsAndProofs)
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
|
||||
for columnIndex := range numberOfColumns {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := range blobsCount {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
// Validate that we have enough cells and proofs for this column index
|
||||
if columnIndex >= uint64(len(cellsForRow)) {
|
||||
return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
|
||||
}
|
||||
if columnIndex >= uint64(len(proofsForRow)) {
|
||||
return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
|
||||
}
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: columnIndex,
|
||||
Column: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProofs: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
@@ -3,13 +3,9 @@ package peerdas_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
@@ -31,93 +27,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
|
||||
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
t.Run("nil signed block", func(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("empty cells and proofs", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{})
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 1)
|
||||
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
|
||||
})
|
||||
|
||||
t.Run("cells array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with insufficient cells for the number of columns.
|
||||
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, 10), // Only 10 cells
|
||||
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail because the function will try to access columns up to NumberOfColumns
|
||||
// but we only have 10 cells/proofs.
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorContains(t, "column index", err)
|
||||
require.ErrorContains(t, "exceeds cells length", err)
|
||||
})
|
||||
|
||||
t.Run("proofs array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail when trying to access proof beyond index 4.
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorContains(t, "column index", err)
|
||||
require.ErrorContains(t, "exceeds proofs length", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
|
||||
@@ -63,17 +63,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
t.Run("invalid proof", func(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
sidecars[0].Column[0][0]++ // It is OK to overflow
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
@@ -256,9 +253,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
|
||||
for i := range int64(b.N) {
|
||||
// Generate new random sidecars to ensure the KZG backend does not cache anything.
|
||||
sidecars := generateRandomSidecars(b, i, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars)
|
||||
|
||||
for _, sidecar := range roDataColumnSidecars {
|
||||
for _, sidecar := range sidecars {
|
||||
sidecars := []blocks.RODataColumn{sidecar}
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
@@ -282,7 +278,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
|
||||
b.ResetTimer()
|
||||
|
||||
for j := range int64(b.N) {
|
||||
allSidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
|
||||
allSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
|
||||
for k := int64(0); k < numberOfColumns; k += columnsCount {
|
||||
// Use different seeds to generate different blobs/commitments
|
||||
seed := int64(b.N*i) + numberOfColumns*j + blobCount*k
|
||||
@@ -292,10 +288,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
|
||||
allSidecars = append(allSidecars, sidecars[k:k+columnsCount]...)
|
||||
}
|
||||
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, allSidecars)
|
||||
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -323,8 +317,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
|
||||
for j := range int64(batchCount) {
|
||||
// Use different seeds to generate different blobs/commitments
|
||||
sidecars := generateRandomSidecars(b, int64(batchCount)*i+j*blobCount, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars[:columnsCount])
|
||||
allSidecars = append(allSidecars, roDataColumnSidecars)
|
||||
allSidecars = append(allSidecars, sidecars)
|
||||
}
|
||||
|
||||
for _, sidecars := range allSidecars {
|
||||
@@ -358,7 +351,7 @@ func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgPr
|
||||
return roSidecar
|
||||
}
|
||||
|
||||
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataColumnSidecar {
|
||||
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.RODataColumn {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
|
||||
commitments := make([][]byte, 0, blobCount)
|
||||
@@ -379,20 +372,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataCo
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
rob, err := blocks.NewROBlock(sBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
|
||||
return sidecars
|
||||
}
|
||||
|
||||
func generateRODataColumnSidecars(t testing.TB, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
|
||||
roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
|
||||
for _, sidecar := range sidecars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
roDataColumnSidecars = append(roDataColumnSidecars, roCol)
|
||||
}
|
||||
|
||||
return roDataColumnSidecars
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -16,6 +16,7 @@ var (
|
||||
ErrBlobIndexTooHigh = errors.New("blob index is too high")
|
||||
ErrBlockRootMismatch = errors.New("block root mismatch")
|
||||
ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
|
||||
ErrNilBlobAndProof = errors.New("nil blob and proof")
|
||||
)
|
||||
|
||||
// MinimumColumnCountToReconstruct returns the minimum number of columns needed to perform a reconstruction.
|
||||
@@ -28,19 +29,19 @@ func MinimumColumnCountToReconstruct() uint64 {
|
||||
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
|
||||
// All input sidecars must be committed to the same block.
|
||||
// `verifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
|
||||
func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if there is at least one input sidecar.
|
||||
if len(inVerifiedRoSidecars) == 0 {
|
||||
if len(verifiedRoSidecars) == 0 {
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// Safely retrieve the first sidecar as a reference.
|
||||
referenceSidecar := inVerifiedRoSidecars[0]
|
||||
referenceSidecar := verifiedRoSidecars[0]
|
||||
|
||||
// Check if all columns have the same length and are committed to the same block.
|
||||
blobCount := len(referenceSidecar.Column)
|
||||
blockRoot := referenceSidecar.BlockRoot()
|
||||
for _, sidecar := range inVerifiedRoSidecars[1:] {
|
||||
for _, sidecar := range verifiedRoSidecars[1:] {
|
||||
if len(sidecar.Column) != blobCount {
|
||||
return nil, ErrColumnLengthsDiffer
|
||||
}
|
||||
@@ -51,8 +52,8 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
}
|
||||
|
||||
// Deduplicate sidecars.
|
||||
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(inVerifiedRoSidecars))
|
||||
for _, inVerifiedRoSidecar := range inVerifiedRoSidecars {
|
||||
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
|
||||
for _, inVerifiedRoSidecar := range verifiedRoSidecars {
|
||||
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
|
||||
}
|
||||
|
||||
@@ -62,12 +63,6 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// Sidecars are verified and are committed to the same block.
|
||||
// All signed block headers, KZG commitments, and inclusion proofs are the same.
|
||||
signedBlockHeader := referenceSidecar.SignedBlockHeader
|
||||
kzgCommitments := referenceSidecar.KzgCommitments
|
||||
kzgCommitmentsInclusionProof := referenceSidecar.KzgCommitmentsInclusionProof
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
var wg errgroup.Group
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
@@ -100,78 +95,20 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
|
||||
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
|
||||
}
|
||||
|
||||
outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
|
||||
outSidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars from items")
|
||||
}
|
||||
|
||||
// Input sidecars are verified, and we reconstructed the missing sidecars ourselves.
|
||||
// As a consequence, reconstructed sidecars are also verified.
|
||||
outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
|
||||
reconstructedVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
|
||||
for _, sidecar := range outSidecars {
|
||||
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new RO data column with root")
|
||||
}
|
||||
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
|
||||
outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(sidecar)
|
||||
reconstructedVerifiedRoSidecars = append(reconstructedVerifiedRoSidecars, verifiedRoSidecar)
|
||||
}
|
||||
|
||||
return outVerifiedRoSidecars, nil
|
||||
}
|
||||
|
||||
// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
|
||||
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
|
||||
// construct data column sidecars from data obtained from the execution client via:
|
||||
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
|
||||
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
|
||||
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
|
||||
// cell proofs are "flattened".
|
||||
func ConstructDataColumnSidecars(block interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Check if the cells count is equal to the cell proofs count.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
cellsCount := blobCount * numberOfColumns
|
||||
if cellsCount != cellProofsCount {
|
||||
return nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidcars")
|
||||
}
|
||||
|
||||
return dataColumnSidecars, nil
|
||||
return reconstructedVerifiedRoSidecars, nil
|
||||
}
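As a usage sketch, reconstruction is typically only attempted once at least MinimumColumnCountToReconstruct() verified column sidecars (in practice, half of the columns) have been gathered. The wrapper below is illustrative; only the peerdas and blocks identifiers appearing in this diff are real.

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/pkg/errors"
)

// recoverAllColumns returns the full set of column sidecars for a block,
// given enough verified sidecars to reconstruct the missing ones.
func recoverAllColumns(partial []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	if uint64(len(partial)) < peerdas.MinimumColumnCountToReconstruct() {
		return nil, errors.New("not enough verified columns to reconstruct")
	}
	return peerdas.ReconstructDataColumnSidecars(partial)
}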
|
||||
|
||||
// ReconstructBlobs constructs verified read-only blob sidecars from verified read-only data column sidecars.
|
||||
@@ -256,6 +193,89 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
|
||||
return blobSidecars, nil
|
||||
}
|
||||
|
||||
// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and flattened cell proofs.
|
||||
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
cellsCount := blobCount * numberOfColumns
|
||||
if cellsCount != cellProofsCount {
|
||||
return nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
}
|
||||
|
||||
return cellsAndProofs, nil
|
||||
}
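The "flat" layout expected here follows the BlobsBundleV2 shape: all cell proofs are concatenated blob by blob, so blob i owns proof indices i*NumberOfColumns through (i+1)*NumberOfColumns-1. A minimal sketch of producing that input from per-blob cells and proofs follows; the flattenProofs helper is illustrative, not part of the package.

// flattenProofs lays per-blob cell proofs out the way ComputeCellsAndProofsFromFlat expects:
// all of blob 0's proofs first, then blob 1's, and so on.
func flattenProofs(perBlob []kzg.CellsAndProofs) [][]byte {
	if len(perBlob) == 0 {
		return nil
	}
	flat := make([][]byte, 0, len(perBlob)*len(perBlob[0].Proofs))
	for _, cp := range perBlob {
		for _, proof := range cp.Proofs {
			flat = append(flat, proof[:])
		}
	}
	return flat
}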
|
||||
|
||||
// ComputeCellsAndProofsFromStructured computes the cells and proofs from structured blob-and-proof pairs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([]kzg.CellsAndProofs, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobsAndProofs))
|
||||
for _, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, ErrNilBlobAndProof
|
||||
}
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return nil, errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: kzgProofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
}
|
||||
|
||||
return cellsAndProofs, nil
|
||||
}
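A hedged sketch of the structured path, i.e. feeding an engine_getBlobsV2 response straight into sidecar construction. Only the peerdas, blocks and pb identifiers visible in this diff are real; the wrapper itself is an assumption.

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	"github.com/pkg/errors"
)

// sidecarsFromEngineBlobs converts the execution client's blob-and-proof response
// into read-only data column sidecars for the given block.
func sidecarsFromEngineBlobs(block blocks.ROBlock, blobsAndProofs []*pb.BlobAndProofV2) ([]blocks.RODataColumn, error) {
	cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
	if err != nil {
		return nil, errors.Wrap(err, "compute cells and proofs")
	}
	return peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(block))
}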
|
||||
|
||||
// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
|
||||
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
|
||||
referenceSidecar := dataColumnSidecars[0]
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/pkg/errors"
|
||||
@@ -124,50 +124,6 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestConstructDataColumnSidecars(t *testing.T) {
|
||||
const (
|
||||
blobCount = 3
|
||||
cellsPerBlob = fieldparams.CellsPerBlob
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, _, baseVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
// Extract blobs and proofs from the sidecars.
|
||||
blobs := make([][]byte, 0, blobCount)
|
||||
cellProofs := make([][]byte, 0, cellsPerBlob)
|
||||
for blobIndex := range blobCount {
|
||||
blob := make([]byte, 0, cellsPerBlob)
|
||||
for columnIndex := range cellsPerBlob {
|
||||
cell := baseVerifiedRoSidecars[columnIndex].Column[blobIndex]
|
||||
blob = append(blob, cell...)
|
||||
}
|
||||
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
for columnIndex := range numberOfColumns {
|
||||
cellProof := baseVerifiedRoSidecars[columnIndex].KzgProofs[blobIndex]
|
||||
cellProofs = append(cellProofs, cellProof)
|
||||
}
|
||||
}
|
||||
|
||||
actual, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Extract the base verified ro sidecars into sidecars.
|
||||
expected := make([]*ethpb.DataColumnSidecar, 0, len(baseVerifiedRoSidecars))
|
||||
for _, verifiedRoSidecar := range baseVerifiedRoSidecars {
|
||||
expected = append(expected, verifiedRoSidecar.DataColumnSidecar)
|
||||
}
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestReconstructBlobs(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
@@ -250,7 +206,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
// Compute cells and proofs from blob sidecars.
|
||||
var wg errgroup.Group
|
||||
blobs := make([][]byte, blobCount)
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
for i := range blobCount {
|
||||
blob := roBlobSidecars[i].Blob
|
||||
blobs[i] = blob
|
||||
@@ -267,7 +223,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
// It is safe for multiple goroutines to concurrently write to the same slice,
|
||||
// as long as they are writing to different indices, which is the case here.
|
||||
cellsAndProofs[i] = cp
|
||||
inputCellsAndProofs[i] = cp
|
||||
|
||||
return nil
|
||||
})
|
||||
@@ -278,25 +234,24 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
// Flatten proofs.
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
for _, cp := range cellsAndProofs {
|
||||
for _, cp := range inputCellsAndProofs {
|
||||
for _, proof := range cp.Proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Construct data column sidecars.
|
||||
// It is OK to use the public function `ConstructDataColumnSidecars`, as long as
|
||||
// `TestConstructDataColumnSidecars` tests pass.
|
||||
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Construct data column sidecars from the signed block and the cells and proofs.
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to verified data column sidecars.
|
||||
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roSidecar, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
|
||||
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
|
||||
for _, roDataColumnSidecar := range roDataColumnSidecars {
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
|
||||
verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
|
||||
}
|
||||
|
||||
@@ -339,3 +294,162 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("mismatched blob and proof counts", func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Create one blob but proofs for two blobs
|
||||
blobs := [][]byte{{}}
|
||||
|
||||
// Create proofs for 2 blobs worth of columns
|
||||
cellProofs := make([][]byte, 2*numberOfColumns)
|
||||
|
||||
_, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const blobCount = 2
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Generate test blobs
|
||||
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
// Extract blobs and compute expected cells and proofs
|
||||
blobs := make([][]byte, blobCount)
|
||||
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
var wg errgroup.Group
|
||||
|
||||
for i := range blobCount {
|
||||
blob := roBlobSidecars[i].Blob
|
||||
blobs[i] = blob
|
||||
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
count := copy(kzgBlob[:], blob)
|
||||
require.Equal(t, len(kzgBlob), count)
|
||||
|
||||
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
|
||||
}
|
||||
|
||||
expectedCellsAndProofs[i] = cp
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err := wg.Wait()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Flatten proofs
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
for _, cp := range expectedCellsAndProofs {
|
||||
for _, proof := range cp.Proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Test ComputeCellsAndProofs
|
||||
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blobCount, len(actualCellsAndProofs))
|
||||
|
||||
// Verify the results match expected
|
||||
for i := range blobCount {
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
|
||||
|
||||
// Compare cells
|
||||
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
|
||||
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
|
||||
}
|
||||
|
||||
// Compare proofs
|
||||
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
|
||||
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
t.Run("nil blob and proof", func(t *testing.T) {
|
||||
_, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
|
||||
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
const blobCount = 2
|
||||
|
||||
// Generate test blobs
|
||||
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
// Extract blobs and compute expected cells and proofs
|
||||
blobsAndProofs := make([]*pb.BlobAndProofV2, blobCount)
|
||||
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
|
||||
var wg errgroup.Group
|
||||
for i := range blobCount {
|
||||
blob := roBlobSidecars[i].Blob
|
||||
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
count := copy(kzgBlob[:], blob)
|
||||
require.Equal(t, len(kzgBlob), count)
|
||||
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
|
||||
}
|
||||
expectedCellsAndProofs[i] = cellsAndProofs
|
||||
|
||||
kzgProofs := make([][]byte, 0, len(cellsAndProofs.Proofs))
|
||||
for _, proof := range cellsAndProofs.Proofs {
|
||||
kzgProofs = append(kzgProofs, proof[:])
|
||||
}
|
||||
|
||||
blobAndProof := &pb.BlobAndProofV2{
|
||||
Blob: blob,
|
||||
KzgProofs: kzgProofs,
|
||||
}
|
||||
blobsAndProofs[i] = blobAndProof
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err = wg.Wait()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test ComputeCellsAndProofs
|
||||
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blobCount, len(actualCellsAndProofs))
|
||||
|
||||
// Verify the results match expected
|
||||
for i := range blobCount {
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
|
||||
|
||||
// Compare cells
|
||||
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
|
||||
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
|
||||
}
|
||||
|
||||
// Compare proofs
|
||||
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
|
||||
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,12 +1,76 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNilSignedBlockOrEmptyCellsAndProofs = errors.New("nil signed block or empty cells and proofs")
|
||||
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
|
||||
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
|
||||
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
|
||||
)
|
||||
|
||||
var (
|
||||
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
|
||||
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
|
||||
)
|
||||
|
||||
const (
|
||||
BlockType = "BeaconBlock"
|
||||
SidecarType = "DataColumnSidecar"
|
||||
)
|
||||
|
||||
type (
|
||||
// ConstructionPopulator is an interface satisfied by types that can supply, from a source
// such as a DataColumnSidecar or a BeaconBlock, the data column sidecar fields that cannot
// be obtained from the engine API.
|
||||
ConstructionPopulator interface {
|
||||
Slot() primitives.Slot
|
||||
Root() [fieldparams.RootLength]byte
|
||||
ProposerIndex() primitives.ValidatorIndex
|
||||
Commitments() ([][]byte, error)
|
||||
Type() string
|
||||
|
||||
extract() (*blockInfo, error)
|
||||
}
|
||||
|
||||
// BlockReconstructionSource is a ConstructionPopulator that uses a beacon block as the source of data
|
||||
BlockReconstructionSource struct {
|
||||
blocks.ROBlock
|
||||
}
|
||||
|
||||
// SidecarReconstructionSource is a ConstructionPopulator that uses a verified data column sidecar as the source of data
|
||||
SidecarReconstructionSource struct {
|
||||
blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
blockInfo struct {
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader
|
||||
kzgCommitments [][]byte
|
||||
kzgInclusionProof [][]byte
|
||||
}
|
||||
)
|
||||
|
||||
// PopulateFromBlock creates a BlockReconstructionSource from a beacon block
|
||||
func PopulateFromBlock(block blocks.ROBlock) *BlockReconstructionSource {
|
||||
return &BlockReconstructionSource{ROBlock: block}
|
||||
}
|
||||
|
||||
// PopulateFromSidecar creates a SidecarReconstructionSource from a data column sidecar
|
||||
func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstructionSource {
|
||||
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
|
||||
}
|
||||
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
@@ -28,3 +92,159 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
|
||||
count := totalNodeBalance / balancePerAdditionalCustodyGroup
|
||||
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
|
||||
}
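As a worked example (assuming the spec's mainnet values of VALIDATOR_CUSTODY_REQUIREMENT = 8, BALANCE_PER_ADDITIONAL_CUSTODY_GROUP = 32 ETH and NUMBER_OF_CUSTODY_GROUPS = 128): a node whose attached validators hold 2048 ETH of total effective balance works out to 2048 / 32 = 64, so it custodies min(max(64, 8), 128) = 64 custody groups, while a node with a single 32 ETH validator stays at the floor of 8.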
|
||||
|
||||
// DataColumnSidecars, given a ConstructionPopulator and the cells/proofs associated with each blob in the
|
||||
// block, assembles sidecars which can be distributed to peers.
|
||||
// This is an adapted version of
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars,
|
||||
// which is designed to be used both when constructing sidecars from a block and from a sidecar, replacing
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
|
||||
func DataColumnSidecars(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
|
||||
if len(rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
start := time.Now()
|
||||
cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rotate cells and proofs")
|
||||
}
|
||||
|
||||
maxIdx := params.BeaconConfig().NumberOfColumns
|
||||
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
|
||||
for idx := range maxIdx {
|
||||
info, err := src.extract()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "extract block info")
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: idx,
|
||||
Column: cells[idx],
|
||||
KzgCommitments: info.kzgCommitments,
|
||||
KzgProofs: proofs[idx],
|
||||
SignedBlockHeader: info.signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: info.kzgInclusionProof,
|
||||
}
|
||||
|
||||
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
|
||||
return nil, ErrSizeMismatch
|
||||
}
|
||||
|
||||
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new ro data column")
|
||||
}
|
||||
roSidecars = append(roSidecars, roSidecar)
|
||||
}
|
||||
|
||||
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
return roSidecars, nil
|
||||
}
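The same assembly routine serves both construction paths. A minimal sketch, within this package and with illustrative wrapper names, of how the two populators are intended to be used:

// buildFromBlock assembles sidecars for a block the node just produced or imported.
func buildFromBlock(rob blocks.ROBlock, rows []kzg.CellsAndProofs) ([]blocks.RODataColumn, error) {
	return DataColumnSidecars(rows, PopulateFromBlock(rob))
}

// buildFromSidecar re-derives the full set of sidecars from one verified sidecar,
// e.g. after recovering the missing cells during reconstruction.
func buildFromSidecar(ref blocks.VerifiedRODataColumn, rows []kzg.CellsAndProofs) ([]blocks.RODataColumn, error) {
	return DataColumnSidecars(rows, PopulateFromSidecar(ref))
}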
|
||||
|
||||
// Slot returns the slot of the source
|
||||
func (s *BlockReconstructionSource) Slot() primitives.Slot {
|
||||
return s.Block().Slot()
|
||||
}
|
||||
|
||||
// ProposerIndex returns the proposer index of the source
|
||||
func (s *BlockReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
|
||||
return s.Block().ProposerIndex()
|
||||
}
|
||||
|
||||
// Commitments returns the blob KZG commitments of the source
|
||||
func (s *BlockReconstructionSource) Commitments() ([][]byte, error) {
|
||||
c, err := s.Block().Body().BlobKzgCommitments()
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Type returns the type of the source
|
||||
func (s *BlockReconstructionSource) Type() string {
|
||||
return BlockType
|
||||
}
|
||||
|
||||
// extract extracts the block information from the source
|
||||
func (b *BlockReconstructionSource) extract() (*blockInfo, error) {
|
||||
block := b.Block()
|
||||
|
||||
header, err := b.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "header")
|
||||
}
|
||||
|
||||
commitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "commitments")
|
||||
}
|
||||
|
||||
inclusionProof, err := blocks.MerkleProofKZGCommitments(block.Body())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof kzg commitments")
|
||||
}
|
||||
|
||||
info := &blockInfo{
|
||||
signedBlockHeader: header,
|
||||
kzgCommitments: commitments,
|
||||
kzgInclusionProof: inclusionProof,
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// rotateRowsToCols takes a slice of cells and proofs indexed by row (blob) and then by column,
// and returns cell and proof slices indexed by column and then by row.
|
||||
func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) {
|
||||
if len(rows) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
cellCols := make([][][]byte, numCols)
|
||||
proofCols := make([][][]byte, numCols)
|
||||
for i, cp := range rows {
|
||||
if uint64(len(cp.Cells)) != numCols {
|
||||
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells")
|
||||
}
|
||||
if len(cp.Cells) != len(cp.Proofs) {
|
||||
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
|
||||
}
|
||||
for j := uint64(0); j < numCols; j++ {
|
||||
if i == 0 {
|
||||
cellCols[j] = make([][]byte, len(rows))
|
||||
proofCols[j] = make([][]byte, len(rows))
|
||||
}
|
||||
cellCols[j][i] = cp.Cells[j][:]
|
||||
proofCols[j][i] = cp.Proofs[j][:]
|
||||
}
|
||||
}
|
||||
return cellCols, proofCols, nil
|
||||
}
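For intuition, with two blobs and four columns the rotation performed above turns per-blob rows into per-column slices like this (illustrative values):

// rows (input, one entry per blob):                      columns (output, one entry per sidecar index):
//   blob 0: cells [a0 a1 a2 a3], proofs [p0 p1 p2 p3]      column 0: cells [a0 b0], proofs [p0 q0]
//   blob 1: cells [b0 b1 b2 b3], proofs [q0 q1 q2 q3]      column 1: cells [a1 b1], proofs [p1 q1]
//                                                           column 2: cells [a2 b2], proofs [p2 q2]
//                                                           column 3: cells [a3 b3], proofs [p3 q3]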
|
||||
|
||||
// Root returns the block root of the source
|
||||
func (s *SidecarReconstructionSource) Root() [fieldparams.RootLength]byte {
|
||||
return s.BlockRoot()
|
||||
}
|
||||
|
||||
// Commitments returns the blob KZG commitments of the source
|
||||
func (s *SidecarReconstructionSource) Commitments() ([][]byte, error) {
|
||||
return s.KzgCommitments, nil
|
||||
}
|
||||
|
||||
// Type returns the type of the source
|
||||
func (s *SidecarReconstructionSource) Type() string {
|
||||
return SidecarType
|
||||
}
|
||||
|
||||
// extract extracts the block information from the source
|
||||
func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
|
||||
info := &blockInfo{
|
||||
signedBlockHeader: s.SignedBlockHeader,
|
||||
kzgCommitments: s.KzgCommitments,
|
||||
kzgInclusionProof: s.KzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -3,11 +3,15 @@ package peerdas_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
@@ -53,3 +57,218 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
|
||||
Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
|
||||
})
|
||||
|
||||
t.Run("cells array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with insufficient cells for the number of columns.
|
||||
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, 10), // Only 10 cells
|
||||
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail because the function will try to access columns up to NumberOfColumns
|
||||
// but we only have 10 cells/proofs.
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
t.Run("proofs array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail when trying to access proof beyond index 4.
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
_, err = peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
require.ErrorContains(t, "not enough proofs", err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
commitment2 := make([]byte, 48)
|
||||
|
||||
// Set different values to distinguish commitments
|
||||
commitment1[0] = 0x01
|
||||
commitment2[0] = 0x02
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
// Set distinct values in cells and proofs for testing
|
||||
for i := range numberOfColumns {
|
||||
cellsAndProofs[0].Cells[i][0] = byte(i)
|
||||
cellsAndProofs[0].Proofs[i][0] = byte(i)
|
||||
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
|
||||
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sidecars)
|
||||
require.Equal(t, int(numberOfColumns), len(sidecars))
|
||||
|
||||
// Verify each sidecar has the expected structure
|
||||
for i, sidecar := range sidecars {
|
||||
require.Equal(t, uint64(i), sidecar.Index)
|
||||
require.Equal(t, 2, len(sidecar.Column))
|
||||
require.Equal(t, 2, len(sidecar.KzgCommitments))
|
||||
require.Equal(t, 2, len(sidecar.KzgProofs))
|
||||
|
||||
// Verify commitments match what we set
|
||||
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
|
||||
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
|
||||
|
||||
// Verify column data comes from the correct cells
|
||||
require.Equal(t, byte(i), sidecar.Column[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.Column[1][0])
|
||||
|
||||
// Verify proofs come from the correct proofs
|
||||
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
|
||||
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
|
||||
}
|
||||
})
|
||||
}
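
The nominal case above exercises the core regrouping that peerdas.DataColumnSidecars performs: cells and proofs arrive grouped per blob (one row of NumberOfColumns cells per commitment) and leave grouped per column (one sidecar per column index, carrying one cell and one proof per blob). The following stand-alone sketch illustrates only that indexing, with placeholder types instead of Prysm's kzg/blocks packages; it is not the actual implementation and is not part of the diff.

package main

import "fmt"

// Placeholder types standing in for kzg.Cell and kzg.Proof.
type cell [4]byte
type proof [4]byte

// blobCellsAndProofs mirrors the per-blob input grouping:
// row i holds the cells/proofs of blob i across every column.
type blobCellsAndProofs struct {
	cells  []cell
	proofs []proof
}

// columnSidecar mirrors the per-column output grouping:
// entry j of each slice belongs to blob j.
type columnSidecar struct {
	index  uint64
	column []cell
	proofs []proof
}

// transpose regroups blob-indexed rows into column-indexed sidecars.
func transpose(rows []blobCellsAndProofs, numberOfColumns uint64) ([]columnSidecar, error) {
	sidecars := make([]columnSidecar, 0, numberOfColumns)
	for col := uint64(0); col < numberOfColumns; col++ {
		sc := columnSidecar{index: col}
		for _, row := range rows {
			if uint64(len(row.cells)) <= col || uint64(len(row.proofs)) <= col {
				return nil, fmt.Errorf("not enough cells or proofs for column %d", col)
			}
			sc.column = append(sc.column, row.cells[col])
			sc.proofs = append(sc.proofs, row.proofs[col])
		}
		sidecars = append(sidecars, sc)
	}
	return sidecars, nil
}

func main() {
	const columns = 4 // stand-in for params.BeaconConfig().NumberOfColumns
	rows := []blobCellsAndProofs{
		{cells: make([]cell, columns), proofs: make([]proof, columns)},
		{cells: make([]cell, columns), proofs: make([]proof, columns)},
	}
	sidecars, err := transpose(rows, columns)
	if err != nil {
		panic(err)
	}
	// Each of the 4 sidecars carries 2 cells and 2 proofs, one per blob.
	fmt.Println(len(sidecars), len(sidecars[0].column), len(sidecars[0].proofs))
}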
|
||||
|
||||
func TestReconstructionSource(t *testing.T) {
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
commitment2 := make([]byte, 48)
|
||||
|
||||
// Set different values to distinguish commitments
|
||||
commitment1[0] = 0x01
|
||||
commitment2[0] = 0x02
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, numberOfColumns),
|
||||
},
|
||||
}
|
||||
|
||||
// Set distinct values in cells and proofs for testing
|
||||
for i := range numberOfColumns {
|
||||
cellsAndProofs[0].Cells[i][0] = byte(i)
|
||||
cellsAndProofs[0].Proofs[i][0] = byte(i)
|
||||
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
|
||||
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sidecars)
|
||||
require.Equal(t, int(numberOfColumns), len(sidecars))
|
||||
|
||||
t.Run("from block", func(t *testing.T) {
|
||||
src := peerdas.PopulateFromBlock(rob)
|
||||
require.Equal(t, rob.Block().Slot(), src.Slot())
|
||||
require.Equal(t, rob.Root(), src.Root())
|
||||
require.Equal(t, rob.Block().ProposerIndex(), src.ProposerIndex())
|
||||
|
||||
commitments, err := src.Commitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(commitments))
|
||||
require.DeepEqual(t, commitment1, commitments[0])
|
||||
require.DeepEqual(t, commitment2, commitments[1])
|
||||
|
||||
require.Equal(t, peerdas.BlockType, src.Type())
|
||||
})
|
||||
|
||||
t.Run("from sidecar", func(t *testing.T) {
|
||||
referenceSidecar := blocks.NewVerifiedRODataColumn(sidecars[0])
|
||||
src := peerdas.PopulateFromSidecar(referenceSidecar)
|
||||
require.Equal(t, referenceSidecar.Slot(), src.Slot())
|
||||
require.Equal(t, referenceSidecar.BlockRoot(), src.Root())
|
||||
require.Equal(t, referenceSidecar.ProposerIndex(), src.ProposerIndex())
|
||||
|
||||
commitments, err := src.Commitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(commitments))
|
||||
require.DeepEqual(t, commitment1, commitments[0])
|
||||
require.DeepEqual(t, commitment2, commitments[1])
|
||||
|
||||
require.Equal(t, peerdas.SidecarType, src.Type())
|
||||
})
|
||||
}
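
The two subtests above pin down the accessor surface a construction populator exposes regardless of whether it is backed by a block or by a reference sidecar. Purely as an orientation aid (not part of the change), here is a hypothetical shape inferred from the methods exercised here; the real peerdas.ConstructionPopulator uses Prysm's primitives types and may declare additional methods.

package sketch

// constructionPopulator is a hypothetical interface inferred from the tests above.
type constructionPopulator interface {
	Slot() uint64                   // slot of the source block or sidecar
	ProposerIndex() uint64          // proposer of the source block
	Root() [32]byte                 // block root the produced sidecars refer to
	Commitments() ([][]byte, error) // blob KZG commitments to build columns for
	Type() populatorType            // block-backed or sidecar-backed source
}

// populatorType mirrors the distinction between peerdas.BlockType and peerdas.SidecarType.
type populatorType int

const (
	blockPopulator populatorType = iota
	sidecarPopulator
)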
|
||||
|
||||
@@ -259,7 +259,6 @@ func (dcs *DataColumnStorage) Summary(root [fieldparams.RootLength]byte) DataCol
|
||||
}
|
||||
|
||||
// Save saves data column sidecars into the database and asynchronously performs pruning.
|
||||
// The returned channel is closed when the pruning is complete.
|
||||
func (dcs *DataColumnStorage) Save(dataColumnSidecars []blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
@@ -103,6 +104,7 @@ go_test(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -101,10 +102,7 @@ const (
|
||||
defaultEngineTimeout = time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
|
||||
)
|
||||
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
|
||||
// ForkchoiceUpdatedResponse is the response kind received by the
|
||||
// engine_forkchoiceUpdatedV1 endpoint.
|
||||
@@ -123,7 +121,7 @@ type Reconstructor interface {
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
|
||||
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -651,22 +649,40 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
	block := signedROBlock.Block()
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
	root := populator.Root()

	log := log.WithFields(logrus.Fields{
		"root": fmt.Sprintf("%#x", blockRoot),
		"slot": block.Slot(),
	})

	kzgCommitments, err := block.Body().BlobKzgCommitments()
	// Fetch cells and proofs from the execution client using the KZG commitments from the populator.
	commitments, err := populator.Commitments()
	if err != nil {
		return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
		return nil, wrapWithBlockRoot(err, root, "commitments")
	}

	cellsAndProofs, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
	if err != nil {
		return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
	}

	// Return early if nothing is returned from the EL.
	if len(cellsAndProofs) == 0 {
		return nil, nil
	}

	// Construct data column sidecars from the cells and proofs and the populator.
	roSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, populator)
	if err != nil {
		return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from populator")
	}

	// Upgrade the sidecars to verified sidecars.
	// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
	verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)

	return verifiedROSidecars, nil
}
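
For orientation (illustrative sketch, not part of the change): a caller might drive the new populator-based entry point roughly as follows, assuming roBlock is a blocks.ROBlock for a Fulu block with blob commitments and s is a *Service connected to an EL that supports engine_getBlobsV2.

// constructColumnsSketch is a hypothetical helper illustrating the call flow above.
func constructColumnsSketch(ctx context.Context, s *Service, roBlock blocks.ROBlock) ([]blocks.VerifiedRODataColumn, error) {
	populator := peerdas.PopulateFromBlock(roBlock)
	verifiedSidecars, err := s.ConstructDataColumnSidecars(ctx, populator)
	if err != nil {
		// Errors come back wrapped with the block root, e.g.
		// "fetch cells and proofs from execution client for block 0x...".
		return nil, err
	}
	// An empty slice means the EL had none of the requested blobs; callers decide how to fall back.
	return verifiedSidecars, nil
}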
|
||||
|
||||
// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method)
|
||||
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([]kzg.CellsAndProofs, error) {
|
||||
// Collect KZG hashes for all blobs.
|
||||
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
|
||||
for _, commitment := range kzgCommitments {
|
||||
@@ -677,47 +693,32 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
|
||||
// Fetch all blobsAndCellsProofs from the execution client.
|
||||
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
|
||||
return nil, errors.Wrapf(err, "get blobs V2")
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(blobAndProofV2s) == 0 {
|
||||
log.Debug("No blobs returned from execution client")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Extract the blobs and proofs from the blobAndProofV2s.
|
||||
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
|
||||
for _, blobsAndProofs := range blobAndProofV2s {
|
||||
if blobsAndProofs == nil {
|
||||
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
|
||||
}
|
||||
|
||||
blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
|
||||
}
|
||||
|
||||
// Construct the data column sidecars from the blobs and cell proofs provided by the execution client.
|
||||
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
|
||||
return nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
|
||||
// Finally, construct verified RO data column sidecars.
|
||||
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
|
||||
}
|
||||
return cellsAndProofs, nil
|
||||
}
|
||||
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
func upgradeSidecarsToVerifiedSidecars(roSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
	for _, roSidecar := range roSidecars {
		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
		verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
	}

	log.Debug("Data columns successfully reconstructed from the execution client")

	return verifiedRODataColumns, nil
	return verifiedRODataColumns
}
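
The reshaping done by fetchCellsAndProofsFromExecution above is easy to lose in the diff noise: engine_getBlobsV2 returns one entry per versioned hash, each carrying a blob plus that blob's cell proofs, and the helper turns those into the per-blob CellsAndProofs rows consumed by peerdas.DataColumnSidecars. Below is a stand-alone sketch of that shape only; computeCells is a placeholder for the real cell computation (what peerdas.ComputeCellsAndProofsFromStructured ultimately relies on), and none of this is part of the diff.

package main

import "fmt"

const cellsPerBlob = 128 // stand-in for the number of cells per extended blob

type blobAndProofV2 struct {
	blob      []byte
	kzgProofs [][]byte // one proof per cell
}

type cellsAndProofs struct {
	cells  [][]byte
	proofs [][]byte
}

// computeCells is a placeholder: the real code erasure-extends the blob into
// cellsPerBlob cells via the KZG library; here we just fabricate empty cells.
func computeCells(blob []byte) [][]byte {
	cells := make([][]byte, cellsPerBlob)
	for i := range cells {
		cells[i] = make([]byte, 4)
	}
	return cells
}

// regroup pairs each blob's computed cells with the cell proofs returned by the EL.
func regroup(entries []blobAndProofV2) ([]cellsAndProofs, error) {
	out := make([]cellsAndProofs, 0, len(entries))
	for i, e := range entries {
		if e.blob == nil {
			return nil, fmt.Errorf("entry %d is missing its blob", i)
		}
		if len(e.kzgProofs) != cellsPerBlob {
			return nil, fmt.Errorf("entry %d has %d proofs, want %d", i, len(e.kzgProofs), cellsPerBlob)
		}
		out = append(out, cellsAndProofs{cells: computeCells(e.blob), proofs: e.kzgProofs})
	}
	return out, nil
}

func main() {
	entries := []blobAndProofV2{{blob: make([]byte, 8), kzgProofs: make([][]byte, cellsPerBlob)}}
	rows, err := regroup(entries)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(rows), len(rows[0].cells), len(rows[0].proofs)) // 1 128 128
}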
|
||||
|
||||
func fullPayloadFromPayloadBody(
|
||||
@@ -1009,6 +1010,6 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
|
||||
// wrapWithBlockRoot returns a new error with the given block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
func wrapWithBlockRoot(err error, blockRoot [fieldparams.RootLength]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -2556,7 +2557,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
func TestConstructDataColumnSidecars(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
@@ -2580,11 +2581,14 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(sb, r)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "get blobs V2 for block", err)
|
||||
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("nothing received", func(t *testing.T) {
|
||||
@@ -2594,7 +2598,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
@@ -2607,23 +2611,22 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("missing some blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{false, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
// t.Run("missing some blobs", func(t *testing.T) {
|
||||
// blobMasks := []bool{false, true, true, true, true, true}
|
||||
// srv := createBlobServerV2(t, 6, blobMasks)
|
||||
// defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
// rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
// defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
// _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
// require.ErrorContains(t, "fetch cells and proofs from execution client", err)
|
||||
// })
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/execution/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -116,7 +117,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
|
||||
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
|
||||
@@ -463,22 +463,20 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
|
||||
}
|
||||
|
||||
// InsertChain inserts all nodes corresponding to blocks in the slice
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also, we assume that the parent of the
// last block in this list is already included in forkchoice store.
// `blocks`. This slice must be ordered in increasing slot order and
// each consecutive entry must be a child of the previous one.
// The parent of the first block in this list must already be present in forkchoice.
func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
	if len(chain) == 0 {
		return nil
	}
	for i := len(chain) - 1; i > 0; i-- {
	for _, bcp := range chain {
		if _, err := f.store.insert(ctx,
			chain[i].Block,
			chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
			bcp.Block,
			bcp.JustifiedCheckpoint.Epoch, bcp.FinalizedCheckpoint.Epoch); err != nil {
			return err
		}
		if err := f.updateCheckpoints(ctx, chain[i].JustifiedCheckpoint, chain[i].FinalizedCheckpoint); err != nil {
		if err := f.updateCheckpoints(ctx, bcp.JustifiedCheckpoint, bcp.FinalizedCheckpoint); err != nil {
			return err
		}
	}
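
The contract change above inverts the expected ordering: callers now hand InsertChain a slice sorted by increasing slot where each entry is the child of the one before it, and the first entry's parent must already be known to forkchoice. Purely as an illustration (not part of the change), a caller could enforce that contract with a pre-flight check like the hypothetical helper below; the interface and fake block are simplified stand-ins for the block type actually held by forkchoicetypes.BlockAndCheckpoints.

package main

import "fmt"

// roBlock captures only the accessors the check needs; adapt to the real block interfaces.
type roBlock interface {
	Slot() uint64
	ParentRoot() [32]byte
	Root() [32]byte
}

// validateChainOrder is a hypothetical pre-flight helper enforcing the new InsertChain
// contract: strictly increasing slots and consecutive parent/child links.
func validateChainOrder(chain []roBlock) error {
	for i := 1; i < len(chain); i++ {
		prev, cur := chain[i-1], chain[i]
		if cur.Slot() <= prev.Slot() {
			return fmt.Errorf("entry %d does not increase in slot", i)
		}
		if cur.ParentRoot() != prev.Root() {
			return fmt.Errorf("entry %d is not a child of entry %d", i, i-1)
		}
	}
	return nil
}

// fakeBlock is a trivial stand-in used only to exercise the helper.
type fakeBlock struct {
	slot   uint64
	root   [32]byte
	parent [32]byte
}

func (b fakeBlock) Slot() uint64         { return b.slot }
func (b fakeBlock) ParentRoot() [32]byte { return b.parent }
func (b fakeBlock) Root() [32]byte       { return b.root }

func main() {
	genesis := fakeBlock{slot: 1, root: [32]byte{1}}
	child := fakeBlock{slot: 2, root: [32]byte{2}, parent: genesis.root}
	fmt.Println(validateChainOrder([]roBlock{genesis, child})) // <nil>
}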
|
||||
|
||||
@@ -630,14 +630,15 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
}
|
||||
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
|
||||
for i := 0; i < len(blks); i++ {
|
||||
args[i] = blks[10-i-1]
|
||||
}
|
||||
require.NoError(t, f.InsertChain(t.Context(), args))
|
||||
// InsertChain now expects blocks in increasing slot order
|
||||
require.NoError(t, f.InsertChain(t.Context(), blks))
|
||||
|
||||
// Test partial insertion: first insert the foundation blocks, then a subset
|
||||
f = setup(1, 1)
|
||||
require.NoError(t, f.InsertChain(t.Context(), args[2:]))
|
||||
// Insert first 2 blocks to establish a chain from genesis
|
||||
require.NoError(t, f.InsertChain(t.Context(), blks[:2]))
|
||||
// Then insert the remaining blocks
|
||||
require.NoError(t, f.InsertChain(t.Context(), blks[2:]))
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateCheckpoints(t *testing.T) {
|
||||
|
||||
@@ -3,11 +3,12 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cache.go",
|
||||
"helpers.go",
|
||||
"lightclient.go",
|
||||
"store.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
@@ -38,25 +39,30 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"cache_test.go",
|
||||
"lightclient_test.go",
|
||||
"store_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/light-client:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/light-client/cache.go (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
package light_client

import (
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// cache tracks LC data over the non-finalized chain for different branches.
type cache struct {
	items map[[32]byte]*cacheItem
}

// cacheItem represents the LC data for a block. It tracks the best update and finality update seen in that branch.
type cacheItem struct {
	parent             *cacheItem      // parent item in the cache, can be nil
	period             uint64          // sync committee period
	slot               primitives.Slot // slot of the signature block
	bestUpdate         interfaces.LightClientUpdate
	bestFinalityUpdate interfaces.LightClientFinalityUpdate
}

func newLightClientCache() *cache {
	return &cache{
		items: make(map[[32]byte]*cacheItem),
	}
}
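
Each cacheItem links to its parent item on the same branch, so walking from a branch head towards the finalized boundary yields the accumulated best update per sync committee period. The condensed, self-contained illustration below mirrors that traversal (the production versions are Store.getCacheUpdatesByPeriod and MigrateToCold further down); the types are simplified stand-ins and the snippet is not part of the diff.

package main

import "fmt"

// item is a stripped-down stand-in for cacheItem: a parent link, the sync committee
// period, and a best update represented here as a plain string.
type item struct {
	parent     *item
	period     uint64
	bestUpdate string
}

// bestUpdatesByPeriod walks from a branch head towards the finalized boundary and keeps
// the first update seen for each period, i.e. the accumulated best one on that branch.
func bestUpdatesByPeriod(head *item) map[uint64]string {
	out := make(map[uint64]string)
	for cur := head; cur != nil; cur = cur.parent {
		if _, seen := out[cur.period]; !seen {
			out[cur.period] = cur.bestUpdate
		}
	}
	return out
}

func main() {
	// Two items in period 4 and one in period 5, linked child -> parent.
	a := &item{period: 4, bestUpdate: "older period-4 update"}
	b := &item{parent: a, period: 4, bestUpdate: "accumulated best period-4 update"}
	c := &item{parent: b, period: 5, bestUpdate: "period-5 update"}
	fmt.Println(bestUpdatesByPeriod(c)) // map[4:accumulated best period-4 update 5:period-5 update]
}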
|
||||
beacon-chain/light-client/cache_test.go (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
package light_client
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestLCCache(t *testing.T) {
|
||||
lcCache := newLightClientCache()
|
||||
require.NotNil(t, lcCache)
|
||||
|
||||
item := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: nil,
|
||||
bestFinalityUpdate: nil,
|
||||
}
|
||||
|
||||
blkRoot := [32]byte{4, 5, 6}
|
||||
|
||||
lcCache.items[blkRoot] = item
|
||||
|
||||
require.Equal(t, item, lcCache.items[blkRoot], "Expected to find the item in the cache")
|
||||
}
|
||||
@@ -592,6 +592,10 @@ func HasFinality(update interfaces.LightClientUpdate) (bool, error) {
|
||||
}
|
||||
|
||||
func IsBetterUpdate(newUpdate, oldUpdate interfaces.LightClientUpdate) (bool, error) {
|
||||
if oldUpdate == nil || oldUpdate.IsNil() {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
maxActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Len()
|
||||
newNumActiveParticipants := newUpdate.SyncAggregate().SyncCommitteeBits.Count()
|
||||
oldNumActiveParticipants := oldUpdate.SyncAggregate().SyncCommitteeBits.Count()
|
||||
@@ -778,7 +782,7 @@ func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClie
|
||||
// This does not concern broadcasting, but rather the decision of whether to save the new update.
|
||||
// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
|
||||
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
|
||||
if oldUpdate == nil {
|
||||
if oldUpdate == nil || oldUpdate.IsNil() {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -804,7 +808,7 @@ func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityU
|
||||
}
|
||||
|
||||
func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
|
||||
if oldUpdate == nil {
|
||||
if oldUpdate == nil || oldUpdate.IsNil() {
|
||||
return true
|
||||
}
|
||||
// The attested_header.beacon.slot is greater than that of all previously forwarded optimistic updates
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
light_client "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
consensustypes "github.com/OffchainLabs/prysm/v6/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
beacon-chain/light-client/store.go (new file, 383 lines)
@@ -0,0 +1,383 @@
|
||||
package light_client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var ErrLightClientBootstrapNotFound = errors.New("light client bootstrap not found")
|
||||
|
||||
type Store struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
beaconDB iface.HeadAccessDatabase
|
||||
lastFinalityUpdate interfaces.LightClientFinalityUpdate // tracks the best finality update seen so far
|
||||
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
|
||||
p2p p2p.Accessor
|
||||
stateFeed event.SubscriberSender
|
||||
cache *cache // non finality cache
|
||||
}
|
||||
|
||||
func NewLightClientStore(p p2p.Accessor, e event.SubscriberSender, db iface.HeadAccessDatabase) *Store {
|
||||
return &Store{
|
||||
beaconDB: db,
|
||||
p2p: p,
|
||||
stateFeed: e,
|
||||
cache: newLightClientCache(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store) SaveLCData(ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
headBlockRoot [32]byte) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// compute required data
|
||||
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create light client update")
|
||||
}
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create light client finality update")
|
||||
}
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create light client optimistic update")
|
||||
}
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
blockRoot, err := attestedBlock.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to compute attested block root")
|
||||
}
|
||||
parentRoot := [32]byte(update.AttestedHeader().Beacon().ParentRoot)
|
||||
signatureBlockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to compute signature block root")
|
||||
}
|
||||
|
||||
newBlockIsHead := signatureBlockRoot == headBlockRoot
|
||||
|
||||
// create the new cache item
|
||||
newCacheItem := &cacheItem{
|
||||
period: period,
|
||||
slot: attestedBlock.Block().Slot(),
|
||||
}
|
||||
|
||||
// check if parent exists in cache
|
||||
parentItem, ok := s.cache.items[parentRoot]
|
||||
if ok {
|
||||
newCacheItem.parent = parentItem
|
||||
} else {
|
||||
// if not, create an item for the parent; we don't need to save it, since it only carries the accumulated best update and is used for comparison
|
||||
bestUpdateSoFar, err := s.beaconDB.LightClientUpdate(ctx, period)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get best light client update for period %d", period)
|
||||
}
|
||||
parentItem = &cacheItem{
|
||||
period: period,
|
||||
bestUpdate: bestUpdateSoFar,
|
||||
bestFinalityUpdate: s.lastFinalityUpdate,
|
||||
}
|
||||
}
|
||||
|
||||
// if at a period boundary, no need to compare data, just save new ones
|
||||
if parentItem.period != period {
|
||||
newCacheItem.bestUpdate = update
|
||||
newCacheItem.bestFinalityUpdate = finalityUpdate
|
||||
s.cache.items[blockRoot] = newCacheItem
|
||||
|
||||
s.setLastOptimisticUpdate(optimisticUpdate, true)
|
||||
|
||||
// if the new block is not head, we don't want to change our lastFinalityUpdate
|
||||
if newBlockIsHead {
|
||||
s.setLastFinalityUpdate(finalityUpdate, true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// if in the same period, compare updates
|
||||
isUpdateBetter, err := IsBetterUpdate(update, parentItem.bestUpdate)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not compare light client updates")
|
||||
}
|
||||
if isUpdateBetter {
|
||||
newCacheItem.bestUpdate = update
|
||||
} else {
|
||||
newCacheItem.bestUpdate = parentItem.bestUpdate
|
||||
}
|
||||
|
||||
isBetterFinalityUpdate := IsBetterFinalityUpdate(finalityUpdate, parentItem.bestFinalityUpdate)
|
||||
if isBetterFinalityUpdate {
|
||||
newCacheItem.bestFinalityUpdate = finalityUpdate
|
||||
} else {
|
||||
newCacheItem.bestFinalityUpdate = parentItem.bestFinalityUpdate
|
||||
}
|
||||
|
||||
// save new item in cache
|
||||
s.cache.items[blockRoot] = newCacheItem
|
||||
|
||||
// save lastOptimisticUpdate if better
|
||||
if isBetterOptimisticUpdate := IsBetterOptimisticUpdate(optimisticUpdate, s.lastOptimisticUpdate); isBetterOptimisticUpdate {
|
||||
s.setLastOptimisticUpdate(optimisticUpdate, true)
|
||||
}
|
||||
|
||||
// if the new block is considered the head, set the last finality update
|
||||
if newBlockIsHead {
|
||||
s.setLastFinalityUpdate(newCacheItem.bestFinalityUpdate, isBetterFinalityUpdate)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot [32]byte) (interfaces.LightClientBootstrap, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Fetch the light client bootstrap from the database
|
||||
bootstrap, err := s.beaconDB.LightClientBootstrap(ctx, blockRoot[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if bootstrap == nil { // not found
|
||||
return nil, ErrLightClientBootstrapNotFound
|
||||
}
|
||||
|
||||
return bootstrap, nil
|
||||
}
|
||||
|
||||
func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot [32]byte, state state.BeaconState) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
blk, err := s.beaconDB.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to fetch block for root %x", blockRoot)
|
||||
}
|
||||
if blk == nil {
|
||||
return errors.Errorf("nil block for root %x", blockRoot)
|
||||
}
|
||||
|
||||
bootstrap, err := NewLightClientBootstrapFromBeaconState(ctx, state.Slot(), state, blk)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create light client bootstrap for block root %x", blockRoot)
|
||||
}
|
||||
|
||||
// Save the light client bootstrap to the database
|
||||
if err := s.beaconDB.SaveLightClientBootstrap(ctx, blockRoot[:], bootstrap); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64, headBlock interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.LightClientUpdate, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
// Fetch the light client updatesMap from the database
|
||||
updatesMap, err := s.beaconDB.LightClientUpdates(ctx, startPeriod, endPeriod)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get updates from the database")
|
||||
}
|
||||
|
||||
cacheUpdatesByPeriod, err := s.getCacheUpdatesByPeriod(headBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get updates from cache")
|
||||
}
|
||||
|
||||
for period, update := range cacheUpdatesByPeriod {
|
||||
updatesMap[period] = update
|
||||
}
|
||||
|
||||
var updates []interfaces.LightClientUpdate
|
||||
|
||||
for i := startPeriod; i <= endPeriod; i++ {
|
||||
update, ok := updatesMap[i]
|
||||
if !ok {
|
||||
// Only return the first contiguous range of updates
|
||||
break
|
||||
}
|
||||
updates = append(updates, update)
|
||||
}
|
||||
|
||||
return updates, nil
|
||||
}
|
||||
|
||||
func (s *Store) LightClientUpdate(ctx context.Context, period uint64, headBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.LightClientUpdate, error) {
|
||||
// we don't need to lock here because the LightClientUpdates method locks the store
|
||||
updates, err := s.LightClientUpdates(ctx, period, period, headBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get light client update for period %d", period)
|
||||
}
|
||||
if len(updates) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return updates[0], nil
|
||||
}
|
||||
|
||||
func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaconBlock) (map[uint64]interfaces.LightClientUpdate, error) {
|
||||
updatesByPeriod := make(map[uint64]interfaces.LightClientUpdate)
|
||||
|
||||
cacheHeadRoot := headBlock.Block().ParentRoot()
|
||||
|
||||
cacheHeadItem, ok := s.cache.items[cacheHeadRoot]
|
||||
if !ok {
|
||||
log.Debugf("Head root %x not found in light client cache. Returning empty updates map for non finality cache.", cacheHeadRoot)
|
||||
return updatesByPeriod, nil
|
||||
}
|
||||
|
||||
for cacheHeadItem != nil {
|
||||
if _, exists := updatesByPeriod[cacheHeadItem.period]; !exists {
|
||||
updatesByPeriod[cacheHeadItem.period] = cacheHeadItem.bestUpdate
|
||||
}
|
||||
cacheHeadItem = cacheHeadItem.parent
|
||||
}
|
||||
|
||||
return updatesByPeriod, nil
|
||||
}
|
||||
|
||||
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.setLastFinalityUpdate(update, broadcast)
|
||||
}
|
||||
|
||||
func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
|
||||
if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
|
||||
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
|
||||
log.WithError(err).Error("Could not broadcast light client finality update")
|
||||
}
|
||||
}
|
||||
|
||||
s.lastFinalityUpdate = update
|
||||
log.Debug("Saved new light client finality update")
|
||||
|
||||
s.stateFeed.Send(&feed.Event{
|
||||
Type: statefeed.LightClientFinalityUpdate,
|
||||
Data: update,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.lastFinalityUpdate
|
||||
}
|
||||
|
||||
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.setLastOptimisticUpdate(update, broadcast)
|
||||
}
|
||||
|
||||
func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
|
||||
if broadcast {
|
||||
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
|
||||
log.WithError(err).Error("Could not broadcast light client optimistic update")
|
||||
}
|
||||
}
|
||||
|
||||
s.lastOptimisticUpdate = update
|
||||
log.Debug("Saved new light client optimistic update")
|
||||
|
||||
s.stateFeed.Send(&feed.Event{
|
||||
Type: statefeed.LightClientOptimisticUpdate,
|
||||
Data: update,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.lastOptimisticUpdate
|
||||
}
|
||||
|
||||
func (s *Store) MigrateToCold(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// If the cache is empty (some problem in processing data), we can skip migration.
|
||||
// This is a safety check and should not happen in normal operation.
|
||||
if len(s.cache.items) == 0 {
|
||||
log.Debug("Non-finality cache is empty. Skipping migration.")
|
||||
return nil
|
||||
}
|
||||
|
||||
blk, err := s.beaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to fetch block for finalized root %x", finalizedRoot)
|
||||
}
|
||||
if blk == nil {
|
||||
return errors.Errorf("nil block for finalized root %x", finalizedRoot)
|
||||
}
|
||||
finalizedSlot := blk.Block().Slot()
|
||||
finalizedCacheHeadRoot := blk.Block().ParentRoot()
|
||||
|
||||
var finalizedCacheHead *cacheItem
|
||||
var ok bool
|
||||
|
||||
finalizedCacheHead, ok = s.cache.items[finalizedCacheHeadRoot]
|
||||
if !ok {
|
||||
log.Debugf("Finalized block's parent root %x not found in light client cache. Cleaning the broken part of the cache.", finalizedCacheHeadRoot)
|
||||
|
||||
// delete non-finality cache items older than finalized slot
|
||||
s.cleanCache(finalizedSlot)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
updateByPeriod := make(map[uint64]interfaces.LightClientUpdate)
|
||||
// Traverse the cache from the head item to the tail, collecting updates
|
||||
for item := finalizedCacheHead; item != nil; item = item.parent {
|
||||
if _, seen := updateByPeriod[item.period]; seen {
|
||||
// We already have an update for this period, skip this item
|
||||
continue
|
||||
}
|
||||
updateByPeriod[item.period] = item.bestUpdate
|
||||
}
|
||||
|
||||
// save updates to db
|
||||
for period, update := range updateByPeriod {
|
||||
err = s.beaconDB.SaveLightClientUpdate(ctx, period, update)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("failed to save light client update for period %d. Skipping this period.", period)
|
||||
}
|
||||
}
|
||||
|
||||
// delete non-finality cache items older than finalized slot
|
||||
s.cleanCache(finalizedSlot)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) cleanCache(finalizedSlot primitives.Slot) {
|
||||
// delete non-finality cache items older than finalized slot
|
||||
for k, v := range s.cache.items {
|
||||
if v.slot < finalizedSlot {
|
||||
delete(s.cache.items, k)
|
||||
}
|
||||
if v.parent != nil && v.parent.slot < finalizedSlot {
|
||||
v.parent = nil // remove parent reference
|
||||
}
|
||||
}
|
||||
}
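
Taken together, the Store is meant to be driven from three places: per-block processing (SaveLCData), read paths (LightClientUpdates, LastFinalityUpdate, LastOptimisticUpdate), and finalization (MigrateToCold). The hypothetical wiring sketch below only shows the intended call order; the real integration lives in the blockchain service, the function name is invented, and the snippet is not part of the diff.

// onBlockProcessedSketch is a hypothetical integration point illustrating the call order.
func onBlockProcessedSketch(
	ctx context.Context,
	s *Store,
	postState state.BeaconState,
	block interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState,
	attestedBlock interfaces.ReadOnlySignedBeaconBlock,
	finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
	headRoot [32]byte,
	justFinalizedRoot *[32]byte, // non-nil only when this block advanced finality
) error {
	// Track the new branch data; this also broadcasts finality/optimistic updates when appropriate.
	if err := s.SaveLCData(ctx, postState, block, attestedState, attestedBlock, finalizedBlock, headRoot); err != nil {
		return err
	}
	// On finalization, flush the best per-period updates to the database and trim the cache.
	if justFinalizedRoot != nil {
		return s.MigrateToCold(ctx, *justFinalizedRoot)
	}
	return nil
}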
|
||||
beacon-chain/light-client/store_test.go (new file, 959 lines)
@@ -0,0 +1,959 @@
|
||||
package light_client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
)
|
||||
|
||||
func TestLightClientStore(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.BellatrixForkEpoch = 2
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Initialize the light client store
|
||||
lcStore := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))
|
||||
|
||||
// Create test light client updates for Capella and Deneb
|
||||
lCapella := util.NewTestLightClient(t, version.Capella)
|
||||
opUpdateCapella, err := NewLightClientOptimisticUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, opUpdateCapella, "OptimisticUpdateCapella is nil")
|
||||
finUpdateCapella, err := NewLightClientFinalityUpdateFromBeaconState(lCapella.Ctx, lCapella.State, lCapella.Block, lCapella.AttestedState, lCapella.AttestedBlock, lCapella.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finUpdateCapella, "FinalityUpdateCapella is nil")
|
||||
|
||||
lDeneb := util.NewTestLightClient(t, version.Deneb)
|
||||
opUpdateDeneb, err := NewLightClientOptimisticUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, opUpdateDeneb, "OptimisticUpdateDeneb is nil")
|
||||
finUpdateDeneb, err := NewLightClientFinalityUpdateFromBeaconState(lDeneb.Ctx, lDeneb.State, lDeneb.Block, lDeneb.AttestedState, lDeneb.AttestedBlock, lDeneb.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finUpdateDeneb, "FinalityUpdateDeneb is nil")
|
||||
|
||||
// Initially the store should have nil values for both updates
|
||||
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get finality with Capella update. Optimistic update should be nil
|
||||
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get optimistic with Capella update. Finality update should be Capella
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
|
||||
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
|
||||
// Set and get finality and optimistic with Deneb update
|
||||
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
|
||||
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
}
|
||||
|
||||
func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
|
||||
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
|
||||
// update0 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
|
||||
update1, err := NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
|
||||
// update1 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority(0))
|
||||
update2, err := NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
|
||||
// update2 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority(0))
|
||||
update3, err := NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
|
||||
// update3 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0))
|
||||
update4, err := NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
|
||||
// update4 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority(0))
|
||||
update5, err := NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
|
||||
// update5 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
|
||||
update6, err := NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
|
||||
// update6 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
|
||||
|
||||
func TestLightClientStore_SaveLCData(t *testing.T) {
	t.Run("no parent in cache or db - new is head", func(t *testing.T) {
		db := testDB.SetupDB(t)
		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
		require.NotNil(t, s)

		l := util.NewTestLightClient(t, version.Altair)

		blkRoot, err := l.Block.Block().HashTreeRoot()
		require.NoError(t, err)

		require.NoError(t, s.SaveLCData(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, blkRoot), "Failed to save light client data")

		update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)
		finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
		require.NoError(t, err)
		optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
		require.NoError(t, err)
		attstedBlkRoot, err := l.AttestedBlock.Block().HashTreeRoot()
		require.NoError(t, err)

		require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
		require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
		require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
		require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
	})

t.Run("no parent in cache or db - new not head", func(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
|
||||
blkRoot, err := l.FinalizedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SaveLCData(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, blkRoot), "Failed to save light client data")
|
||||
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attstedBlkRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.IsNil(t, s.lastFinalityUpdate, "Expected to not find the last finality update in the store since the block is not head")
|
||||
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
|
||||
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
|
||||
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
|
||||
})
|
||||
|
||||
t.Run("parent in db", func(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
|
||||
// save an update for this period in db
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveLightClientUpdate(l.Ctx, period, update), "Failed to save light client update in db")
|
||||
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should be all better
|
||||
|
||||
blkRoot, err := l2.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")
|
||||
|
||||
update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attstedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
|
||||
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
|
||||
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
|
||||
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
|
||||
})
|
||||
|
||||
t.Run("parent in cache", func(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should be all better
|
||||
|
||||
// save the cache item for this period in cache
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
item := &cacheItem{
|
||||
period: period,
|
||||
bestUpdate: update,
|
||||
bestFinalityUpdate: finalityUpdate,
|
||||
}
|
||||
attestedBlockRoot := l2.AttestedBlock.Block().ParentRoot() // we want this item to be the parent of the new block
|
||||
s.cache.items[attestedBlockRoot] = item
|
||||
|
||||
blkRoot, err := l2.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")
|
||||
|
||||
update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err = NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attstedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
|
||||
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
|
||||
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
|
||||
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
|
||||
})
|
||||
|
||||
t.Run("parent in the previous period", func(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
l2 := util.NewTestLightClient(t, version.Bellatrix, util.WithIncreasedAttestedSlot(1), util.WithSupermajority(0)) // updates from this setup should be all better
|
||||
|
||||
// save the cache item for this period1 in cache
|
||||
period1 := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err := NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
item := &cacheItem{
|
||||
period: period1,
|
||||
bestUpdate: update,
|
||||
bestFinalityUpdate: finalityUpdate,
|
||||
}
|
||||
attestedBlockRoot := l2.AttestedBlock.Block().ParentRoot() // we want this item to be the parent of the new block
|
||||
s.cache.items[attestedBlockRoot] = item
|
||||
|
||||
blkRoot, err := l2.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SaveLCData(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock, blkRoot), "Failed to save light client data")
|
||||
|
||||
update, err = NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
finalityUpdate, err = NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
optimisticUpdate, err := NewLightClientOptimisticUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attstedBlkRoot, err := l2.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, finalityUpdate, s.lastFinalityUpdate, "Expected to find the last finality update in the store")
|
||||
require.DeepEqual(t, optimisticUpdate, s.lastOptimisticUpdate, "Expected to find the last optimistic update in the store")
|
||||
require.DeepEqual(t, update, s.cache.items[attstedBlkRoot].bestUpdate, "Expected to find the update in the non-finality cache")
|
||||
require.DeepEqual(t, finalityUpdate, s.cache.items[attstedBlkRoot].bestFinalityUpdate, "Expected to find the finality update in the non-finality cache")
|
||||
})
|
||||
}
|
||||
|
||||
func TestLightClientStore_MigrateToCold(t *testing.T) {
	// This tests the scenario where chain advances but the cache is empty.
	// It should see that there is nothing in the cache to migrate and just update the tail to the new finalized root.
	t.Run("empty cache", func(t *testing.T) {
		beaconDB := testDB.SetupDB(t)
		ctx := context.Background()

		finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
		require.NotNil(t, s)

		for i := 0; i < 3; i++ {
			newBlock := util.NewBeaconBlock()
			newBlock.Block.Slot = primitives.Slot(32 + uint64(i))
			newBlock.Block.ParentRoot = finalizedBlockRoot[:]
			signedNewBlock, err := blocks.NewSignedBeaconBlock(newBlock)
			require.NoError(t, err)
			blockRoot, err := signedNewBlock.Block().HashTreeRoot()
			require.NoError(t, err)
			require.NoError(t, beaconDB.SaveBlock(ctx, signedNewBlock))
			finalizedBlockRoot = blockRoot
		}

		err := s.MigrateToCold(ctx, finalizedBlockRoot)
		require.NoError(t, err)
		require.Equal(t, 0, len(s.cache.items))
	})
|
||||
|
||||
// This tests the scenario where chain advances but the CANONICAL cache is empty.
|
||||
// It should see that there is nothing in the canonical cache to migrate and just update the tail to the new finalized root AND delete anything non-canonical.
|
||||
t.Run("non canonical cache", func(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
|
||||
require.NotNil(t, s)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
newBlock := util.NewBeaconBlock()
|
||||
newBlock.Block.Slot = primitives.Slot(32 + uint64(i))
|
||||
newBlock.Block.ParentRoot = finalizedBlockRoot[:]
|
||||
signedNewBlock, err := blocks.NewSignedBeaconBlock(newBlock)
|
||||
require.NoError(t, err)
|
||||
blockRoot, err := signedNewBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, signedNewBlock))
|
||||
finalizedBlockRoot = blockRoot
|
||||
}
|
||||
|
||||
// Add a non-canonical item to the cache
|
||||
cacheItem := &cacheItem{
|
||||
period: 0,
|
||||
slot: 33,
|
||||
}
|
||||
nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
|
||||
s.cache.items[nonCanonicalBlockRoot] = cacheItem
|
||||
|
||||
require.Equal(t, 1, len(s.cache.items))
|
||||
|
||||
err := s.MigrateToCold(ctx, finalizedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
|
||||
})
|
||||
|
||||
	// db has an update - cache has both canonical and non-canonical items.
	// should replace the stored update in db and clear the cache.
|
||||
t.Run("mixed cache - finality immediately after cache", func(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
|
||||
require.NotNil(t, s)
|
||||
|
||||
// Save an update for this period in db
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period, update))
|
||||
|
||||
lastBlockRoot := finalizedBlockRoot
|
||||
lastAttestedRoot := finalizedBlockRoot
|
||||
lastUpdate := update
|
||||
for i := 1; i < 4; i++ {
|
||||
l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
|
||||
require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
|
||||
lastBlockRoot, err = l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
update, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
lastUpdate = update
|
||||
}
|
||||
|
||||
require.Equal(t, 3, len(s.cache.items))
|
||||
|
||||
// Add a non-canonical item to the cache
|
||||
cacheItem := &cacheItem{
|
||||
period: 0,
|
||||
slot: 33,
|
||||
}
|
||||
nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
|
||||
s.cache.items[nonCanonicalBlockRoot] = cacheItem
|
||||
|
||||
require.Equal(t, 4, len(s.cache.items))
|
||||
|
||||
err = s.MigrateToCold(ctx, lastBlockRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
|
||||
u, err := beaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, lastUpdate, u)
|
||||
})
|
||||
|
||||
	// db has an update - cache has both canonical and non-canonical items. finalized height is in the middle.
	// should replace the stored update in db and delete cache items before the finalized slot.
|
||||
t.Run("mixed cache - finality middle of cache", func(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
|
||||
require.NotNil(t, s)
|
||||
|
||||
// Save an update for this period in db
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period, update))
|
||||
|
||||
lastBlockRoot := finalizedBlockRoot
|
||||
lastUpdate := update
|
||||
lastAttestedRoot := [32]byte{}
|
||||
for i := 1; i < 4; i++ {
|
||||
l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
|
||||
require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
|
||||
root, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
lastBlockRoot = root
|
||||
update, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
lastUpdate = update
|
||||
lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, 3, len(s.cache.items))
|
||||
|
||||
// Add a non-canonical item to the cache
|
||||
cacheItem := &cacheItem{
|
||||
period: 0,
|
||||
slot: 33,
|
||||
}
|
||||
nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
|
||||
s.cache.items[nonCanonicalBlockRoot] = cacheItem
|
||||
|
||||
require.Equal(t, 4, len(s.cache.items))
|
||||
|
||||
for i := 4; i < 7; i++ {
|
||||
l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(0), util.WithAttestedParentRoot(lastAttestedRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
|
||||
require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
|
||||
lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, 7, len(s.cache.items))
|
||||
|
||||
err = s.MigrateToCold(ctx, lastBlockRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
|
||||
u, err := beaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, lastUpdate, u)
|
||||
})
|
||||
|
||||
// we have multiple periods in the cache before finalization happens. we expect all of them to be saved in db.
|
||||
t.Run("finality after multiple periods in cache", func(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.EpochsPerSyncCommitteePeriod = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
finalizedBlockRoot, _ := saveInitialFinalizedCheckpointData(t, ctx, beaconDB)
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, finalizedBlockRoot))
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), beaconDB)
|
||||
require.NotNil(t, s)
|
||||
|
||||
// Save an update for this period1 in db
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
period1 := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
require.NoError(t, beaconDB.SaveLightClientUpdate(ctx, period1, update))
|
||||
|
||||
lastBlockRoot := finalizedBlockRoot
|
||||
lastUpdatePeriod1 := update
|
||||
lastAttestedRoot := [32]byte{}
|
||||
for i := 1; i < 4; i++ {
|
||||
l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
|
||||
require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
|
||||
root, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
lastBlockRoot = root
|
||||
lastUpdatePeriod1, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
period2 := period1
|
||||
var lastUpdatePeriod2 interfaces.LightClientUpdate
|
||||
for i := 1; i < 4; i++ {
|
||||
l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)+33), util.WithSupermajority(uint64(i)), util.WithAttestedParentRoot(lastAttestedRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
|
||||
require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
|
||||
root, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
lastBlockRoot = root
|
||||
lastUpdatePeriod2, err = NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
period2 = slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedBlock.Block().Slot()))
|
||||
}
|
||||
|
||||
require.Equal(t, 6, len(s.cache.items))
|
||||
|
||||
// Add a non-canonical item to the cache
|
||||
cacheItem := &cacheItem{
|
||||
period: 0,
|
||||
slot: 33,
|
||||
}
|
||||
nonCanonicalBlockRoot := [32]byte{1, 2, 3, 4}
|
||||
s.cache.items[nonCanonicalBlockRoot] = cacheItem
|
||||
|
||||
require.Equal(t, 7, len(s.cache.items))
|
||||
|
||||
for i := 4; i < 7; i++ {
|
||||
l = util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(uint64(i)+33), util.WithSupermajority(0), util.WithAttestedParentRoot(lastAttestedRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, l.Block))
|
||||
require.NoError(t, s.SaveLCData(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock, [32]byte{1}))
|
||||
lastAttestedRoot, err = l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, 10, len(s.cache.items))
|
||||
|
||||
err = s.MigrateToCold(ctx, lastBlockRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(s.cache.items), "Expected the non-canonical item in the cache to be deleted")
|
||||
u, err := beaconDB.LightClientUpdate(ctx, period2)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, lastUpdatePeriod2, u)
|
||||
u, err = beaconDB.LightClientUpdate(ctx, period1)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, lastUpdatePeriod1, u)
|
||||
})
|
||||
}
|
||||
|
||||
func saveInitialFinalizedCheckpointData(t *testing.T, ctx context.Context, beaconDB db.Database) ([32]byte, interfaces.SignedBeaconBlock) {
	genesis := util.NewBeaconBlock()
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
	util.SaveBlock(t, ctx, beaconDB, genesis)
	genesisState, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveState(ctx, genesisState, genesisRoot))

	finalizedState, err := util.NewBeaconState()
	require.NoError(t, err)
	finalizedBlock := util.NewBeaconBlock()
	finalizedBlock.Block.Slot = 32
	finalizedBlock.Block.ParentRoot = genesisRoot[:]
	signedFinalizedBlock, err := blocks.NewSignedBeaconBlock(finalizedBlock)
	require.NoError(t, err)
	finalizedBlockHeader, err := signedFinalizedBlock.Header()
	require.NoError(t, err)
	require.NoError(t, finalizedState.SetLatestBlockHeader(finalizedBlockHeader.Header))
	finalizedStateRoot, err := finalizedState.HashTreeRoot(ctx)
	require.NoError(t, err)
	finalizedBlock.Block.StateRoot = finalizedStateRoot[:]
	signedFinalizedBlock, err = blocks.NewSignedBeaconBlock(finalizedBlock)
	require.NoError(t, err)
	finalizedBlockRoot, err := signedFinalizedBlock.Block().HashTreeRoot()
	require.NoError(t, err)
	cp := ethpb.Checkpoint{
		Epoch: 1,
		Root:  finalizedBlockRoot[:],
	}
	require.NoError(t, beaconDB.SaveBlock(ctx, signedFinalizedBlock))
	require.NoError(t, beaconDB.SaveState(ctx, finalizedState, finalizedBlockRoot))
	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &cp))

	return finalizedBlockRoot, signedFinalizedBlock
}
|
||||
|
||||
func TestLightClientStore_LightClientUpdatesByRange(t *testing.T) {
	t.Run("no updates", func(t *testing.T) {
		d := testDB.SetupDB(t)
		ctx := context.Background()

		_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)

		s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
		require.NotNil(t, s)

		updates, err := s.LightClientUpdates(ctx, 2, 5, finalizedBlock)
		require.NoError(t, err)
		require.Equal(t, 0, len(updates))
	})
|
||||
|
||||
t.Run("single update from db", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update))
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 3, finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("multiple updates from db", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, finalizedBlock := saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 4, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 5, update))
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 5, finalizedBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[2], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("single update from cache", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cacheItem := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItem
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{3})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 3, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("multiple updates from cache", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cacheItemP3 := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItemP3
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP3,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 3, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[2], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
t.Run("multiple updates from both db and cache - no overlap", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update))
|
||||
|
||||
cacheItemP3 := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItemP3
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP3,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 5, len(updates))
|
||||
for i := 0; i < 5; i++ {
|
||||
require.DeepEqual(t, update, updates[i], "Expected to find the update in the store")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("multiple updates from both db and cache - overlap", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l1 := util.NewTestLightClient(t, version.Altair)
|
||||
update1, err := NewLightClientUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
|
||||
update2, err := NewLightClientUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepNotEqual(t, update1, update2)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update1))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update1))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 3, update1))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 4, update1))
|
||||
|
||||
cacheItemP3 := &cacheItem{
|
||||
period: 3,
|
||||
bestUpdate: update2,
|
||||
}
|
||||
s.cache.items[[32]byte{3}] = cacheItemP3
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update2,
|
||||
parent: cacheItemP3,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update2,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 5, len(updates))
|
||||
// first two updates should be update1
|
||||
for i := 0; i < 2; i++ {
|
||||
require.DeepEqual(t, update1, updates[i], "Expected to find the update in the store")
|
||||
}
|
||||
// next three updates should be update2 - as cache overrides db
|
||||
for i := 2; i < 5; i++ {
|
||||
require.DeepEqual(t, update2, updates[i], "Expected to find the update in the store")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("first continuous range", func(t *testing.T) {
|
||||
d := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, _ = saveInitialFinalizedCheckpointData(t, ctx, d)
|
||||
|
||||
s := NewLightClientStore(&p2pTesting.FakeP2P{}, new(event.Feed), d)
|
||||
require.NotNil(t, s)
|
||||
|
||||
l1 := util.NewTestLightClient(t, version.Altair)
|
||||
update, err := NewLightClientUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 1, update))
|
||||
require.NoError(t, d.SaveLightClientUpdate(ctx, 2, update))
|
||||
|
||||
cacheItemP4 := &cacheItem{
|
||||
period: 4,
|
||||
bestUpdate: update,
|
||||
}
|
||||
s.cache.items[[32]byte{4}] = cacheItemP4
|
||||
|
||||
cacheItemP5 := &cacheItem{
|
||||
period: 5,
|
||||
bestUpdate: update,
|
||||
parent: cacheItemP4,
|
||||
}
|
||||
s.cache.items[[32]byte{5}] = cacheItemP5
|
||||
|
||||
_, headBlock := saveStateAndBlockWithParentRoot(t, ctx, d, [32]byte{5})
|
||||
|
||||
updates, err := s.LightClientUpdates(ctx, 1, 5, headBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(updates))
|
||||
require.DeepEqual(t, update, updates[0], "Expected to find the update in the store")
|
||||
require.DeepEqual(t, update, updates[1], "Expected to find the update in the store")
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func saveStateAndBlockWithParentRoot(t *testing.T, ctx context.Context, d db.Database, parentRoot [32]byte) ([32]byte, interfaces.SignedBeaconBlock) {
	blk := util.NewBeaconBlock()
	blk.Block.ParentRoot = parentRoot[:]

	blkRoot, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)

	util.SaveBlock(t, ctx, d, blk)

	st, err := util.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, d.SaveState(ctx, st, blkRoot))

	signedFinalizedBlock, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)

	return blkRoot, signedFinalizedBlock
}
|
||||
@@ -23,7 +23,6 @@ go_library(
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
@@ -32,6 +31,7 @@ go_library(
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/monitor:go_default_library",
|
||||
"//beacon-chain/node/registration:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
@@ -34,6 +33,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/monitor"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/node/registration"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
|
||||
@@ -128,6 +128,7 @@ type BeaconNode struct {
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
ConfigOptions []params.Option
|
||||
syncToggle *regularsync.ServiceToggler
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -161,6 +162,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
syncToggle: regularsync.NewServiceToggler(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
@@ -234,6 +236,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BackfillOpts,
|
||||
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
|
||||
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
|
||||
backfill.WithServiceToggle(beacon.syncToggle),
|
||||
)
|
||||
|
||||
if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
|
||||
@@ -253,10 +256,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
// their initialization.
|
||||
beacon.finalizedStateAtStartUp = nil
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
beacon.lcStore = lightclient.NewLightClientStore(beacon.db, beacon.fetchP2P(), beacon.StateFeed())
|
||||
}
|
||||
|
||||
return beacon, nil
|
||||
}
|
||||
|
||||
@@ -349,6 +348,11 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
|
||||
return errors.Wrap(err, "could not register P2P service")
|
||||
}
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
log.Debugln("Registering Light Client Store")
|
||||
beacon.registerLightClientStore()
|
||||
}
|
||||
|
||||
log.Debugln("Registering Backfill Service")
|
||||
if err := beacon.RegisterBackfillService(cliCtx, bfs); err != nil {
|
||||
return errors.Wrap(err, "could not register Back Fill service")
|
||||
@@ -370,7 +374,7 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
|
||||
}
|
||||
|
||||
log.Debugln("Registering Initial Sync Service")
|
||||
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete); err != nil {
|
||||
if err := beacon.registerInitialSyncService(beacon.initialSyncComplete, beacon.syncToggle); err != nil {
|
||||
return errors.Wrap(err, "could not register initial sync service")
|
||||
}
|
||||
|
||||
@@ -621,35 +625,55 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseIPNetStrings(ipWhitelist []string) ([]*net.IPNet, error) {
	ipNets := make([]*net.IPNet, 0, len(ipWhitelist))
	for _, cidr := range ipWhitelist {
		_, ipNet, err := net.ParseCIDR(cidr)
		if err != nil {
			log.WithError(err).WithField("cidr", cidr).Error("Invalid CIDR in IP colocation whitelist")
			return nil, err
		}
		ipNets = append(ipNets, ipNet)
		log.WithField("cidr", cidr).Info("Added IP to colocation whitelist")
	}
	return ipNets, nil
}
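
// Hypothetical usage sketch (editorial, not part of the diff): feeding the flag values
// []string{"10.0.0.0/8", "34.42.19.170/32"} through parseIPNetStrings yields two
// *net.IPNet entries that are handed to the gossipsub scorer as the
// IPColocationFactorWhitelist, while a malformed entry such as "10.0.0.0"
// (missing the /mask) makes net.ParseCIDR return an error and aborts p2p registration.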
|
||||
|
||||
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not register p2p service")
|
||||
}
|
||||
|
||||
colocationWhitelist, err := parseIPNetStrings(slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PColocationWhitelist.Name)))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to register p2p service: %w", err)
|
||||
}
|
||||
|
||||
svc, err := p2p.NewService(b.ctx, &p2p.Config{
|
||||
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
|
||||
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
|
||||
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
|
||||
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
|
||||
DataDir: dataDir,
|
||||
DiscoveryDir: filepath.Join(dataDir, "discovery"),
|
||||
LocalIP: cliCtx.String(cmd.P2PIP.Name),
|
||||
HostAddress: cliCtx.String(cmd.P2PHost.Name),
|
||||
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
|
||||
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
|
||||
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
|
||||
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
|
||||
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
|
||||
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
|
||||
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
|
||||
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
|
||||
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
|
||||
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
|
||||
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
|
||||
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
|
||||
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
|
||||
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
|
||||
DataDir: dataDir,
|
||||
DiscoveryDir: filepath.Join(dataDir, "discovery"),
|
||||
LocalIP: cliCtx.String(cmd.P2PIP.Name),
|
||||
HostAddress: cliCtx.String(cmd.P2PHost.Name),
|
||||
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
|
||||
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
|
||||
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
|
||||
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
|
||||
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
|
||||
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
|
||||
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
|
||||
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
|
||||
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
|
||||
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
|
||||
IPColocationWhitelist: colocationWhitelist,
|
||||
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -827,7 +851,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
func (b *BeaconNode) registerInitialSyncService(complete chan struct{}, toggler *regularsync.ServiceToggler) error {
|
||||
var chainService *blockchain.Service
|
||||
if err := b.services.FetchService(&chainService); err != nil {
|
||||
return err
|
||||
@@ -836,6 +860,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
opts := []initialsync.Option{
|
||||
initialsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
initialsync.WithSyncChecker(b.syncChecker),
|
||||
initialsync.WithServiceToggle(toggler),
|
||||
}
|
||||
is := initialsync.NewService(b.ctx, &initialsync.Config{
|
||||
DB: b.db,
|
||||
@@ -1119,6 +1144,11 @@ func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.
|
||||
return b.services.RegisterService(bf)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerLightClientStore() {
	lcs := lightclient.NewLightClientStore(b.fetchP2P(), b.StateFeed(), b.db)
	b.lcStore = lcs
}
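
// Note (editorial, not part of the diff): the constructor arguments here follow the
// NewLightClientStore(p2p, stateFeed, db) order used by the light-client store tests above,
// which is presumably why the older call site in New() that passed the database first
// was removed in this change.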
|
||||
|
||||
func hasNetworkFlag(cliCtx *cli.Context) bool {
|
||||
for _, flag := range features.NetworkFlags {
|
||||
for _, name := range flag.Names() {
|
||||
|
||||
@@ -74,7 +74,9 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("datadir", tmp, "node data directory")
|
||||
set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
|
||||
set.Bool("enable-light-client", true, "enable light client")
|
||||
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
|
||||
require.NoError(t, set.Set("enable-light-client", "true"))
|
||||
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
|
||||
@@ -88,6 +90,7 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
|
||||
node, err := New(ctx, cancel, options...)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, node.lcStore)
|
||||
node.services = &runtime.ServiceRegistry{}
|
||||
go func() {
|
||||
node.Start()
|
||||
@@ -262,3 +265,46 @@ func TestCORS(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseIPNetStrings(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
whitelist []string
|
||||
wantCount int
|
||||
wantError string
|
||||
}{
|
||||
{
|
||||
name: "empty whitelist",
|
||||
whitelist: []string{},
|
||||
wantCount: 0,
|
||||
},
|
||||
{
|
||||
name: "single IP whitelist",
|
||||
whitelist: []string{"192.168.1.1/32"},
|
||||
wantCount: 1,
|
||||
},
|
||||
{
|
||||
name: "multiple IPs whitelist",
|
||||
whitelist: []string{"192.168.1.0/24", "10.0.0.0/8", "34.42.19.170/32"},
|
||||
wantCount: 3,
|
||||
},
|
||||
{
|
||||
name: "invalid CIDR returns error",
|
||||
whitelist: []string{"192.168.1.0/24", "invalid-cidr", "10.0.0.0/8"},
|
||||
wantCount: 0,
|
||||
wantError: "invalid CIDR address",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := parseIPNetStrings(tt.whitelist)
|
||||
assert.Equal(t, tt.wantCount, len(result))
|
||||
if len(tt.wantError) == 0 {
|
||||
assert.Equal(t, nil, err)
|
||||
} else {
|
||||
assert.ErrorContains(t, tt.wantError, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ go_library(
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -95,6 +96,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/net/connmgr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
|
||||
@@ -184,6 +186,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
@@ -308,19 +309,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network. The message is assumed to be
// broadcast to the current fork and to the input column subnet.
|
||||
func (s *Service) BroadcastDataColumnSidecar(
|
||||
root [fieldparams.RootLength]byte,
|
||||
dataColumnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
dataColumnSidecar blocks.VerifiedRODataColumn,
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Ensure the data column sidecar is not nil.
|
||||
if dataColumnSidecar == nil {
|
||||
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", dataColumnSubnet)
|
||||
}
|
||||
|
||||
// Retrieve the current fork digest.
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
@@ -330,16 +325,15 @@ func (s *Service) BroadcastDataColumnSidecar(
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
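
// Hypothetical call-site sketch (assumed, not shown in this diff): with the new signature a
// caller passes the verified sidecar directly and no longer supplies the block root, e.g.
//
//	if err := s.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar); err != nil {
//		log.WithError(err).Error("Failed to broadcast data column sidecar")
//	}
//
// The root used in the broadcast log is now derived from dataColumnSidecar.BlockRoot().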
|
||||
|
||||
func (s *Service) internalBroadcastDataColumnSidecar(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
dataColumnSidecar blocks.VerifiedRODataColumn,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
@@ -385,7 +379,7 @@ func (s *Service) internalBroadcastDataColumnSidecar(
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"timeSinceSlotStart": time.Since(slotStartTime),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
|
||||
"columnSubnet": columnSubnet,
|
||||
}).Debug("Broadcasted data column sidecar")
|
||||
|
||||
|
||||
@@ -711,13 +711,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
|
||||
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
|
||||
sidecar := roSidecars[0].DataColumnSidecar
|
||||
|
||||
// Attempt to broadcast nil object should fail.
|
||||
var emptyRoot [fieldparams.RootLength]byte
|
||||
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
|
||||
require.ErrorContains(t, "attempted to broadcast nil", err)
|
||||
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
|
||||
verifiedRoSidecar := verifiedRoSidecars[0]
|
||||
|
||||
// Subscribe to the topic.
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
@@ -727,7 +722,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
|
||||
err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
@@ -739,5 +734,5 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
|
||||
var result ethpb.DataColumnSidecar
|
||||
require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
|
||||
require.DeepEqual(t, &result, sidecar)
|
||||
require.DeepEqual(t, &result, verifiedRoSidecar)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
@@ -10,34 +11,56 @@ import (
|
||||
|
||||
// This is the default queue size used if an invalid one is specified.
|
||||
const defaultPubsubQueueSize = 600
|
||||
const (
|
||||
// defaultConnManagerPruneAbove sets the number of peers where ConnectionManager
|
||||
// will begin to internally prune peers. This value is set based on the internal
|
||||
// value of the libp2p DefaultConnectionManager "high water mark". The "low water mark"
|
||||
// is the number of peers where ConnManager will stop pruning. This value is computed
|
||||
// by subtracting connManagerPruneAmount from the high water mark.
|
||||
defaultConnManagerPruneAbove = 192
|
||||
connManagerPruneAmount = 32
|
||||
)
|
||||
|
||||
// Config for the p2p service. These parameters are set from application level flags
|
||||
// to initialize the p2p service.
|
||||
type Config struct {
|
||||
NoDiscovery bool
|
||||
EnableUPnP bool
|
||||
StaticPeerID bool
|
||||
DisableLivenessCheck bool
|
||||
StaticPeers []string
|
||||
Discv5BootStrapAddrs []string
|
||||
RelayNodeAddr string
|
||||
LocalIP string
|
||||
HostAddress string
|
||||
HostDNS string
|
||||
PrivateKey string
|
||||
DataDir string
|
||||
DiscoveryDir string
|
||||
QUICPort uint
|
||||
TCPPort uint
|
||||
UDPPort uint
|
||||
PingInterval time.Duration
|
||||
MaxPeers uint
|
||||
QueueSize uint
|
||||
AllowListCIDR string
|
||||
DenyListCIDR []string
|
||||
StateNotifier statefeed.Notifier
|
||||
DB db.ReadOnlyDatabaseWithSeqNum
|
||||
ClockWaiter startup.ClockWaiter
|
||||
NoDiscovery bool
|
||||
EnableUPnP bool
|
||||
StaticPeerID bool
|
||||
DisableLivenessCheck bool
|
||||
StaticPeers []string
|
||||
Discv5BootStrapAddrs []string
|
||||
RelayNodeAddr string
|
||||
LocalIP string
|
||||
HostAddress string
|
||||
HostDNS string
|
||||
PrivateKey string
|
||||
DataDir string
|
||||
DiscoveryDir string
|
||||
QUICPort uint
|
||||
TCPPort uint
|
||||
UDPPort uint
|
||||
PingInterval time.Duration
|
||||
MaxPeers uint
|
||||
QueueSize uint
|
||||
AllowListCIDR string
|
||||
DenyListCIDR []string
|
||||
IPColocationWhitelist []*net.IPNet
|
||||
StateNotifier statefeed.Notifier
|
||||
DB db.ReadOnlyDatabaseWithSeqNum
|
||||
ClockWaiter startup.ClockWaiter
|
||||
}
|
||||
|
||||
// connManagerLowHigh picks the low and high water marks for the connection manager based
|
||||
// on the MaxPeers setting. The high water mark will be at least the default high water mark
|
||||
// (192), or MaxPeers + 32, whichever is higher. The low water mark is set to be 32 less than
|
||||
// the high water mark. This is done to ensure the ConnManager never prunes peers that the
|
||||
// node has connected to based on the MaxPeers setting.
|
||||
func (cfg *Config) connManagerLowHigh() (int, int) {
|
||||
maxPeersPlusMargin := int(cfg.MaxPeers) + connManagerPruneAmount
|
||||
high := max(maxPeersPlusMargin, defaultConnManagerPruneAbove)
|
||||
low := high - connManagerPruneAmount
|
||||
return low, high
|
||||
}
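A short worked example of the water-mark arithmetic above, with illustrative MaxPeers values (they are examples, not defaults taken from this change; assumes the fmt import):

// Illustrative only (would live inside the p2p package):
//   MaxPeers = 70  -> high = max(70+32, 192)  = 192, low = 160
//   MaxPeers = 300 -> high = max(300+32, 192) = 332, low = 300
func exampleConnManagerWaterMarks() {
	low, high := (&Config{MaxPeers: 300}).connManagerLowHigh()
	fmt.Printf("low=%d high=%d\n", low, high) // low=300 high=332
}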
|
||||
|
||||
// validateConfig validates whether the values provided are accurate and will set
|
||||
|
||||
@@ -3,6 +3,7 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -75,7 +76,7 @@ var (
|
||||
tenEpochs = 10 * oneEpochDuration()
|
||||
)
|
||||
|
||||
func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) {
|
||||
func peerScoringParams(colocationWhitelist []*net.IPNet) (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) {
|
||||
thresholds := &pubsub.PeerScoreThresholds{
|
||||
GossipThreshold: -4000,
|
||||
PublishThreshold: -8000,
|
||||
@@ -83,6 +84,7 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
|
||||
AcceptPXThreshold: 100,
|
||||
OpportunisticGraftThreshold: 5,
|
||||
}
|
||||
|
||||
scoreParams := &pubsub.PeerScoreParams{
|
||||
Topics: make(map[string]*pubsub.TopicScoreParams),
|
||||
TopicScoreCap: 32.72,
|
||||
@@ -92,7 +94,7 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
|
||||
AppSpecificWeight: 1,
|
||||
IPColocationFactorWeight: -35.11,
|
||||
IPColocationFactorThreshold: 10,
|
||||
IPColocationFactorWhitelist: nil,
|
||||
IPColocationFactorWhitelist: colocationWhitelist,
|
||||
BehaviourPenaltyWeight: -15.92,
|
||||
BehaviourPenaltyThreshold: 6,
|
||||
BehaviourPenaltyDecay: scoreDecay(tenEpochs),
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -51,7 +52,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
BroadcastDataColumnSidecar(columnSubnet uint64, dataColumnSidecar blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
mplex "github.com/libp2p/go-libp2p-mplex"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
|
||||
"github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
||||
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
@@ -58,6 +59,22 @@ func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, err
|
||||
return multiaddrs, nil
|
||||
}
|
||||
|
||||
// setConnManagerOption sets the connection manager option for libp2p based on the
|
||||
// MaxPeers setting in the p2p config. If MaxPeers is set to a value higher than the
|
||||
// default high water mark, we create a new connection manager with a high water mark
|
||||
// that is higher than MaxPeers. Otherwise, we do not set a connection manager option
|
||||
// and allow the libp2p fallback defaults to be applied. Rationale below:
|
||||
// see: https://github.com/OffchainLabs/prysm/issues/15607
|
||||
func setConnManagerOption(cfg *Config, opts []libp2p.Option) ([]libp2p.Option, error) {
|
||||
low, high := cfg.connManagerLowHigh()
|
||||
cm, err := connmgr.NewConnManager(low, high)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new ConnManager")
|
||||
}
|
||||
opts = append(opts, libp2p.ConnectionManager(cm))
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// buildOptions for the libp2p host.
|
||||
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
|
||||
cfg := s.cfg
|
||||
@@ -84,7 +101,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
|
||||
}
|
||||
|
||||
log.Infof("Running node with peer id of %s ", id.String())
|
||||
|
||||
options := []libp2p.Option{
|
||||
@@ -98,6 +114,10 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
libp2p.Security(noise.ID, noise.New),
|
||||
libp2p.Ping(false), // Disable Ping Service.
|
||||
}
|
||||
options, err = setConnManagerOption(s.cfg, options)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "set connection manager option")
|
||||
}
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
@@ -134,6 +135,59 @@ func TestDefaultMultiplexers(t *testing.T) {
|
||||
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
|
||||
}
|
||||
|
||||
func TestSetConnManagerOption(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
maxPeers uint
|
||||
highWater int
|
||||
}{
|
||||
{
|
||||
name: "MaxPeers lower than default high water mark",
|
||||
maxPeers: defaultConnManagerPruneAbove - 1,
|
||||
highWater: defaultConnManagerPruneAbove,
|
||||
},
|
||||
{
|
||||
name: "MaxPeers equal to default high water mark",
|
||||
maxPeers: defaultConnManagerPruneAbove,
|
||||
highWater: defaultConnManagerPruneAbove,
|
||||
},
|
||||
{
|
||||
name: "MaxPeers higher than default high water mark",
|
||||
maxPeers: defaultConnManagerPruneAbove + 1,
|
||||
highWater: defaultConnManagerPruneAbove + 1 + connManagerPruneAmount,
|
||||
},
|
||||
}
|
||||
for _, tt := range cases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := &Config{MaxPeers: tt.maxPeers}
|
||||
opts, err := setConnManagerOption(cfg, []libp2p.Option{})
|
||||
assert.NoError(t, err)
|
||||
_, high := cfg.connManagerLowHigh()
|
||||
require.Equal(t, true, high > int(cfg.MaxPeers))
|
||||
|
||||
var libCfg libp2p.Config
|
||||
require.NoError(t, libCfg.Apply(append(opts, libp2p.FallbackDefaults)...))
|
||||
checkLimit(t, libCfg.ConnManager, high)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type connLimitGetter int
|
||||
|
||||
func (m connLimitGetter) GetConnLimit() int {
|
||||
return int(m)
|
||||
}
|
||||
|
||||
// checkLimit determines the connection manager's internal high water mark. cm.CheckLimit returns
// an error when the result of calling lg.GetConnLimit is less than the high water mark, so by
// checking it with a value equal to and one lower than the expected value, we can determine the
// value it holds internally.
|
||||
func checkLimit(t *testing.T, cm connmgr.ConnManager, expected int) {
|
||||
require.NoError(t, cm.CheckLimit(connLimitGetter(expected)), "Connection manager limit check failed")
|
||||
if err := cm.CheckLimit(connLimitGetter(expected - 1)); err == nil {
|
||||
t.Errorf("connection manager limit is below the expected value of %d", expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiAddressBuilderWithID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -25,6 +25,7 @@ package peers
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -87,11 +88,12 @@ const (
|
||||
|
||||
// Status is the structure holding the peer status information.
|
||||
type Status struct {
|
||||
ctx context.Context
|
||||
scorers *scorers.Service
|
||||
store *peerdata.Store
|
||||
ipTracker map[string]uint64
|
||||
rand *rand.Rand
|
||||
ctx context.Context
|
||||
scorers *scorers.Service
|
||||
store *peerdata.Store
|
||||
ipTracker map[string]uint64
|
||||
rand *rand.Rand
|
||||
ipColocationWhitelist []*net.IPNet
|
||||
}
|
||||
|
||||
// StatusConfig represents peer status service params.
|
||||
@@ -100,6 +102,8 @@ type StatusConfig struct {
|
||||
PeerLimit int
|
||||
// ScorerParams holds peer scorer configuration params.
|
||||
ScorerParams *scorers.Config
|
||||
// IPColocationWhitelist contains CIDR ranges that are exempt from IP colocation limits.
|
||||
IPColocationWhitelist []*net.IPNet
|
||||
}
|
||||
|
||||
// NewStatus creates a new status entity.
|
||||
@@ -107,11 +111,13 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
|
||||
store := peerdata.NewStore(ctx, &peerdata.StoreConfig{
|
||||
MaxPeers: maxLimitBuffer + config.PeerLimit,
|
||||
})
|
||||
|
||||
return &Status{
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
scorers: scorers.NewService(ctx, store, config.ScorerParams),
|
||||
ipTracker: map[string]uint64{},
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
scorers: scorers.NewService(ctx, store, config.ScorerParams),
|
||||
ipTracker: map[string]uint64{},
|
||||
ipColocationWhitelist: config.IPColocationWhitelist,
|
||||
// Random generator used to calculate dial backoff period.
|
||||
// It is ok to use deterministic generator, no need for true entropy.
|
||||
rand: rand.NewDeterministicGenerator(),
|
||||
@@ -1046,6 +1052,13 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
|
||||
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > CollocationLimit {
|
||||
// Check if IP is in the whitelist
|
||||
for _, ipNet := range p.ipColocationWhitelist {
|
||||
if ipNet.Contains(ip) {
|
||||
// IP is whitelisted, skip colocation limit check
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.Errorf(
|
||||
"colocation limit exceeded: got %d - limit %d for peer %v with IP %v",
|
||||
val, CollocationLimit, pid, ip.String(),
|
||||
|
||||
@@ -145,7 +145,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
|
||||
pubsub.WithMaxMessageSize(int(MaxMessageSize())), // lint:ignore uintcast -- Max Message Size is a config value and is naturally bounded by networking limitations.
|
||||
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),
|
||||
pubsub.WithPeerScore(peerScoringParams()),
|
||||
pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
|
||||
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
|
||||
pubsub.WithGossipSubParams(pubsubGossipParam()),
|
||||
pubsub.WithRawTracer(gossipTracer{host: s.host}),
|
||||
|
||||
@@ -178,7 +178,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
s.pubsub = gs
|
||||
|
||||
s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: int(s.cfg.MaxPeers),
|
||||
PeerLimit: int(s.cfg.MaxPeers),
|
||||
IPColocationWhitelist: s.cfg.IPColocationWhitelist,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -168,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
func (*FakeP2P) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
@@ -232,7 +233,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
func (p *TestP2P) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
@@ -498,8 +499,39 @@ func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
|
||||
return s.earliestAvailableSlot, s.custodyGroupCount, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCountFromPeer .
|
||||
// CustodyGroupCountFromPeer retrieves custody group count from a peer.
|
||||
// It first tries to get the custody group count from the peer's metadata,
|
||||
// then falls back to the ENR value if the metadata is not available, then
|
||||
// falls back to the minimum number of custody groups an honest node should custody
|
||||
// and serve samples from if ENR is not available.
|
||||
func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
|
||||
// Try to get the custody group count from the peer's metadata.
|
||||
metadata, err := s.peers.Metadata(pid)
|
||||
if err != nil {
|
||||
// On error, default to the ENR value.
|
||||
return s.custodyGroupCountFromPeerENR(pid)
|
||||
}
|
||||
|
||||
// If the metadata is nil, default to the ENR value.
|
||||
if metadata == nil {
|
||||
return s.custodyGroupCountFromPeerENR(pid)
|
||||
}
|
||||
|
||||
// Get the custody subnets count from the metadata.
|
||||
custodyCount := metadata.CustodyGroupCount()
|
||||
|
||||
// If the custody count is zero, default to the ENR value.
|
||||
if custodyCount == 0 {
|
||||
return s.custodyGroupCountFromPeerENR(pid)
|
||||
}
|
||||
|
||||
return custodyCount
|
||||
}
|
||||
|
||||
// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
|
||||
// If the ENR is not available, it defaults to the minimum number of custody groups
|
||||
// an honest node custodies and serves samples from.
|
||||
func (s *TestP2P) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
// By default, we assume the peer custodies the minimum number of groups.
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
@@ -509,7 +541,7 @@ func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
|
||||
return custodyRequirement
|
||||
}
|
||||
|
||||
// Retrieve the custody subnets count from the ENR.
|
||||
// Retrieve the custody group count from the ENR.
|
||||
custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
|
||||
if err != nil {
|
||||
return custodyRequirement
|
||||
|
||||
@@ -20,10 +20,10 @@ go_library(
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
|
||||
@@ -102,7 +102,7 @@ func (s *Service) endpoints(
|
||||
endpoints = append(endpoints, s.prysmValidatorEndpoints(stater, coreService)...)
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
endpoints = append(endpoints, s.lightClientEndpoints(blocker, stater)...)
|
||||
endpoints = append(endpoints, s.lightClientEndpoints()...)
|
||||
}
|
||||
|
||||
if enableDebug {
|
||||
@@ -194,6 +194,8 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
const namespace = "blob"
|
||||
return []endpoint{
|
||||
{
|
||||
// Deprecated: /eth/v1/beacon/blob_sidecars/{block_id} is deprecated in favor of /eth/v1/beacon/blobs/{block_id}.
// The endpoint will continue to work for some time after the Fulu fork.
|
||||
template: "/eth/v1/beacon/blob_sidecars/{block_id}",
|
||||
name: namespace + ".Blobs",
|
||||
middleware: []middleware.Middleware{
|
||||
@@ -203,6 +205,16 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
|
||||
handler: server.Blobs,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/beacon/blobs/{block_id}",
|
||||
name: namespace + ".GetBlobs",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetBlobs,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1034,9 +1046,10 @@ func (*Service) configEndpoints() []endpoint {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Stater) []endpoint {
|
||||
func (s *Service) lightClientEndpoints() []endpoint {
|
||||
server := &lightclient.Server{
|
||||
LCStore: s.cfg.LCStore,
|
||||
LCStore: s.cfg.LCStore,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
}
|
||||
|
||||
const namespace = "lightclient"
|
||||
@@ -1230,6 +1243,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
// Warning: no longer supported post Fulu fork
|
||||
template: "/prysm/v1/beacon/blobs",
|
||||
name: namespace + ".PublishBlobs",
|
||||
middleware: []middleware.Middleware{
|
||||
|
||||
@@ -70,6 +70,7 @@ func Test_endpoints(t *testing.T) {
|
||||
|
||||
blobRoutes := map[string][]string{
|
||||
"/eth/v1/beacon/blob_sidecars/{block_id}": {http.MethodGet},
|
||||
"/eth/v1/beacon/blobs/{block_id}": {http.MethodGet},
|
||||
}
|
||||
|
||||
configRoutes := map[string][]string{
|
||||
|
||||
@@ -13,7 +13,9 @@ go_library(
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/options:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -33,6 +35,7 @@ go_test(
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
@@ -41,6 +44,7 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
|
||||
@@ -10,7 +10,9 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
field_params "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -22,6 +24,8 @@ import (
|
||||
)
|
||||
|
||||
// Blobs is an HTTP handler for Beacon API getBlobs.
|
||||
// Deprecated: /eth/v1/beacon/blob_sidecars/{block_id} is deprecated in favor of /eth/v1/beacon/blobs/{block_id}.
// The endpoint will continue to work for some time after the Fulu fork.
|
||||
func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.Blobs")
|
||||
defer span.End()
|
||||
@@ -34,7 +38,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
blockId := segments[len(segments)-1]
|
||||
|
||||
verifiedBlobs, rpcErr := s.Blocker.Blobs(ctx, blockId, indices)
|
||||
verifiedBlobs, rpcErr := s.Blocker.Blobs(ctx, blockId, options.WithIndices(indices))
|
||||
if rpcErr != nil {
|
||||
code := core.ErrorReasonToHTTP(rpcErr.Reason)
|
||||
switch code {
|
||||
@@ -127,6 +131,94 @@ loop:
|
||||
return indices, nil
|
||||
}
|
||||
|
||||
// GetBlobs retrieves blobs for a given block id. (This is the new handler that replaces the deprecated Blobs handler.)
|
||||
func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlobs")
|
||||
defer span.End()
|
||||
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
blockId := segments[len(segments)-1]
|
||||
|
||||
var verifiedBlobs []*blocks.VerifiedROBlob
|
||||
var rpcErr *core.RpcError
|
||||
|
||||
// Check if versioned_hashes parameter is provided
|
||||
versionedHashesStr := r.URL.Query()["versioned_hashes"]
|
||||
versionedHashes := make([][]byte, len(versionedHashesStr))
|
||||
if len(versionedHashesStr) > 0 {
|
||||
for i, hashStr := range versionedHashesStr {
|
||||
hash, ok := shared.ValidateHex(w, fmt.Sprintf("versioned_hashes[%d]", i), hashStr, 32)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
versionedHashes[i] = hash
|
||||
}
|
||||
}
|
||||
verifiedBlobs, rpcErr = s.Blocker.Blobs(ctx, blockId, options.WithVersionedHashes(versionedHashes))
|
||||
if rpcErr != nil {
|
||||
code := core.ErrorReasonToHTTP(rpcErr.Reason)
|
||||
switch code {
|
||||
case http.StatusBadRequest:
|
||||
httputil.HandleError(w, "Bad Request: "+rpcErr.Err.Error(), code)
|
||||
return
|
||||
case http.StatusNotFound:
|
||||
httputil.HandleError(w, "Not found: "+rpcErr.Err.Error(), code)
|
||||
return
|
||||
case http.StatusInternalServerError:
|
||||
httputil.HandleError(w, "Internal server error: "+rpcErr.Err.Error(), code)
|
||||
return
|
||||
default:
|
||||
httputil.HandleError(w, rpcErr.Err.Error(), code)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if blk == nil {
|
||||
httputil.HandleError(w, "Block not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if httputil.RespondWithSsz(r) {
|
||||
sszLen := fieldparams.BlobSize
|
||||
sszData := make([]byte, len(verifiedBlobs)*sszLen)
|
||||
for i := range verifiedBlobs {
|
||||
copy(sszData[i*sszLen:(i+1)*sszLen], verifiedBlobs[i].Blob)
|
||||
}
|
||||
|
||||
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
|
||||
httputil.WriteSsz(w, sszData)
|
||||
return
|
||||
}
|
||||
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not hash block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, blkRoot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
data := make([]string, len(verifiedBlobs))
|
||||
for i, v := range verifiedBlobs {
|
||||
data[i] = hexutil.Encode(v.Blob)
|
||||
}
|
||||
resp := &structs.GetBlobsResponse{
|
||||
Data: data,
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, blkRoot),
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
|
||||
httputil.WriteJson(w, resp)
|
||||
}
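A minimal client-side sketch of the new endpoint, assuming a locally reachable beacon node HTTP API; the host, port, and hash values are placeholders and not part of this change. It requests specific blobs by versioned hash and decodes the JSON body, in which blobs come back hex-encoded in the block's commitment order (assumes the context, encoding/json, fmt, net/http, net/url, api, and structs imports):

// fetchBlobs is a hypothetical client helper for GET /eth/v1/beacon/blobs/{block_id}.
func fetchBlobs(ctx context.Context, blockID string, versionedHashes []string) (*structs.GetBlobsResponse, error) {
	q := url.Values{}
	for _, h := range versionedHashes {
		q.Add("versioned_hashes", h)
	}
	u := "http://localhost:3500/eth/v1/beacon/blobs/" + blockID + "?" + q.Encode()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
	if err != nil {
		return nil, err
	}
	// Ask for JSON; Accept: application/octet-stream would return raw SSZ blobs instead.
	req.Header.Set("Accept", api.JsonMediaType)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	out := &structs.GetBlobsResponse{}
	if err := json.NewDecoder(resp.Body).Decode(out); err != nil {
		return nil, err
	}
	return out, nil
}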
|
||||
|
||||
func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) []*structs.Sidecar {
|
||||
sidecars := make([]*structs.Sidecar, len(verifiedBlobs))
|
||||
for i, sc := range verifiedBlobs {
|
||||
@@ -147,13 +239,13 @@ func buildSidecarsJsonResponse(verifiedBlobs []*blocks.VerifiedROBlob) []*struct
|
||||
}
|
||||
|
||||
func buildSidecarsSSZResponse(verifiedBlobs []*blocks.VerifiedROBlob) ([]byte, error) {
|
||||
ssz := make([]byte, field_params.BlobSidecarSize*len(verifiedBlobs))
|
||||
ssz := make([]byte, fieldparams.BlobSidecarSize*len(verifiedBlobs))
|
||||
for i, sidecar := range verifiedBlobs {
|
||||
sszrep, err := sidecar.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to marshal sidecar ssz")
|
||||
}
|
||||
copy(ssz[i*field_params.BlobSidecarSize:(i+1)*field_params.BlobSidecarSize], sszrep)
|
||||
copy(ssz[i*fieldparams.BlobSidecarSize:(i+1)*fieldparams.BlobSidecarSize], sszrep)
|
||||
}
|
||||
return ssz, nil
|
||||
}
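Because the SSZ response is a flat concatenation of fixed-size sidecars, a consumer can split it back up by BlobSidecarSize. A minimal sketch mirroring the unmarshalBlobs test helper further below, assuming the ethpb, fieldparams, and fmt imports already used in this file:

// splitSidecarsSSZ is a hypothetical consumer-side helper for the SSZ sidecar response.
func splitSidecarsSSZ(payload []byte) ([]*ethpb.BlobSidecar, error) {
	size := fieldparams.BlobSidecarSize
	if len(payload)%size != 0 {
		return nil, fmt.Errorf("payload length %d is not a multiple of %d", len(payload), size)
	}
	sidecars := make([]*ethpb.BlobSidecar, 0, len(payload)/size)
	for off := 0; off < len(payload); off += size {
		sc := &ethpb.BlobSidecar{}
		if err := sc.UnmarshalSSZ(payload[off : off+size]); err != nil {
			return nil, err
		}
		sidecars = append(sidecars, sc)
	}
	return sidecars, nil
}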
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/network/httputil"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
@@ -278,7 +280,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
|
||||
})
|
||||
t.Run("outside retention period returns 200 w/ empty list ", func(t *testing.T) {
|
||||
t.Run("outside retention period returns 200 with what we have", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -296,7 +298,7 @@ func TestBlobs(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 0, len(resp.Data))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, "deneb", resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
@@ -558,7 +560,573 @@ func Test_parseIndices(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBlobs(t *testing.T) {
|
||||
// Start the trusted setup for KZG operations (needed for data columns)
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 1
|
||||
cfg.ElectraForkEpoch = 10
|
||||
cfg.FuluForkEpoch = 20
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 0},
|
||||
{Epoch: 1, MaxBlobsPerBlock: 6}, // Deneb
|
||||
{Epoch: 10, MaxBlobsPerBlock: 9}, // Electra
|
||||
{Epoch: 20, MaxBlobsPerBlock: 12}, // Fulu
|
||||
}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
db := testDB.SetupDB(t)
|
||||
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
bs := filesystem.NewEphemeralBlobStorage(t)
|
||||
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
|
||||
for i := range testSidecars {
|
||||
require.NoError(t, bs.Save(testSidecars[i]))
|
||||
}
|
||||
blockRoot := blobs[0].BlockRoot()
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
Genesis: time.Now().Add(-time.Duration(uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().DenebForkEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
}
|
||||
s := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
}
|
||||
|
||||
t.Run("genesis", func(t *testing.T) {
|
||||
u := "http://foo.example/genesis"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "blobs are not supported for Phase 0 fork", e.Message)
|
||||
})
|
||||
t.Run("head", func(t *testing.T) {
|
||||
u := "http://foo.example/head"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:], Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
blob := resp.Data[0]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[0].Blob), blob)
|
||||
blob = resp.Data[1]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[1].Blob), blob)
|
||||
blob = resp.Data[2]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[2].Blob), blob)
|
||||
blob = resp.Data[3]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[3].Blob), blob)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("root", func(t *testing.T) {
|
||||
u := "http://foo.example/" + hexutil.Encode(blockRoot[:])
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
|
||||
BeaconDB: db,
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
|
||||
BeaconDB: db,
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot not found", func(t *testing.T) {
|
||||
u := "http://foo.example/122"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
|
||||
BeaconDB: db,
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
})
|
||||
t.Run("no blobs returns an empty array", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t), // new ephemeral storage
|
||||
}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 0, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("outside retention period still returns 200 what we have in db ", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
moc := &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: moc,
|
||||
GenesisTimeFetcher: moc, // genesis time is set to 0 here, so it results in current epoch being extremely large
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
|
||||
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
|
||||
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(commitments), 0)
|
||||
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
|
||||
|
||||
u := "http://foo.example/333"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: denebBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 0, len(resp.Data))
|
||||
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot before Deneb fork", func(t *testing.T) {
|
||||
u := "http://foo.example/31"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "blobs are not supported before Deneb fork", e.Message)
|
||||
})
|
||||
t.Run("malformed block ID", func(t *testing.T) {
|
||||
u := "http://foo.example/foo"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "could not parse block ID"))
|
||||
})
|
||||
t.Run("ssz", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
assert.Equal(t, version.String(version.Deneb), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
require.Equal(t, fieldparams.BlobSize*4, len(writer.Body.Bytes())) // size of 4 sidecars
|
||||
// unmarshal all 4 blobs
|
||||
blbs := unmarshalBlobs(t, writer.Body.Bytes())
|
||||
require.Equal(t, 4, len(blbs))
|
||||
})
|
||||
t.Run("ssz multiple blobs", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
blbs := unmarshalBlobs(t, writer.Body.Bytes())
|
||||
require.Equal(t, 4, len(blbs))
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes invalid hex", func(t *testing.T) {
|
||||
u := "http://foo.example/finalized?versioned_hashes=invalidhex,invalid2hex"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "versioned_hashes[0] is invalid", e.Message)
|
||||
assert.StringContains(t, "hex string without 0x prefix", e.Message)
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes invalid length", func(t *testing.T) {
|
||||
// Using 16 bytes instead of 32
|
||||
shortHash := "0x1234567890abcdef1234567890abcdef"
|
||||
u := fmt.Sprintf("http://foo.example/finalized?versioned_hashes=%s", shortHash)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid versioned_hashes[0]:", e.Message)
|
||||
assert.StringContains(t, "is not length 32", e.Message)
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes valid single hash", func(t *testing.T) {
|
||||
// Get the first blob's commitment and convert to versioned hash
|
||||
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(blobs[0].KzgCommitment)
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/finalized?versioned_hashes=%s", hexutil.Encode(versionedHash[:]))
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 1, len(resp.Data)) // Should return only the requested blob
|
||||
blob := resp.Data[0]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[0].Blob), blob)
|
||||
})
|
||||
|
||||
t.Run("versioned_hashes multiple hashes", func(t *testing.T) {
|
||||
// Get commitments for blobs 1 and 3 and convert to versioned hashes
|
||||
versionedHash1 := primitives.ConvertKzgCommitmentToVersionedHash(blobs[1].KzgCommitment)
|
||||
versionedHash3 := primitives.ConvertKzgCommitmentToVersionedHash(blobs[3].KzgCommitment)
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/finalized?versioned_hashes=%s&versioned_hashes=%s",
|
||||
hexutil.Encode(versionedHash1[:]), hexutil.Encode(versionedHash3[:]))
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data)) // Should return 2 requested blobs
|
||||
// Verify blobs are returned in KZG commitment order from the block (1, 3)
|
||||
// not in the requested order
|
||||
assert.Equal(t, hexutil.Encode(blobs[1].Blob), resp.Data[0])
|
||||
assert.Equal(t, hexutil.Encode(blobs[3].Blob), resp.Data[1])
|
||||
})
|
||||
|
||||
// Test for Electra fork
|
||||
t.Run("electra max blobs", func(t *testing.T) {
|
||||
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, maxBlobsPerBlockByVersion(version.Electra))
|
||||
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
|
||||
electraBs := filesystem.NewEphemeralBlobStorage(t)
|
||||
electraSidecars := verification.FakeVerifySliceForTest(t, electraBlobs)
|
||||
for i := range electraSidecars {
|
||||
require.NoError(t, electraBs.Save(electraSidecars[i]))
|
||||
}
|
||||
electraBlockRoot := electraBlobs[0].BlockRoot()
|
||||
|
||||
u := "http://foo.example/323"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: electraBlockRoot[:]}, Block: electraBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: electraBs,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Electra), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, maxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
|
||||
blob := resp.Data[0]
|
||||
require.NotNil(t, blob)
|
||||
assert.Equal(t, hexutil.Encode(electraBlobs[0].Blob), blob)
|
||||
})
|
||||
|
||||
// Test for Fulu fork with data columns
|
||||
t.Run("fulu with data columns", func(t *testing.T) {
|
||||
// Generate a Fulu block with data columns
|
||||
fuluForkSlot := primitives.Slot(20 * params.BeaconConfig().SlotsPerEpoch) // Fulu is at epoch 20
|
||||
fuluBlock, _, verifiedRoDataColumnSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithSlot(fuluForkSlot))
|
||||
require.NoError(t, db.SaveBlock(t.Context(), fuluBlock.ReadOnlySignedBeaconBlock))
|
||||
fuluBlockRoot := fuluBlock.Root()
|
||||
|
||||
// Store data columns
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
require.NoError(t, dataColumnStorage.Save(verifiedRoDataColumnSidecars))
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/%d", fuluForkSlot)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
// Create an empty blob storage (won't be used but needs to be non-nil)
|
||||
_, emptyBlobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: fuluBlockRoot[:]}, Block: fuluBlock.ReadOnlySignedBeaconBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: emptyBlobStorage,
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Fulu), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
|
||||
// Verify that we got blobs back (they were reconstructed from data columns)
|
||||
for i := range resp.Data {
|
||||
require.NotNil(t, resp.Data[i])
|
||||
}
|
||||
})
|
||||
|
||||
// Test for Fulu with versioned hashes and data columns
|
||||
t.Run("fulu versioned_hashes with data columns", func(t *testing.T) {
|
||||
// Generate a Fulu block with data columns
|
||||
fuluForkSlot2 := primitives.Slot(20*params.BeaconConfig().SlotsPerEpoch + 10) // Fulu is at epoch 20
|
||||
fuluBlock2, _, verifiedRoDataColumnSidecars2 := util.GenerateTestFuluBlockWithSidecars(t, 4, util.WithSlot(fuluForkSlot2))
|
||||
require.NoError(t, db.SaveBlock(t.Context(), fuluBlock2.ReadOnlySignedBeaconBlock))
|
||||
fuluBlockRoot2 := fuluBlock2.Root()
|
||||
|
||||
// Store data columns
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
require.NoError(t, dataColumnStorage.Save(verifiedRoDataColumnSidecars2))
|
||||
|
||||
// Get the commitments from the block to derive versioned hashes
|
||||
commitments, err := fuluBlock2.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, len(commitments) >= 3)
|
||||
|
||||
// Request specific blobs by versioned hashes in reverse order
|
||||
// We request commitments[2] and commitments[0], but they should be returned
|
||||
// in commitment order from the block (0, 2), not in the requested order
|
||||
versionedHash1 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[2])
|
||||
versionedHash2 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[0])
|
||||
|
||||
u := fmt.Sprintf("http://foo.example/%d?versioned_hashes=%s&versioned_hashes=%s", fuluForkSlot2,
|
||||
hexutil.Encode(versionedHash1[:]),
|
||||
hexutil.Encode(versionedHash2[:]))
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
// Create an empty blob storage (won't be used but needs to be non-nil)
|
||||
_, emptyBlobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: fuluBlockRoot2[:]}, Block: fuluBlock2.ReadOnlySignedBeaconBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: emptyBlobStorage,
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
s.GetBlobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Fulu), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlobsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
// Blobs are returned in commitment order, regardless of request order
|
||||
})
|
||||
}
|
||||
|
||||
func unmarshalBlobs(t *testing.T, response []byte) [][]byte {
|
||||
require.Equal(t, 0, len(response)%fieldparams.BlobSize)
|
||||
if len(response) == fieldparams.BlobSize {
|
||||
return [][]byte{response}
|
||||
}
|
||||
blobs := make([][]byte, len(response)/fieldparams.BlobSize)
|
||||
for i := range blobs {
|
||||
blobs[i] = response[i*fieldparams.BlobSize : (i+1)*fieldparams.BlobSize]
|
||||
}
|
||||
return blobs
|
||||
}
|
||||
|
||||
func maxBlobsPerBlockByVersion(v int) int {
|
||||
if v >= version.Fulu {
|
||||
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockFulu
|
||||
}
|
||||
if v >= version.Electra {
|
||||
return params.BeaconConfig().DeprecatedMaxBlobsPerBlockElectra
|
||||
}
|
||||
|
||||
@@ -11,7 +11,8 @@ go_library(
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
@@ -32,9 +33,11 @@ go_test(
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -90,10 +90,21 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
return
|
||||
}
|
||||
|
||||
if startPeriod*uint64(config.EpochsPerSyncCommitteePeriod) < uint64(config.AltairForkEpoch) {
|
||||
httputil.HandleError(w, "Invalid 'start_period': before Altair fork", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
endPeriod := startPeriod + count - 1
|
||||
|
||||
headBlock, err := s.HeadFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get head block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// get updates
|
||||
updates, err := s.LCStore.LightClientUpdates(ctx, startPeriod, endPeriod)
|
||||
updates, err := s.LCStore.LightClientUpdates(ctx, startPeriod, endPeriod, headBlock)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get light client updates from DB: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
|
||||
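The guard above rejects any start_period whose first epoch predates the Altair fork. A minimal sketch of the earliest acceptable period, assuming only the config values used by the handler (the helper name below is illustrative, not part of the diff):

package main

import "fmt"

// earliestLightClientPeriod returns the smallest sync committee period p for
// which p*epochsPerPeriod >= altairForkEpoch, i.e. the first start_period the
// handler above will accept.
func earliestLightClientPeriod(altairForkEpoch, epochsPerPeriod uint64) uint64 {
	return (altairForkEpoch + epochsPerPeriod - 1) / epochsPerPeriod
}

func main() {
	// Mainnet values: Altair fork at epoch 74240, 256 epochs per period -> 290.
	fmt.Println(earliestLightClientPeriod(74240, 256))
}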
@@ -2,6 +2,7 @@ package lightclient
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
@@ -12,9 +13,11 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
blockchainTest "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -55,7 +58,8 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
@@ -99,7 +103,8 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
@@ -142,8 +147,9 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("no bootstrap found", func(t *testing.T) {
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode([]byte{0x00, 0x01, 0x02}))
|
||||
@@ -185,13 +191,24 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
updates = append(updates, update)
|
||||
}
|
||||
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
HeadFetcher: &blockchainTest.ChainService{
|
||||
Block: signedBlk,
|
||||
},
|
||||
}
|
||||
|
||||
saveHead(t, ctx, db)
|
||||
|
||||
updatePeriod := startPeriod
|
||||
for _, update := range updates {
|
||||
err := s.LCStore.SaveLightClientUpdate(ctx, updatePeriod, update)
|
||||
err := db.SaveLightClientUpdate(ctx, updatePeriod, update)
|
||||
require.NoError(t, err)
|
||||
updatePeriod++
|
||||
}
|
||||
@@ -326,20 +343,30 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
secondForkSlot := primitives.Slot(params.BeaconConfig().VersionToForkEpochMap()[testVersion+1] * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
HeadFetcher: &blockchainTest.ChainService{
|
||||
Block: signedBlk,
|
||||
},
|
||||
}
|
||||
|
||||
saveHead(t, ctx, db)
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 2)
|
||||
|
||||
updatePeriod := firstForkSlot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
|
||||
startPeriod := updatePeriod
|
||||
|
||||
var err error
|
||||
updates[0], err = createUpdate(t, testVersion)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[0])
|
||||
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
updatePeriod = secondForkSlot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
|
||||
@@ -347,7 +374,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
updates[1], err = createUpdate(t, testVersion+1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[1])
|
||||
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[1])
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("json", func(t *testing.T) {
|
||||
@@ -446,19 +473,30 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
HeadFetcher: &blockchainTest.ChainService{
|
||||
Block: signedBlk,
|
||||
},
|
||||
}
|
||||
|
||||
saveHead(t, ctx, db)
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
|
||||
updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
|
||||
var err error
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
updates[i], err = createUpdate(t, version.Altair)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
updatePeriod++
|
||||
@@ -493,20 +531,30 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
slot := primitives.Slot(config.AltairForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
HeadFetcher: &blockchainTest.ChainService{
|
||||
Block: signedBlk,
|
||||
},
|
||||
}
|
||||
|
||||
saveHead(t, ctx, db)
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
|
||||
updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
|
||||
|
||||
var err error
|
||||
for i := 0; i < 3; i++ {
|
||||
updates[i], err = createUpdate(t, version.Altair)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
updatePeriod++
|
||||
@@ -536,10 +584,16 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("start period before altair", func(t *testing.T) {
|
||||
config.AltairForkEpoch = 1
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
}
|
||||
|
||||
startPeriod := 0
|
||||
url := fmt.Sprintf("http://foo.com/?count=128&start_period=%d", startPeriod)
|
||||
request := httptest.NewRequest("GET", url, nil)
|
||||
@@ -548,11 +602,10 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
s.GetLightClientUpdatesByRange(writer, request)
|
||||
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
var resp structs.LightClientUpdatesByRangeResponse
|
||||
err := json.Unmarshal(writer.Body.Bytes(), &resp.Updates)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(resp.Updates))
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
|
||||
config.AltairForkEpoch = 0
|
||||
params.OverrideBeaconConfig(config)
|
||||
})
|
||||
|
||||
t.Run("missing updates", func(t *testing.T) {
|
||||
@@ -560,15 +613,25 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
t.Run("missing update in the middle", func(t *testing.T) {
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
HeadFetcher: &blockchainTest.ChainService{
|
||||
Block: signedBlk,
|
||||
},
|
||||
}
|
||||
|
||||
saveHead(t, ctx, db)
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
|
||||
updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
|
||||
|
||||
var err error
|
||||
for i := 0; i < 3; i++ {
|
||||
if i == 1 { // skip this update
|
||||
updatePeriod++
|
||||
@@ -577,7 +640,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
updates[i], err = createUpdate(t, version.Altair)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
updatePeriod++
|
||||
@@ -604,15 +667,25 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
t.Run("missing update at the beginning", func(t *testing.T) {
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), db)
|
||||
|
||||
blk := util.NewBeaconBlock()
|
||||
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
LCStore: lcStore,
|
||||
HeadFetcher: &blockchainTest.ChainService{
|
||||
Block: signedBlk,
|
||||
},
|
||||
}
|
||||
|
||||
saveHead(t, ctx, db)
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
|
||||
updatePeriod := slot.Div(uint64(config.EpochsPerSyncCommitteePeriod)).Div(uint64(config.SlotsPerEpoch))
|
||||
|
||||
var err error
|
||||
for i := 0; i < 3; i++ {
|
||||
if i == 0 { // skip this update
|
||||
updatePeriod++
|
||||
@@ -622,7 +695,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
updates[i], err = createUpdate(t, version.Altair)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.LCStore.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
err = db.SaveLightClientUpdate(ctx, uint64(updatePeriod), updates[i])
|
||||
require.NoError(t, err)
|
||||
|
||||
updatePeriod++
|
||||
@@ -665,7 +738,12 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lcStore,
|
||||
}
|
||||
s.LCStore.SetLastFinalityUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
@@ -690,7 +768,12 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lcStore,
|
||||
}
|
||||
s.LCStore.SetLastFinalityUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
@@ -729,7 +812,11 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
t.Run("no update", func(t *testing.T) {
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
|
||||
|
||||
s := &Server{
|
||||
LCStore: lcStore,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -745,7 +832,12 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lcStore,
|
||||
}
|
||||
s.LCStore.SetLastOptimisticUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
@@ -769,7 +861,12 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
lcStore := lightclient.NewLightClientStore(&p2ptesting.FakeP2P{}, new(event.Feed), dbtesting.SetupDB(t))
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
LCStore: lcStore,
|
||||
}
|
||||
s.LCStore.SetLastOptimisticUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
@@ -984,3 +1081,14 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
|
||||
|
||||
return update, nil
|
||||
}
|
||||
|
||||
func saveHead(t *testing.T, ctx context.Context, d db.Database) {
|
||||
blk := util.NewBeaconBlock()
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, d, blk)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, d.SaveState(ctx, st, blkRoot))
|
||||
require.NoError(t, d.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package lightclient
|
||||
|
||||
import (
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
LCStore *lightClient.Store
|
||||
LCStore *lightClient.Store
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/options:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
@@ -44,6 +45,7 @@ go_test(
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/options:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -57,7 +58,7 @@ func (e BlockIdParseError) Error() string {
|
||||
// Blocker is responsible for retrieving blocks.
|
||||
type Blocker interface {
|
||||
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError)
|
||||
Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
|
||||
}
|
||||
|
||||
// BeaconDbBlocker is an implementation of Blocker. It retrieves blocks from the beacon chain database.
|
||||
@@ -147,7 +148,9 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// Blobs returns the blobs for a given block id identifier and blob indices. The identifier can be one of:
|
||||
// Blobs returns the fetched blobs for a given block ID with configurable options.
|
||||
// Options can specify either blob indices or versioned hashes for retrieval.
|
||||
// The identifier can be one of:
|
||||
// - "head" (canonical head in node's view)
|
||||
// - "genesis"
|
||||
// - "finalized"
|
||||
@@ -158,11 +161,17 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
|
||||
//
|
||||
// cases:
|
||||
// - no block, 404
|
||||
// - block exists, no commitment, 200 w/ empty list
|
||||
// - block exists, has commitments, inside retention period (greater of protocol- or user-specified) serve them w/ 200 unless we hit an error reading them.
|
||||
// we are technically not supposed to import a block to forkchoice unless we have the blobs, so the nuance here is if we can't find the file and we are inside the protocol-defined retention period, then it's actually a 500.
|
||||
// - block exists, has commitments, outside retention period (greater of protocol- or user-specified) - ie just like block exists, no commitment
|
||||
func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
// Apply options
|
||||
cfg := &options.BlobsConfig{}
|
||||
for _, opt := range opts {
|
||||
opt(cfg)
|
||||
}
|
||||
|
||||
// Resolve block ID to root
|
||||
var rootSlice []byte
|
||||
switch id {
|
||||
case "genesis":
|
||||
@@ -239,11 +248,6 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (
|
||||
return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
|
||||
}
|
||||
|
||||
// If block is not in the retention window, return 200 w/ empty list
|
||||
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(roSignedBlock.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
|
||||
return make([]*blocks.VerifiedROBlob, 0), nil
|
||||
}
|
||||
|
||||
roBlock := roSignedBlock.Block()
|
||||
|
||||
commitments, err := roBlock.Body().BlobKzgCommitments()
|
||||
@@ -266,6 +270,46 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (
|
||||
}
|
||||
}
|
||||
|
||||
// Convert versioned hashes to indices if provided
|
||||
indices := cfg.Indices
|
||||
if len(cfg.VersionedHashes) > 0 {
|
||||
// Build a map of requested versioned hashes for fast lookup and tracking
|
||||
requestedHashes := make(map[string]bool)
|
||||
for _, versionedHash := range cfg.VersionedHashes {
|
||||
requestedHashes[string(versionedHash)] = true
|
||||
}
|
||||
|
||||
// Create indices array and track which hashes we found
|
||||
indices = make([]int, 0, len(cfg.VersionedHashes))
|
||||
foundHashes := make(map[string]bool)
|
||||
|
||||
for i, commitment := range commitments {
|
||||
versionedHash := primitives.ConvertKzgCommitmentToVersionedHash(commitment)
|
||||
hashStr := string(versionedHash[:])
|
||||
if requestedHashes[hashStr] {
|
||||
indices = append(indices, i)
|
||||
foundHashes[hashStr] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check if all requested hashes were found
|
||||
if len(indices) != len(cfg.VersionedHashes) {
|
||||
// Collect missing hashes
|
||||
missingHashes := make([]string, 0, len(cfg.VersionedHashes)-len(indices))
|
||||
for _, requestedHash := range cfg.VersionedHashes {
|
||||
if !foundHashes[string(requestedHash)] {
|
||||
missingHashes = append(missingHashes, hexutil.Encode(requestedHash))
|
||||
}
|
||||
}
|
||||
|
||||
// Create detailed error message
|
||||
errMsg := fmt.Sprintf("versioned hash(es) not found in block (requested %d hashes, found %d, missing: %v)",
|
||||
len(cfg.VersionedHashes), len(indices), missingHashes)
|
||||
|
||||
return nil, &core.RpcError{Err: errors.New(errMsg), Reason: core.NotFound}
|
||||
}
|
||||
}
|
||||
|
||||
if roBlock.Slot() >= fuluForkSlot {
|
||||
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root)
|
||||
if err != nil {
|
||||
|
||||
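The versioned-hash lookup above matches each requested hash against hashes derived from the block's KZG commitments. A minimal sketch of that derivation as defined by EIP-4844 (the handler calls primitives.ConvertKzgCommitmentToVersionedHash; the helper below is illustrative only):

package main

import (
	"crypto/sha256"
	"fmt"
)

// kzgCommitmentToVersionedHash hashes a 48-byte KZG commitment with sha256 and
// replaces the first byte with the KZG version byte (0x01), per EIP-4844.
func kzgCommitmentToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = 0x01
	return h
}

func main() {
	commitment := make([]byte, 48) // zero commitment, for illustration only
	fmt.Printf("%x\n", kzgCommitmentToVersionedHash(commitment))
}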
@@ -14,11 +14,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -220,16 +222,13 @@ func TestGetBlob(t *testing.T) {
|
||||
cellsAndProofsList = append(cellsAndProofsList, cellsAndProogs)
|
||||
}
|
||||
|
||||
dataColumnSidecarPb, err := peerdas.DataColumnSidecars(fuluBlock, cellsAndProofsList)
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
|
||||
for _, sidecarPb := range dataColumnSidecarPb {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecarPb, fuluBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
|
||||
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
|
||||
for _, roDataColumnSidecar := range roDataColumnSidecars {
|
||||
verifiedRoDataColumnSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumnSidecar)
|
||||
}
|
||||
|
||||
err = db.SaveBlock(t.Context(), fuluBlock)
|
||||
@@ -239,7 +238,7 @@ func TestGetBlob(t *testing.T) {
|
||||
setupDeneb(t)
|
||||
|
||||
blocker := &BeaconDbBlocker{}
|
||||
_, rpcErr := blocker.Blobs(ctx, "genesis", nil)
|
||||
_, rpcErr := blocker.Blobs(ctx, "genesis")
|
||||
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
|
||||
require.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
|
||||
})
|
||||
@@ -256,7 +255,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head", nil)
|
||||
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head")
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, blobCount, len(retrievedVerifiedSidecars))
|
||||
|
||||
@@ -285,7 +284,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized", nil)
|
||||
verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized")
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, blobCount, len(verifiedSidecars))
|
||||
})
|
||||
@@ -302,7 +301,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified", nil)
|
||||
verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified")
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, blobCount, len(verifiedSidecars))
|
||||
})
|
||||
@@ -318,7 +317,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]), nil)
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]))
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, blobCount, len(verifiedBlobs))
|
||||
})
|
||||
@@ -334,7 +333,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, blobCount, len(verifiedBlobs))
|
||||
})
|
||||
@@ -353,7 +352,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
|
||||
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", []int{index})
|
||||
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, 1, len(retrievedVerifiedSidecars))
|
||||
|
||||
@@ -379,7 +378,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
|
||||
}
|
||||
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
@@ -397,7 +396,7 @@ func TestGetBlob(t *testing.T) {
|
||||
}
|
||||
|
||||
noBlobIndex := len(storedBlobSidecars) + 1
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, noBlobIndex}))
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
})
|
||||
@@ -413,7 +412,7 @@ func TestGetBlob(t *testing.T) {
|
||||
BeaconDB: db,
|
||||
BlobStorage: blobStorage,
|
||||
}
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, math.MaxInt}))
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
|
||||
})
|
||||
@@ -434,7 +433,7 @@ func TestGetBlob(t *testing.T) {
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
|
||||
_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]))
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
})
|
||||
@@ -455,7 +454,7 @@ func TestGetBlob(t *testing.T) {
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
|
||||
retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
|
||||
retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]))
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))
|
||||
|
||||
@@ -482,7 +481,7 @@ func TestGetBlob(t *testing.T) {
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
|
||||
retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
|
||||
retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]))
|
||||
require.IsNil(t, rpcErr)
|
||||
require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))
|
||||
|
||||
@@ -493,3 +492,164 @@ func TestGetBlob(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlobs_CommitmentOrdering(t *testing.T) {
|
||||
// Set up Fulu fork configuration
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 1
|
||||
cfg.FuluForkEpoch = 2
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
// Start the trusted setup for KZG
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create Fulu/Electra block with multiple blob commitments
|
||||
fuluForkSlot := primitives.Slot(cfg.FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
parent := [32]byte{}
|
||||
fuluBlock, fuluBlobs := util.GenerateTestElectraBlockWithSidecar(t, parent, fuluForkSlot, 3)
|
||||
|
||||
// Save the block
|
||||
err = beaconDB.SaveBlock(ctx, fuluBlock)
|
||||
require.NoError(t, err)
|
||||
fuluBlockRoot := fuluBlock.Root()
|
||||
|
||||
// Get the commitments from the generated block
|
||||
commitments, err := fuluBlock.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(commitments))
|
||||
|
||||
// Convert blob sidecars to data column sidecars for Fulu
|
||||
cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobs))
|
||||
for _, blob := range fuluBlobs {
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob.Blob)
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
|
||||
}
|
||||
|
||||
dataColumnSidecarPb, err := peerdas.DataColumnSidecars(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
|
||||
for _, roDataColumn := range dataColumnSidecarPb {
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
|
||||
}
|
||||
|
||||
// Set up data column storage and save data columns
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the blocker
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
FinalizedCheckPoint: &ethpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: fuluBlockRoot[:],
|
||||
},
|
||||
}
|
||||
blocker := &BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
}
|
||||
|
||||
// Compute versioned hashes for commitments in their block order
|
||||
hash0 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[0])
|
||||
hash1 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[1])
|
||||
hash2 := primitives.ConvertKzgCommitmentToVersionedHash(commitments[2])
|
||||
|
||||
t.Run("blobs returned in commitment order regardless of request order", func(t *testing.T) {
|
||||
// Request versioned hashes in reverse order: 2, 1, 0
|
||||
requestedHashes := [][]byte{hash2[:], hash1[:], hash0[:]}
|
||||
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
|
||||
if rpcErr != nil {
|
||||
t.Errorf("RPC Error: %v (reason: %v)", rpcErr.Err, rpcErr.Reason)
|
||||
return
|
||||
}
|
||||
require.Equal(t, 3, len(verifiedBlobs))
|
||||
|
||||
// Verify blobs are returned in commitment order from the block (0, 1, 2)
|
||||
// In Fulu, blobs are reconstructed from data columns
|
||||
assert.Equal(t, uint64(0), verifiedBlobs[0].Index) // First commitment in block
|
||||
assert.Equal(t, uint64(1), verifiedBlobs[1].Index) // Second commitment in block
|
||||
assert.Equal(t, uint64(2), verifiedBlobs[2].Index) // Third commitment in block
|
||||
|
||||
// Verify the blob content matches what we expect
|
||||
for i, verifiedBlob := range verifiedBlobs {
|
||||
require.NotNil(t, verifiedBlob.BlobSidecar)
|
||||
require.DeepEqual(t, fuluBlobs[i].Blob, verifiedBlob.Blob)
|
||||
require.DeepEqual(t, fuluBlobs[i].KzgCommitment, verifiedBlob.KzgCommitment)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("subset of blobs maintains commitment order", func(t *testing.T) {
|
||||
// Request hashes for indices 1 and 0 (out of order)
|
||||
requestedHashes := [][]byte{hash1[:], hash0[:]}
|
||||
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
|
||||
if rpcErr != nil {
|
||||
t.Errorf("RPC Error: %v (reason: %v)", rpcErr.Err, rpcErr.Reason)
|
||||
return
|
||||
}
|
||||
require.Equal(t, 2, len(verifiedBlobs))
|
||||
|
||||
// Verify blobs are returned in commitment order from the block
|
||||
assert.Equal(t, uint64(0), verifiedBlobs[0].Index) // First commitment in block
|
||||
assert.Equal(t, uint64(1), verifiedBlobs[1].Index) // Second commitment in block
|
||||
|
||||
// Verify the blob content matches what we expect
|
||||
require.DeepEqual(t, fuluBlobs[0].Blob, verifiedBlobs[0].Blob)
|
||||
require.DeepEqual(t, fuluBlobs[1].Blob, verifiedBlobs[1].Blob)
|
||||
})
|
||||
|
||||
t.Run("request non-existent hash", func(t *testing.T) {
|
||||
// Create a fake versioned hash
|
||||
fakeHash := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
fakeHash[i] = 0xFF
|
||||
}
|
||||
|
||||
// Request only the fake hash
|
||||
requestedHashes := [][]byte{fakeHash}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
require.StringContains(t, "versioned hash(es) not found in block", rpcErr.Err.Error())
|
||||
require.StringContains(t, "requested 1 hashes, found 0", rpcErr.Err.Error())
|
||||
require.StringContains(t, "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", rpcErr.Err.Error())
|
||||
})
|
||||
|
||||
t.Run("request multiple non-existent hashes", func(t *testing.T) {
|
||||
// Create two fake versioned hashes
|
||||
fakeHash1 := make([]byte, 32)
|
||||
fakeHash2 := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
fakeHash1[i] = 0xAA
|
||||
fakeHash2[i] = 0xBB
|
||||
}
|
||||
|
||||
// Request valid hash with two fake hashes
|
||||
requestedHashes := [][]byte{fakeHash1, hash0[:], fakeHash2}
|
||||
|
||||
_, rpcErr := blocker.Blobs(ctx, "finalized", options.WithVersionedHashes(requestedHashes))
|
||||
require.NotNil(t, rpcErr)
|
||||
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
require.StringContains(t, "versioned hash(es) not found in block", rpcErr.Err.Error())
|
||||
require.StringContains(t, "requested 3 hashes, found 1", rpcErr.Err.Error())
|
||||
// Check that both missing hashes are reported
|
||||
require.StringContains(t, "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", rpcErr.Err.Error())
|
||||
require.StringContains(t, "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", rpcErr.Err.Error())
|
||||
})
|
||||
}
|
||||
beacon-chain/rpc/options/BUILD.bazel (new file, 8 lines)
@@ -0,0 +1,8 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["options.go"],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options",
visibility = ["//visibility:public"],
)
beacon-chain/rpc/options/options.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package options

// BlobsOption is a functional option for configuring blob retrieval
type BlobsOption func(*BlobsConfig)

// BlobsConfig holds configuration for blob retrieval
type BlobsConfig struct {
Indices []int
VersionedHashes [][]byte
}

// WithIndices specifies blob indices to retrieve
func WithIndices(indices []int) BlobsOption {
return func(c *BlobsConfig) {
c.Indices = indices
}
}

// WithVersionedHashes specifies versioned hashes to retrieve blobs by
func WithVersionedHashes(hashes [][]byte) BlobsOption {
return func(c *BlobsConfig) {
c.VersionedHashes = hashes
}
}
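A short usage sketch for the new functional options, mirroring how BeaconDbBlocker.Blobs applies them (the call in main is illustrative, not part of the diff):

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
)

func main() {
	// Blobs starts from a zero-valued config and applies each option in turn.
	cfg := &options.BlobsConfig{}
	for _, opt := range []options.BlobsOption{options.WithIndices([]int{0, 2})} {
		opt(cfg)
	}
	fmt.Println(cfg.Indices) // [0 2]

	// Callers that know the EIP-4844 versioned hashes instead pass
	// options.WithVersionedHashes, e.g.
	// blocker.Blobs(ctx, "head", options.WithVersionedHashes(hashes)).
}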
@@ -186,6 +186,7 @@ func (s *Server) GetChainHead(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.WriteJson(w, response)
|
||||
}
|
||||
|
||||
// Warning: blob sidecars are no longer supported post-Fulu.
|
||||
func (s *Server) PublishBlobs(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.PublishBlobs")
|
||||
defer span.End()
|
||||
@@ -215,6 +216,15 @@ func (s *Server) PublishBlobs(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, "Could not decode blob sidecar: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
scEpoch := slots.ToEpoch(sc.SignedBlockHeader.Header.Slot)
|
||||
if scEpoch < params.BeaconConfig().DenebForkEpoch {
|
||||
httputil.HandleError(w, "Blob sidecars not supported for pre deneb", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if scEpoch > params.BeaconConfig().FuluForkEpoch {
|
||||
httputil.HandleError(w, "Blob sidecars not supported for post fulu blobs", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
readOnlySc, err := blocks.NewROBlobWithRoot(sc, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
|
||||
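The new guard compares the sidecar's epoch against the Deneb and Fulu fork epochs. slots.ToEpoch is plain integer division by SLOTS_PER_EPOCH (32 on mainnet); a sketch of that arithmetic, which also explains the slot 33 used in the post-Fulu test below:

package main

import "fmt"

// toEpoch mirrors the arithmetic behind slots.ToEpoch: integer division of the
// slot by SLOTS_PER_EPOCH (32 on mainnet).
func toEpoch(slot, slotsPerEpoch uint64) uint64 {
	return slot / slotsPerEpoch
}

func main() {
	// Slot 33 is in epoch 1, so with FuluForkEpoch = 0 the sidecar is rejected.
	fmt.Println(toEpoch(33, 32)) // 1
}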
@@ -1017,6 +1017,11 @@ func TestPublishBlobs_BadBlockRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPublishBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 0 // Set Deneb fork to epoch 0 so slot 0 is valid
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
BlobReceiver: &chainMock.ChainService{},
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
@@ -1032,3 +1037,94 @@ func TestPublishBlobs(t *testing.T) {
|
||||
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 1)
|
||||
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), true)
|
||||
}
|
||||
|
||||
func TestPublishBlobs_PreDeneb(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 10 // Set Deneb fork to epoch 10, so slot 0 is pre-Deneb
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
BlobReceiver: &chainMock.ChainService{},
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.PublishBlobsRequest)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlobs(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.StringContains(t, "Blob sidecars not supported for pre deneb", writer.Body.String())
|
||||
|
||||
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 0)
|
||||
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), false)
|
||||
}
|
||||
|
||||
func TestPublishBlobs_PostFulu(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 0
|
||||
cfg.FuluForkEpoch = 0 // Set Fulu fork to epoch 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
BlobReceiver: &chainMock.ChainService{},
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
// Create a custom request with a slot in epoch 1 (which is > FuluForkEpoch of 0)
|
||||
postFuluRequest := fmt.Sprintf(`{
|
||||
"block_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"blob_sidecars" : {
|
||||
"sidecars" : [
|
||||
{
|
||||
"blob" : "%s",
|
||||
"index" : "0",
|
||||
"kzg_commitment" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"kzg_commitment_inclusion_proof" : [
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
],
|
||||
"kzg_proof" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"signed_block_header" : {
|
||||
"message" : {
|
||||
"body_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"parent_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"proposer_index" : "0",
|
||||
"slot" : "33",
|
||||
"state_root" : "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
"signature" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}`, rpctesting.Blob)
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(postFuluRequest)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlobs(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.StringContains(t, "Blob sidecars not supported for post fulu blobs", writer.Body.String())
|
||||
|
||||
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 0)
|
||||
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), false)
|
||||
}
|
||||
|
||||
@@ -281,7 +281,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
@@ -309,10 +309,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlockWithRoot(block, root)
|
||||
if block.IsBlinded() {
|
||||
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(block, req)
|
||||
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
@@ -348,10 +349,10 @@ func (vs *Server) broadcastAndReceiveSidecars(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
root [fieldparams.RootLength]byte,
|
||||
blobSidecars []*ethpb.BlobSidecar,
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
dataColumnSidecars []blocks.RODataColumn,
|
||||
) error {
|
||||
if block.Version() >= version.Fulu {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, root); err != nil {
|
||||
return errors.Wrap(err, "broadcast and receive data columns")
|
||||
}
|
||||
return nil
|
||||
@@ -398,21 +399,28 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
|
||||
}
|
||||
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
block blocks.ROBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if block.Version() >= version.Fulu {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
return nil, nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
// Construct data column sidecars from the signed block and cells and proofs.
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(block))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "data column sidcars")
|
||||
}
|
||||
|
||||
return nil, roDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
@@ -468,26 +476,21 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
roSidecars []blocks.RODataColumn,
|
||||
root [fieldparams.RootLength]byte,
|
||||
) error {
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, sidecar := range sidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
for _, roSidecar := range roSidecars {
|
||||
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
|
||||
eg.Go(func() error {
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(roSidecar.Index)
|
||||
|
||||
if err := vs.P2P.BroadcastDataColumnSidecar(root, subnet, sidecar); err != nil {
|
||||
if err := vs.P2P.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
|
||||
|
||||
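Each verified column is broadcast on a gossip subnet derived from its column index. The Prysm helper peerdas.ComputeSubnetForDataColumnSidecar is not shown in this diff; a minimal sketch assuming the PeerDAS spec definition (column index modulo the subnet count):

package main

import "fmt"

// computeSubnetForDataColumnSidecar maps a column index onto one of the
// DATA_COLUMN_SIDECAR_SUBNET_COUNT gossip subnets by simple modulo.
func computeSubnetForDataColumnSidecar(columnIndex, subnetCount uint64) uint64 {
	return columnIndex % subnetCount
}

func main() {
	fmt.Println(computeSubnetForDataColumnSidecar(70, 64)) // 6
}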
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
// BuildBlobSidecars given a block, builds the blob sidecars for the block.
|
||||
func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
|
||||
func BuildBlobSidecars(blk interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil, nil // No blobs before deneb.
|
||||
}
|
||||
|
||||
@@ -16,10 +16,10 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
|
||||
|
||||
@@ -16,6 +16,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/options:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/options"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -36,6 +37,6 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
|
||||
}
|
||||
|
||||
// Blobs --
|
||||
func (*MockBlocker) Blobs(_ context.Context, _ string, _ []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
func (*MockBlocker) Blobs(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
return nil, &core.RpcError{}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@ go_library(
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"options.go",
|
||||
@@ -49,6 +48,7 @@ go_library(
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
"subscription_topic_handler.go",
|
||||
"toggle.go",
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
@@ -63,11 +63,7 @@ go_library(
|
||||
"validate_voluntary_exit.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd:__subpackages__",
|
||||
"//testing:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async:go_default_library",
|
||||
"//async/abool:go_default_library",
|
||||
@@ -81,7 +77,6 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -90,6 +85,7 @@ go_library(
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
@@ -154,12 +150,13 @@ go_library(
|
||||
"@com_github_trailofbits_go_mutexasserts//:go_default_library",
|
||||
"@io_opentelemetry_go_otel_trace//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
"@org_golang_x_sync//singleflight:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"batch_verifier_test.go",
|
||||
"blobs_test.go",
|
||||
@@ -171,6 +168,7 @@ go_test(
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
"kzg_batch_verifier_test.go",
|
||||
"pending_attestations_queue_test.go",
|
||||
"pending_blocks_queue_test.go",
|
||||
"rate_limiter_test.go",
|
||||
@@ -197,6 +195,7 @@ go_test(
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_fuzz_test.go",
|
||||
"sync_test.go",
|
||||
"toggle_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
@@ -211,7 +210,6 @@ go_test(
|
||||
"validate_voluntary_exit_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 4,
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
@@ -223,7 +221,6 @@ go_test(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
@@ -235,6 +232,7 @@ go_test(
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/light-client:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/blstoexec:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
|
||||
@@ -45,11 +45,12 @@ type p2pBatchWorkerPool struct {
|
||||
endSeq []batch
|
||||
ctx context.Context
|
||||
cancel func()
|
||||
toggle *sync.ServiceToggler
|
||||
}
|
||||
|
||||
var _ batchWorkerPool = &p2pBatchWorkerPool{}
|
||||
|
||||
func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
|
||||
func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int, tg *sync.ServiceToggler) *p2pBatchWorkerPool {
|
||||
nw := defaultNewWorker(p)
|
||||
return &p2pBatchWorkerPool{
|
||||
newWorker: nw,
|
||||
@@ -59,6 +60,7 @@ func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
|
||||
fromWorkers: make(chan batch),
|
||||
maxBatches: maxBatches,
|
||||
shutdownErr: make(chan error),
|
||||
toggle: tg,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,6 +117,7 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
|
||||
// This ticker exists to periodically break out of the channel select
|
||||
// to retry failed assignments.
|
||||
case b := <-p.fromWorkers:
|
||||
p.toggle.Release(sync.ToggleGroupBackfill)
|
||||
pid := b.busy
|
||||
busy[pid] = false
|
||||
if b.state == batchBlobSync {
|
||||
@@ -143,6 +146,9 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
|
||||
return
|
||||
}
|
||||
for _, pid := range assigned {
|
||||
if err := p.toggle.Acquire(p.ctx, sync.ToggleGroupBackfill); err != nil {
|
||||
p.shutdown(err)
|
||||
}
|
||||
if err := todo[0].waitUntilReady(p.ctx); err != nil {
|
||||
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
|
||||
p.shutdown(p.ctx.Err())
|
||||
|
||||
@@ -42,7 +42,8 @@ func TestPoolDetectAllEnded(t *testing.T) {
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
ctx := t.Context()
|
||||
ma := &mockAssigner{}
|
||||
pool := newP2PBatchWorkerPool(p2p, nw)
|
||||
tg := sync.NewServiceToggler()
|
||||
pool := newP2PBatchWorkerPool(p2p, nw, tg)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
keys, err := st.PublicKeys()
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
enabled bool // service is disabled by default while feature is experimental
|
||||
enabled bool // service is disabled by default
|
||||
clock *startup.Clock
|
||||
store *Store
|
||||
ms minimumSlotter
|
||||
@@ -41,6 +41,7 @@ type Service struct {
|
||||
blobStore *filesystem.BlobStorage
|
||||
initSyncWaiter func() error
|
||||
complete chan struct{}
|
||||
toggler *sync.ServiceToggler
|
||||
}
|
||||
|
||||
var _ runtime.Service = (*Service)(nil)
|
||||
@@ -104,6 +105,15 @@ func WithInitSyncWaiter(w func() error) ServiceOption {
|
||||
}
|
||||
}
|
||||
|
||||
// WithServiceToggle sets the ServiceToggler, which is used to coordinate
|
||||
// with backfill, so that only one service runs at a time.
|
||||
func WithServiceToggle(toggler *sync.ServiceToggler) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
s.toggler = toggler
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
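A rough sketch of how such a toggler can be used, with hypothetical names (the Acquire/Release shape mirrors the calls added to the backfill worker pool earlier in this diff, but this is not Prysm's implementation; in the pool the release actually happens later, when a batch comes back from a worker, rather than via defer):

package sketch

import "context"

// Toggler is a hypothetical stand-in for sync.ServiceToggler: Acquire blocks
// until the named group may run (or the context is canceled), and Release
// hands the turn back so the other service can proceed.
type Toggler interface {
	Acquire(ctx context.Context, group string) error
	Release(group string)
}

// runExclusive performs one unit of work while holding the toggle.
func runExclusive(ctx context.Context, tg Toggler, group string, work func(context.Context) error) error {
	if err := tg.Acquire(ctx, group); err != nil {
		return err // context canceled while waiting for our turn
	}
	defer tg.Release(group)
	return work(ctx)
}
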
// InitializerWaiter is an interface that is satisfied by verification.InitializerWaiter.
|
||||
// Using this interface enables node init to satisfy this requirement for the backfill service
|
||||
// while also allowing backfill to mock it in tests.
|
||||
@@ -157,7 +167,7 @@ func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage,
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
s.pool = newP2PBatchWorkerPool(p, s.nWorkers)
|
||||
s.pool = newP2PBatchWorkerPool(p, s.nWorkers, s.toggler)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,9 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
@@ -18,6 +21,11 @@ type signatureVerifier struct {
|
||||
resChan chan error
|
||||
}
|
||||
|
||||
type kzgVerifier struct {
|
||||
dataColumns []blocks.RODataColumn
|
||||
resChan chan error
|
||||
}
|
||||
|
||||
// A routine that runs in the background to perform batch
|
||||
// verifications of incoming messages from gossip.
|
||||
func (s *Service) verifierRoutine() {
|
||||
@@ -47,6 +55,32 @@ func (s *Service) verifierRoutine() {
|
||||
}
|
||||
}
|
||||
|
||||
// A routine that runs in the background to perform batch
|
||||
// KZG verifications by draining the channel and processing all pending requests.
|
||||
func (s *Service) kzgVerifierRoutine() {
|
||||
for {
|
||||
kzgBatch := make([]*kzgVerifier, 0, 1)
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case kzg := <-s.kzgChan:
|
||||
kzgBatch = append(kzgBatch, kzg)
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case kzg := <-s.kzgChan:
|
||||
kzgBatch = append(kzgBatch, kzg)
|
||||
continue
|
||||
default:
|
||||
verifyKzgBatch(kzgBatch)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
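The routine above uses a common Go batching pattern: block until the first request arrives, then drain whatever else is already queued on the channel and process the accumulated batch in one call. A self-contained sketch of that pattern with a generic request type (illustrative only; the names are hypothetical):

package sketch

import "context"

type request struct {
	resChan chan error
}

// drainAndProcess blocks for the first request, keeps pulling requests until
// the channel is momentarily empty, then hands the batch to process, and
// repeats until the context is canceled.
func drainAndProcess(ctx context.Context, in <-chan *request, process func([]*request)) {
	for {
		batch := make([]*request, 0, 1)
		select {
		case <-ctx.Done():
			return
		case r := <-in:
			batch = append(batch, r)
		}
		for {
			select {
			case <-ctx.Done():
				return
			case r := <-in:
				batch = append(batch, r)
				continue
			default:
				process(batch)
			}
			break
		}
	}
}
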
|
||||
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
|
||||
defer span.End()
|
||||
@@ -56,7 +90,6 @@ func (s *Service) validateWithBatchVerifier(ctx context.Context, message string,
|
||||
s.signatureChan <- verificationSet
|
||||
|
||||
resErr := <-resChan
|
||||
close(resChan)
|
||||
// If verification fails we fallback to individual verification
|
||||
// of each signature set.
|
||||
if resErr != nil {
|
||||
@@ -120,3 +153,62 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
|
||||
}
|
||||
return aggSet, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
|
||||
defer span.End()
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
s.kzgChan <- verificationSet
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||
case err := <-resChan:
|
||||
if err != nil {
|
||||
log.WithError(err).Trace("Could not perform batch verification")
|
||||
tracing.AnnotateError(span, err)
|
||||
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
|
||||
}
|
||||
}
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
|
||||
defer span.End()
|
||||
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
|
||||
err = errors.Wrap(err, "could not verify")
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
|
||||
if len(kzgBatch) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
|
||||
for _, kzgVerifier := range kzgBatch {
|
||||
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
|
||||
}
|
||||
|
||||
var verificationErr error
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
|
||||
if err != nil {
|
||||
verificationErr = errors.Wrap(err, "batch KZG verification failed")
|
||||
}
|
||||
|
||||
// Send the same result to all verifiers in the batch
|
||||
for _, verifier := range kzgBatch {
|
||||
verifier.resChan <- verificationErr
|
||||
}
|
||||
}
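
Note that the batch is all-or-nothing: a single invalid column makes verifyKzgBatch send the same error to every waiter, and each caller of validateWithKzgBatchVerifier then falls back to verifying only its own columns via validateUnbatchedColumnsKzg, so one bad gossip message does not cause unrelated, valid columns to be rejected.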
|
||||
|
||||
@@ -39,232 +39,328 @@ type DataColumnSidecarsParams struct {
|
||||
DownscorePeerOnRPCFault bool // Downscore a peer if it commits an RPC fault. Not responding with any sidecars at all is considered a fault.
|
||||
}
|
||||
|
||||
// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
|
||||
// blocks and requested data column indices. It employs a multi-step strategy:
|
||||
// FetchDataColumnSidecars retrieves data column sidecars for the given blocks and indices
|
||||
// using a series of fallback strategies.
|
||||
//
|
||||
// 1. Direct retrieval: If all requested columns are available in storage, they are
|
||||
// retrieved directly without reconstruction.
|
||||
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
|
||||
// stored columns exist (at least the minimum required for reconstruction), the function
|
||||
// reconstructs all columns and extracts the requested indices.
|
||||
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
|
||||
// from connected peers that are expected to custody the required data.
|
||||
// For each block in `roBlocks` that has commitments, the function attempts to obtain
|
||||
// all sidecars corresponding to the indices listed in `requestedIndices`.
|
||||
//
|
||||
// The function returns a map of block roots to their corresponding verified read-only data
|
||||
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
|
||||
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
|
||||
// The function returns:
|
||||
// - A map from block root to the sidecars successfully retrieved.
|
||||
// - A set of block roots for which not all requested sidecars could be retrieved.
|
||||
//
|
||||
// Retrieval strategy (proceeds to the next step only if not all requested sidecars
|
||||
// were successfully obtained at the current step):
|
||||
// 1. Attempt to load the requested sidecars from storage, reconstructing them from
|
||||
// other available sidecars in storage if necessary.
|
||||
// 2. Request any missing sidecars from peers. If some are still missing, attempt to
|
||||
// reconstruct them using both stored sidecars and those retrieved from peers.
|
||||
// 3. Request all remaining possible sidecars from peers that are not already in storage
|
||||
// or retrieved in step 2. Stop once either all requested sidecars are retrieved,
|
||||
// or enough sidecars are available (from storage, step 2, and step 3) to reconstruct
|
||||
// the requested ones.
|
||||
func FetchDataColumnSidecars(
|
||||
params DataColumnSidecarsParams,
|
||||
roBlocks []blocks.ROBlock,
|
||||
indicesMap map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
if len(roBlocks) == 0 || len(indicesMap) == 0 {
|
||||
return nil, nil
|
||||
requestedIndices map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
if len(roBlocks) == 0 || len(requestedIndices) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
indices := sortedSliceFromMap(indicesMap)
|
||||
slotsWithCommitments := make(map[primitives.Slot]bool)
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
blockCount := len(roBlocks)
|
||||
|
||||
// We first consider all requested roots as incomplete, and remove roots from this
|
||||
// set as we retrieve them.
|
||||
incompleteRoots := make(map[[fieldparams.RootLength]byte]bool, blockCount)
|
||||
slotsWithCommitments := make(map[primitives.Slot]bool, blockCount)
|
||||
slotByRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, blockCount)
|
||||
storedIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, blockCount)
|
||||
|
||||
for _, roBlock := range roBlocks {
|
||||
// Filter out blocks without commitments.
|
||||
block := roBlock.Block()
|
||||
|
||||
commitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
|
||||
return nil, nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
|
||||
}
|
||||
|
||||
if len(commitments) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
slotsWithCommitments[block.Slot()] = true
|
||||
root := roBlock.Root()
|
||||
slot := block.Slot()
|
||||
|
||||
// Step 1: Get the requested sidecars for this root if available in storage
|
||||
requestedColumns, err := tryGetStoredColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
incompleteRoots[root] = true
|
||||
slotByRoot[root] = slot
|
||||
slotsWithCommitments[slot] = true
|
||||
|
||||
// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage
|
||||
requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get reconstructed columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)
|
||||
|
||||
if len(indicesToQueryMap) > 0 {
|
||||
missingIndicesByRoot[root] = indicesToQueryMap
|
||||
}
|
||||
if len(indicesStoredMap) > 0 {
|
||||
indicesByRootStored[root] = indicesStoredMap
|
||||
storedIndices := params.Storage.Summary(root).Stored()
|
||||
if len(storedIndices) > 0 {
|
||||
storedIndicesByRoot[root] = storedIndices
|
||||
}
|
||||
}
|
||||
|
||||
// Early return if no sidecars need to be queried from peers.
|
||||
if len(missingIndicesByRoot) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
initialMissingRootCount := len(incompleteRoots)
|
||||
|
||||
// Step 3b: Request missing sidecars from peers.
|
||||
start, count := time.Now(), computeTotalCount(missingIndicesByRoot)
|
||||
fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, missingIndicesByRoot)
|
||||
// Request sidecars from storage (by reconstructing them from other available sidecars if needed).
|
||||
result, err := requestSidecarsFromStorage(params.Storage, storedIndicesByRoot, requestedIndices, incompleteRoots)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "request from peers")
|
||||
return nil, nil, errors.Wrap(err, "request sidecars from storage")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")
|
||||
log := log.WithField("initialMissingRootCount", initialMissingRootCount)
|
||||
|
||||
// Step 3c: If needed, try to reconstruct missing sidecars from storage and fetched data.
|
||||
fromReconstructionResult, err := tryReconstructFromStorageAndPeers(params.Storage, fromPeersResult, indicesMap, missingIndicesByRoot)
|
||||
if len(incompleteRoots) == 0 {
|
||||
log.WithField("finalMissingRootCount", 0).Debug("Fetched data column sidecars from storage")
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
// Request direct sidecars from peers.
|
||||
directSidecarsByRoot, err := requestDirectSidecarsFromPeers(params, slotByRoot, requestedIndices, slotsWithCommitments, storedIndicesByRoot, incompleteRoots)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "reconstruct from storage and peers")
|
||||
return nil, nil, errors.Wrap(err, "request direct sidecars from peers")
|
||||
}
|
||||
|
||||
for root, verifiedSidecars := range fromReconstructionResult {
|
||||
result[root] = verifiedSidecars
|
||||
// Merge sidecars from storage with those received from peers. Reconstruct if needed.
|
||||
mergedSidecarsByRoot, err := mergeAvailableSidecars(params.Storage, requestedIndices, storedIndicesByRoot, incompleteRoots, directSidecarsByRoot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "try merge storage and mandatory inputs")
|
||||
}
|
||||
|
||||
for root := range fromPeersResult {
|
||||
if _, ok := fromReconstructionResult[root]; ok {
|
||||
// We already have what we need from peers + reconstruction
|
||||
for root, sidecars := range mergedSidecarsByRoot {
|
||||
result[root] = sidecars
|
||||
}
|
||||
|
||||
if len(incompleteRoots) == 0 {
|
||||
log.WithField("finalMissingRootCount", 0).Debug("Fetched data column sidecars from storage and peers")
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
// Request all possible indirect sidecars from peers which are neither stored nor in `directSidecarsByRoot`
|
||||
indirectSidecarsByRoot, err := requestIndirectSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, storedIndicesByRoot, directSidecarsByRoot, requestedIndices, incompleteRoots)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "request all sidecars from peers")
|
||||
}
|
||||
|
||||
// Merge sidecars from storage with those received from peers. Reconstruct if needed.
|
||||
mergedSidecarsByRoot, err = mergeAvailableSidecars(params.Storage, requestedIndices, storedIndicesByRoot, incompleteRoots, indirectSidecarsByRoot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "try merge storage and all inputs")
|
||||
}
|
||||
|
||||
for root, sidecars := range mergedSidecarsByRoot {
|
||||
result[root] = sidecars
|
||||
}
|
||||
|
||||
if len(incompleteRoots) == 0 {
|
||||
log.WithField("finalMissingRootCount", 0).Debug("Fetched data column sidecars from storage and peers using rescue mode")
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
// For remaining incomplete roots, assemble what is available.
|
||||
incompleteSidecarsByRoot, missingByRoot, err := assembleAvailableSidecars(params.Storage, requestedIndices, incompleteRoots, directSidecarsByRoot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "assemble available sidecars for incomplete roots")
|
||||
}
|
||||
|
||||
for root, sidecars := range incompleteSidecarsByRoot {
|
||||
result[root] = sidecars
|
||||
}
|
||||
|
||||
log.WithField("finalMissingRootCount", len(incompleteRoots)).Debug("Failed to fetch data column sidecars from storage and peers using rescue mode")
|
||||
return result, missingByRoot, nil
|
||||
}
|
||||
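The fallback order above ultimately rests on one threshold: once at least peerdas.MinimumColumnCountToReconstruct() distinct columns are available for a block (with the usual 2x erasure-coded extension, this is half of NumberOfColumns), every column can be rebuilt locally. A minimal sketch of the resulting "can we serve this without peers?" decision, using hypothetical names rather than the functions above:

package sketch

// canServeLocally reports whether a block's requested column indices can be
// served without contacting peers: either every requested index is already
// stored, or enough distinct columns are stored to reconstruct the full set.
// minToReconstruct stands in for peerdas.MinimumColumnCountToReconstruct().
func canServeLocally(stored, requested map[uint64]bool, minToReconstruct uint64) bool {
	allStored := true
	for index := range requested {
		if !stored[index] {
			allStored = false
			break
		}
	}
	if allStored {
		return true // serve directly from storage
	}
	// Otherwise we can still serve locally if reconstruction is possible.
	return uint64(len(stored)) >= minToReconstruct
}
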
|
||||
// requestSidecarsFromStorage attempts to retrieve data column sidecars for each block root in `roots`
|
||||
// and for all indices specified in `requestedIndices`.
|
||||
//
|
||||
// If not all requested sidecars can be obtained for a given root, that root is excluded from the result.
|
||||
// It returns a map from each root to its successfully retrieved sidecars.
|
||||
//
|
||||
// WARNING: This function mutates `roots` by removing entries for which all requested sidecars
|
||||
// were successfully retrieved.
|
||||
func requestSidecarsFromStorage(
|
||||
storage filesystem.DataColumnStorageReader,
|
||||
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
requestedIndicesMap map[uint64]bool,
|
||||
roots map[[fieldparams.RootLength]byte]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
requestedIndices := sortedSliceFromMap(requestedIndicesMap)
|
||||
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
|
||||
for root := range roots {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
// Check if all requested indices are stored.
|
||||
allAvailable := true
|
||||
for index := range requestedIndicesMap {
|
||||
if !storedIndices[index] {
|
||||
allAvailable = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Skip if not all requested indices are stored.
|
||||
if !allAvailable {
|
||||
continue
|
||||
}
|
||||
|
||||
result[root] = append(result[root], fromPeersResult[root]...)
|
||||
|
||||
storedIndices := indicesByRootStored[root]
|
||||
if len(storedIndices) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
storedColumns, err := tryGetStoredColumns(params.Storage, root, sortedSliceFromMap(storedIndices))
|
||||
// All requested indices are stored, retrieve them.
|
||||
verifiedRoSidecars, err := storage.Get(root, requestedIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
return nil, errors.Wrapf(err, "storage get for block root %#x", root)
|
||||
}
|
||||
|
||||
result[root] = append(result[root], storedColumns...)
|
||||
result[root] = verifiedRoSidecars
|
||||
delete(roots, root)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// tryGetStoredColumns attempts to retrieve all requested data column sidecars directly from storage
|
||||
// if they are all available. Returns the sidecars if successful, and nil if at least one
|
||||
// requested sidecar is not available in the storage.
|
||||
func tryGetStoredColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if all requested indices are present in cache
|
||||
storedIndices := storage.Summary(blockRoot).Stored()
|
||||
allRequestedPresent := true
|
||||
for _, requestedIndex := range indices {
|
||||
if !storedIndices[requestedIndex] {
|
||||
allRequestedPresent = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allRequestedPresent {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// All requested data is present, retrieve directly from DB
|
||||
requestedColumns, err := storage.Get(blockRoot, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// tryGetReconstructedColumns attempts to retrieve columns using reconstruction
|
||||
// if sufficient columns are available. Returns the columns if successful, nil and nil if insufficient columns,
|
||||
// or nil and error if an error occurs.
|
||||
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if we have enough columns for reconstruction
|
||||
summary := storage.Summary(blockRoot)
|
||||
if summary.Count() < peerdas.MinimumColumnCountToReconstruct() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Retrieve all stored columns for reconstruction
|
||||
allStoredColumns, err := storage.Get(blockRoot, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Attempt reconstruction
|
||||
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Health check: ensure we have the expected number of columns
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
if uint64(len(reconstructedColumns)) != numberOfColumns {
|
||||
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
|
||||
}
|
||||
|
||||
// Extract only the requested indices from reconstructed data using direct indexing
|
||||
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for _, requestedIndex := range indices {
|
||||
if requestedIndex >= numberOfColumns {
|
||||
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
|
||||
}
|
||||
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// categorizeIndices separates indices into those that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
|
||||
indicesToQuery := make(map[uint64]bool, len(indices))
|
||||
indicesStored := make(map[uint64]bool, len(indices))
|
||||
|
||||
allStoredIndices := storage.Summary(blockRoot).Stored()
|
||||
for _, index := range indices {
|
||||
if allStoredIndices[index] {
|
||||
indicesStored[index] = true
|
||||
continue
|
||||
}
|
||||
indicesToQuery[index] = true
|
||||
}
|
||||
|
||||
return indicesToQuery, indicesStored
|
||||
}
|
||||
|
||||
// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
|
||||
// It explores the connected peers to find those that are expected to custody the requested columns
|
||||
// and returns only when all requested columns are either retrieved or have been tried to be retrieved
|
||||
// by all possible peers.
|
||||
// WARNING: This function alters `missingIndicesByRoot` by removing successfully retrieved columns.
|
||||
// After running this function, the user can check the content of the (modified) `missingIndicesByRoot` map
|
||||
// to check if some sidecars are still missing.
|
||||
func tryRequestingColumnsFromPeers(
|
||||
p DataColumnSidecarsParams,
|
||||
roBlocks []blocks.ROBlock,
|
||||
// requestDirectSidecarsFromPeers tries to fetch missing data column sidecars from connected peers.
|
||||
// It searches through the available peers to identify those responsible for the requested columns,
|
||||
// and returns only after all columns have either been successfully retrieved or all candidate peers
|
||||
// have been exhausted.
|
||||
//
|
||||
// It returns a map from each root to its successfully retrieved sidecars.
|
||||
func requestDirectSidecarsFromPeers(
|
||||
params DataColumnSidecarsParams,
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
requestedIndices map[uint64]bool,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
incompleteRoots map[[fieldparams.RootLength]byte]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
start := time.Now()
|
||||
|
||||
// Create a new random source for peer selection.
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
// Compute slots by block root.
|
||||
slotByRoot := computeSlotByBlockRoot(roBlocks)
|
||||
// Determine all sidecars each peer is expected to custody.
|
||||
connectedPeersSlice := params.P2P.Peers().Connected()
|
||||
connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
|
||||
for _, peer := range connectedPeersSlice {
|
||||
connectedPeers[peer] = true
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
for index := range requestedIndices {
|
||||
if !storedIndices[index] {
|
||||
missingIndices[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingIndices) > 0 {
|
||||
missingIndicesByRoot[root] = missingIndices
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
|
||||
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeerToQuery, err := selectPeers(params, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers")
|
||||
}
|
||||
|
||||
// Remove selected peers from the maps.
|
||||
for peer := range indicesByRootByPeerToQuery {
|
||||
delete(connectedPeers, peer)
|
||||
}
|
||||
|
||||
// Fetch the sidecars from the chosen peers.
|
||||
roDataColumnsByPeer := fetchDataColumnSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)
|
||||
|
||||
// Verify the received data column sidecars.
|
||||
verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(params.P2P, params.NewVerifier, roDataColumnsByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
|
||||
}
|
||||
|
||||
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
|
||||
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
|
||||
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
|
||||
}
|
||||
|
||||
// Compute indices by root by peers with the updated missing indices and connected peers.
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
// requestIndirectSidecarsFromPeers requests, for every root in `roots` and from all possible peers,
// any sidecars that are neither already in storage nor in `alreadyAvailableByRoot`, until, for each root, either:
// - all indices in `requestedIndices` are available, or
// - enough sidecars are available to trigger a reconstruction, or
// - all peers are exhausted.
|
||||
func requestIndirectSidecarsFromPeers(
|
||||
p DataColumnSidecarsParams,
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
|
||||
requestedIndices map[uint64]bool,
|
||||
roots map[[fieldparams.RootLength]byte]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
start := time.Now()
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
// Create a new random source for peer selection.
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
// For each root compute all possible data column sidecar indices excluding
|
||||
// those already stored or already available.
|
||||
indicesToRetrieveByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
for root := range roots {
|
||||
alreadyAvailableIndices := make(map[uint64]bool, len(alreadyAvailableByRoot[root]))
|
||||
for _, sidecar := range alreadyAvailableByRoot[root] {
|
||||
alreadyAvailableIndices[sidecar.Index] = true
|
||||
}
|
||||
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
indicesToRetrieve := make(map[uint64]bool, numberOfColumns)
|
||||
for index := range numberOfColumns {
|
||||
if !(storedIndices[index] || alreadyAvailableIndices[index]) {
|
||||
indicesToRetrieve[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(indicesToRetrieve) > 0 {
|
||||
indicesToRetrieveByRoot[root] = indicesToRetrieve
|
||||
}
|
||||
}
|
||||
|
||||
initialToRetrieveRootCount := len(indicesToRetrieveByRoot)
|
||||
|
||||
// Determine all sidecars each peer is expected to custody.
|
||||
connectedPeersSlice := p.P2P.Peers().Connected()
|
||||
@@ -273,15 +369,22 @@ func tryRequestingColumnsFromPeers(
|
||||
connectedPeers[peer] = true
|
||||
}
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
// Compute which peers have which of the missing indices.
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, indicesToRetrieveByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
|
||||
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Add to the results all sidecars already present in `alreadyAvailableByRoot`.
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for root := range roots {
|
||||
alreadyAvailable := alreadyAvailableByRoot[root]
|
||||
result[root] = append(result[root], alreadyAvailable...)
|
||||
}
|
||||
|
||||
for len(indicesToRetrieveByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
|
||||
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(indicesToRetrieveByRoot), indicesByRootByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers")
|
||||
}
|
||||
@@ -300,82 +403,216 @@ func tryRequestingColumnsFromPeers(
|
||||
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
|
||||
}
|
||||
|
||||
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
|
||||
// Add to results all verified sidecars.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, indicesToRetrieveByRoot)
|
||||
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
|
||||
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
|
||||
result[root] = append(result[root], verifiedRoDataColumns...)
|
||||
}
|
||||
|
||||
// Stop tracking a root as still-to-retrieve once enough sidecars have been retrieved to enable a reconstruction,
|
||||
// or if all requested sidecars are now available for this root.
|
||||
for root, indicesToRetrieve := range indicesToRetrieveByRoot {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
storedCount := uint64(len(storedIndices))
|
||||
resultCount := uint64(len(result[root]))
|
||||
|
||||
if storedCount+resultCount >= minimumColumnCountToReconstruct {
|
||||
delete(indicesToRetrieveByRoot, root)
|
||||
continue
|
||||
}
|
||||
|
||||
allRequestedIndicesAvailable := true
|
||||
for index := range requestedIndices {
|
||||
if indicesToRetrieve[index] {
|
||||
// Still need this index.
|
||||
allRequestedIndicesAvailable = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if allRequestedIndicesAvailable {
|
||||
delete(indicesToRetrieveByRoot, root)
|
||||
}
|
||||
}
|
||||
|
||||
// Compute indices by root by peers with the updated missing indices and connected peers.
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, indicesToRetrieveByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
}
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
// tryReconstructFromStorageAndPeers attempts to reconstruct missing data column sidecars
|
||||
// using the data available in the storage and the data fetched from peers.
|
||||
// If, for at least one root, the reconstruction is not possible, an error is returned.
|
||||
func tryReconstructFromStorageAndPeers(
|
||||
storage filesystem.DataColumnStorageReader,
|
||||
fromPeersByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
|
||||
indices map[uint64]bool,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
if len(missingIndicesByRoot) == 0 {
|
||||
// Nothing to do, return early.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
start := time.Now()
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(missingIndicesByRoot))
|
||||
for root := range missingIndicesByRoot {
|
||||
// Check if a reconstruction is possible based on what we have from the store and fetched from peers.
|
||||
summary := storage.Summary(root)
|
||||
storedCount := summary.Count()
|
||||
fetchedCount := uint64(len(fromPeersByRoot[root]))
|
||||
|
||||
if storedCount+fetchedCount < minimumColumnsCountToReconstruct {
|
||||
return nil, errors.Errorf("cannot reconstruct all needed columns for root %#x. stored: %d, fetched: %d, minimum: %d", root, storedCount, fetchedCount, minimumColumnsCountToReconstruct)
|
||||
}
|
||||
|
||||
// Load all we have in the store.
|
||||
storedSidecars, err := storage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get stored sidecars for root %#x", root)
|
||||
}
|
||||
|
||||
sidecars := make([]blocks.VerifiedRODataColumn, 0, storedCount+fetchedCount)
|
||||
sidecars = append(sidecars, storedSidecars...)
|
||||
sidecars = append(sidecars, fromPeersByRoot[root]...)
|
||||
|
||||
// Attempt reconstruction.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(sidecars)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for root %#x", root)
|
||||
}
|
||||
|
||||
// Select only sidecars we need.
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if indices[sidecar.Index] {
|
||||
result[root] = append(result[root], sidecar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"rootCount": len(missingIndicesByRoot),
|
||||
"elapsed": time.Since(start),
|
||||
}).Debug("Reconstructed from storage and peers")
|
||||
"duration": time.Since(start),
|
||||
"initialToRetrieveRootCount": initialToRetrieveRootCount,
|
||||
"finalToRetrieveRootCount": len(indicesToRetrieveByRoot),
|
||||
}).Debug("Requested all data column sidecars from peers")
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// mergeAvailableSidecars retrieves missing data column sidecars by combining
|
||||
// what is available in storage with the sidecars provided in `alreadyAvailableByRoot`,
|
||||
// reconstructing them when necessary.
|
||||
//
|
||||
// The function works in two modes depending on sidecar availability:
|
||||
// - If all requested sidecars are already available (no reconstruction needed),
|
||||
// it simply returns them directly from storage and inputs.
|
||||
// - If storage + inputs together provide enough sidecars to reconstruct all requested ones,
|
||||
// it reconstructs and returns the requested sidecars.
|
||||
//
|
||||
// If a root cannot yield all requested sidecars, that root is omitted from the result.
|
||||
//
|
||||
// Note: It is assumed that no sidecar in `alreadyAvailableByRoot` is already present in storage.
|
||||
//
|
||||
// WARNING: This function mutates `roots`, removing any block roots
|
||||
// for which all requested sidecars were successfully retrieved.
|
||||
func mergeAvailableSidecars(
|
||||
storage filesystem.DataColumnStorageReader,
|
||||
requestedIndices map[uint64]bool,
|
||||
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
roots map[[fieldparams.RootLength]byte]bool,
|
||||
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
for root := range roots {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
alreadyAvailable := alreadyAvailableByRoot[root]
|
||||
|
||||
// Compute already available indices.
|
||||
alreadyAvailableIndices := make(map[uint64]bool, len(alreadyAvailable))
|
||||
for _, sidecar := range alreadyAvailable {
|
||||
alreadyAvailableIndices[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Check if reconstruction is needed.
|
||||
isReconstructionNeeded := false
|
||||
for index := range requestedIndices {
|
||||
if !(storedIndices[index] || alreadyAvailableIndices[index]) {
|
||||
isReconstructionNeeded = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Check if reconstruction is possible.
|
||||
storedCount := uint64(len(storedIndices))
|
||||
alreadyAvailableCount := uint64(len(alreadyAvailableIndices))
|
||||
isReconstructionPossible := storedCount+alreadyAvailableCount >= minimumColumnsCountToReconstruct
|
||||
|
||||
// Skip if reconstruction is needed but not possible.
|
||||
if isReconstructionNeeded && !isReconstructionPossible {
|
||||
continue
|
||||
}
|
||||
|
||||
// Reconstruct if reconstruction is needed and possible.
|
||||
if isReconstructionNeeded && isReconstructionPossible {
|
||||
// Load all we have in the store.
|
||||
stored, err := storage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "storage get for root %#x", root)
|
||||
}
|
||||
|
||||
allAvailable := make([]blocks.VerifiedRODataColumn, 0, storedCount+alreadyAvailableCount)
|
||||
allAvailable = append(allAvailable, stored...)
|
||||
allAvailable = append(allAvailable, alreadyAvailable...)
|
||||
|
||||
// Attempt reconstruction.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(allAvailable)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "reconstruct data column sidecars for root %#x", root)
|
||||
}
|
||||
|
||||
// Select only sidecars we need.
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if requestedIndices[sidecar.Index] {
|
||||
result[root] = append(result[root], sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
delete(roots, root)
|
||||
continue
|
||||
}
|
||||
|
||||
// Reconstruction is not needed, simply assemble what is available in storage and already available.
|
||||
allAvailable, err := assembleAvailableSidecarsForRoot(storage, alreadyAvailableByRoot, root, requestedIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "assemble available sidecars")
|
||||
}
|
||||
|
||||
result[root] = allAvailable
|
||||
delete(roots, root)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
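As a worked example (assuming 128 columns with a reconstruction minimum of 64): a root with 60 columns in storage and 5 more received from peers can be reconstructed (60 + 5 = 65 >= 64) even if some requested indices are in neither set, so it is completed here; with only 50 stored and 5 received, reconstruction is still needed but not possible, so the root is skipped and left to the later indirect-request and assembly steps.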
|
||||
// assembleAvailableSidecars assembles all sidecars available in storage
|
||||
// and in `alreadyAvailableByRoot` corresponding to `roots`.
|
||||
// It also returns all missing indices by root.
|
||||
func assembleAvailableSidecars(
|
||||
storage filesystem.DataColumnStorageReader,
|
||||
requestedIndices map[uint64]bool,
|
||||
roots map[[fieldparams.RootLength]byte]bool,
|
||||
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
// Assemble results.
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
for root := range roots {
|
||||
allAvailable, err := assembleAvailableSidecarsForRoot(storage, alreadyAvailableByRoot, root, requestedIndices)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "assemble sidecars for root")
|
||||
}
|
||||
|
||||
if len(allAvailable) > 0 {
|
||||
result[root] = allAvailable
|
||||
}
|
||||
}
|
||||
|
||||
// Compute still missing sidecars.
|
||||
missingByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(roots))
|
||||
for root := range roots {
|
||||
missing := make(map[uint64]bool, len(requestedIndices))
|
||||
for index := range requestedIndices {
|
||||
missing[index] = true
|
||||
}
|
||||
|
||||
allAvailable := result[root]
|
||||
for _, sidecar := range allAvailable {
|
||||
delete(missing, sidecar.Index)
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
missingByRoot[root] = missing
|
||||
}
|
||||
}
|
||||
|
||||
return result, missingByRoot, nil
|
||||
}
|
||||
|
||||
// assembleAvailableSidecarsForRoot assembles all sidecars available in storage
|
||||
// and in `alreadyAvailableByRoot` corresponding to `root` and `indices`.
|
||||
func assembleAvailableSidecarsForRoot(
|
||||
storage filesystem.DataColumnStorageReader,
|
||||
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
|
||||
root [fieldparams.RootLength]byte,
|
||||
indices map[uint64]bool,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
stored, err := storage.Get(root, sortedSliceFromMap(indices))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "storage get for root %#x", root)
|
||||
}
|
||||
|
||||
alreadyAvailable := alreadyAvailableByRoot[root]
|
||||
|
||||
allAvailable := make([]blocks.VerifiedRODataColumn, 0, len(stored)+len(alreadyAvailable))
|
||||
allAvailable = append(allAvailable, stored...)
|
||||
allAvailable = append(allAvailable, alreadyAvailable...)
|
||||
|
||||
return allAvailable, nil
|
||||
}
|
||||
|
||||
// selectPeers selects peers to query the sidecars.
|
||||
// It begins by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth,
|
||||
// and assigns to it all its available sidecars. Then, it randomly selects another peer, until
|
||||
@@ -450,7 +687,6 @@ func updateResults(
|
||||
verifiedSidecars []blocks.VerifiedRODataColumn,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn {
|
||||
// Copy the original map to avoid modifying it directly.
|
||||
verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for _, verifiedSidecar := range verifiedSidecars {
|
||||
blockRoot := verifiedSidecar.BlockRoot()
|
||||
@@ -566,11 +802,23 @@ func sendDataColumnSidecarsRequest(
|
||||
roDataColumns = append(roDataColumns, localRoDataColumns...)
|
||||
}
|
||||
|
||||
prettyByRangeRequests := make([]map[string]any, 0, len(byRangeRequests))
|
||||
for _, request := range byRangeRequests {
|
||||
prettyRequest := map[string]any{
|
||||
"startSlot": request.StartSlot,
|
||||
"count": request.Count,
|
||||
"columns": request.Columns,
|
||||
}
|
||||
|
||||
prettyByRangeRequests = append(prettyByRangeRequests, prettyRequest)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"respondedSidecars": len(roDataColumns),
|
||||
"requests": len(byRangeRequests),
|
||||
"requestCount": len(byRangeRequests),
|
||||
"type": "byRange",
|
||||
"duration": time.Since(start),
|
||||
"requests": prettyByRangeRequests,
|
||||
}).Debug("Received data column sidecars")
|
||||
|
||||
return roDataColumns, nil
|
||||
@@ -766,6 +1014,8 @@ func computeIndicesByRootByPeer(
|
||||
indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
peers map[goPeer.ID]bool,
|
||||
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// First, compute custody columns for all peers
|
||||
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
|
||||
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
|
||||
@@ -800,7 +1050,10 @@ func computeIndicesByRootByPeer(
|
||||
return nil, errors.Errorf("chain state is nil for peer %s", peer)
|
||||
}
|
||||
|
||||
headSlotByPeer[peer] = peerChainState.HeadSlot
|
||||
// Our view of the head slot of a peer is not updated in real time.
|
||||
// We add an epoch to account for the fact that the peer's real head slot
// may be higher than our view of it.
|
||||
headSlotByPeer[peer] = peerChainState.HeadSlot + slotsPerEpoch
|
||||
}
|
||||
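For example, with 32-slot epochs, a peer whose last reported status shows head slot 992 is treated here as if its head were at slot 1024, so it is not ruled out for by-range requests covering slots it has likely reached since that status was received.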
|
||||
// For each block root and its indices, find suitable peers
|
||||
@@ -931,15 +1184,6 @@ func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
return result
|
||||
}
|
||||
|
||||
// computeSlotByBlockRoot maps each block root to its corresponding slot.
|
||||
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
|
||||
slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
|
||||
for _, roBlock := range roBlocks {
|
||||
slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
|
||||
}
|
||||
return slotByBlockRoot
|
||||
}
|
||||
|
||||
// computeTotalCount calculates the total count of indices across all roots.
|
||||
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
totalCount := 0
|
||||
|
||||
@@ -20,8 +20,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -33,11 +35,13 @@ import (
|
||||
|
||||
func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
// Slot 1: All needed sidecars are available in storage
|
||||
// Slot 2: No commitment
|
||||
// Slot 3: All sidecars are saved excepted the needed ones
|
||||
// Slot 4: Some sidecars are in the storage, other have to be retrieved from peers.
|
||||
// Slot 5: Some sidecars are in the storage, other have to be retrieved from peers but peers do not deliver all requested sidecars.
|
||||
// Slot 1: All needed sidecars are available in storage ==> Retrieval from storage only.
// Slot 2: No commitment ==> Nothing to do.
// Slot 3: Some sidecars are in storage, others have to be retrieved from peers ==> Retrieval from storage and peers.
// Slot 4: Some sidecars are in storage, others have to be retrieved from peers, but peers do not deliver all requested sidecars ==> Retrieval from storage and peers, then reconstruction.
// Slot 5: Some sidecars are in storage, others have to be retrieved from peers; peers do not respond with everything needed on the first attempt but do respond with the needed sidecars on the second attempt ==> Retrieval from storage and peers.
// Slot 6: Some sidecars are in storage, others have to be retrieved from peers; peers do not respond with everything needed on the first attempt and respond with sidecars that are not needed on the second attempt ==> Retrieval from storage and peers, then reconstruction.
// Slot 7: Some sidecars are in storage, others have to be retrieved from peers, but peers do not send anything ==> Still missing.
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -75,67 +79,74 @@ func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
// Block 3
|
||||
block3, _, verifiedSidecars3 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(3))
|
||||
root3 := block3.Root()
|
||||
|
||||
toStore3 := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns-uint64(len(indices)))
|
||||
for i := range numberOfColumns {
|
||||
if !indices[i] {
|
||||
sidecar := verifiedSidecars3[i]
|
||||
toStore3 = append(toStore3, sidecar)
|
||||
}
|
||||
}
|
||||
toStore3 := []blocks.VerifiedRODataColumn{verifiedSidecars3[106]}
|
||||
|
||||
err = storage.Save(toStore3)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 4
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
block4, _, verifiedSidecars4 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(4))
|
||||
root4 := block4.Root()
|
||||
toStore4 := []blocks.VerifiedRODataColumn{verifiedSidecars4[106]}
|
||||
|
||||
toStoreCount := minimumColumnsCountToReconstruct - 1
|
||||
toStore4 := make([]blocks.VerifiedRODataColumn, 0, toStoreCount)
|
||||
|
||||
for i := uint64(0); uint64(len(toStore4)) < toStoreCount; i++ {
|
||||
sidecar := verifiedSidecars4[minimumColumnsCountToReconstruct+i]
|
||||
if sidecar.Index == 81 {
|
||||
continue
|
||||
}
|
||||
|
||||
toStore4 = append(toStore4, sidecar)
|
||||
}
|
||||
|
||||
err = storage.Save(toStore4)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 5
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
block5, _, verifiedSidecars5 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(5))
|
||||
root5 := block5.Root()
|
||||
|
||||
toStoreCount := minimumColumnsCountToReconstruct - 1
|
||||
toStore5 := make([]blocks.VerifiedRODataColumn, 0, toStoreCount)
|
||||
|
||||
for i := uint64(0); uint64(len(toStore5)) < toStoreCount; i++ {
|
||||
sidecar := verifiedSidecars5[minimumColumnsCountToReconstruct+i]
|
||||
if sidecar.Index == 81 {
|
||||
continue
|
||||
}
|
||||
|
||||
toStore5 = append(toStore5, sidecar)
|
||||
}
|
||||
toStore5 := []blocks.VerifiedRODataColumn{verifiedSidecars5[106]}
|
||||
|
||||
err = storage.Save(toStore5)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Custody columns with this private key and 4-cgc: 31, 81, 97, 105
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
// Block 6
|
||||
block6, _, verifiedSidecars6 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(6))
|
||||
root6 := block6.Root()
|
||||
toStore6 := []blocks.VerifiedRODataColumn{verifiedSidecars6[106]}
|
||||
|
||||
err = storage.Save(toStore6)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 7
|
||||
block7, _, verifiedSidecars7 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(7))
|
||||
root7 := block7.Root()
|
||||
toStore7 := []blocks.VerifiedRODataColumn{verifiedSidecars7[106]}
|
||||
|
||||
err = storage.Save(toStore7)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Peers
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
byRangeProtocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
byRootProtocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
|
||||
p2p.Connect(other)
|
||||
|
||||
p2p.Peers().SetChainState(other.PeerID(), ðpb.StatusV2{
|
||||
HeadSlot: 5,
|
||||
HeadSlot: 8,
|
||||
})
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 4,
|
||||
Count: 2,
|
||||
Columns: []uint64{31, 81},
|
||||
}
|
||||
p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&pb.MetaDataV2{
|
||||
CustodyGroupCount: 128,
|
||||
}))
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
@@ -149,21 +160,82 @@ func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
other.SetStreamHandler(byRangeProtocol, func(stream network.Stream) {
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 3,
|
||||
Count: 5,
|
||||
Columns: []uint64{31, 81},
|
||||
}
|
||||
|
||||
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[31].DataColumnSidecar)
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars3[31].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars3[81].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars5[31].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars6[31].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
other.SetStreamHandler(byRootProtocol, func(stream network.Stream) {
|
||||
allBut31And81And106 := make([]uint64, 0, numberOfColumns-3)
|
||||
allBut31And106 := make([]uint64, 0, numberOfColumns-2)
|
||||
allBut106 := make([]uint64, 0, numberOfColumns-1)
|
||||
for i := range numberOfColumns {
|
||||
if !map[uint64]bool{31: true, 81: true, 106: true}[i] {
|
||||
allBut31And81And106 = append(allBut31And81And106, i)
|
||||
}
|
||||
if !map[uint64]bool{31: true, 106: true}[i] {
|
||||
allBut31And106 = append(allBut31And106, i)
|
||||
}
|
||||
|
||||
if i != 106 {
|
||||
allBut106 = append(allBut106, i)
|
||||
}
|
||||
}
|
||||
|
||||
expectedRequest := &p2ptypes.DataColumnsByRootIdentifiers{
|
||||
{
|
||||
BlockRoot: root7[:],
|
||||
Columns: allBut106,
|
||||
},
|
||||
{
|
||||
BlockRoot: root5[:],
|
||||
Columns: allBut31And106,
|
||||
},
|
||||
{
|
||||
BlockRoot: root6[:],
|
||||
Columns: allBut31And106,
|
||||
},
|
||||
}
|
||||
|
||||
actualRequest := new(p2ptypes.DataColumnsByRootIdentifiers)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars5[81].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for _, index := range allBut31And81And106 {
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars6[index].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
@@ -178,51 +250,39 @@ func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
NewVerifier: newDataColumnsVerifier,
|
||||
}
|
||||
|
||||
expected := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||
expectedResult := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||
root1: {verifiedSidecars1[31], verifiedSidecars1[81], verifiedSidecars1[106]},
|
||||
// no root2 (no commitments in this block)
|
||||
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
|
||||
root3: {verifiedSidecars3[106], verifiedSidecars3[31], verifiedSidecars3[81]},
|
||||
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
|
||||
root5: {verifiedSidecars5[31], verifiedSidecars5[81], verifiedSidecars5[106]},
|
||||
root5: {verifiedSidecars5[106], verifiedSidecars5[31], verifiedSidecars5[81]},
|
||||
root6: {verifiedSidecars6[31], verifiedSidecars6[81], verifiedSidecars6[106]},
|
||||
root7: {verifiedSidecars7[106]},
|
||||
}
|
||||
|
||||
blocks := []blocks.ROBlock{block1, block2, block3, block4, block5}
|
||||
actual, err := FetchDataColumnSidecars(params, blocks, indices)
|
||||
expectedMissingIndicesByRoots := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
root7: {31: true, 81: true},
|
||||
}
|
||||
|
||||
blocks := []blocks.ROBlock{block1, block2, block3, block4, block5, block6, block7}
|
||||
actualResult, actualMissingRoots, err := FetchDataColumnSidecars(params, blocks, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for root := range expected {
|
||||
require.Equal(t, len(expected[root]), len(actual[root]))
|
||||
for i := range expected[root] {
|
||||
require.DeepSSZEqual(t, expected[root][i], actual[root][i])
|
||||
require.Equal(t, len(expectedResult), len(actualResult))
|
||||
for root := range expectedResult {
|
||||
require.Equal(t, len(expectedResult[root]), len(actualResult[root]))
|
||||
for i := range expectedResult[root] {
|
||||
require.DeepSSZEqual(t, expectedResult[root][i], actualResult[root][i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCategorizeIndices(t *testing.T) {
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||
})
|
||||
|
||||
err := storage.Save(verifiedRoSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedToQuery := map[uint64]bool{13: true}
|
||||
expectedStored := map[uint64]bool{12: true, 14: true}
|
||||
|
||||
actualToQuery, actualStored := categorizeIndices(storage, verifiedRoSidecars[0].BlockRoot(), []uint64{12, 13, 14})
|
||||
|
||||
require.Equal(t, len(expectedToQuery), len(actualToQuery))
|
||||
require.Equal(t, len(expectedStored), len(actualStored))
|
||||
|
||||
for index := range expectedToQuery {
|
||||
require.Equal(t, true, actualToQuery[index])
|
||||
}
|
||||
for index := range expectedStored {
|
||||
require.Equal(t, true, actualStored[index])
|
||||
require.Equal(t, len(expectedMissingIndicesByRoots), len(actualMissingRoots))
|
||||
for root, expectedMissingIndices := range expectedMissingIndicesByRoots {
|
||||
actualMissingIndices := actualMissingRoots[root]
|
||||
require.Equal(t, len(expectedMissingIndices), len(actualMissingIndices))
|
||||
for index := range expectedMissingIndices {
|
||||
require.Equal(t, true, actualMissingIndices[index])
|
||||
}
|
||||
}
|
||||
}
|
||||
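For reference, a minimal sketch of the behavior TestCategorizeIndices exercises, assuming only that the caller already knows which indices are stored on disk (the helper and parameter names here are illustrative, not the actual categorizeIndices signature):

// categorizeIndicesSketch splits the requested indices into those that still
// need to be queried from peers and those already present in local storage.
// With stored = {12, 14} and requested = [12, 13, 14], it returns
// toQuery = {13} and inStore = {12, 14}, matching the test above.
func categorizeIndicesSketch(stored map[uint64]bool, requested []uint64) (toQuery, inStore map[uint64]bool) {
	toQuery = make(map[uint64]bool, len(requested))
	inStore = make(map[uint64]bool, len(requested))
	for _, index := range requested {
		if stored[index] {
			inStore[index] = true
			continue
		}
		toQuery[index] = true
	}
	return toQuery, inStore
}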
|
||||
@@ -841,6 +901,7 @@ func TestComputeIndicesByRootByPeer(t *testing.T) {
|
||||
[fieldparams.RootLength]byte{3}: {38: true},
|
||||
},
|
||||
peerIDs[3]: {
|
||||
[fieldparams.RootLength]byte{2}: {10: true},
|
||||
[fieldparams.RootLength]byte{3}: {10: true},
|
||||
},
|
||||
}
|
||||
@@ -953,39 +1014,6 @@ func TestSlortedSliceFromMap(t *testing.T) {
|
||||
require.DeepEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestComputeSlotByBlockRoot(t *testing.T) {
|
||||
const (
|
||||
count = 3
|
||||
multiplier = 10
|
||||
)
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, count)
|
||||
for i := range count {
|
||||
signedBlock := util.NewBeaconBlock()
|
||||
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
|
||||
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlocks = append(roBlocks, roBlock)
|
||||
}
|
||||
|
||||
expected := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
|
||||
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
|
||||
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
|
||||
}
|
||||
|
||||
actual := computeSlotByBlockRoot(roBlocks)
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for k, v := range expected {
|
||||
require.Equal(t, v, actual[k])
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeTotalCount(t *testing.T) {
|
||||
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||
|
||||
@@ -3,220 +3,131 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
broadcastMissingDataColumnsTimeIntoSlotMin = 1 * time.Second
|
||||
broadcastMissingDataColumnsSlack = 2 * time.Second
|
||||
)
|
||||
// processDataColumnSidecarsFromReconstruction, after a random delay, attempts to reconstruct,
|
||||
// broadcast and receive missing data column sidecars for the given block root.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#reconstruction-and-cross-seeding
|
||||
func (s *Service) processDataColumnSidecarsFromReconstruction(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
|
||||
key := fmt.Sprintf("%#x", sidecar.BlockRoot())
|
||||
if _, err, _ := s.reconstructionSingleFlight.Do(key, func() (interface{}, error) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// reconstructSaveBroadcastDataColumnSidecars reconstructs, if possible,
|
||||
// all data column sidecars. Then, it saves missing sidecars to the store.
|
||||
// After a delay, it broadcasts in the background the sidecars that were
|
||||
// not seen via gossip but were reconstructed.
|
||||
func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
|
||||
ctx context.Context,
|
||||
slot primitives.Slot,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
root [fieldparams.RootLength]byte,
|
||||
) error {
|
||||
startTime := time.Now()
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
root := sidecar.BlockRoot()
|
||||
slot := sidecar.Slot()
|
||||
proposerIndex := sidecar.ProposerIndex()
|
||||
|
||||
// Lock to prevent concurrent reconstructions.
|
||||
s.reconstructionLock.Lock()
|
||||
defer s.reconstructionLock.Unlock()
|
||||
// Return early if reconstruction is not needed.
|
||||
if !s.shouldReconstruct(root) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the slot start time.
|
||||
slotStartTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to calculate slot start time")
|
||||
}
|
||||
|
||||
// Randomly choose value before starting reconstruction.
|
||||
timeIntoSlot := s.computeRandomDelay(slotStartTime)
|
||||
broadcastTime := slotStartTime.Add(timeIntoSlot)
|
||||
waitingTime := time.Until(broadcastTime)
|
||||
|
||||
wg.Add(1)
|
||||
time.AfterFunc(waitingTime, func() {
|
||||
defer wg.Done()
|
||||
|
||||
// Return early if the context was canceled during the waiting time.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Return early if reconstruction is not needed anymore.
|
||||
if !s.shouldReconstruct(root) {
|
||||
return
|
||||
}
|
||||
|
||||
// Load all the stored data column sidecars for this root.
|
||||
verifiedSidecars, err := s.cfg.dataColumnStorage.Get(root, nil)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get data column sidecars")
|
||||
return
|
||||
}
|
||||
|
||||
// Reconstruct all the data column sidecars.
|
||||
startTime := time.Now()
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(verifiedSidecars)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to reconstruct data column sidecars")
|
||||
return
|
||||
}
|
||||
|
||||
duration := time.Since(startTime)
|
||||
dataColumnReconstructionHistogram.Observe(float64(duration.Milliseconds()))
|
||||
dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))
|
||||
|
||||
// Retrieve indices of data column sidecars to sample.
|
||||
columnIndicesToSample, err := s.columnIndicesToSample()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get column indices to sample")
|
||||
return
|
||||
}
|
||||
|
||||
unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, slot, proposerIndex, columnIndicesToSample, reconstructedSidecars)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast and receive unseen data column sidecars")
|
||||
return
|
||||
}
|
||||
|
||||
if len(unseenIndices) > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"proposerIndex": proposerIndex,
|
||||
"count": len(unseenIndices),
|
||||
"indices": sortedSliceFromMap(unseenIndices),
|
||||
"duration": duration,
|
||||
}).Debug("Reconstructed data column sidecars")
|
||||
}
|
||||
})
|
||||
|
||||
wg.Wait()
|
||||
return nil, nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldReconstruct returns true if we should attempt to reconstruct the data columns for the given block root.
|
||||
func (s *Service) shouldReconstruct(root [fieldparams.RootLength]byte) bool {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Get the columns we store.
|
||||
storedDataColumns := s.cfg.dataColumnStorage.Summary(root)
|
||||
storedColumnsCount := storedDataColumns.Count()
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If reconstruction is not possible or if all columns are already stored, exit early.
|
||||
if storedColumnsCount < peerdas.MinimumColumnCountToReconstruct() || storedColumnsCount == numberOfColumns {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve our local node info.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Load all the possible data column sidecars, to minimize reconstruction time.
|
||||
verifiedSidecars, err := s.cfg.dataColumnStorage.Get(root, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get data column sidecars")
|
||||
}
|
||||
|
||||
// Reconstruct all the data column sidecars.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(verifiedSidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reconstruct data column sidecars")
|
||||
}
|
||||
|
||||
// Filter reconstructed sidecars to save.
|
||||
custodyColumns := localNodeInfo.CustodyColumns
|
||||
toSaveSidecars := make([]blocks.VerifiedRODataColumn, 0, len(custodyColumns))
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if custodyColumns[sidecar.Index] {
|
||||
toSaveSidecars = append(toSaveSidecars, sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the data column sidecars to the database.
|
||||
// Note: We do not call `receiveDataColumn`, because it will ignore
|
||||
// incoming data columns received via gossip before we have broadcast the reconstructed data columns.
|
||||
if err := s.cfg.dataColumnStorage.Save(toSaveSidecars); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
slotStartTime, err := slots.StartTime(s.cfg.clock.GenesisTime(), slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to calculate slot start time")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"fromColumnsCount": storedColumnsCount,
|
||||
"sinceSlotStartTime": time.Since(slotStartTime),
|
||||
"reconstructionAndSaveDuration": time.Since(startTime),
|
||||
}).Debug("Data columns reconstructed and saved")
|
||||
|
||||
// Update reconstruction metrics.
|
||||
dataColumnReconstructionHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))
|
||||
|
||||
// Schedule the broadcast.
|
||||
if err := s.scheduleMissingDataColumnSidecarsBroadcast(ctx, root, proposerIndex, slot); err != nil {
|
||||
return errors.Wrap(err, "schedule reconstructed data columns broadcast")
|
||||
}
|
||||
|
||||
return nil
|
||||
// Reconstruct only if we have at least the minimum number of columns to reconstruct, but not all the columns.
|
||||
// (If we have not enough columns, reconstruction is impossible. If we have all the columns, reconstruction is unnecessary.)
|
||||
return storedColumnsCount >= peerdas.MinimumColumnCountToReconstruct() && storedColumnsCount != numberOfColumns
|
||||
}
|
||||
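As a worked example of the predicate returned above (the column counts are illustrative; the real values come from the beacon config and peerdas.MinimumColumnCountToReconstruct):

// shouldReconstructSketch mirrors the return expression of shouldReconstruct.
func shouldReconstructSketch(storedCount, minToReconstruct, numberOfColumns uint64) bool {
	return storedCount >= minToReconstruct && storedCount != numberOfColumns
}

// Assuming 128 columns and a reconstruction threshold of 64:
//   shouldReconstructSketch(63, 64, 128)  == false // too few columns to reconstruct
//   shouldReconstructSketch(64, 64, 128)  == true  // reconstruction possible and useful
//   shouldReconstructSketch(128, 64, 128) == false // everything is already stored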
|
||||
// scheduleMissingDataColumnSidecarsBroadcast schedules the broadcast of missing
|
||||
// (i.e. reconstructed but not seen via gossip) sidecars.
|
||||
func (s *Service) scheduleMissingDataColumnSidecarsBroadcast(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%x", root),
|
||||
"slot": slot,
|
||||
})
|
||||
// computeRandomDelay computes a random delay duration to wait before reconstructing data column sidecars.
|
||||
func (s *Service) computeRandomDelay(slotStartTime time.Time) time.Duration {
|
||||
const maxReconstructionDelaySec = 2.
|
||||
|
||||
// Get the time corresponding to the start of the slot.
|
||||
genesisTime := s.cfg.clock.GenesisTime()
|
||||
slotStartTime, err := slots.StartTime(genesisTime, slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to calculate slot start time")
|
||||
}
|
||||
|
||||
// Compute the waiting time. This could be negative. In such a case, broadcast immediately.
|
||||
randFloat := s.reconstructionRandGen.Float64()
|
||||
timeIntoSlot := broadcastMissingDataColumnsTimeIntoSlotMin + time.Duration(float64(broadcastMissingDataColumnsSlack)*randFloat)
|
||||
broadcastTime := slotStartTime.Add(timeIntoSlot)
|
||||
waitingTime := time.Until(broadcastTime)
|
||||
time.AfterFunc(waitingTime, func() {
|
||||
// Return early if the context was canceled during the waiting time.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.broadcastMissingDataColumnSidecars(slot, proposerIndex, root, timeIntoSlot); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast missing data column sidecars")
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) broadcastMissingDataColumnSidecars(
|
||||
slot primitives.Slot,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
root [fieldparams.RootLength]byte,
|
||||
timeIntoSlot time.Duration,
|
||||
) error {
|
||||
// Get the node ID.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
// Retrieve the local node info.
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peerdas info")
|
||||
}
|
||||
|
||||
// Compute the missing data columns (data columns we should custody but did not receive via gossip).
|
||||
missingColumns := make([]uint64, 0, len(localNodeInfo.CustodyColumns))
|
||||
for column := range localNodeInfo.CustodyColumns {
|
||||
if !s.hasSeenDataColumnIndex(slot, proposerIndex, column) {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if there are no missing data columns.
|
||||
if len(missingColumns) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load from the store the data columns that were reconstructed but not received via gossip.
|
||||
verifiedRODataColumnSidecars, err := s.cfg.dataColumnStorage.Get(root, missingColumns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "data column storage get")
|
||||
}
|
||||
|
||||
broadcastedColumns := make([]uint64, 0, len(verifiedRODataColumnSidecars))
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
|
||||
broadcastedColumns = append(broadcastedColumns, verifiedRODataColumn.Index)
|
||||
// Compute the subnet for this column.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)
|
||||
|
||||
// Broadcast the missing data column.
|
||||
if err := s.cfg.p2p.BroadcastDataColumnSidecar(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
|
||||
log.WithError(err).Error("Broadcast data column")
|
||||
}
|
||||
|
||||
// Now, we can set the data column as seen.
|
||||
s.setSeenDataColumnIndex(slot, proposerIndex, verifiedRODataColumn.Index)
|
||||
}
|
||||
|
||||
if logrus.GetLevel() >= logrus.DebugLevel {
|
||||
// Sort for nice logging.
|
||||
slices.Sort(broadcastedColumns)
|
||||
slices.Sort(missingColumns)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"timeIntoSlot": timeIntoSlot,
|
||||
"missingColumns": missingColumns,
|
||||
"broadcasted": broadcastedColumns,
|
||||
}).Debug("Start broadcasting not seen via gossip but reconstructed data columns")
|
||||
}
|
||||
|
||||
return nil
|
||||
timeIntoSlot := time.Duration(maxReconstructionDelaySec * randFloat * float64(time.Second))
|
||||
return timeIntoSlot
|
||||
}
|
||||
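A small sketch of the delay distribution implied by computeRandomDelay: with maxReconstructionDelaySec set to 2, the returned duration is uniform in [0s, 2s).

// randomDelaySketch draws a delay uniformly in [0, maxDelaySec) seconds.
func randomDelaySketch(rng *rand.Rand, maxDelaySec float64) time.Duration {
	return time.Duration(maxDelaySec * rng.Float64() * float64(time.Second))
}

// TestComputeRandomDelay below seeds the generator with 42 and expects
// 746056722 ns (~0.75 s), i.e. a single draw of roughly 0.373 scaled by 2 s.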
|
||||
@@ -1,7 +1,11 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"math/rand"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
@@ -9,12 +13,11 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestReconstructDataColumns(t *testing.T) {
|
||||
func TestProcessDataColumnSidecarsFromReconstruction(t *testing.T) {
|
||||
const blobCount = 4
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
@@ -27,9 +30,6 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))
|
||||
|
||||
root, block := roBlock.Root(), roBlock.Block()
|
||||
slot, proposerIndex := block.Slot(), block.ProposerIndex()
|
||||
|
||||
minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
t.Run("not enough stored sidecars", func(t *testing.T) {
|
||||
@@ -38,7 +38,7 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
|
||||
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
|
||||
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
|
||||
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
|
||||
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -60,19 +60,39 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
// However, for the needs of this test, this is perfectly fine.
|
||||
const cgc = 8
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
||||
err := storage.Save(verifiedRoDataColumns[:minimumCount])
|
||||
require.NoError(t, err)
|
||||
|
||||
chainService := &mockChain.ChainService{}
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
service := NewService(
|
||||
ctx,
|
||||
WithP2P(p2ptest.NewTestP2P(t)),
|
||||
WithP2P(p2p),
|
||||
WithDataColumnStorage(storage),
|
||||
WithChainService(&mockChain.ChainService{}),
|
||||
WithChainService(chainService),
|
||||
WithOperationNotifier(chainService.OperationNotifier()),
|
||||
)
|
||||
|
||||
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
|
||||
minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
||||
receivedBeforeReconstruction := verifiedRoDataColumns[:minimumCount]
|
||||
|
||||
err = service.receiveDataColumnSidecars(ctx, receivedBeforeReconstruction)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = storage.Save(receivedBeforeReconstruction)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load())
|
||||
|
||||
// Check received indices before reconstruction.
|
||||
require.Equal(t, minimumCount, uint64(len(chainService.DataColumns)))
|
||||
for i, actual := range chainService.DataColumns {
|
||||
require.Equal(t, uint64(i), actual.Index)
|
||||
}
|
||||
|
||||
// Run the reconstruction.
|
||||
err = service.processDataColumnSidecarsFromReconstruction(ctx, verifiedRoDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := make(map[uint64]bool, minimumCount+cgc)
|
||||
@@ -81,97 +101,38 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
}
|
||||
|
||||
// The node should custody these indices.
|
||||
for _, i := range [...]uint64{1, 17, 19, 42, 75, 87, 102, 117} {
|
||||
for _, i := range [...]uint64{75, 87, 102, 117} {
|
||||
expected[i] = true
|
||||
}
|
||||
|
||||
summary := storage.Summary(root)
|
||||
actual := summary.Stored()
|
||||
block := roBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for index := range expected {
|
||||
require.Equal(t, true, actual[index])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBroadcastMissingDataColumnSidecars(t *testing.T) {
|
||||
const (
|
||||
cgc = 8
|
||||
blobCount = 4
|
||||
timeIntoSlot = 0
|
||||
)
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
ctx := t.Context()
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))
|
||||
|
||||
root, block := roBlock.Root(), roBlock.Block()
|
||||
slot, proposerIndex := block.Slot(), block.ProposerIndex()
|
||||
|
||||
t.Run("no missing sidecars", func(t *testing.T) {
|
||||
service := NewService(
|
||||
ctx,
|
||||
WithP2P(p2ptest.NewTestP2P(t)),
|
||||
)
|
||||
|
||||
for _, index := range [...]uint64{1, 17, 19, 42, 75, 87, 102, 117} {
|
||||
key := computeCacheKey(slot, proposerIndex, index)
|
||||
service.seenDataColumnCache.Add(slot, key, true)
|
||||
}
|
||||
|
||||
err := service.broadcastMissingDataColumnSidecars(slot, proposerIndex, root, timeIntoSlot)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("some missing sidecars", func(t *testing.T) {
|
||||
toSave := make([]blocks.VerifiedRODataColumn, 0, 2)
|
||||
for _, index := range [...]uint64{42, 87} {
|
||||
toSave = append(toSave, verifiedRoDataColumns[index])
|
||||
}
|
||||
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
err := storage.Save(toSave)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := NewService(
|
||||
ctx,
|
||||
WithP2P(p2p),
|
||||
WithDataColumnStorage(storage),
|
||||
)
|
||||
_, _, err = service.cfg.p2p.UpdateCustodyInfo(0, cgc)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, index := range [...]uint64{1, 17, 19, 102, 117} { // 42, 75 and 87 are missing
|
||||
key := computeCacheKey(slot, proposerIndex, index)
|
||||
service.seenDataColumnCache.Add(slot, key, true)
|
||||
}
|
||||
|
||||
for _, index := range [...]uint64{42, 75, 87} {
|
||||
seen := service.hasSeenDataColumnIndex(slot, proposerIndex, index)
|
||||
require.Equal(t, false, seen)
|
||||
}
|
||||
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load())
|
||||
|
||||
err = service.broadcastMissingDataColumnSidecars(slot, proposerIndex, root, timeIntoSlot)
|
||||
require.NoError(t, err)
|
||||
|
||||
seen := service.hasSeenDataColumnIndex(slot, proposerIndex, 75)
|
||||
require.Equal(t, false, seen)
|
||||
|
||||
for _, index := range [...]uint64{42, 87} {
|
||||
seen := service.hasSeenDataColumnIndex(slot, proposerIndex, index)
|
||||
require.Equal(t, true, seen)
|
||||
require.Equal(t, len(expected), len(chainService.DataColumns))
|
||||
for _, actual := range chainService.DataColumns {
|
||||
require.Equal(t, true, expected[actual.Index])
|
||||
require.Equal(t, true, service.hasSeenDataColumnIndex(slot, proposerIndex, actual.Index))
|
||||
}
|
||||
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load())
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeRandomDelay(t *testing.T) {
|
||||
const (
|
||||
seed = 42
|
||||
expected = 746056722 * time.Nanosecond // = 0.746056722 seconds
|
||||
)
|
||||
slotStartTime := time.Date(2020, 12, 30, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
service := NewService(
|
||||
t.Context(),
|
||||
WithP2P(p2ptest.NewTestP2P(t)),
|
||||
WithReconstructionRandGen(rand.New(rand.NewSource(seed))),
|
||||
)
|
||||
|
||||
waitingTime := service.computeRandomDelay(slotStartTime)
|
||||
fmt.Print(waitingTime)
|
||||
require.Equal(t, expected, waitingTime)
|
||||
}
|
||||
|
||||
@@ -34,7 +34,6 @@ go_library(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -395,6 +395,25 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
|
||||
return blobsPid, errors.Wrap(err, "custody info")
|
||||
}
|
||||
|
||||
currentSlot := f.clock.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
|
||||
for _, blockWithSidecars := range postFulu {
|
||||
blockSlot := blockWithSidecars.Block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
if params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
roBlocks = append(roBlocks, blockWithSidecars.Block)
|
||||
}
|
||||
}
|
||||
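For reference, a minimal sketch of the data-availability window check applied in the loop above; expressing the retention length in epochs is an assumption here, and the real value comes from the beacon config used by params.WithinDAPeriod:

// withinDAPeriodSketch reports whether a block is still within the data
// availability retention window relative to the current epoch.
func withinDAPeriodSketch(blockEpoch, currentEpoch, retentionEpochs uint64) bool {
	if currentEpoch < retentionEpochs {
		// The chain is younger than the retention window: everything is retained.
		return true
	}
	return blockEpoch >= currentEpoch-retentionEpochs
}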
|
||||
// Return early if there are no blocks that need data column sidecars.
|
||||
if len(roBlocks) == 0 {
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// Some blocks need data column sidecars, fetch them.
|
||||
params := prysmsync.DataColumnSidecarsParams{
|
||||
Ctx: ctx,
|
||||
Tor: f.clock,
|
||||
@@ -405,16 +424,19 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
|
||||
NewVerifier: f.cv,
|
||||
}
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
|
||||
for _, block := range postFulu {
|
||||
roBlocks = append(roBlocks, block.Block)
|
||||
}
|
||||
|
||||
verifiedRoDataColumnsByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
|
||||
verifiedRoDataColumnsByRoot, missingIndicesByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch data column sidecars")
|
||||
}
|
||||
|
||||
if len(missingIndicesByRoot) > 0 {
|
||||
prettyMissingIndicesByRoot := make(map[string][]uint64, len(missingIndicesByRoot))
|
||||
for root, indices := range missingIndicesByRoot {
|
||||
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = sortedSliceFromMap(indices)
|
||||
}
|
||||
return "", errors.Errorf("some sidecars are still missing after fetch: %v", prettyMissingIndicesByRoot)
|
||||
}
|
||||
|
||||
// Populate the response.
|
||||
for i := range bwScs {
|
||||
bwSc := &bwScs[i]
|
||||
@@ -456,6 +478,9 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
"count": req.Count,
|
||||
"step": req.Step,
|
||||
}).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
if errors.Is(err, prysmsync.ErrInvalidFetchedData) {
|
||||
f.downscorePeer(p, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
@@ -876,6 +901,12 @@ func dedupPeers(peers []peer.ID) []peer.ID {
|
||||
return newPeerList
|
||||
}
|
||||
|
||||
// downscorePeer increments the bad responses score for the peer and logs the event.
|
||||
func (f *blocksFetcher) downscorePeer(peerID peer.ID, reason error) {
|
||||
newScore := f.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
|
||||
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
|
||||
}
|
||||
|
||||
// findFirstFuluIndex returns the index of the first block with a version >= Fulu.
|
||||
// It returns an error if blocks are not correctly sorted by version regarding Fulu.
|
||||
func findFirstFuluIndex(bwScs []blocks.BlockWithROSidecars) (int, error) {
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
@@ -910,6 +911,55 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_requestBlocksDownscoreOnInvalidData(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
p2p: p1,
|
||||
chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
|
||||
})
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(0.000001, 640, 1*time.Second, false)
|
||||
|
||||
// Set up handler that returns blocks in wrong order (will trigger ErrInvalidFetchedData)
|
||||
p2.BHost.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 163
|
||||
tor := startup.NewClock(time.Now(), [32]byte{})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
|
||||
|
||||
blk = util.NewBeaconBlock()
|
||||
blk.Block.Slot = 162
|
||||
wsb, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconsync.WriteBlockChunk(stream, tor, p1.Encoding(), wsb))
|
||||
assert.NoError(t, stream.Close())
|
||||
})
|
||||
|
||||
// Verify the peer has no bad responses before the request
|
||||
scoreBeforeRequest, err := p1.Peers().Scorers().BadResponsesScorer().Count(p2.PeerID())
|
||||
require.ErrorIs(t, err, peerdata.ErrPeerUnknown)
|
||||
assert.Equal(t, -1, scoreBeforeRequest)
|
||||
|
||||
// Use fetchBlocksFromPeer which includes the downscoring logic
|
||||
_, _, err = fetcher.fetchBlocksFromPeer(ctx, 100, 64, []peer.ID{p2.PeerID()})
|
||||
assert.ErrorContains(t, errNoPeersAvailable.Error(), err)
|
||||
|
||||
// Verify the peer was downscored
|
||||
scoreAfterRequest, err := p1.Peers().Scorers().BadResponsesScorer().Count(p2.PeerID())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, scoreAfterRequest)
|
||||
}
|
||||
|
||||
func TestTimeToWait(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1335,6 +1385,11 @@ func TestFetchSidecars(t *testing.T) {
|
||||
nower := func() time.Time { return now }
|
||||
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define a Deneb block with blobs out of retention period.
|
||||
denebBlock := util.NewBeaconBlockDeneb()
|
||||
denebBlock.Block.Slot = 0 // Genesis slot, out of retention period.
|
||||
@@ -1343,33 +1398,52 @@ func TestFetchSidecars(t *testing.T) {
|
||||
roDebebBlock, err := blocks.NewROBlock(signedDenebBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define a Fulu block with blobs in the retention period.
|
||||
fuluBlock := util.NewBeaconBlockFulu()
|
||||
fuluBlock.Block.Slot = slotsPerEpoch // Within retention period.
|
||||
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||
// Define a Fulu block with blobs before the retention period.
|
||||
fuluBlock1 := util.NewBeaconBlockFulu()
|
||||
fuluBlock1.Block.Slot = slotsPerEpoch.Sub(1) // Before the retention period.
|
||||
fuluBlock1.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
signedFuluBlock1, err := blocks.NewSignedBeaconBlock(fuluBlock1)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||
roFuluBlock1, err := blocks.NewROBlock(signedFuluBlock1)
|
||||
require.NoError(t, err)
|
||||
|
||||
bodyRoot, err := fuluBlock.Block.Body.HashTreeRoot()
|
||||
bodyRootFulu1, err := fuluBlock1.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and save data column sidecars for this fulu block in the database.
|
||||
params := make([]util.DataColumnParam, 0, numberOfColumns)
|
||||
// Create and save data column sidecars for fulu block 1 in the database.
|
||||
paramsFulu1 := make([]util.DataColumnParam, 0, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRoot[:]}
|
||||
params = append(params, param)
|
||||
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRootFulu1[:]}
|
||||
paramsFulu1 = append(paramsFulu1, param)
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||
_, verifiedRoDataColumnSidecarsFulu1 := util.CreateTestVerifiedRoDataColumnSidecars(t, paramsFulu1)
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
// Save the data column sidecars for block fulu 1 to the storage.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecarsFulu1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save the data column sidecars to the storage.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
// Define a Fulu block with blobs in the retention period.
|
||||
fuluBlock2 := util.NewBeaconBlockFulu()
|
||||
fuluBlock2.Block.Slot = slotsPerEpoch // Within retention period.
|
||||
fuluBlock2.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
signedFuluBlock2, err := blocks.NewSignedBeaconBlock(fuluBlock2)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock2, err := blocks.NewROBlock(signedFuluBlock2)
|
||||
require.NoError(t, err)
|
||||
|
||||
bodyRootFulu2, err := fuluBlock2.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and save data column sidecars for fulu block 2 in the database.
|
||||
paramsFulu2 := make([]util.DataColumnParam, 0, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRootFulu2[:]}
|
||||
paramsFulu2 = append(paramsFulu2, param)
|
||||
}
|
||||
_, verifiedRoDataColumnSidecarsFulu2 := util.CreateTestVerifiedRoDataColumnSidecars(t, paramsFulu2)
|
||||
|
||||
// Save the data column sidecars for block fulu 2 to the storage.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecarsFulu2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a blocks fetcher.
|
||||
@@ -1382,7 +1456,8 @@ func TestFetchSidecars(t *testing.T) {
|
||||
// Fetch sidecars.
|
||||
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||
{Block: roDebebBlock},
|
||||
{Block: roFuluBlock},
|
||||
{Block: roFuluBlock1},
|
||||
{Block: roFuluBlock2},
|
||||
}
|
||||
pid, err := fetcher.fetchSidecars(ctx, "", nil, blocksWithSidecars)
|
||||
require.NoError(t, err)
|
||||
@@ -1392,12 +1467,15 @@ func TestFetchSidecars(t *testing.T) {
|
||||
require.Equal(t, 0, len(blocksWithSidecars[0].Blobs))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[0].Columns))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[1].Blobs))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[1].Columns))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[2].Blobs))
|
||||
|
||||
// We don't check the content of the columns here. The extensive test is done
|
||||
// in TestFetchDataColumnSidecars.
|
||||
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[1].Columns)))
|
||||
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[2].Columns)))
|
||||
})
|
||||
}
|
||||
|
||||
func TestFirstFuluIndex(t *testing.T) {
|
||||
bellatrix := util.NewBeaconBlockBellatrix()
|
||||
signedBellatrix, err := blocks.NewSignedBeaconBlock(bellatrix)
|
||||
|
||||
@@ -22,7 +22,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
@@ -73,6 +72,7 @@ type Service struct {
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
ctxMap sync.ContextByteVersions
|
||||
genesisTime time.Time
|
||||
toggler *sync.ServiceToggler
|
||||
}
|
||||
|
||||
// Option is a functional option for the initial-sync Service.
|
||||
@@ -94,6 +94,14 @@ func WithSyncChecker(checker *SyncChecker) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithServiceToggle sets the ServiceToggler, which is used to coordinate
|
||||
// with backfill, so that only one service runs at a time.
|
||||
func WithServiceToggle(toggler *sync.ServiceToggler) Option {
|
||||
return func(s *Service) {
|
||||
s.toggler = toggler
|
||||
}
|
||||
}
|
||||
|
||||
// SyncChecker allows other services to check the current status of
|
||||
// initial-sync and use that internally in their service.
|
||||
type SyncChecker struct {
|
||||
@@ -160,22 +168,25 @@ func (s *Service) Start() {
|
||||
log.Debug("Exiting Initial Sync Service")
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.beginSync(); err != nil {
|
||||
log.WithError(err).Error("Failed to get acquire sync toggle lock")
|
||||
return
|
||||
}
|
||||
defer s.completeSync()
|
||||
s.genesisTime = gt
|
||||
// Exit entering round-robin sync if we require 0 peers to sync.
|
||||
if flags.Get().MinimumSyncPeers == 0 {
|
||||
s.markSynced()
|
||||
log.WithField("genesisTime", s.genesisTime).Info("Due to number of peers required for sync being set at 0, entering regular sync immediately.")
|
||||
return
|
||||
}
|
||||
if s.genesisTime.After(prysmTime.Now()) {
|
||||
s.markSynced()
|
||||
log.WithField("genesisTime", s.genesisTime).Info("Genesis time has not arrived - not syncing")
|
||||
return
|
||||
}
|
||||
currentSlot := clock.CurrentSlot()
|
||||
if slots.ToEpoch(currentSlot) == 0 {
|
||||
log.WithField("genesisTime", s.genesisTime).Info("Chain started within the last epoch - not syncing")
|
||||
s.markSynced()
|
||||
return
|
||||
}
|
||||
s.chainStarted.Set()
|
||||
@@ -184,7 +195,6 @@ func (s *Service) Start() {
|
||||
// Are we already in sync, or close to it?
|
||||
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
|
||||
log.Info("Already synced to the current chain head")
|
||||
s.markSynced()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -205,7 +215,18 @@ func (s *Service) Start() {
|
||||
panic(err) // lint:nopanic -- Unexpected error. This should probably be surfaced with a returned error.
|
||||
}
|
||||
log.WithField("slot", s.cfg.Chain.HeadSlot()).Info("Synced up to")
|
||||
s.markSynced()
|
||||
}
|
||||
|
||||
func (s *Service) beginSync() error {
|
||||
s.synced.UnSet()
|
||||
return s.toggler.Acquire(s.ctx, sync.ToggleGroupRangeSync)
|
||||
}
|
||||
|
||||
// completeSync marks node as synced and notifies feed listeners.
|
||||
func (s *Service) completeSync() {
|
||||
s.toggler.Release(sync.ToggleGroupRangeSync)
|
||||
s.synced.Set()
|
||||
close(s.cfg.InitialSyncComplete)
|
||||
}
|
||||
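A minimal sketch of the coordination pattern used by beginSync/completeSync above, assuming only the Acquire/Release methods and the ToggleGroupRangeSync group shown in this diff; another service such as backfill would guard its own run loop the same way with its own group:

// runGuardedSketch acquires the toggle before doing any sync work and releases
// it when done, so initial sync and backfill never run at the same time.
func runGuardedSketch(ctx context.Context, toggler *sync.ServiceToggler, work func() error) error {
	if err := toggler.Acquire(ctx, sync.ToggleGroupRangeSync); err != nil {
		return err
	}
	defer toggler.Release(sync.ToggleGroupRangeSync)
	return work()
}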
|
||||
// fetchOriginSidecars fetches origin sidecars
|
||||
@@ -237,14 +258,14 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
|
||||
blockVersion := roBlock.Version()
|
||||
|
||||
if blockVersion >= version.Fulu {
|
||||
if err := s.fetchOriginColumns(roBlock, delay); err != nil {
|
||||
if err := s.fetchOriginDataColumnSidecars(roBlock, delay); err != nil {
|
||||
return errors.Wrap(err, "fetch origin columns")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
|
||||
if err := s.fetchOriginBlobSidecars(peers, roBlock); err != nil {
|
||||
return errors.Wrap(err, "fetch origin blobs")
|
||||
}
|
||||
}
|
||||
@@ -290,8 +311,10 @@ func (s *Service) Resync() error {
|
||||
}
|
||||
|
||||
// Set it to false since we are syncing again.
|
||||
s.synced.UnSet()
|
||||
defer func() { s.synced.Set() }() // Reset it at the end of the method.
|
||||
if err := s.beginSync(); err != nil {
|
||||
return errors.Wrap(err, "begin sync")
|
||||
}
|
||||
defer func() { s.completeSync() }() // Reset it at the end of the method.
|
||||
|
||||
_, err = s.waitForMinimumPeers()
|
||||
if err != nil {
|
||||
@@ -326,12 +349,6 @@ func (s *Service) waitForMinimumPeers() ([]peer.ID, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// markSynced marks node as synced and notifies feed listeners.
|
||||
func (s *Service) markSynced() {
|
||||
s.synced.Set()
|
||||
close(s.cfg.InitialSyncComplete)
|
||||
}
|
||||
|
||||
func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2ptypes.BlobSidecarsByRootReq, error) {
|
||||
r := blk.Root()
|
||||
if blk.Version() < version.Deneb {
|
||||
@@ -356,7 +373,7 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
|
||||
func (s *Service) fetchOriginBlobSidecars(pids []peer.ID, rob blocks.ROBlock) error {
|
||||
r := rob.Root()
|
||||
|
||||
req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
|
||||
@@ -394,11 +411,12 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
|
||||
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration) error {
|
||||
func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay time.Duration) error {
|
||||
const (
|
||||
errorMessage = "Failed to fetch origin data column sidecars"
|
||||
warningIteration = 10
|
||||
)
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Return early if the origin block has no blob commitments.
|
||||
@@ -411,7 +429,7 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the columns to request.
|
||||
// Compute the indices we need to custody.
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
@@ -423,9 +441,30 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
|
||||
return errors.Wrap(err, "fetch peer info")
|
||||
}
|
||||
|
||||
// Fetch origin data column sidecars.
|
||||
root := roBlock.Root()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
|
||||
"blobCount": len(commitments),
|
||||
"dataColumnCount": len(info.CustodyColumns),
|
||||
})
|
||||
|
||||
// Check if some needed data column sidecars are missing.
|
||||
stored := s.cfg.DataColumnStorage.Summary(root).Stored()
|
||||
missing := make(map[uint64]bool, len(info.CustodyColumns))
|
||||
for column := range info.CustodyColumns {
|
||||
if !stored[column] {
|
||||
missing[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(missing) == 0 {
|
||||
// All needed data column sidecars are present, exit early.
|
||||
log.Info("All needed origin data column sidecars are already present")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
params := sync.DataColumnSidecarsParams{
|
||||
Ctx: s.ctx,
|
||||
Tor: s.clock,
|
||||
@@ -436,46 +475,39 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
|
||||
DownscorePeerOnRPCFault: true,
|
||||
}
|
||||
|
||||
var verifiedRoDataColumnsByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn
|
||||
for attempt := uint64(0); ; attempt++ {
|
||||
verifiedRoDataColumnsByRoot, err = sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
|
||||
if err == nil {
|
||||
break
|
||||
// Retrieve missing data column sidecars.
|
||||
verifiedRoSidecarsByRoot, missingIndicesByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, missing)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch data column sidecars")
|
||||
}
|
||||
|
||||
log := log.WithError(err).WithFields(logrus.Fields{
|
||||
"attempt": attempt,
|
||||
"delay": delay,
|
||||
// Save retrieved data column sidecars.
|
||||
if err := s.cfg.DataColumnStorage.Save(verifiedRoSidecarsByRoot[root]); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
// Check if some needed data column sidecars are missing.
|
||||
if len(missingIndicesByRoot) == 0 {
|
||||
log.Info("Retrieved all needed origin data column sidecars")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Some sidecars are still missing.
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"attempt": attempt,
|
||||
"missingIndices": sortedSliceFromMap(missingIndicesByRoot[root]),
|
||||
"delay": delay,
|
||||
})
|
||||
|
||||
if attempt%warningIteration == 0 && attempt > 0 {
|
||||
log.Warning(errorMessage)
|
||||
time.Sleep(delay)
|
||||
|
||||
continue
|
||||
logFunc := log.Debug
|
||||
if attempt > 0 && attempt%warningIteration == 0 {
|
||||
logFunc = log.Warning
|
||||
}
|
||||
|
||||
log.Debug(errorMessage)
|
||||
time.Sleep(delay)
|
||||
logFunc("Failed to fetch some origin data column sidecars, retrying later")
|
||||
}
|
||||
|
||||
// Save origin data columns to disk.
|
||||
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
|
||||
}
|
||||
|
||||
if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
|
||||
"blobCount": len(commitments),
|
||||
"columnCount": len(verifiedRoDataColumnsSidecars),
|
||||
}).Info("Successfully downloaded data column sidecars for checkpoint sync block")
|
||||
|
||||
return nil
|
||||
}
|
||||
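The retry behavior of fetchOriginDataColumnSidecars can be summarized with the following sketch; fetchOnce stands in for the FetchDataColumnSidecars-plus-Save step, and the delay and warning cadence mirror the code above (the helper itself is invented for illustration):

// retryUntilCompleteSketch retries until no custody column is missing,
// escalating from Debug to Warning every warningIteration attempts.
func retryUntilCompleteSketch(fetchOnce func() (missingCount int, err error), delay time.Duration, warningIteration uint64) error {
	for attempt := uint64(0); ; attempt++ {
		missingCount, err := fetchOnce()
		if err != nil {
			return err
		}
		if missingCount == 0 {
			return nil
		}
		logFunc := log.Debug
		if attempt > 0 && attempt%warningIteration == 0 {
			logFunc = log.Warning
		}
		logFunc("Failed to fetch some origin data column sidecars, retrying later")
		time.Sleep(delay)
	}
}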
|
||||
func shufflePeers(pids []peer.ID) {
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
prysmSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
bcsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
@@ -174,6 +174,7 @@ func TestService_InitStartStop(t *testing.T) {
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
InitialSyncComplete: make(chan struct{}),
|
||||
})
|
||||
s.toggler = bcsync.NewServiceToggler()
|
||||
s.verifierWaiter = verification.NewInitializerWaiter(gs, nil, nil)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
assert.NotNil(t, s)
|
||||
@@ -218,6 +219,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
genesisChan: make(chan time.Time),
|
||||
}
|
||||
s.verifierWaiter = verification.NewInitializerWaiter(cs, nil, nil)
|
||||
s.toggler = bcsync.NewServiceToggler()
|
||||
return s, cs
|
||||
}
|
||||
|
||||
@@ -318,16 +320,18 @@ func TestService_markSynced(t *testing.T) {
|
||||
StateNotifier: mc.StateNotifier(),
|
||||
InitialSyncComplete: make(chan struct{}),
|
||||
})
|
||||
s.toggler = bcsync.NewServiceToggler()
|
||||
require.NotNil(t, s)
|
||||
assert.Equal(t, false, s.chainStarted.IsSet())
|
||||
assert.Equal(t, false, s.synced.IsSet())
|
||||
assert.Equal(t, true, s.Syncing())
|
||||
assert.NoError(t, s.Status())
|
||||
require.NoError(t, s.beginSync())
|
||||
s.chainStarted.Set()
|
||||
assert.ErrorContains(t, "syncing", s.Status())
|
||||
|
||||
go func() {
|
||||
s.markSynced()
|
||||
s.completeSync()
|
||||
}()
|
||||
|
||||
select {
|
||||
@@ -398,12 +402,14 @@ func TestService_Resync(t *testing.T) {
|
||||
mc = tt.chainService()
|
||||
}
|
||||
s := NewService(ctx, &Config{
|
||||
DB: beaconDB,
|
||||
P2P: p,
|
||||
Chain: mc,
|
||||
StateNotifier: mc.StateNotifier(),
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
|
||||
DB: beaconDB,
|
||||
P2P: p,
|
||||
Chain: mc,
|
||||
StateNotifier: mc.StateNotifier(),
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
|
||||
InitialSyncComplete: make(chan struct{}),
|
||||
})
|
||||
s.toggler = bcsync.NewServiceToggler()
|
||||
assert.NotNil(t, s)
|
||||
s.genesisTime = mc.Genesis
|
||||
assert.Equal(t, primitives.Slot(0), s.cfg.Chain.HeadSlot())
|
||||
@@ -699,7 +705,7 @@ func TestFetchOriginColumns(t *testing.T) {
|
||||
roBlock, err := blocks.NewROBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.fetchOriginColumns(roBlock, delay)
|
||||
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -721,7 +727,7 @@ func TestFetchOriginColumns(t *testing.T) {
|
||||
err := storage.Save(verifiedSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.fetchOriginColumns(roBlock, delay)
|
||||
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -747,10 +753,35 @@ func TestFetchOriginColumns(t *testing.T) {
|
||||
other.ENR().Set(peerdas.Cgc(numberOfCustodyGroups))
|
||||
p2p.Peers().UpdateENR(other.ENR(), other.PeerID())
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 0,
|
||||
Count: 1,
|
||||
Columns: []uint64{1, 17, 19, 42, 75, 87, 102, 117},
|
||||
allBut42 := make([]uint64, 0, numberOfCustodyGroups-1)
|
||||
for i := range numberOfCustodyGroups {
|
||||
if i != 42 {
|
||||
allBut42 = append(allBut42, i)
|
||||
}
|
||||
}
|
||||
|
||||
expectedRequests := []*ethpb.DataColumnSidecarsByRangeRequest{
|
||||
{
|
||||
StartSlot: 0,
|
||||
Count: 1,
|
||||
Columns: []uint64{1, 17, 19, 42, 75, 87, 102, 117},
|
||||
},
|
||||
{
|
||||
StartSlot: 0,
|
||||
Count: 1,
|
||||
Columns: allBut42,
|
||||
},
|
||||
{
|
||||
StartSlot: 0,
|
||||
Count: 1,
|
||||
Columns: []uint64{1, 17, 19, 75, 87, 102, 117},
|
||||
},
|
||||
}
|
||||
|
||||
toRespondByAttempt := [][]uint64{
|
||||
{42},
|
||||
{},
|
||||
{1, 17, 19, 75, 87, 102, 117},
|
||||
}
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
@@ -768,7 +799,7 @@ func TestFetchOriginColumns(t *testing.T) {
|
||||
// Create a block with blob commitments and sidecars
|
||||
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
ctxMap, err := prysmSync.ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
ctxMap, err := bcsync.ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{
|
||||
@@ -783,36 +814,33 @@ func TestFetchOriginColumns(t *testing.T) {
|
||||
}
|
||||
|
||||
// Do not respond any sidecar on the first attempt, and respond everything requested on the second one.
|
||||
firstAttempt := true
|
||||
attempt := 0
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||
assert.DeepEqual(t, expectedRequests[attempt], actualRequest)
|
||||
|
||||
if firstAttempt {
|
||||
firstAttempt = false
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, column := range actualRequest.Columns {
|
||||
err = prysmSync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
|
||||
for _, column := range toRespondByAttempt[attempt] {
|
||||
err = bcsync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
|
||||
attempt++
|
||||
})
|
||||
|
||||
err = service.fetchOriginColumns(roBlock, delay)
|
||||
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check all corresponding sidecars are saved in the store.
|
||||
summary := storage.Summary(roBlock.Root())
|
||||
for _, index := range expectedRequest.Columns {
|
||||
require.Equal(t, true, summary.HasIndex(index))
|
||||
for _, indices := range toRespondByAttempt {
|
||||
for _, index := range indices {
|
||||
require.Equal(t, true, summary.HasIndex(index))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
327
beacon-chain/sync/kzg_batch_verifier_test.go
Normal file
@@ -0,0 +1,327 @@
package sync

import (
"context"
"sync"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func TestValidateWithKzgBatchVerifier(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

tests := []struct {
name string
dataColumns []blocks.RODataColumn
expectedResult pubsub.ValidationResult
expectError bool
}{
{
name: "single valid data column",
dataColumns: createValidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "multiple valid data columns",
dataColumns: createValidTestDataColumns(t, 3),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "single invalid data column",
dataColumns: createInvalidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationReject,
expectError: true,
},
{
name: "empty data column slice",
dataColumns: []blocks.RODataColumn{},
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)

require.Equal(t, tt.expectedResult, result)
if tt.expectError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}
})
}
}

func TestVerifierRoutine(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

t.Run("processes single request", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}

select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for verification result")
}
})

t.Run("batches multiple requests", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

const numRequests = 5
resChans := make([]chan error, numRequests)

for i := range numRequests {
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
resChans[i] = resChan
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
}

for i := range numRequests {
select {
case err := <-resChans[i]:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatalf("timeout waiting for verification result %d", i)
}
}
})

t.Run("context cancellation stops routine", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}

routineDone := make(chan struct{})
go func() {
service.kzgVerifierRoutine()
close(routineDone)
}()

cancel()

select {
case <-routineDone:
case <-time.After(time.Second):
t.Fatal("timeout waiting for routine to exit")
}
})
}

func TestVerifyKzgBatch(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

t.Run("all valid data columns succeed", func(t *testing.T) {
dataColumns := createValidTestDataColumns(t, 3)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}

verifyKzgBatch(kzgVerifiers)

select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})

t.Run("invalid proofs fail entire batch", func(t *testing.T) {
validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)
allColumns := append(validColumns, invalidColumns...)

resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}

verifyKzgBatch(kzgVerifiers)

select {
case err := <-resChan:
assert.NotNil(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})

t.Run("empty batch handling", func(t *testing.T) {
verifyKzgBatch([]*kzgVerifier{})
})
}

func TestKzgBatchVerifierConcurrency(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

const numGoroutines = 10
const numRequestsPerGoroutine = 5

var wg sync.WaitGroup
wg.Add(numGoroutines)

// Multiple goroutines sending verification requests simultaneously
for i := range numGoroutines {
go func(goroutineID int) {
defer wg.Done()

for range numRequestsPerGoroutine {
dataColumns := createValidTestDataColumns(t, 1)
result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
}
}(i)
}

wg.Wait()
}

func TestKzgBatchVerifierFallback(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)

result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)

result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
require.Equal(t, pubsub.ValidationReject, result)
assert.NotNil(t, err)
})

t.Run("empty data columns fallback", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
})
}

func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
if len(roSidecars) >= count {
return roSidecars[:count]
}
return roSidecars
}

func createInvalidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
dataColumns := createValidTestDataColumns(t, count)

if len(dataColumns) > 0 {
sidecar := dataColumns[0].DataColumnSidecar
if len(sidecar.Column) > 0 && len(sidecar.Column[0]) > 0 {
corruptedSidecar := &ethpb.DataColumnSidecar{
Index: sidecar.Index,
KzgCommitments: make([][]byte, len(sidecar.KzgCommitments)),
KzgProofs: make([][]byte, len(sidecar.KzgProofs)),
KzgCommitmentsInclusionProof: make([][]byte, len(sidecar.KzgCommitmentsInclusionProof)),
SignedBlockHeader: sidecar.SignedBlockHeader,
Column: make([][]byte, len(sidecar.Column)),
}

for i, commitment := range sidecar.KzgCommitments {
corruptedSidecar.KzgCommitments[i] = make([]byte, len(commitment))
copy(corruptedSidecar.KzgCommitments[i], commitment)
}

for i, proof := range sidecar.KzgProofs {
corruptedSidecar.KzgProofs[i] = make([]byte, len(proof))
copy(corruptedSidecar.KzgProofs[i], proof)
}

for i, proof := range sidecar.KzgCommitmentsInclusionProof {
corruptedSidecar.KzgCommitmentsInclusionProof[i] = make([]byte, len(proof))
copy(corruptedSidecar.KzgCommitmentsInclusionProof[i], proof)
}

for i, col := range sidecar.Column {
corruptedSidecar.Column[i] = make([]byte, len(col))
copy(corruptedSidecar.Column[i], col)
}
corruptedSidecar.Column[0][0] ^= 0xFF // Flip bits to corrupt

corruptedRO, err := blocks.NewRODataColumn(corruptedSidecar)
require.NoError(t, err)
dataColumns[0] = corruptedRO
}
}
return dataColumns
}
@@ -6,10 +6,10 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -20,6 +20,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill/coverage"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
)

type Option func(s *Service) error
@@ -229,3 +230,11 @@ func WithBatchVerifierLimit(limit int) Option {
return nil
}
}

// WithReconstructionRandGen sets the random generator for reconstruction delays.
func WithReconstructionRandGen(rg *rand.Rand) Option {
return func(s *Service) error {
s.reconstructionRandGen = rg
return nil
}
}

@@ -115,11 +115,19 @@ func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock)
NewVerifier: s.newColumnsVerifier,
}

sidecarsByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
sidecarsByRoot, missingIndicesByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
}

if len(missingIndicesByRoot) > 0 {
prettyMissingIndicesByRoot := make(map[string][]uint64, len(missingIndicesByRoot))
for root, indices := range missingIndicesByRoot {
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = sortedSliceFromMap(indices)
}
return errors.Errorf("some sidecars are still missing after fetch: %v", prettyMissingIndicesByRoot)
}

// Save the sidecars to the storage.
count := 0
for _, sidecars := range sidecarsByRoot {

@@ -112,7 +112,15 @@ func (s *Service) lightClientUpdatesByRangeRPCHandler(ctx context.Context, msg i

logger.Infof("LC: requesting updates by range (StartPeriod: %d, EndPeriod: %d)", r.StartPeriod, r.StartPeriod+r.Count-1)

updates, err := s.lcStore.LightClientUpdates(ctx, r.StartPeriod, endPeriod)
headBlock, err := s.cfg.chain.HeadBlock(ctx)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)
logger.WithError(err).Error("Cannot retrieve head block")
return err
}

updates, err := s.lcStore.LightClientUpdates(ctx, r.StartPeriod, endPeriod, headBlock)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, err)

@@ -8,14 +8,15 @@ import (
"github.com/OffchainLabs/prysm/v6/async/abool"
"github.com/OffchainLabs/prysm/v6/async/event"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
db "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
@@ -43,6 +44,8 @@ func TestRPC_LightClientBootstrap(t *testing.T) {
Genesis: time.Unix(time.Now().Unix(), 0),
}
d := db.SetupDB(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)

r := Service{
ctx: ctx,
cfg: &config{
@@ -54,7 +57,7 @@ func TestRPC_LightClientBootstrap(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
lcStore: lcStore,
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -159,6 +162,8 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) {
Genesis: time.Unix(time.Now().Unix(), 0),
}
d := db.SetupDB(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)

r := Service{
ctx: ctx,
cfg: &config{
@@ -170,7 +175,7 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
lcStore: lcStore,
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -274,6 +279,8 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) {
Genesis: time.Unix(time.Now().Unix(), 0),
}
d := db.SetupDB(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)

r := Service{
ctx: ctx,
cfg: &config{
@@ -285,7 +292,7 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
lcStore: lcStore,
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -384,11 +391,19 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) {
p1.Connect(p2)
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")

blk := util.NewBeaconBlock()
signedBlk, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)

chainService := &mockChain.ChainService{
ValidatorsRoot: [32]byte{'A'},
Genesis: time.Unix(time.Now().Unix(), 0),
Block: signedBlk,
}
d := db.SetupDB(t)
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)
require.NoError(t, err)

r := Service{
ctx: ctx,
cfg: &config{
@@ -400,7 +415,7 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
lcStore: lcStore,
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -472,7 +487,7 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) {
t.Fatalf("unsupported version %d", i)
}

updates, err := r.lcStore.LightClientUpdates(ctx, 0, 4)
updates, err := r.lcStore.LightClientUpdates(ctx, 0, 4, signedBlk)
require.NoError(t, err)
updateSSZ, err := updates[uint64(responseCounter)].MarshalSSZ()
require.NoError(t, err)

@@ -17,10 +17,10 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/attestations"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
@@ -50,6 +50,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/trailofbits/go-mutexasserts"
"golang.org/x/sync/singleflight"
)

var _ runtime.Service = (*Service)(nil)
@@ -164,13 +165,15 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
kzgChan chan *kzgVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newColumnsVerifier verification.NewDataColumnsVerifier
columnSidecarsExecSingleFlight singleflight.Group
reconstructionSingleFlight singleflight.Group
availableBlocker coverage.AvailableBlocker
reconstructionLock sync.Mutex
reconstructionRandGen *rand.Rand
trackedValidatorsCache *cache.TrackedValidatorsCache
ctxMap ContextByteVersions
@@ -203,6 +206,10 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
// Initialize signature channel with configured limit
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
// Initialize KZG channel with fixed buffer size of 100.
// This buffer size is designed to handle burst traffic of data column gossip messages:
// - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
r.kzgChan = make(chan *kzgVerifier, 100)
// Correctly remove it from our seen pending block map.
// The eviction method always assumes that the mutex is held.
r.slotToPendingBlocks.OnEvicted(func(s string, i interface{}) {
@@ -255,6 +262,7 @@ func (s *Service) Start() {
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

go s.verifierRoutine()
go s.kzgVerifierRoutine()
go s.startDiscoveryAndSubscriptions()
go s.processDataColumnLogs()