Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: fusaka-dev ... peerdas-ge (44 commits)
Commits (SHA1, newest first):

f8716d8f77
58795d5ce3
c558798fe8
ba1699fdee
adf62a6b45
9e5b3fb599
eaf4b4f9bf
0b0b7ff0a9
f1be39f7f1
3815ff4c28
76a0759e13
5cd2d99606
1a2a0688e1
6d0524dcf5
8ec9da81c0
facb70e12c
3d91b35f4e
dc70dae9d0
9e2c04400c
60058266e8
291c4ac9b5
045776ff75
0a386cbdfd
4f02e44446
41600b67e3
cec236ff7d
62dac40734
d3763d56cf
461fa50c34
149e220b61
7b059560f6
ae4b982a6c
111e5c462f
f330021785
43c111bca2
41c2f1d802
384270f9a7
8e9d3f5f4f
d6d542889c
f8e6b9d1a8
8f25d1e986
81e9fda34b
0ff2d2fa21
8477a84454
```diff
@@ -1727,7 +1727,7 @@ func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
 	t.Run("Interface signature verification", func(t *testing.T) {
 		// This test verifies that the SubmitBlindedBlock method signature
 		// has been updated to return BlobsBundler interface

 		client := &Client{}

 		// Verify the method exists with the correct signature
```
```diff
@@ -35,7 +35,7 @@ func WithMaxGoroutines(x int) Option {
 // WithLCStore for light client store access.
 func WithLCStore() Option {
 	return func(s *Service) error {
-		s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
+		s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
 		return nil
 	}
 }
```
```diff
@@ -668,7 +668,7 @@ func (s *Service) areDataColumnsAvailable(
 ) error {
 	samplesPerSlot := params.BeaconConfig().SamplesPerSlot

-	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
 	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
 	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
 	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
@@ -691,14 +691,16 @@ func (s *Service) areDataColumnsAvailable(
 	}

 	// All columns to sample need to be available for the block to be considered available.
-	// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
 	nodeID := s.cfg.P2P.NodeID()

 	// Get the custody group sampling size for the node.
-	custodyGroupCount := s.cfg.P2P.CustodyGroupCount()
+	custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
+	if err != nil {
+		return errors.Wrap(err, "custody group count")
+	}

 	// Compute the sampling size.
-	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#custody-sampling
+	// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
 	samplingSize := max(samplesPerSlot, custodyGroupCount)

 	// Get the peer info for the node.
```
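The sampling rule in this hunk is small but load-bearing: a node must sample at least `SamplesPerSlot` custody groups, and a node that custodies more than the minimum must check everything it custodies. A standalone sketch of the rule, using illustrative numbers rather than Prysm's real config values:

```go
package main

import "fmt"

// samplingSize mirrors the rule in areDataColumnsAvailable: sample the
// larger of the protocol minimum and the node's own custody group count.
func samplingSize(samplesPerSlot, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount)
}

func main() {
	// Illustrative values only: a hypothetical minimum of 8 samples per slot.
	fmt.Println(samplingSize(8, 4))  // 8  - a small custodian still samples the minimum
	fmt.Println(samplingSize(8, 64)) // 64 - a large custodian samples all of its custody
}
```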
```diff
@@ -899,6 +901,118 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
 	}
 }

+// areDataColumnsImmediatelyAvailable checks if all required data columns are currently
+// available in the database without waiting for missing ones.
+func (s *Service) areDataColumnsImmediatelyAvailable(
+	ctx context.Context,
+	root [fieldparams.RootLength]byte,
+	block interfaces.ReadOnlyBeaconBlock,
+) error {
+	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
+	blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
+	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
+		return nil
+	}
+
+	body := block.Body()
+	if body == nil {
+		return errors.New("invalid nil beacon block body")
+	}
+
+	kzgCommitments, err := body.BlobKzgCommitments()
+	if err != nil {
+		return errors.Wrap(err, "blob KZG commitments")
+	}
+
+	// If block has no commitments there is nothing to check.
+	if len(kzgCommitments) == 0 {
+		return nil
+	}
+
+	// All columns to sample need to be available for the block to be considered available.
+	nodeID := s.cfg.P2P.NodeID()
+
+	// Get the custody group sampling size for the node.
+	custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
+	if err != nil {
+		return errors.Wrap(err, "custody group count error")
+	}
+
+	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
+	// Compute the sampling size.
+	samplingSize := max(samplesPerSlot, custodyGroupCount)
+
+	// Get the peer info for the node.
+	peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
+	if err != nil {
+		return errors.Wrap(err, "peer info")
+	}
+
+	// Get the count of data columns we already have in the store.
+	summary := s.dataColumnStorage.Summary(root)
+	storedDataColumnsCount := summary.Count()
+
+	minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+
+	// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
+	// We don't need to wait for the rest of the data columns to declare the block as available.
+	if storedDataColumnsCount >= minimumColumnCountToReconstruct {
+		return nil
+	}
+
+	// Get a map of data column indices that are not currently available.
+	missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
+	if err != nil {
+		return errors.Wrap(err, "missing data columns")
+	}
+
+	// If there are no missing indices, all data column sidecars are available.
+	if len(missingMap) == 0 {
+		return nil
+	}
+
+	// If any data is missing, return error immediately (don't wait)
+	missingIndices := uint64MapToSortedSlice(missingMap)
+	return fmt.Errorf("data columns not immediately available, missing %v", missingIndices)
+}
+
+// areBlobsImmediatelyAvailable checks if all required blobs are currently
+// available in the database without waiting for missing ones.
+func (s *Service) areBlobsImmediatelyAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
+	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
+		return nil
+	}
+
+	body := block.Body()
+	if body == nil {
+		return errors.New("invalid nil beacon block body")
+	}
+	kzgCommitments, err := body.BlobKzgCommitments()
+	if err != nil {
+		return errors.Wrap(err, "could not get KZG commitments")
+	}
+	// expected is the number of kzg commitments observed in the block.
+	expected := len(kzgCommitments)
+	if expected == 0 {
+		return nil
+	}
+	// get a map of BlobSidecar indices that are not currently available.
+	missing, err := missingBlobIndices(s.blobStorage, root, kzgCommitments, block.Slot())
+	if err != nil {
+		return errors.Wrap(err, "missing indices")
+	}
+	// If there are no missing indices, all BlobSidecars are available.
+	if len(missing) == 0 {
+		return nil
+	}
+
+	// If any blobs are missing, return error immediately (don't wait)
+	missingIndices := uint64MapToSortedSlice(missing)
+	return fmt.Errorf("blobs not immediately available, missing %v", missingIndices)
+}
+
 // uint64MapToSortedSlice produces a sorted uint64 slice from a map.
 func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
 	output := make([]uint64, 0, len(input))
```
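The body of `uint64MapToSortedSlice` is cut off above. Given its doc comment and the `missing %v` error messages that consume its output, a plausible completion is a copy-keys-then-sort helper; this is a sketch, not necessarily Prysm's exact code:

```go
package das

import "sort"

// uint64MapToSortedSlice produces a sorted uint64 slice from a map's keys.
// Assumed completion of the truncated helper; the real implementation may
// differ (e.g. it could use slices.Sort from the standard library).
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
	output := make([]uint64, 0, len(input))
	for index := range input {
		output = append(output, index)
	}
	sort.Slice(output, func(i, j int) bool { return output[i] < output[j] })
	return output
}
```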
```diff
@@ -1,7 +1,6 @@
 package blockchain

 import (
-	"bytes"
 	"context"
 	"fmt"
 	"strings"
@@ -198,8 +197,7 @@ func (s *Service) processLightClientFinalityUpdate(

 	finalizedCheckpoint := attestedState.FinalizedCheckpoint()

-	// Check if the finalized checkpoint has changed
-	if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
+	if finalizedCheckpoint == nil {
 		return nil
 	}

@@ -224,17 +222,7 @@ func (s *Service) processLightClientFinalityUpdate(
 		return nil
 	}

-	log.Debug("Saving new light client finality update")
-	s.lcStore.SetLastFinalityUpdate(newUpdate)
-
-	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
-		Type: statefeed.LightClientFinalityUpdate,
-		Data: newUpdate,
-	})
-
-	if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
-		return errors.Wrap(err, "could not broadcast light client finality update")
-	}
+	s.lcStore.SetLastFinalityUpdate(newUpdate, true)

 	return nil
 }
@@ -266,17 +254,7 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
 		return nil
 	}

-	log.Debug("Saving new light client optimistic update")
-	s.lcStore.SetLastOptimisticUpdate(newUpdate)
-
-	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
-		Type: statefeed.LightClientOptimisticUpdate,
-		Data: newUpdate,
-	})
-
-	if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
-		return errors.Wrap(err, "could not broadcast light client optimistic update")
-	}
+	s.lcStore.SetLastOptimisticUpdate(newUpdate, true)

 	return nil
 }
```
```diff
@@ -3172,7 +3172,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {

 		t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
 			s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-			s.lcStore = &lightClient.Store{}
+			s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())

 			var oldActualUpdate interfaces.LightClientOptimisticUpdate
 			var err error
```
```diff
@@ -3248,39 +3248,39 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
 			expectReplace: true,
 		},
 		{
-			name:          "Old update is better - age - no supermajority",
+			name:          "Old update is better - finalized slot is higher",
 			oldOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
 			newOptions:    []util.LightClientOption{},
 			expectReplace: false,
 		},
 		{
 			name:          "Old update is better - age - both supermajority",
 			oldOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
 			newOptions:    []util.LightClientOption{util.WithSupermajority()},
 			expectReplace: false,
 		},
 		{
-			name:          "Old update is better - supermajority",
-			oldOptions:    []util.LightClientOption{util.WithSupermajority()},
+			name:          "Old update is better - attested slot is higher",
+			oldOptions:    []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
 			newOptions:    []util.LightClientOption{},
 			expectReplace: false,
 		},
 		{
-			name:          "New update is better - age - both supermajority",
-			oldOptions:    []util.LightClientOption{util.WithSupermajority()},
-			newOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
+			name:          "Old update is better - signature slot is higher",
+			oldOptions:    []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
+			newOptions:    []util.LightClientOption{},
 			expectReplace: false,
 		},
 		{
 			name:          "New update is better - finalized slot is higher",
 			oldOptions:    []util.LightClientOption{},
 			newOptions:    []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
 			expectReplace: true,
 		},
 		{
-			name:          "New update is better - age - no supermajority",
+			name:          "New update is better - attested slot is higher",
 			oldOptions:    []util.LightClientOption{},
-			newOptions:    []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
+			newOptions:    []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
 			expectReplace: true,
 		},
 		{
-			name:          "New update is better - supermajority",
+			name:          "New update is better - signature slot is higher",
 			oldOptions:    []util.LightClientOption{},
-			newOptions:    []util.LightClientOption{util.WithSupermajority()},
+			newOptions:    []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
 			expectReplace: true,
 		},
 	}
```
```diff
@@ -3312,7 +3312,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {

 		t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
 			s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
-			s.lcStore = &lightClient.Store{}
+			s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())

 			var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
 			var err error
```
```diff
@@ -39,12 +39,22 @@ import (
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
 	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v6/runtime/version"
 	prysmTime "github.com/OffchainLabs/prysm/v6/time"
 	"github.com/OffchainLabs/prysm/v6/time/slots"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

+// DataAvailabilityChecker defines an interface for checking if data is available
+// for a given block root. This interface is implemented by the blockchain service
+// which has knowledge of the beacon chain's data availability requirements.
+// Returns nil if data is available, ErrDataNotAvailable if data is not available,
+// or another error for other failures.
+type DataAvailabilityChecker interface {
+	IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error
+}
+
 // Service represents a service that handles the internal
 // logic of managing the full PoS beacon chain.
 type Service struct {
@@ -108,6 +118,9 @@ type Checker interface {

 var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")

+// ErrDataNotAvailable is returned when block data is not immediately available for processing.
+var ErrDataNotAvailable = errors.New("block data is not available")
+
 type blobNotifierMap struct {
 	sync.RWMutex
 	notifiers map[[32]byte]chan uint64
@@ -308,6 +321,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
 		return errors.Wrap(err, "failed to initialize blockchain service")
 	}

+	if !params.FuluEnabled() {
+		return nil
+	}
+
 	earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
 	if err != nil {
 		return errors.Wrap(err, "could not get and save custody group count")
@@ -580,7 +597,7 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
 		}
 	}

-	custodyGroupCount, earliestAvailableSlot, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, custodyGroupCount, slot)
+	earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
 	if err != nil {
 		return 0, 0, errors.Wrap(err, "update custody info")
 	}
@@ -588,6 +605,32 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
 	return earliestAvailableSlot, custodyGroupCount, nil
 }

+// IsDataAvailable implements the DataAvailabilityChecker interface for use by the execution service.
+// It checks if all required blob and data column data is immediately available in the database without waiting.
+func (s *Service) IsDataAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
+	block := signedBlock.Block()
+	if block == nil {
+		return errors.New("invalid nil beacon block")
+	}
+
+	blockVersion := block.Version()
+
+	if blockVersion >= version.Fulu {
+		if err := s.areDataColumnsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
+			return errors.Wrap(ErrDataNotAvailable, err.Error())
+		}
+		return nil
+	}
+
+	if blockVersion >= version.Deneb {
+		if err := s.areBlobsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
+			return errors.Wrap(ErrDataNotAvailable, err.Error())
+		}
+	}
+
+	return nil
+}
+
 func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
 	currentTime := prysmTime.Now()
 	if currentTime.After(genesisTime) {
```
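`IsDataAvailable` wraps every failure in the `ErrDataNotAvailable` sentinel, so callers can distinguish "data not here yet" from a genuine fault with `errors.Is`. A self-contained toy showing that pattern (names and the wrapping style are illustrative; Prysm itself wraps via `github.com/pkg/errors`):

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel mirroring the shape of blockchain.ErrDataNotAvailable.
var errDataNotAvailable = errors.New("block data is not available")

// checkBlock returns the sentinel wrapped with detail when columns are missing.
func checkBlock(missing []uint64) error {
	if len(missing) > 0 {
		return fmt.Errorf("%w: missing columns %v", errDataNotAvailable, missing)
	}
	return nil
}

func main() {
	err := checkBlock([]uint64{3, 17})
	if errors.Is(err, errDataNotAvailable) {
		// The caller can queue the block for later instead of failing hard.
		fmt.Println("queue block for later:", err)
	}
}
```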
```diff
@@ -55,7 +55,7 @@ type mockBroadcaster struct {

 type mockAccessor struct {
 	mockBroadcaster
-	mockDataColumnsHandler
+	mockCustodyManager
 	p2pTesting.MockPeerManager
 }

@@ -99,28 +99,28 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig

 var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

-// mockDataColumnsHandler is a mock implementation of p2p.DataColumnsHandler
-type mockDataColumnsHandler struct {
+// mockCustodyManager is a mock implementation of p2p.CustodyManager
+type mockCustodyManager struct {
 	mut                   sync.RWMutex
 	earliestAvailableSlot primitives.Slot
 	custodyGroupCount     uint64
 }

-func (dch *mockDataColumnsHandler) EarliestAvailableSlot() primitives.Slot {
+func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
 	dch.mut.RLock()
 	defer dch.mut.RUnlock()

-	return dch.earliestAvailableSlot
+	return dch.earliestAvailableSlot, nil
 }

-func (dch *mockDataColumnsHandler) CustodyGroupCount() uint64 {
+func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
 	dch.mut.RLock()
 	defer dch.mut.RUnlock()

-	return dch.custodyGroupCount
+	return dch.custodyGroupCount, nil
 }

-func (dch *mockDataColumnsHandler) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
+func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
 	dch.mut.Lock()
 	defer dch.mut.Unlock()

@@ -130,11 +130,11 @@ func (dch *mockDataColumnsHandler) UpdateCustodyInfo(earliestAvailableSlot primi
 	return earliestAvailableSlot, custodyGroupCount, nil
 }

-func (dch *mockDataColumnsHandler) CustodyGroupCountFromPeer(peer.ID) uint64 {
+func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
 	return 0
 }

-var _ p2p.DataColumnsHandler = (*mockDataColumnsHandler)(nil)
+var _ p2p.CustodyManager = (*mockCustodyManager)(nil)

 type testServiceRequirements struct {
 	ctx context.Context
```
```diff
@@ -732,6 +732,11 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
 	return c.TargetRoot, nil
 }

+// IsDataAvailable implements the data availability checker interface for testing
+func (c *ChainService) IsDataAvailable(_ context.Context, _ [32]byte, _ interfaces.ReadOnlySignedBeaconBlock) error {
+	return nil
+}
+
 // MockSyncChecker is a mock implementation of blockchain.Checker.
 // We can't make an assertion here that this is true because that would create a circular dependency.
 type MockSyncChecker struct {
```
```diff
@@ -96,24 +96,6 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
 	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
 }

-func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
-	currentEpoch := slots.ToEpoch(header.Header.Slot)
-	fork, err := forks.Fork(currentEpoch)
-	if err != nil {
-		return err
-	}
-	domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
-	if err != nil {
-		return err
-	}
-	proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
-	if err != nil {
-		return err
-	}
-	proposerPubKey := proposer.PublicKey
-	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
-}
-
 // VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
 // from the above method by not using fork data from the state and instead retrieving it
 // via the respective epoch.
```
```diff
@@ -102,13 +102,13 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
 			return nil, err
 		} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
 			// if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
-			log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
+			log.Debug("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
 			continue
 		}

 		vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
 		if !exists {
-			log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
+			log.WithField("validator", hexutil.Encode(wr.ValidatorPubkey)).Debug("Skipping execution layer withdrawal request, validator index not found")
 			continue
 		}
 		validator, err := st.ValidatorAtIndexReadOnly(vIdx)
@@ -120,23 +120,23 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
 		wc := validator.GetWithdrawalCredentials()
 		isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
 		if !hasCorrectCredential || !isCorrectSourceAddress {
-			log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
+			log.Debug("Skipping execution layer withdrawal request, wrong withdrawal credentials")
 			continue
 		}

 		// Verify the validator is active.
 		if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
-			log.Debugln("Skipping execution layer withdrawal request, validator not active")
+			log.Debug("Skipping execution layer withdrawal request, validator not active")
 			continue
 		}
 		// Verify the validator has not yet submitted an exit.
 		if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
-			log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
+			log.Debug("Skipping execution layer withdrawal request, validator has submitted an exit already")
 			continue
 		}
 		// Verify the validator has been active long enough.
 		if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
-			log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
+			log.Debug("Skipping execution layer withdrawal request, validator has not been active long enough")
 			continue
 		}

```
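These hunks replace `Debugf`/`Debugln` calls with `WithField(...).Debug(...)`: instead of interpolating values into an opaque message string, the value travels as a discrete field that log pipelines can index and query. A minimal runnable comparison using logrus directly (which Prysm's logger wraps):

```go
package main

import log "github.com/sirupsen/logrus"

func main() {
	log.SetLevel(log.DebugLevel)
	pubkey := "0xabc123" // illustrative value

	// printf-style: the value is baked into the message string.
	log.Debugf("validator index for %s not found", pubkey)

	// structured: the value is a separate, machine-readable field.
	log.WithField("validator", pubkey).Debug("validator index not found")
}
```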
```diff
@@ -15,7 +15,7 @@ import (
 )

 // UpgradeToFulu takes a generic pre-Fulu state and returns the Fulu-version state.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
 func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
 	currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
 	if err != nil {
```
```diff
@@ -10,8 +10,12 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
     visibility = ["//visibility:public"],
     deps = [
+        "//async/event:go_default_library",
+        "//beacon-chain/core/feed:go_default_library",
+        "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/db/iface:go_default_library",
         "//beacon-chain/execution:go_default_library",
+        "//beacon-chain/p2p:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
@@ -39,6 +43,9 @@ go_test(
     ],
     deps = [
         ":go_default_library",
+        "//async/event:go_default_library",
+        "//beacon-chain/db/testing:go_default_library",
+        "//beacon-chain/p2p/testing:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types:go_default_library",
```
```diff
@@ -750,7 +750,9 @@ func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
 	return numActiveParticipants*3 >= maxActiveParticipants*2
 }

-func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
+// IsFinalityUpdateValidForBroadcast checks if a finality update needs to be broadcasted.
+// It is also used to check if an incoming gossiped finality update is valid for forwarding and saving.
+func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
 	if oldUpdate == nil {
 		return true
 	}
```
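`UpdateHasSupermajority` keeps the two-thirds test in integer arithmetic (`3·active >= 2·max`), avoiding floating-point division entirely. A quick boundary check, using 512 only as an illustrative committee size:

```go
package main

import "fmt"

// hasSupermajority mirrors the comparison in UpdateHasSupermajority:
// true when at least two thirds of the committee signed.
func hasSupermajority(active, max uint64) bool {
	return active*3 >= max*2
}

func main() {
	fmt.Println(hasSupermajority(341, 512)) // false: 1023 < 1024
	fmt.Println(hasSupermajority(342, 512)) // true:  1026 >= 1024
}
```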
```diff
@@ -772,6 +774,35 @@ func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityU
 	return true
 }

+// IsBetterFinalityUpdate checks if the new finality update is better than the old one for saving.
+// This does not concern broadcasting, but rather the decision of whether to save the new update.
+// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
+func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
+	if oldUpdate == nil {
+		return true
+	}
+
+	// Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
+	newFinalizedSlot := newUpdate.FinalizedHeader().Beacon().Slot
+	newAttestedSlot := newUpdate.AttestedHeader().Beacon().Slot
+
+	oldFinalizedSlot := oldUpdate.FinalizedHeader().Beacon().Slot
+	oldAttestedSlot := oldUpdate.AttestedHeader().Beacon().Slot
+
+	if newFinalizedSlot < oldFinalizedSlot {
+		return false
+	}
+	if newFinalizedSlot == oldFinalizedSlot {
+		if newAttestedSlot < oldAttestedSlot {
+			return false
+		}
+		if newAttestedSlot == oldAttestedSlot && newUpdate.SignatureSlot() <= oldUpdate.SignatureSlot() {
+			return false
+		}
+	}
+	return true
+}
+
 func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
 	if oldUpdate == nil {
 		return true
```
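The new save rule is a lexicographic comparison on the tuple (finalized slot, attested slot, signature slot): the new update must be strictly greater to replace the old one, and an exact tie loses. A standalone sketch of that ordering with slot types simplified to plain integers:

```go
package main

import "fmt"

type update struct{ finalized, attested, signature uint64 }

// isBetter mirrors IsBetterFinalityUpdate's tuple comparison: the new
// update must be strictly greater in lexicographic order to replace.
func isBetter(n, o update) bool {
	if n.finalized != o.finalized {
		return n.finalized > o.finalized
	}
	if n.attested != o.attested {
		return n.attested > o.attested
	}
	return n.signature > o.signature
}

func main() {
	old := update{finalized: 100, attested: 105, signature: 106}
	fmt.Println(isBetter(update{100, 105, 107}, old)) // true: signature slot breaks the tie
	fmt.Println(isBetter(update{100, 105, 106}, old)) // false: an identical tuple does not replace
}
```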
```diff
@@ -4,7 +4,11 @@ import (
 	"context"
 	"sync"

+	"github.com/OffchainLabs/prysm/v6/async/event"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
+	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
@@ -16,13 +20,17 @@ type Store struct {
 	mu sync.RWMutex

 	beaconDB             iface.HeadAccessDatabase
-	lastFinalityUpdate   interfaces.LightClientFinalityUpdate
-	lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
+	lastFinalityUpdate   interfaces.LightClientFinalityUpdate   // tracks the best finality update seen so far
+	lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
+	p2p                  p2p.Accessor
+	stateFeed            event.SubscriberSender
 }

-func NewLightClientStore(db iface.HeadAccessDatabase) *Store {
+func NewLightClientStore(db iface.HeadAccessDatabase, p p2p.Accessor, e event.SubscriberSender) *Store {
 	return &Store{
-		beaconDB: db,
+		beaconDB:  db,
+		p2p:       p,
+		stateFeed: e,
 	}
 }
@@ -143,10 +151,23 @@ func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update
 	return nil
 }

-func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
+func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
 	s.mu.Lock()
 	defer s.mu.Unlock()

+	if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
+		if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
+			log.WithError(err).Error("Could not broadcast light client finality update")
+		}
+	}
+
 	s.lastFinalityUpdate = update
+	log.Debug("Saved new light client finality update")
+
+	s.stateFeed.Send(&feed.Event{
+		Type: statefeed.LightClientFinalityUpdate,
+		Data: update,
+	})
 }

 func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
@@ -155,10 +176,23 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
 	return s.lastFinalityUpdate
 }

-func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate) {
+func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
 	s.mu.Lock()
 	defer s.mu.Unlock()

+	if broadcast {
+		if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
+			log.WithError(err).Error("Could not broadcast light client optimistic update")
+		}
+	}
+
 	s.lastOptimisticUpdate = update
+	log.Debug("Saved new light client optimistic update")
+
+	s.stateFeed.Send(&feed.Event{
+		Type: statefeed.LightClientOptimisticUpdate,
+		Data: update,
+	})
 }

 func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
```
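With this change, broadcasting moves into the store: callers pass a `broadcast` flag, and for finality updates the store itself decides (via `IsFinalityUpdateValidForBroadcast`) whether a save should also gossip. A toy model of that save-and-maybe-broadcast shape, with the broadcast side effect and the validity check faked:

```go
package main

import "fmt"

// toyStore models the pattern the new SetLastFinalityUpdate implements;
// the gossip side effect and validity check are stand-ins, not Prysm code.
type toyStore struct{ last string }

func (s *toyStore) setLast(update string, broadcast bool) {
	// Stand-in for IsFinalityUpdateValidForBroadcast(update, s.last).
	if broadcast && update > s.last {
		fmt.Println("gossip:", update)
	}
	s.last = update
	fmt.Println("saved:", update)
}

func main() {
	s := &toyStore{}
	s.setLast("update-a", true)  // saves and gossips
	s.setLast("update-b", false) // replay-style path: saves only
}
```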
```diff
@@ -3,7 +3,10 @@ package light_client_test

 import (
 	"testing"

+	"github.com/OffchainLabs/prysm/v6/async/event"
 	lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
+	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
+	p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/runtime/version"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -21,7 +24,7 @@ func TestLightClientStore(t *testing.T) {
 	params.OverrideBeaconConfig(cfg)

 	// Initialize the light client store
-	lcStore := &lightClient.Store{}
+	lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), &p2pTesting.FakeP2P{}, new(event.Feed))

 	// Create test light client updates for Capella and Deneb
 	lCapella := util.NewTestLightClient(t, version.Capella)
@@ -45,24 +48,118 @@ func TestLightClientStore(t *testing.T) {
 	require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

 	// Set and get finality with Capella update. Optimistic update should be nil
-	lcStore.SetLastFinalityUpdate(finUpdateCapella)
+	lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
 	require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
 	require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")

 	// Set and get optimistic with Capella update. Finality update should be Capella
-	lcStore.SetLastOptimisticUpdate(opUpdateCapella)
+	lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
 	require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
 	require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")

 	// Set and get finality and optimistic with Deneb update
-	lcStore.SetLastFinalityUpdate(finUpdateDeneb)
-	lcStore.SetLastOptimisticUpdate(opUpdateDeneb)
+	lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
+	lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
 	require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
 	require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")

 	// Set and get finality and optimistic with nil update
 	lcStore.SetLastFinalityUpdate(nil)
 	lcStore.SetLastOptimisticUpdate(nil)
 	require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
 	require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
 }

+func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
+	p2p := p2pTesting.NewTestP2P(t)
+	lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), p2p, new(event.Feed))
+
+	// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
+	l0 := util.NewTestLightClient(t, version.Altair)
+	update0, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
+	// update0 should be valid for broadcast - meaning it should be broadcasted
+	require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update0, true)
+	require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
+	p2p.BroadcastCalled.Store(false) // Reset for next test
+
+	// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
+	l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
+	update1, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
+	// update1 should not be valid for broadcast - meaning it should not be broadcasted
+	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update1, true)
+	require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
+	p2p.BroadcastCalled.Store(false) // Reset for next test
+
+	// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
+	l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
+	update2, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
+	// update2 should be valid for broadcast - meaning it should be broadcasted
+	require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update2, true)
+	require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
+	p2p.BroadcastCalled.Store(false) // Reset for next test
+
+	// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
+	l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority())
+	update3, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
+	// update3 should not be valid for broadcast - meaning it should not be broadcasted
+	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update3, true)
+	require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")
+
+	// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
+	l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority())
+	update4, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
+	// update4 should be valid for broadcast - meaning it should be broadcasted
+	require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update4, true)
+	require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
+	p2p.BroadcastCalled.Store(false) // Reset for next test
+
+	// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
+	l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
+	update5, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
+	// update5 should not be valid for broadcast - meaning it should not be broadcasted
+	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update5, true)
+	require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
+
+	// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
+	l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
+	update6, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
+	require.NoError(t, err, "Failed to create light client finality update")
+
+	require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
+	// update6 should not be valid for broadcast - meaning it should not be broadcasted
+	require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")
+
+	lcStore.SetLastFinalityUpdate(update6, true)
+	require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
+	require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
+}
```
```diff
@@ -31,15 +31,8 @@ var (
 	maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
 )

-type CustodyType int
-
-const (
-	Target CustodyType = iota
-	Actual
-)
-
 // CustodyGroups computes the custody groups the node should participate in for custody.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#get_custody_groups
 func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
 	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

@@ -102,7 +95,7 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
 }

 // ComputeColumnsForCustodyGroup computes the columns for a given custody group.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
 func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 	beaconConfig := params.BeaconConfig()
 	numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
@@ -127,7 +120,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
 // DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
 // The returned value contains pointers to function parameters.
 // (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
-// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars_from_block
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
 func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
 	if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
 		return nil, nil
@@ -206,7 +199,7 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
 // the KZG commitment inclusion proofs and cells and cell proofs.
 // The returned value contains pointers to function parameters.
 // (If the caller alters input parameters afterwards, the returned value will be modified as well.)
-// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
 func dataColumnsSidecars(
 	signedBlockHeader *ethpb.SignedBeaconBlockHeader,
 	blobKzgCommitments [][]byte,
```
```diff
@@ -24,13 +24,13 @@ var (
 	ErrCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
 )

-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#custody-group-count
 type Cgc uint64

 func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }

 // VerifyDataColumnSidecar verifies if the data column sidecar is valid.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
 func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
 	// The sidecar index must be within the valid range.
 	numberOfColumns := params.BeaconConfig().NumberOfColumns
@@ -57,7 +57,7 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
 // while we are verifying all the KZG proofs from multiple sidecars in a batch.
 // This is done to improve performance since the internal KZG library is way more
 // efficient when verifying in batch.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
 func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
 	// Compute the total count.
 	count := 0
@@ -93,7 +93,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
 }

 // VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments are included in the given beacon block.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
 func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
 	if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
 		return ErrNilBlockHeader
@@ -125,7 +125,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
 }

 // ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
 func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
 	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
 	return columnIndex % dataColumnSidecarSubnetCount
```
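Subnet assignment for a data column sidecar is a plain modulo over the configured subnet count, so consecutive column indices spread evenly across subnets. A runnable illustration (the subnet count of 64 is hypothetical, chosen only to make the wrap-around visible):

```go
package main

import "fmt"

// computeSubnet mirrors ComputeSubnetForDataColumnSidecar: a modulo over
// the configured subnet count.
func computeSubnet(columnIndex, subnetCount uint64) uint64 {
	return columnIndex % subnetCount
}

func main() {
	const subnets = 64 // hypothetical, for illustration only
	for _, col := range []uint64{0, 63, 64, 127} {
		fmt.Printf("column %3d -> subnet %d\n", col, computeSubnet(col, subnets))
	}
}
```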
```diff
@@ -8,7 +8,7 @@ import (
 )

 // ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
-// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/validator.md#validator-custody
+// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
 func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
 	totalNodeBalance := uint64(0)
 	for index := range validatorsIndex {
```
```diff
@@ -53,11 +53,6 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
 	return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
 }

-// PeerDASIsActive checks whether peerDAS is active at the provided slot.
-func PeerDASIsActive(slot primitives.Slot) bool {
-	return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
-}
-
 // CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
 // Spec code:
 // If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
```
```diff
@@ -64,9 +64,6 @@ type ReadOnlyDatabase interface {
 	// Origin checkpoint sync support
 	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
 	BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
-	// Custody operations.
-	CustodyInfo(ctx context.Context) (uint64, uint64, error)
-	SubscribedToAllDataSubnets(ctx context.Context) (bool, error)
 }

 // NoHeadAccessDatabase defines a struct without access to chain head data.
@@ -107,10 +104,8 @@ type NoHeadAccessDatabase interface {
 	DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)

 	// Custody operations.
-	SaveCustodyGroupCount(ctx context.Context, custodyGroupCount uint64) error
-	SaveSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) error
-	UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64, earliestAvailableSlot primitives.Slot) (uint64, primitives.Slot, error)
+	UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
+	UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
 }

 // HeadAccessDatabase defines a struct with access to reading chain head data.
```
```diff
@@ -84,6 +84,7 @@ go_test(
         "backup_test.go",
         "blocks_test.go",
         "checkpoint_test.go",
+        "custody_test.go",
         "deposit_contract_test.go",
         "encoding_test.go",
         "execution_chain_test.go",
@@ -1,6 +1,7 @@
 package kv

 import (
 	"context"
+	"fmt"
 	"testing"
 	"time"
@@ -1327,3 +1328,86 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
 	want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
 	require.Equal(t, want.Error(), err.Error())
 }
+
+// createAndSaveBlock creates a phase0 beacon block at the specified slot and saves it to the database.
+func createAndSaveBlock(t *testing.T, ctx context.Context, db *Store, slot primitives.Slot) {
+	block := util.NewBeaconBlock()
+	block.Block.Slot = slot
+
+	wrappedBlock, err := blocks.NewSignedBeaconBlock(block)
+	require.NoError(t, err)
+	require.NoError(t, db.SaveBlock(ctx, wrappedBlock))
+}
+
+func TestStore_EarliestSlot(t *testing.T) {
+	ctx := t.Context()
+
+	t.Run("empty database returns ErrNotFound", func(t *testing.T) {
+		db := setupDB(t)
+
+		slot, err := db.EarliestSlot(ctx)
+		require.ErrorIs(t, err, ErrNotFound)
+		assert.Equal(t, primitives.Slot(0), slot)
+	})
+
+	t.Run("database with only genesis block", func(t *testing.T) {
+		db := setupDB(t)
+
+		// Create and save genesis block (slot 0)
+		createAndSaveBlock(t, ctx, db, 0)
+
+		slot, err := db.EarliestSlot(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, primitives.Slot(0), slot)
+	})
+
+	t.Run("database with genesis and blocks in genesis epoch", func(t *testing.T) {
+		db := setupDB(t)
+		slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+
+		// Create and save genesis block (slot 0)
+		createAndSaveBlock(t, ctx, db, 0)
+
+		// Create and save a block in the genesis epoch
+		createAndSaveBlock(t, ctx, db, primitives.Slot(slotsPerEpoch-1))
+
+		slot, err := db.EarliestSlot(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, primitives.Slot(0), slot)
+	})
+
+	t.Run("database with genesis and blocks beyond genesis epoch", func(t *testing.T) {
+		db := setupDB(t)
+		slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+
+		// Create and save genesis block (slot 0)
+		createAndSaveBlock(t, ctx, db, 0)
+
+		// Create and save a block beyond the genesis epoch
+		nextEpochSlot := primitives.Slot(slotsPerEpoch)
+		createAndSaveBlock(t, ctx, db, nextEpochSlot)
+
+		slot, err := db.EarliestSlot(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, nextEpochSlot, slot)
+	})
+
+	t.Run("database starting from checkpoint (non-zero earliest slot)", func(t *testing.T) {
+		db := setupDB(t)
+		slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+
+		// Simulate starting from a checkpoint by creating blocks starting from a later slot
+		checkpointSlot := primitives.Slot(slotsPerEpoch * 10) // 10 epochs later
+		nextEpochSlot := checkpointSlot + slotsPerEpoch
+
+		// Create and save first block at checkpoint slot
+		createAndSaveBlock(t, ctx, db, checkpointSlot)
+
+		// Create and save another block in the next epoch
+		createAndSaveBlock(t, ctx, db, nextEpochSlot)
+
+		slot, err := db.EarliestSlot(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, nextEpochSlot, slot)
+	})
+}
```
@@ -7,44 +7,14 @@ import (
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    bolt "go.etcd.io/bbolt"
)

// CustodyInfo returns the custody group count and the earliest available slot in the database.
func (s *Store) CustodyInfo(ctx context.Context) (uint64, uint64, error) {
    _, span := trace.StartSpan(ctx, "BeaconDB.CustodyInfo")
    defer span.End()

    groupCount, earliestAvailableSlot := uint64(0), uint64(0)
    err := s.db.View(func(tx *bolt.Tx) error {
        // Retrieve the custody bucket.
        bucket := tx.Bucket(custodyBucket)
        if bucket == nil {
            return nil
        }

        // Retrieve the group count.
        bytes := bucket.Get(groupCountKey)
        if len(bytes) != 0 {
            groupCount = bytesutil.BytesToUint64BigEndian(bytes)
        }

        // Retrieve the earliest available slot.
        earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
        if len(earliestSlotBytes) != 0 {
            earliestAvailableSlot = bytesutil.BytesToUint64BigEndian(earliestSlotBytes)
        }

        return nil
    })

    return groupCount, earliestAvailableSlot, err
}

// UpdateCustodyInfo atomically updates the custody group count only if it is greater than the stored one.
// In this case, it also updates the earliest available slot with the provided value.
// It returns the stored custody group count and earliest available slot.
func (s *Store) UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64, earliestAvailableSlot primitives.Slot) (uint64, primitives.Slot, error) {
// It returns the (potentially updated) custody group count and earliest available slot.
func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    _, span := trace.StartSpan(ctx, "BeaconDB.UpdateCustodyInfo")
    defer span.End()

@@ -92,84 +62,12 @@ func (s *Store) UpdateCustodyInfo(ctx context.Context, custodyGroupCount uint64,
        return 0, 0, err
    }

    return storedGroupCount, storedEarliestAvailableSlot, nil
}
    log.WithFields(logrus.Fields{
        "earliestAvailableSlot": storedEarliestAvailableSlot,
        "groupCount":            storedGroupCount,
    }).Debug("Custody info")

// SaveCustodyGroupCount saves the custody group count to the database.
func (s *Store) SaveCustodyGroupCount(ctx context.Context, custodyGroupCount uint64) error {
    _, span := trace.StartSpan(ctx, "BeaconDB.SetCustodyGroupCount")
    defer span.End()

    return s.db.Update(func(tx *bolt.Tx) error {
        // Retrieve the custody bucket.
        bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
        if err != nil {
            return errors.Wrap(err, "create custody bucket")
        }

        // Store the custody group count.
        custodyGroupCountBytes := bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
        if err := bucket.Put(groupCountKey, custodyGroupCountBytes); err != nil {
            return errors.Wrap(err, "put custody group count")
        }

        return nil
    })
}

// SubscribedToAllDataSubnets checks in the database if the node is subscribed to all data subnets.
func (s *Store) SubscribedToAllDataSubnets(ctx context.Context) (bool, error) {
    _, span := trace.StartSpan(ctx, "BeaconDB.SubscribedToAllDataSubnets")
    defer span.End()

    result := false
    err := s.db.View(func(tx *bolt.Tx) error {
        // Retrieve the custody bucket.
        bucket := tx.Bucket(custodyBucket)
        if bucket == nil {
            return nil
        }

        // Retrieve the subscribe all data subnets flag.
        bytes := bucket.Get(subscribeAllDataSubnetsKey)
        if len(bytes) == 0 {
            return nil
        }

        if bytes[0] == 1 {
            result = true
        }

        return nil
    })

    return result, err
}

// SaveSubscribedToAllDataSubnets saves the subscription status to all data subnets in the database.
func (s *Store) SaveSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) error {
    _, span := trace.StartSpan(ctx, "BeaconDB.SaveSubscribedToAllDataSubnets")
    defer span.End()

    return s.db.Update(func(tx *bolt.Tx) error {
        // Retrieve the custody bucket.
        bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
        if err != nil {
            return errors.Wrap(err, "create custody bucket")
        }

        // Store the subscription status.
        value := byte(0)
        if subscribed {
            value = 1
        }

        if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{value}); err != nil {
            return errors.Wrap(err, "put subscribe all data subnets")
        }

        return nil
    })
    return storedEarliestAvailableSlot, storedGroupCount, nil
}

// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
beacon-chain/db/kv/custody_test.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package kv

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    bolt "go.etcd.io/bbolt"
)

// getCustodyInfoFromDB reads the custody info directly from the database for testing purposes.
func getCustodyInfoFromDB(t *testing.T, db *Store) (primitives.Slot, uint64) {
    t.Helper()
    var earliestSlot primitives.Slot
    var groupCount uint64

    err := db.db.View(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(custodyBucket)
        if bucket == nil {
            return nil
        }

        // Read group count
        groupCountBytes := bucket.Get(groupCountKey)
        if len(groupCountBytes) != 0 {
            groupCount = bytesutil.BytesToUint64BigEndian(groupCountBytes)
        }

        // Read earliest available slot
        earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
        if len(earliestSlotBytes) != 0 {
            earliestSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(earliestSlotBytes))
        }

        return nil
    })
    require.NoError(t, err)

    return earliestSlot, groupCount
}

// getSubscriptionStatusFromDB reads the subscription status directly from the database for testing purposes.
func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
    t.Helper()
    var subscribed bool

    err := db.db.View(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(custodyBucket)
        if bucket == nil {
            return nil
        }

        bytes := bucket.Get(subscribeAllDataSubnetsKey)
        if len(bytes) != 0 && bytes[0] == 1 {
            subscribed = true
        }

        return nil
    })
    require.NoError(t, err)

    return subscribed
}

func TestUpdateCustodyInfo(t *testing.T) {
    ctx := t.Context()

    t.Run("initial update with empty database", func(t *testing.T) {
        const (
            earliestSlot = primitives.Slot(100)
            groupCount   = uint64(5)
        )

        db := setupDB(t)

        slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
        require.NoError(t, err)
        require.Equal(t, earliestSlot, slot)
        require.Equal(t, groupCount, count)

        storedSlot, storedCount := getCustodyInfoFromDB(t, db)
        require.Equal(t, earliestSlot, storedSlot)
        require.Equal(t, groupCount, storedCount)
    })

    t.Run("update with higher group count", func(t *testing.T) {
        const (
            initialSlot  = primitives.Slot(100)
            initialCount = uint64(5)
            earliestSlot = primitives.Slot(200)
            groupCount   = uint64(10)
        )

        db := setupDB(t)

        _, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
        require.NoError(t, err)

        slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
        require.NoError(t, err)
        require.Equal(t, earliestSlot, slot)
        require.Equal(t, groupCount, count)

        storedSlot, storedCount := getCustodyInfoFromDB(t, db)
        require.Equal(t, earliestSlot, storedSlot)
        require.Equal(t, groupCount, storedCount)
    })

    t.Run("update with lower group count should not update", func(t *testing.T) {
        const (
            initialSlot  = primitives.Slot(200)
            initialCount = uint64(10)
            earliestSlot = primitives.Slot(300)
            groupCount   = uint64(8)
        )

        db := setupDB(t)

        _, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
        require.NoError(t, err)

        slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
        require.NoError(t, err)
        require.Equal(t, initialSlot, slot)
        require.Equal(t, initialCount, count)

        storedSlot, storedCount := getCustodyInfoFromDB(t, db)
        require.Equal(t, initialSlot, storedSlot)
        require.Equal(t, initialCount, storedCount)
    })
}

func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
    ctx := context.Background()

    t.Run("initial update with empty database - set to false", func(t *testing.T) {
        db := setupDB(t)

        prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
        require.NoError(t, err)
        require.Equal(t, false, prev)

        stored := getSubscriptionStatusFromDB(t, db)
        require.Equal(t, false, stored)
    })

    t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
        db := setupDB(t)

        _, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
        require.NoError(t, err)

        prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
        require.NoError(t, err)
        require.Equal(t, true, prev)

        stored := getSubscriptionStatusFromDB(t, db)
        require.Equal(t, true, stored)
    })

    t.Run("update from true to true (should remain true)", func(t *testing.T) {
        db := setupDB(t)

        _, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
        require.NoError(t, err)

        prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
        require.NoError(t, err)
        require.Equal(t, true, prev)

        stored := getSubscriptionStatusFromDB(t, db)
        require.Equal(t, true, stored)
    })
}
@@ -43,6 +43,7 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
        return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
    }
    blk := wblk.Block()
    slot := blk.Slot()

    blockRoot, err := blk.HashTreeRoot()
    if err != nil {
@@ -51,43 +52,43 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error

    pr := blk.ParentRoot()
    bf := &dbval.BackfillStatus{
        LowSlot: uint64(wblk.Block().Slot()),
        LowSlot:       uint64(slot),
        LowRoot:       blockRoot[:],
        LowParentRoot: pr[:],
        OriginRoot:    blockRoot[:],
        OriginSlot: uint64(wblk.Block().Slot()),
        OriginSlot:    uint64(slot),
    }

    if err = s.SaveBackfillStatus(ctx, bf); err != nil {
        return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
    }

    log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
    log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint data into database")
    if err := s.SaveBlock(ctx, wblk); err != nil {
        return errors.Wrap(err, "could not save checkpoint block")
        return errors.Wrap(err, "save block")
    }

    // save state
    log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
    if err = s.SaveState(ctx, state, blockRoot); err != nil {
        return errors.Wrap(err, "could not save state")
        return errors.Wrap(err, "save state")
    }

    if err = s.SaveStateSummary(ctx, &ethpb.StateSummary{
        Slot: state.Slot(),
        Root: blockRoot[:],
    }); err != nil {
        return errors.Wrap(err, "could not save state summary")
        return errors.Wrap(err, "save state summary")
    }

    // mark block as head of chain, so that processing will pick up from this point
    if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
        return errors.Wrap(err, "could not save head block root")
        return errors.Wrap(err, "save head block root")
    }

    // save origin block root in a special key, to be used when the canonical
    // origin (start of chain, ie alternative to genesis) block or state is needed
    if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
        return errors.Wrap(err, "could not save origin block root")
        return errors.Wrap(err, "save origin checkpoint block root")
    }

    // rebuild the checkpoint from the block
@@ -96,15 +97,18 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
    if err != nil {
        return err
    }

    chkpt := &ethpb.Checkpoint{
        Epoch: primitives.Epoch(slotEpoch),
        Root:  blockRoot[:],
    }

    if err = s.SaveJustifiedCheckpoint(ctx, chkpt); err != nil {
        return errors.Wrap(err, "could not mark checkpoint sync block as justified")
        return errors.Wrap(err, "save justified checkpoint")
    }

    if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
        return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
        return errors.Wrap(err, "save finalized checkpoint")
    }

    return nil
@@ -74,6 +74,7 @@ go_library(
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_sync//singleflight:go_default_library",
    ],
)

@@ -84,6 +85,7 @@ go_test(
        "block_cache_test.go",
        "block_reader_test.go",
        "deposit_test.go",
        "engine_client_broadcast_test.go",
        "engine_client_fuzz_test.go",
        "engine_client_test.go",
        "execution_chain_test.go",
@@ -98,12 +98,9 @@ const (
    // GetBlobsV2 request string for JSON-RPC.
    GetBlobsV2 = "engine_getBlobsV2"
    // Defines the seconds before timing out engine endpoints with non-block execution semantics.
    // TODO: Remove this temporarily needed hack: geth takes as input blob txs with blob proofs and
    // does the heavy lifting of building cell proofs, while normally this is done by the tx sender.
    // This is a cool hack because it lets the CL act as if the tx sender actually computed the cell proofs.
    // The only downside is that `engine_getPayloadV<x>` takes a lot of time.
    // defaultEngineTimeout = time.Second
    defaultEngineTimeout = 2 * time.Second
    defaultEngineTimeout = time.Second
    // defaultGetBlobsRetryInterval is the default retry interval for getBlobsV2 calls.
    defaultGetBlobsRetryInterval = 200 * time.Millisecond
)

var (
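With the temporary 2 s timeout reverted, the retry budget is easy to sanity-check. A back-of-envelope sketch, assuming mainnet's 12 s SecondsPerSlot (which also bounds the retry context in the hunk below):

// retry window ≈ SecondsPerSlot               = 12 s (assumed mainnet value)
// retry period = defaultGetBlobsRetryInterval = 200 ms
// max attempts ≈ 12 s / 200 ms                = 60 getBlobsV2 retries per block root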
@@ -657,9 +654,94 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
}

// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
// It uses singleflight to ensure only one reconstruction per blockRoot.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
    // Use singleflight to ensure only one reconstruction per blockRoot
    v, err, _ := s.reconstructSingleflight.Do(fmt.Sprintf("%x", blockRoot), func() (interface{}, error) {
        // Try reconstruction once
        result, err := s.reconstructDataColumnSidecarsOnce(ctx, signedROBlock, blockRoot)
        if err != nil {
            return nil, errors.Wrap(err, "failed to reconstruct data column sidecars")
        }
        if len(result) > 0 {
            return result, nil // Success - return data
        }

        // Empty result - initiate retry mechanism

        // Create a new context with a timeout for the retry goroutine.
        retryCtx, cancel := context.WithTimeout(s.ctx, time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)

        // LoadOrStore atomically checks for an existing retry and stores
        // a new one if none exists. This prevents a race condition.
        // The stored value is the cancel function for the new context.
        _, loaded := s.activeRetries.LoadOrStore(blockRoot, cancel)

        if loaded {
            // Another goroutine already started the retry process. The current one can exit.
            cancel() // Cancel the context we just created as it won't be used.
            return []blocks.VerifiedRODataColumn{}, nil
        }

        // This goroutine is now responsible for starting the retry.
        // Perform periodic retry attempts for data column reconstruction inline.
        go func() {
            startTime := time.Now()
            // Defer the cancellation of the context and the removal of the active retry tracker.
            defer func() {
                cancel()
                s.activeRetries.Delete(blockRoot)
            }()

            ticker := time.NewTicker(defaultGetBlobsRetryInterval)
            defer ticker.Stop()

            attemptCount := 0
            retryLog := log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot))

            for {
                select {
                case <-ticker.C:
                    attemptCount++
                    getBlobsRetryAttempts.WithLabelValues("attempt").Inc()

                    // Retry reconstruction
                    retryLog.WithField("attempt", attemptCount).Debug("Retrying data column reconstruction")
                    result, err := s.reconstructDataColumnSidecarsOnce(retryCtx, signedROBlock, blockRoot)
                    if err != nil {
                        retryLog.WithError(err).Debug("Reconstruction attempt failed, will retry")
                        continue
                    }
                    if len(result) > 0 {
                        retryLog.WithField("attempts", attemptCount).Debug("Retry succeeded")
                        getBlobsRetryAttempts.WithLabelValues("success_reconstructed").Inc()
                        getBlobsRetryDuration.WithLabelValues("success").Observe(time.Since(startTime).Seconds())
                        // Clean up active retry tracker immediately on success
                        s.activeRetries.Delete(blockRoot)
                        return
                    }

                case <-retryCtx.Done():
                    retryLog.WithField("attempts", attemptCount).Debug("Retry timeout")
                    getBlobsRetryAttempts.WithLabelValues("timeout").Inc()
                    getBlobsRetryDuration.WithLabelValues("timeout").Observe(time.Since(startTime).Seconds())
                    return
                }
            }
        }()

        // Return empty result for now; the background retry will handle it.
        return []blocks.VerifiedRODataColumn{}, nil
    })

    if err != nil {
        return nil, err
    }
    return v.([]blocks.VerifiedRODataColumn), nil
}

// reconstructDataColumnSidecarsOnce performs a single attempt to reconstruct data column sidecars.
func (s *Service) reconstructDataColumnSidecarsOnce(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
    block := signedROBlock.Block()

    log := log.WithFields(logrus.Fields{
@@ -1013,6 +1095,12 @@ func toBlockNumArg(number *big.Int) string {
    return hexutil.EncodeBig(number)
}

// hasActiveRetry checks if there's an active retry for the given block root.
func (s *Service) hasActiveRetry(blockRoot [fieldparams.RootLength]byte) bool {
    _, exists := s.activeRetries.Load(blockRoot)
    return exists
}

// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
    return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
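To isolate the deduplication idiom used above: golang.org/x/sync/singleflight collapses concurrent calls that share a key into a single execution of the function, and every overlapping caller receives the same result. A minimal, self-contained sketch of the idiom (illustrative names only, not Prysm code):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"

    "golang.org/x/sync/singleflight"
)

func main() {
    var group singleflight.Group
    var executions int64
    var wg sync.WaitGroup

    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // All goroutines use the same key, so while the calls overlap
            // the function body runs once and the rest share its result.
            v, _, shared := group.Do("block-root-key", func() (interface{}, error) {
                atomic.AddInt64(&executions, 1)
                return "reconstructed columns", nil
            })
            fmt.Println(v, "shared:", shared)
        }()
    }
    wg.Wait()
    fmt.Println("executions:", atomic.LoadInt64(&executions)) // typically 1
}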
beacon-chain/execution/engine_client_broadcast_test.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package execution

import (
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/testing/require"
)

// TestStartRetryIfNeeded_AtomicBehavior tests that the atomic retry start behavior
// prevents race conditions by ensuring only one retry can be active per blockRoot.
func TestStartRetryIfNeeded_AtomicBehavior(t *testing.T) {
    t.Run("prevents multiple concurrent retry claims", func(t *testing.T) {
        service := &Service{
            activeRetries: sync.Map{},
        }

        blockRoot := [32]byte{1, 2, 3}
        claimCount := int64(0)

        numConcurrentCalls := 20
        var wg sync.WaitGroup
        startSignal := make(chan struct{})

        // Launch multiple goroutines that try to claim retry slot simultaneously
        for i := 0; i < numConcurrentCalls; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                <-startSignal // Wait for signal to maximize race contention

                // Simulate the atomic claim logic from startRetryIfNeeded
                cancelFunc := func() {}
                if _, loaded := service.activeRetries.LoadOrStore(blockRoot, cancelFunc); !loaded {
                    // We won the race - count successful claims
                    atomic.AddInt64(&claimCount, 1)

                    // Simulate some work before cleaning up
                    time.Sleep(1 * time.Millisecond)
                    service.activeRetries.Delete(blockRoot)
                }
            }()
        }

        // Start all goroutines simultaneously to maximize race condition
        close(startSignal)
        wg.Wait()

        // Verify only one goroutine successfully claimed the retry slot
        actualClaimCount := atomic.LoadInt64(&claimCount)
        require.Equal(t, int64(1), actualClaimCount, "Only one goroutine should successfully claim retry slot despite %d concurrent attempts", numConcurrentCalls)

        t.Logf("Success: %d concurrent attempts resulted in only 1 successful claim (atomic behavior verified)", numConcurrentCalls)
    })

    t.Run("hasActiveRetry correctly detects active retries", func(t *testing.T) {
        service := &Service{
            activeRetries: sync.Map{},
        }

        blockRoot1 := [32]byte{1, 2, 3}
        blockRoot2 := [32]byte{4, 5, 6}

        // Initially no active retries
        if service.hasActiveRetry(blockRoot1) {
            t.Error("Should not have active retry initially")
        }

        // Add active retry for blockRoot1
        service.activeRetries.Store(blockRoot1, func() {})

        // Verify detection
        if !service.hasActiveRetry(blockRoot1) {
            t.Error("Should detect active retry for blockRoot1")
        }
        if service.hasActiveRetry(blockRoot2) {
            t.Error("Should not detect active retry for blockRoot2")
        }

        // Remove active retry
        service.activeRetries.Delete(blockRoot1)

        // Verify removal
        if service.hasActiveRetry(blockRoot1) {
            t.Error("Should not detect active retry after deletion")
        }

        t.Logf("Success: hasActiveRetry correctly tracks retry state")
    })
}
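The claim pattern this test exercises can be read in isolation: sync.Map.LoadOrStore is atomic, so exactly one of any set of concurrent callers observes loaded == false for a given key. A hedged sketch of the idiom (hypothetical helper, not the Prysm API):

package main

import (
    "context"
    "fmt"
    "sync"
)

// tryClaim returns true for exactly one concurrent caller per key.
// A loser cancels the context it created, since it will never be used.
func tryClaim(m *sync.Map, key [32]byte, cancel context.CancelFunc) bool {
    if _, loaded := m.LoadOrStore(key, cancel); loaded {
        cancel()
        return false
    }
    return true // the winner owns the slot until it calls m.Delete(key)
}

func main() {
    var m sync.Map
    key := [32]byte{1}

    _, cancelA := context.WithCancel(context.Background())
    _, cancelB := context.WithCancel(context.Background())

    fmt.Println(tryClaim(&m, key, cancelA)) // true: first claim wins
    fmt.Println(tryClaim(&m, key, cancelB)) // false: slot already taken
}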
@@ -11,7 +11,10 @@ import (
    "net/http"
    "net/http/httptest"
    "strings"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
@@ -2723,3 +2726,412 @@ func testNewBlobVerifier() verification.NewBlobVerifier {
        }
    }
}

// Test retry helper methods
func TestRetryHelperMethods(t *testing.T) {
    client := &Service{}
    blockRoot := [32]byte{1, 2, 3}

    t.Run("hasActiveRetry returns false initially", func(t *testing.T) {
        hasActive := client.hasActiveRetry(blockRoot)
        require.Equal(t, false, hasActive)
    })

    t.Run("hasActiveRetry returns true after storing cancel function", func(t *testing.T) {
        _, cancel := context.WithCancel(context.Background())
        defer cancel()
        client.activeRetries.Store(blockRoot, cancel)

        hasActive := client.hasActiveRetry(blockRoot)
        require.Equal(t, true, hasActive)

        // Clean up
        client.activeRetries.Delete(blockRoot)
    })
}

// Test ReconstructDataColumnSidecars with retry logic
func TestReconstructDataColumnSidecars_WithRetry(t *testing.T) {
    // Start the trusted setup.
    err := kzg.Start()
    require.NoError(t, err)

    // Setup test config
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.CapellaForkEpoch = 1
    cfg.DenebForkEpoch = 2
    cfg.ElectraForkEpoch = 3
    cfg.FuluForkEpoch = 4
    params.OverrideBeaconConfig(cfg)

    // Create test block
    kzgCommitments := createRandomKzgCommitments(t, 3)
    sb := util.NewBeaconBlockFulu()
    sb.Block.Body.BlobKzgCommitments = kzgCommitments
    signedB, err := blocks.NewSignedBeaconBlock(sb)
    require.NoError(t, err)
    r := [32]byte{1, 2, 3}

    t.Run("successful initial call does not trigger retry", func(t *testing.T) {
        ctx := context.Background()
        // Setup server that returns all blobs
        blobMasks := []bool{true, true, true}
        srv := createBlobServerV2(t, 3, blobMasks)
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err)
        require.Equal(t, 128, len(dataColumns))

        // Should not have any active retries since initial call succeeded
        require.Equal(t, false, client.hasActiveRetry(r))
    })

    t.Run("failed initial call triggers retry", func(t *testing.T) {
        ctx := context.Background()
        // Setup server that returns no blobs
        srv := createBlobServerV2(t, 0, []bool{})
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err)
        require.Equal(t, 0, len(dataColumns))

        // Wait a bit for the goroutine to start
        time.Sleep(10 * time.Millisecond)

        // Should have active retry since initial call returned empty
        require.Equal(t, true, client.hasActiveRetry(r))

        // Clean up
        if cancel, ok := client.activeRetries.Load(r); ok {
            cancel.(context.CancelFunc)()
        }
    })

    t.Run("does not start duplicate retry", func(t *testing.T) {
        ctx := context.Background()
        // Setup server that returns no blobs
        srv := createBlobServerV2(t, 0, []bool{})
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        // First call should start retry
        dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err)
        require.Equal(t, 0, len(dataColumns))

        // Wait a bit for the goroutine to start
        time.Sleep(10 * time.Millisecond)
        require.Equal(t, true, client.hasActiveRetry(r))

        // Second call should not start another retry
        dataColumns, err = client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err)
        require.Equal(t, 0, len(dataColumns))
        require.Equal(t, true, client.hasActiveRetry(r))

        // Clean up
        if cancel, ok := client.activeRetries.Load(r); ok {
            cancel.(context.CancelFunc)()
        }
    })
}

// Test timeout and cleanup behavior
func TestRetryTimeout(t *testing.T) {
    // Start the trusted setup.
    err := kzg.Start()
    require.NoError(t, err)

    // Setup test config
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.CapellaForkEpoch = 1
    cfg.DenebForkEpoch = 2
    cfg.ElectraForkEpoch = 3
    cfg.FuluForkEpoch = 4
    params.OverrideBeaconConfig(cfg)

    // Create test block
    kzgCommitments := createRandomKzgCommitments(t, 1)
    sb := util.NewBeaconBlockFulu()
    sb.Block.Body.BlobKzgCommitments = kzgCommitments
    signedB, err := blocks.NewSignedBeaconBlock(sb)
    require.NoError(t, err)
    r := [32]byte{1, 2, 3}

    t.Run("retry cleans up after timeout", func(t *testing.T) {
        // Setup server that always returns no blobs
        srv := createBlobServerV2(t, 0, []bool{})
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        // Modify config to have very short slot time for testing
        originalConfig := params.BeaconConfig()
        cfg := originalConfig.Copy()
        cfg.SecondsPerSlot = 1 // 1 second timeout for retry
        params.OverrideBeaconConfig(cfg)
        defer params.OverrideBeaconConfig(originalConfig)

        // Call ReconstructDataColumnSidecars which will start retry internally
        ctx := context.Background()
        _, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err) // Should not error, just return empty result

        // Wait a bit for the retry goroutine to start
        time.Sleep(10 * time.Millisecond)

        // Should have active retry initially
        require.Equal(t, true, client.hasActiveRetry(r))

        // Wait for timeout (longer than the 1 second timeout we set)
        time.Sleep(1200 * time.Millisecond)

        // Should be cleaned up after timeout
        require.Equal(t, false, client.hasActiveRetry(r))
    })
}

// Test concurrent retry scenarios
func TestConcurrentRetries(t *testing.T) {
    // Start the trusted setup.
    err := kzg.Start()
    require.NoError(t, err)

    // Setup test config
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.CapellaForkEpoch = 1
    cfg.DenebForkEpoch = 2
    cfg.ElectraForkEpoch = 3
    cfg.FuluForkEpoch = 4
    params.OverrideBeaconConfig(cfg)

    t.Run("multiple blocks can have concurrent retries", func(t *testing.T) {
        // Setup server that returns no blobs
        srv := createBlobServerV2(t, 0, []bool{})
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        // Create multiple test blocks
        testBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 3)
        roots := make([][32]byte, 3)

        for i := 0; i < 3; i++ {
            kzgCommitments := createRandomKzgCommitments(t, 1)
            sb := util.NewBeaconBlockFulu()
            sb.Block.Body.BlobKzgCommitments = kzgCommitments
            signedB, err := blocks.NewSignedBeaconBlock(sb)
            require.NoError(t, err)
            testBlocks[i] = signedB
            roots[i] = [32]byte{byte(i), byte(i), byte(i)}
        }

        ctx := context.Background()

        // Start retries for all blocks
        for i := 0; i < 3; i++ {
            _, err := client.ReconstructDataColumnSidecars(ctx, testBlocks[i], roots[i])
            require.NoError(t, err)
        }

        // Wait a bit for the goroutines to start
        time.Sleep(10 * time.Millisecond)

        // All should have active retries
        for i := 0; i < 3; i++ {
            require.Equal(t, true, client.hasActiveRetry(roots[i]))
        }

        // Clean up
        for i := 0; i < 3; i++ {
            if cancel, ok := client.activeRetries.Load(roots[i]); ok {
                cancel.(context.CancelFunc)()
            }
        }
    })
}

// Test end-to-end retry behavior with data availability changes
func TestRetryBehaviorWithDataAvailability(t *testing.T) {
    // Start the trusted setup.
    err := kzg.Start()
    require.NoError(t, err)

    // Setup test config
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.CapellaForkEpoch = 1
    cfg.DenebForkEpoch = 2
    cfg.ElectraForkEpoch = 3
    cfg.FuluForkEpoch = 4
    params.OverrideBeaconConfig(cfg)

    // Create test block
    kzgCommitments := createRandomKzgCommitments(t, 1)
    sb := util.NewBeaconBlockFulu()
    sb.Block.Body.BlobKzgCommitments = kzgCommitments
    signedB, err := blocks.NewSignedBeaconBlock(sb)
    require.NoError(t, err)
    r := [32]byte{1, 2, 3}

    t.Run("retry stops when data becomes available", func(t *testing.T) {
        // Setup server that returns no blobs initially
        srv := createBlobServerV2(t, 0, []bool{})
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        // Start the initial reconstruction which should trigger retry
        ctx := context.Background()
        dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err)
        require.Equal(t, 0, len(dataColumns))

        // Wait a bit for the goroutine to start
        time.Sleep(10 * time.Millisecond)

        // Verify retry started
        require.Equal(t, true, client.hasActiveRetry(r))

        // Wait for retry timeout (the retry will continue since there's no way to stop it now)
        time.Sleep(300 * time.Millisecond)

        // Retry should still be active since there's no availability check to stop it
        require.Equal(t, true, client.hasActiveRetry(r))
    })

    t.Run("retry continues when data is not available", func(t *testing.T) {
        // Setup server that returns no blobs
        srv := createBlobServerV2(t, 0, []bool{})
        defer srv.Close()

        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        // Start the initial reconstruction which should trigger retry
        ctx := context.Background()
        dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
        require.NoError(t, err)
        require.Equal(t, 0, len(dataColumns))

        // Wait a bit for the goroutine to start
        time.Sleep(10 * time.Millisecond)

        // Verify retry started
        require.Equal(t, true, client.hasActiveRetry(r))

        // Wait a bit - retry should still be active
        time.Sleep(100 * time.Millisecond)
        require.Equal(t, true, client.hasActiveRetry(r))

        // Clean up
        if cancel, ok := client.activeRetries.Load(r); ok {
            cancel.(context.CancelFunc)()
        }

        // Wait for cleanup
        time.Sleep(50 * time.Millisecond)
        require.Equal(t, false, client.hasActiveRetry(r))
    })
}

// TestConcurrentReconstructDataColumnSidecars tests that concurrent calls to ReconstructDataColumnSidecars
// don't result in multiple getBlobsV2 calls for the same block root
func TestConcurrentReconstructDataColumnSidecars(t *testing.T) {
    t.Run("concurrent calls share result", func(t *testing.T) {
        // Setup server that tracks call count
        callCount := int32(0)
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            atomic.AddInt32(&callCount, 1)
            w.Header().Set("Content-Type", "application/json")
            // Simulate some processing time
            time.Sleep(10 * time.Millisecond)

            if strings.Contains(r.URL.RequestURI(), GetBlobsV2) {
                // Return empty result - simulating EL doesn't have the data yet
                resp := []interface{}{nil}
                respJSON, _ := json.Marshal(map[string]interface{}{
                    "jsonrpc": "2.0",
                    "id":      1,
                    "result":  resp,
                })
                _, _ = w.Write(respJSON)
                return
            }
        }))
        defer srv.Close()

        // Setup client
        client := &Service{}
        rpcClient, client := setupRpcClientV2(t, srv.URL, client)
        defer rpcClient.Close()

        // Create test block with KZG commitments
        slot := primitives.Slot(100)
        block := util.NewBeaconBlockDeneb()
        block.Block.Slot = slot
        commitment := [48]byte{1, 2, 3}
        block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}

        signedBlock, err := blocks.NewSignedBeaconBlock(block)
        require.NoError(t, err)

        blockRoot, err := signedBlock.Block().HashTreeRoot()
        require.NoError(t, err)

        ctx := context.Background()

        // Start multiple concurrent calls
        numCalls := 5
        var wg sync.WaitGroup
        results := make([][]blocks.VerifiedRODataColumn, numCalls)
        errors := make([]error, numCalls)

        for i := 0; i < numCalls; i++ {
            wg.Add(1)
            go func(index int) {
                defer wg.Done()
                result, err := client.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
                results[index] = result
                errors[index] = err
            }(i)
        }

        // Wait for all calls to complete
        wg.Wait()

        // Verify that GetBlobsV2 was called only once, not numCalls times
        finalCallCount := atomic.LoadInt32(&callCount)
        require.Equal(t, int32(1), finalCallCount, "Expected GetBlobsV2 to be called only once, but was called %d times", finalCallCount)

        // Verify all calls got the same result length
        for i := 1; i < numCalls; i++ {
            require.Equal(t, len(results[0]), len(results[i]), "All concurrent calls should return same result length")
        }
    })
}
@@ -71,4 +71,19 @@ var (
        Name: "execution_payload_bodies_count",
        Help: "The number of requested payload bodies is too large",
    })
    getBlobsRetryAttempts = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name: "getblobs_retry_attempts_total",
            Help: "Total number of getBlobsV2 retry attempts",
        },
        []string{"result"},
    )
    getBlobsRetryDuration = promauto.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "getblobs_retry_duration_seconds",
            Help:    "Duration of getBlobsV2 retry cycles",
            Buckets: []float64{0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0},
        },
        []string{"result"},
    )
)
@@ -13,6 +13,8 @@ import (
    "sync"
    "time"

    "golang.org/x/sync/singleflight"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
@@ -162,6 +164,8 @@ type Service struct {
    verifierWaiter          *verification.InitializerWaiter
    blobVerifier            verification.NewBlobVerifier
    capabilityCache         *capabilityCache
    activeRetries           sync.Map // map[blockRoot]context.CancelFunc for tracking active retries
    reconstructSingleflight singleflight.Group
}

// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -233,7 +233,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
    beacon.finalizedStateAtStartUp = nil

    if features.Get().EnableLightClient {
        beacon.lcStore = lightclient.NewLightClientStore(beacon.db)
        beacon.lcStore = lightclient.NewLightClientStore(beacon.db, beacon.fetchP2P(), beacon.StateFeed())
    }

    return beacon, nil
@@ -147,7 +147,6 @@ go_test(
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
@@ -174,7 +173,6 @@ go_test(
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//proto/testing:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
@@ -11,7 +11,6 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
@@ -24,7 +23,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
@@ -229,6 +227,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
        cfg:                   cfg,
        genesisTime:           genesisTime,
        genesisValidatorsRoot: genesisValidatorsRoot,
        custodyInfo:           &custodyInfo{},
    }
    bootListener, err := s.createListener(ipAddr, pkey)
    require.NoError(t, err)
@@ -257,6 +256,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
        cfg:                   cfg,
        genesisTime:           genesisTime,
        genesisValidatorsRoot: genesisValidatorsRoot,
        custodyInfo:           &custodyInfo{},
    }
    listener, err := s.startDiscoveryV5(ipAddr, pkey)
    // Set for 2nd peer
@@ -546,8 +546,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
        }),
    }

    l := util.NewTestLightClient(t, version.Altair)
    msg, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
    msg, err := util.MockOptimisticUpdate()
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
@@ -613,8 +612,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
        }),
    }

    l := util.NewTestLightClient(t, version.Altair)
    msg, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
    msg, err := util.MockFinalityUpdate()
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
@@ -699,6 +697,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
        subnetsLock:     make(map[uint64]*sync.RWMutex),
        subnetsLockLock: sync.Mutex{},
        peers:           peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
        custodyInfo:     &custodyInfo{},
    }

    // Create a listener.
@@ -10,59 +10,106 @@ import (
    "github.com/sirupsen/logrus"
)

var _ DataColumnsHandler = (*Service)(nil)
var _ CustodyManager = (*Service)(nil)

// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() primitives.Slot {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
    s.custodyInfoLock.RLock()
    defer s.custodyInfoLock.RUnlock()

    return s.earliestAvailableSlot
    if s.custodyInfo == nil {
        return 0, errors.New("no custody info available")
    }

    return s.custodyInfo.earliestAvailableSlot, nil
}

// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() uint64 {
    s.custodyInfoMut.Lock()
    defer s.custodyInfoMut.Unlock()
func (s *Service) CustodyGroupCount() (uint64, error) {
    s.custodyInfoLock.Lock()
    defer s.custodyInfoLock.Unlock()

    return s.custodyGroupCount
}

// UpdateCustodyInfo updates the custody group count and earliest available slot
// if the new custody group count is greater than the stored one.
// It returns the (potentially updated) earliest available slot and custody group count.
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    s.custodyInfoMut.Lock()
    defer s.custodyInfoMut.Unlock()

    if custodyGroupCount <= s.custodyGroupCount {
        return s.earliestAvailableSlot, s.custodyGroupCount, nil
    if s.custodyInfo == nil {
        return 0, errors.New("no custody info available")
    }

    if earliestAvailableSlot < s.earliestAvailableSlot {
    return s.custodyInfo.groupCount, nil
}

// UpdateCustodyInfo updates the stored custody group count to the incoming one
// if the incoming one is greater than the stored one. In this case, the
// incoming earliest available slot should be greater than or equal to the
// stored one or an error is returned.
//
// - If there is no stored custody info, or
// - If the incoming earliest available slot is greater than or equal to the
//   Fulu fork slot and the incoming custody group count is greater than the
//   number of samples per slot
//
// then the stored earliest available slot is updated to the incoming one.
//
// This function returns the (possibly updated) earliest available slot and
// custody group count.
//
// Rationale:
// - The custody group count can only be increased (specification)
// - If the custody group count is increased before Fulu, we can still serve
//   all the data, since there is no sharding before Fulu. As a consequence
//   we do not need to update the earliest available slot in this case.
// - If the custody group count is increased after Fulu, but to a value less
//   than or equal to the number of samples per slot, we can still serve all
//   the data, since we store all sampled data column sidecars in all cases.
//   As a consequence, we do not need to update the earliest available slot
// - If the custody group count is increased after Fulu to a value higher than
//   the number of samples per slot, then, until the backfill is complete, we
//   are unable to serve the data column sidecars corresponding to the new
//   custody groups. As a consequence, we need to update the earliest
//   available slot to inform the peers that we are not able to serve data
//   column sidecars before this point.
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    samplesPerSlot := params.BeaconConfig().SamplesPerSlot

    s.custodyInfoLock.Lock()
    defer s.custodyInfoLock.Unlock()

    if s.custodyInfo == nil {
        s.custodyInfo = &custodyInfo{
            earliestAvailableSlot: earliestAvailableSlot,
            groupCount:            custodyGroupCount,
        }
        return earliestAvailableSlot, custodyGroupCount, nil
    }

    inMemory := s.custodyInfo
    if custodyGroupCount <= inMemory.groupCount {
        return inMemory.earliestAvailableSlot, inMemory.groupCount, nil
    }

    if earliestAvailableSlot < inMemory.earliestAvailableSlot {
        return 0, 0, errors.Errorf(
            "earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
            earliestAvailableSlot, s.earliestAvailableSlot, custodyGroupCount, s.custodyGroupCount,
            earliestAvailableSlot, inMemory.earliestAvailableSlot, custodyGroupCount, inMemory.groupCount,
        )
    }

    s.custodyGroupCount = custodyGroupCount
    if custodyGroupCount <= samplesPerSlot {
        inMemory.groupCount = custodyGroupCount
        return inMemory.earliestAvailableSlot, custodyGroupCount, nil
    }

    fuluForkSlot, err := fuluForkSlot()
    if err != nil {
        return 0, 0, errors.Wrap(err, "fulu fork slot")
    }

    if earliestAvailableSlot >= fuluForkSlot {
        s.earliestAvailableSlot = earliestAvailableSlot
    if earliestAvailableSlot < fuluForkSlot {
        inMemory.groupCount = custodyGroupCount
        return inMemory.earliestAvailableSlot, custodyGroupCount, nil
    }

    log.WithFields(logrus.Fields{
        "earliestAvailableSlot": s.earliestAvailableSlot,
        "custodyGroupCount":     s.custodyGroupCount,
    }).Debug("Custody info updated")

    return s.earliestAvailableSlot, s.custodyGroupCount, nil
    inMemory.earliestAvailableSlot = earliestAvailableSlot
    inMemory.groupCount = custodyGroupCount
    return earliestAvailableSlot, custodyGroupCount, nil
}

// CustodyGroupCountFromPeer retrieves custody group count from a peer.
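Because the old and new versions of UpdateCustodyInfo are interleaved in the hunk above, a distilled, standalone restatement of the new decision rules may help. The types and names below are illustrative (not the Prysm ones); the thresholds follow the rationale in the doc comment:

package main

import "fmt"

type custody struct {
    earliestSlot uint64
    groupCount   uint64
}

// update mirrors the decision rules described in the doc comment above.
func update(cur *custody, slot, count, samplesPerSlot, fuluForkSlot uint64) (*custody, error) {
    if cur == nil { // first observation: store as-is
        return &custody{earliestSlot: slot, groupCount: count}, nil
    }
    if count <= cur.groupCount { // counts only ever increase
        return cur, nil
    }
    if slot < cur.earliestSlot { // cannot move availability backwards
        return nil, fmt.Errorf("earliest available slot %d < stored %d", slot, cur.earliestSlot)
    }
    cur.groupCount = count
    // Only a post-Fulu increase beyond the per-slot sampling size forces the
    // earliest available slot forward (older columns must be backfilled first).
    if count > samplesPerSlot && slot >= fuluForkSlot {
        cur.earliestSlot = slot
    }
    return cur, nil
}

func main() {
    c, _ := update(nil, 50, 5, 8, 320) // first time: stored verbatim
    c, _ = update(c, 60, 8, 8, 320)    // <= samplesPerSlot: slot stays 50
    c, _ = update(c, 500, 15, 8, 320)  // post-Fulu, > samplesPerSlot: slot moves to 500
    fmt.Printf("%+v\n", c)             // &{earliestSlot:500 groupCount:15}
}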
@@ -1,12 +1,15 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
|
||||
@@ -15,6 +18,174 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
func TestEarliestAvailableSlot(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
|
||||
_, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
slot, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupCount(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
|
||||
_, err := service.CustodyGroupCount()
|
||||
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
|
||||
})
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected uint64 = 5
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
count, err := service.CustodyGroupCount()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SamplesPerSlot = 8
|
||||
config.FuluForkEpoch = 10
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
initialCustodyInfo *custodyInfo
|
||||
inputSlot primitives.Slot
|
||||
inputGroupCount uint64
|
||||
expectedUpdated bool
|
||||
expectedSlot primitives.Slot
|
||||
expectedGroupCount uint64
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "First time setting custody info",
|
||||
initialCustodyInfo: nil,
|
||||
inputSlot: 100,
|
||||
inputGroupCount: 5,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 100,
|
||||
expectedGroupCount: 5,
|
||||
},
|
||||
{
|
||||
name: "Group count decrease - no update",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 10,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 8,
|
||||
expectedUpdated: false,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 10,
|
||||
},
|
||||
{
|
||||
name: "Earliest slot decrease - error",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 100,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 50,
|
||||
inputGroupCount: 10,
|
||||
expectedErr: "earliest available slot 50 is less than the current one 100",
|
||||
},
|
||||
{
|
||||
name: "Group count increase but <= samples per slot",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 8,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 8,
|
||||
},
|
||||
{
|
||||
name: "Group count increase > samples per slot, before Fulu fork",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 15,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 15,
|
||||
},
|
||||
{
|
||||
name: "Group count increase > samples per slot, after Fulu fork",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 500,
|
||||
inputGroupCount: 15,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 500,
|
||||
expectedGroupCount: 15,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
}
|
||||
|
||||
slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)
|
||||
|
||||
if tc.expectedErr != "" {
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), tc.expectedErr))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSlot, slot)
|
||||
require.Equal(t, tc.expectedGroupCount, groupCount)
|
||||
|
||||
if tc.expectedUpdated {
|
||||
require.NotNil(t, service.custodyInfo)
|
||||
require.Equal(t, tc.expectedSlot, service.custodyInfo.earliestAvailableSlot)
|
||||
require.Equal(t, tc.expectedGroupCount, service.custodyInfo.groupCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
const (
|
||||
expectedENR uint64 = 7
|
||||
@@ -109,3 +280,59 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromPeerENR(t *testing.T) {
    const (
        expectedENR uint64 = 7
        pid                = "test-id"
    )

    cgc := peerdas.Cgc(expectedENR)
    custodyRequirement := params.BeaconConfig().CustodyRequirement

    testCases := []struct {
        name     string
        record   *enr.Record
        expected uint64
        wantErr  bool
    }{
        {
            name:     "No ENR record",
            record:   nil,
            expected: custodyRequirement,
        },
        {
            name:     "Empty ENR record",
            record:   &enr.Record{},
            expected: custodyRequirement,
        },
        {
            name: "Valid ENR with custody group count",
            record: func() *enr.Record {
                record := &enr.Record{}
                record.Set(cgc)
                return record
            }(),
            expected: expectedENR,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
                ScorerParams: &scorers.Config{},
            })

            if tc.record != nil {
                peers.Add(tc.record, pid, nil, network.DirOutbound)
            }

            service := &Service{
                peers: peers,
            }

            actual := service.custodyGroupCountFromPeerENR(pid)
            require.Equal(t, tc.expected, actual)
        })
    }
}

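The table encodes the fallback rule: a peer with no record, or with a record that lacks a cgc entry, is assumed to custody the spec minimum. A sketch of that shape (hypothetical free-standing helper for illustration; the real logic lives in the unexported Service method under test):

// Hypothetical helper illustrating the fallback the test above exercises.
func custodyGroupCountOrDefault(record *enr.Record) uint64 {
    requirement := params.BeaconConfig().CustodyRequirement
    if record == nil {
        return requirement
    }
    count, err := peerdas.CustodyGroupCountFromRecord(record)
    if err != nil {
        // No cgc entry in the record: assume the minimum custody requirement.
        return requirement
    }
    return count
}
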
@@ -241,14 +241,21 @@ func (s *Service) RefreshPersistentSubnets() {
    // Compare current epoch with the Fulu fork epoch.
    fuluForkEpoch := params.BeaconConfig().FuluForkEpoch

    // Get the custody group count we store in our record.
    inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
    if err != nil {
        log.WithError(err).Error("Could not retrieve custody subnet count")
        return
    }
    custodyGroupCount, inRecordCustodyGroupCount := uint64(0), uint64(0)
    if params.FuluEnabled() {
        // Get the custody group count we store in our record.
        inRecordCustodyGroupCount, err = peerdas.CustodyGroupCountFromRecord(record)
        if err != nil {
            log.WithError(err).Error("Could not retrieve custody group count")
            return
        }

        custodyGroupCount := s.CustodyGroupCount()
        custodyGroupCount, err = s.CustodyGroupCount()
        if err != nil {
            log.WithError(err).Error("Could not retrieve custody group count")
            return
        }
    }

    // We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
    if currentEpoch+1 < fuluForkEpoch {
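The guard `currentEpoch+1 < fuluForkEpoch` implements the comment above it: the node switches to the Fulu code path one epoch early, so its record is ready at the fork boundary. A worked example, assuming FuluForkEpoch = 10:

// Illustration only, with fuluForkEpoch = 10:
//   currentEpoch = 8  -> 8+1 < 10:  still on the pre-Fulu path
//   currentEpoch = 9  -> 9+1 == 10: one epoch ahead of the fork, Fulu path
//   currentEpoch = 10 -> at the fork itself, Fulu path
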
@@ -577,9 +584,15 @@ func (s *Service) createLocalNode(
    localNode = initializeAttSubnets(localNode)
    localNode = initializeSyncCommSubnets(localNode)

    custodyGroupCount := s.CustodyGroupCount()
    custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
    localNode.Set(custodyGroupCountEntry)
    if params.FuluEnabled() {
        custodyGroupCount, err := s.CustodyGroupCount()
        if err != nil {
            return nil, errors.Wrap(err, "could not retrieve custody group count")
        }

        custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
        localNode.Set(custodyGroupCountEntry)
    }

    if s.cfg != nil && s.cfg.HostAddress != "" {
        hostIP := net.ParseIP(s.cfg.HostAddress)

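Once set, the cgc entry can be loaded back out of the record; the TestCreateLocalNode hunk below does exactly this. A condensed round-trip sketch reusing calls that appear elsewhere in this diff (error handling elided):

// Write the entry, then read it back.
entry := peerdas.Cgc(custodyGroupCount)
localNode.Set(entry)

got := new(uint64)
err := localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, got))
// On success, *got == custodyGroupCount.
_ = err
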
@@ -64,6 +64,7 @@ func TestCreateListener(t *testing.T) {
    genesisTime: time.Now(),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    cfg: &Config{UDPPort: uint(port)},
    custodyInfo: &custodyInfo{},
}
listener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -90,6 +91,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
    cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -115,6 +117,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
    cfg: cfg,
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
@@ -190,6 +193,8 @@ func TestCreateLocalNode(t *testing.T) {
    quicPort = 3000
)

custodyRequirement := params.BeaconConfig().CustodyRequirement

// Create a private key.
address, privKey := createAddrAndPrivKey(t)

@@ -198,7 +203,7 @@ func TestCreateLocalNode(t *testing.T) {
    genesisTime: time.Now(),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    cfg: tt.cfg,
    custodyGroupCount: params.BeaconConfig().CustodyRequirement,
    custodyInfo: &custodyInfo{groupCount: custodyRequirement},
}

localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
@@ -251,7 +256,7 @@ func TestCreateLocalNode(t *testing.T) {
// Check cgc config.
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
require.Equal(t, custodyRequirement, *custodyGroupCount)
})
}
}
@@ -263,6 +268,7 @@ func TestRebootDiscoveryListener(t *testing.T) {
    genesisTime: time.Now(),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    cfg: &Config{UDPPort: uint(port)},
    custodyInfo: &custodyInfo{},
}

createListener := func() (*discover.UDPv5, error) {
@@ -295,6 +301,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
    genesisTime: time.Now(),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    cfg: &Config{},
    custodyInfo: &custodyInfo{},
}
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
require.NoError(t, err)
@@ -313,6 +320,7 @@ func TestMultiAddrConversion_OK(t *testing.T) {
    },
    genesisTime: time.Now(),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    custodyInfo: &custodyInfo{},
}
listener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -386,6 +394,7 @@ func TestHostIsResolved(t *testing.T) {
    },
    genesisTime: time.Now(),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    custodyInfo: &custodyInfo{},
}
ip, key := createAddrAndPrivKey(t)
list, err := s.createListener(ip, key)
@@ -455,6 +464,7 @@ func TestUDPMultiAddress(t *testing.T) {
    cfg: &Config{UDPPort: uint(port)},
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}

createListener := func() (*discover.UDPv5, error) {
@@ -822,7 +832,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
    peers: p2p.Peers(),
    genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
    custodyGroupCount: params.BeaconConfig().CustodyRequirement,
    custodyInfo: &custodyInfo{groupCount: custodyGroupCount},
}

// Set the listener and the metadata.

@@ -40,6 +40,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
    },
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -73,6 +74,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
    cfg: cfg,
    genesisTime: genesisTime,
    genesisValidatorsRoot: root,
    custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")
@@ -134,6 +136,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
    cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}
bootListener, err := s.createListener(ipAddr, pkey)
require.NoError(t, err)
@@ -168,6 +171,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
    cfg: cfg,
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}
listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")

@@ -33,14 +33,14 @@ type (
    ConnectionHandler
    PeersProvider
    MetadataProvider
    DataColumnsHandler
    CustodyManager
}

// Accessor provides access to the Broadcaster, PeerManager and DataColumnsHandler interfaces.
// Accessor provides access to the Broadcaster, PeerManager and CustodyManager interfaces.
Accessor interface {
    Broadcaster
    PeerManager
    DataColumnsHandler
    CustodyManager
}

// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
@@ -120,10 +120,10 @@ type (
    MetadataSeq() uint64
}

// DataColumnsHandler abstracts some data columns related methods.
DataColumnsHandler interface {
    EarliestAvailableSlot() primitives.Slot
    CustodyGroupCount() uint64
// CustodyManager abstracts some data columns related methods.
CustodyManager interface {
    EarliestAvailableSlot() (primitives.Slot, error)
    CustodyGroupCount() (uint64, error)
    UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
    CustodyGroupCountFromPeer(peer.ID) uint64
}

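Renaming DataColumnsHandler to CustodyManager also changes the accessors to return errors, so every implementer touched by this diff (Service, FakeP2P, TestP2P) has to follow. A compile-time assertion is the usual Go way to pin such a contract (sketch only; the codebase may or may not carry this line):

// Compile-time check that *Service satisfies CustodyManager.
var _ CustodyManager = (*Service)(nil)
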
@@ -63,36 +63,42 @@ var (
)

// Service for managing peer to peer (p2p) networking.
type Service struct {
    started bool
    isPreGenesis bool
    pingMethod func(ctx context.Context, id peer.ID) error
    pingMethodLock sync.RWMutex
    cancel context.CancelFunc
    cfg *Config
    peers *peers.Status
    addrFilter *multiaddr.Filters
    ipLimiter *leakybucket.Collector
    privKey *ecdsa.PrivateKey
    metaData metadata.Metadata
    pubsub *pubsub.PubSub
    joinedTopics map[string]*pubsub.Topic
    joinedTopicsLock sync.RWMutex
    subnetsLock map[uint64]*sync.RWMutex
    subnetsLockLock sync.Mutex // Lock access to subnetsLock
    initializationLock sync.Mutex
    dv5Listener ListenerRebooter
    startupErr error
    ctx context.Context
    host host.Host
    genesisTime time.Time
    genesisValidatorsRoot []byte
    activeValidatorCount uint64
    peerDisconnectionTime *cache.Cache
    custodyInfoMut sync.RWMutex // Protects custodyGroupCount and earliestAvailableSlot
    custodyGroupCount uint64
    earliestAvailableSlot primitives.Slot
}
type (
    Service struct {
        started bool
        isPreGenesis bool
        pingMethod func(ctx context.Context, id peer.ID) error
        pingMethodLock sync.RWMutex
        cancel context.CancelFunc
        cfg *Config
        peers *peers.Status
        addrFilter *multiaddr.Filters
        ipLimiter *leakybucket.Collector
        privKey *ecdsa.PrivateKey
        metaData metadata.Metadata
        pubsub *pubsub.PubSub
        joinedTopics map[string]*pubsub.Topic
        joinedTopicsLock sync.RWMutex
        subnetsLock map[uint64]*sync.RWMutex
        subnetsLockLock sync.Mutex // Lock access to subnetsLock
        initializationLock sync.Mutex
        dv5Listener ListenerRebooter
        startupErr error
        ctx context.Context
        host host.Host
        genesisTime time.Time
        genesisValidatorsRoot []byte
        activeValidatorCount uint64
        peerDisconnectionTime *cache.Cache
        custodyInfo *custodyInfo
        custodyInfoLock sync.RWMutex // Lock access to custodyInfo
    }

    custodyInfo struct {
        earliestAvailableSlot primitives.Slot
        groupCount uint64
    }
)

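Collapsing the two fields into one nilable custodyInfo pointer is what forces EarliestAvailableSlot and CustodyGroupCount to return errors: before the info is first set there is nothing to report. A plausible accessor under that design (hypothetical sketch; the real method bodies are not part of this diff):

// Hypothetical accessor: read under the lock, fail while unset.
func (s *Service) CustodyGroupCount() (uint64, error) {
    s.custodyInfoLock.RLock()
    defer s.custodyInfoLock.RUnlock()

    if s.custodyInfo == nil {
        return 0, errors.New("custody info is not yet set")
    }

    return s.custodyInfo.groupCount, nil
}
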
// NewService initializes a new p2p service compatible with shared.Service interface. No
// connections are made until the Start function is called during the service registry startup.

@@ -112,6 +112,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
s, err := NewService(t.Context(), cfg)
require.NoError(t, err)
s.dv5Listener = &mockListener{}
s.custodyInfo = &custodyInfo{}
exitRoutine := make(chan bool)
go func() {
    s.Start()
@@ -209,6 +210,7 @@ func TestListenForNewNodes(t *testing.T) {
    cfg: cfg,
    genesisTime: genesisTime,
    genesisValidatorsRoot: gvr[:],
    custodyInfo: &custodyInfo{},
}

bootListener, err := s.createListener(ipAddr, pkey)
@@ -250,6 +252,7 @@ func TestListenForNewNodes(t *testing.T) {
    cfg: cfg,
    genesisTime: genesisTime,
    genesisValidatorsRoot: gvr[:],
    custodyInfo: &custodyInfo{},
}

listener, err := s.startDiscoveryV5(ipAddr, pkey)
@@ -279,6 +282,7 @@ func TestListenForNewNodes(t *testing.T) {

s, err = NewService(t.Context(), cfg)
require.NoError(t, err)
s.custodyInfo = &custodyInfo{}

go s.Start()

@@ -74,6 +74,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
    cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
    genesisTime: genesisTime,
    genesisValidatorsRoot: genesisValidatorsRoot,
    custodyInfo: &custodyInfo{},
}

bootNodeForkDigest, err := bootNodeService.currentForkDigest()
@@ -108,6 +109,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {

service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.custodyInfo = &custodyInfo{}

nodeForkDigest, err := service.currentForkDigest()
require.NoError(t, err)
@@ -157,6 +159,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {

service.genesisTime = genesisTime
service.genesisValidatorsRoot = genesisValidatorsRoot
service.custodyInfo = &custodyInfo{}

service.Start()
defer func() {

@@ -46,7 +46,6 @@ go_library(
    "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
    "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
    "@com_github_multiformats_go_multiaddr//:go_default_library",
    "@com_github_pkg_errors//:go_default_library",
    "@com_github_prysmaticlabs_fastssz//:go_default_library",
    "@com_github_sirupsen_logrus//:go_default_library",
    "@org_golang_google_protobuf//proto:go_default_library",

@@ -198,16 +198,16 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
}

// EarliestAvailableSlot -- fake.
func (*FakeP2P) EarliestAvailableSlot() primitives.Slot {
    return 0
func (*FakeP2P) EarliestAvailableSlot() (primitives.Slot, error) {
    return 0, nil
}

// CustodyGroupCount -- fake.
func (*FakeP2P) CustodyGroupCount() uint64 {
    return 0
func (*FakeP2P) CustodyGroupCount() (uint64, error) {
    return 0, nil
}

// SetCustostyGroupCount -- fake.
// UpdateCustodyInfo -- fake.
func (s *FakeP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    return earliestAvailableSlot, custodyGroupCount, nil
}

@@ -35,7 +35,6 @@ import (
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/libp2p/go-libp2p/p2p/transport/tcp"
    "github.com/multiformats/go-multiaddr"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/sirupsen/logrus"
    "google.golang.org/protobuf/proto"
@@ -61,7 +60,7 @@ type TestP2P struct {
    Digest [4]byte
    peers *peers.Status
    LocalMetadata metadata.Metadata
    custodyInfoMut sync.RWMutex
    custodyInfoMut sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
    earliestAvailableSlot primitives.Slot
    custodyGroupCount uint64
}
@@ -467,45 +466,31 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
    return true, 0
}

// CustodyGroupCount .
func (s *TestP2P) CustodyGroupCount() uint64 {
// EarliestAvailableSlot .
func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()

    return s.custodyGroupCount
    return s.earliestAvailableSlot, nil
}

// SetCustodyGroupCount .
// UpdateCustodyInfo updates the custody group count and earliest available slot
// if the new custody group count is greater than the stored one.
// It returns the (potentially updated) earliest available slot and custody group count.
// CustodyGroupCount .
func (s *TestP2P) CustodyGroupCount() (uint64, error) {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()

    return s.custodyGroupCount, nil
}

// UpdateCustodyInfo .
func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
    s.custodyInfoMut.Lock()
    defer s.custodyInfoMut.Unlock()

    if custodyGroupCount <= s.custodyGroupCount {
        return s.earliestAvailableSlot, s.custodyGroupCount, nil
    }

    if earliestAvailableSlot < s.earliestAvailableSlot {
        return 0, 0, errors.Errorf(
            "earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
            earliestAvailableSlot, s.earliestAvailableSlot, custodyGroupCount, s.custodyGroupCount,
        )
    }

    s.earliestAvailableSlot = earliestAvailableSlot
    s.custodyGroupCount = custodyGroupCount

    return earliestAvailableSlot, custodyGroupCount, nil
}

// EarliestAvailableSlot .
func (s *TestP2P) EarliestAvailableSlot() primitives.Slot {
    s.custodyInfoMut.RLock()
    defer s.custodyInfoMut.RUnlock()

    return s.earliestAvailableSlot
    return s.earliestAvailableSlot, s.custodyGroupCount, nil
}

// CustodyGroupCountFromPeer .

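Tests that need a CustodyManager can drive this fake directly; a minimal usage sketch with illustrative values:

p := &TestP2P{}
slot, count, err := p.UpdateCustodyInfo(100, 5)
// First update stores and echoes the inputs: slot == 100, count == 5, err == nil.
_, _, _ = slot, count, err
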
@@ -130,6 +130,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
    name: namespace + ".BlockRewards",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.BlockRewards,
    methods: []string{http.MethodGet},
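Every hunk in this file makes the same change: middleware.AcceptEncodingHeaderHandler() is appended to each endpoint's chain, right after the Accept handler. The diff does not show its body; as a rough sketch of what an Accept-Encoding guard of this shape typically does (an assumption for illustration, not Prysm's actual implementation):

// Hypothetical sketch of an Accept-Encoding validating middleware.
package sketch

import (
    "net/http"
    "strings"
)

func acceptEncodingHandler(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        enc := r.Header.Get("Accept-Encoding")
        // An empty header means the client accepts the identity encoding.
        if enc != "" && !strings.Contains(enc, "gzip") && !strings.Contains(enc, "identity") && !strings.Contains(enc, "*") {
            http.Error(w, "unsupported Accept-Encoding", http.StatusNotAcceptable)
            return
        }
        next.ServeHTTP(w, r)
    })
}
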
@@ -140,6 +141,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.AttestationRewards,
    methods: []string{http.MethodPost},
@@ -150,6 +152,7 @@ func (s *Service) rewardsEndpoints(blocker lookup.Blocker, stater lookup.Stater,
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SyncCommitteeRewards,
    methods: []string{http.MethodPost},
@@ -172,6 +175,7 @@ func (s *Service) builderEndpoints(stater lookup.Stater) []endpoint {
    name: namespace + ".ExpectedWithdrawals",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ExpectedWithdrawals,
    methods: []string{http.MethodGet},
@@ -194,6 +198,7 @@ func (s *Service) blobEndpoints(blocker lookup.Blocker) []endpoint {
    name: namespace + ".Blobs",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.Blobs,
    methods: []string{http.MethodGet},
@@ -237,6 +242,7 @@ func (s *Service) validatorEndpoints(
    name: namespace + ".GetAggregateAttestation",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetAggregateAttestation,
    methods: []string{http.MethodGet},
@@ -246,6 +252,7 @@ func (s *Service) validatorEndpoints(
    name: namespace + ".GetAggregateAttestationV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetAggregateAttestationV2,
    methods: []string{http.MethodGet},
@@ -256,6 +263,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitContributionAndProofs,
    methods: []string{http.MethodPost},
@@ -267,6 +275,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitAggregateAndProofs,
    methods: []string{http.MethodPost},
@@ -277,6 +286,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitAggregateAndProofsV2,
    methods: []string{http.MethodPost},
@@ -286,6 +296,7 @@ func (s *Service) validatorEndpoints(
    name: namespace + ".ProduceSyncCommitteeContribution",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ProduceSyncCommitteeContribution,
    methods: []string{http.MethodGet},
@@ -296,6 +307,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitSyncCommitteeSubscription,
    methods: []string{http.MethodPost},
@@ -306,6 +318,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitBeaconCommitteeSubscription,
    methods: []string{http.MethodPost},
@@ -315,6 +328,7 @@ func (s *Service) validatorEndpoints(
    name: namespace + ".GetAttestationData",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetAttestationData,
    methods: []string{http.MethodGet},
@@ -325,6 +339,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.RegisterValidator,
    methods: []string{http.MethodPost},
@@ -335,6 +350,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetAttesterDuties,
    methods: []string{http.MethodPost},
@@ -344,6 +360,7 @@ func (s *Service) validatorEndpoints(
    name: namespace + ".GetProposerDuties",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetProposerDuties,
    methods: []string{http.MethodGet},
@@ -354,6 +371,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetSyncCommitteeDuties,
    methods: []string{http.MethodPost},
@@ -364,6 +382,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.PrepareBeaconProposer,
    methods: []string{http.MethodPost},
@@ -374,6 +393,7 @@ func (s *Service) validatorEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetLiveness,
    methods: []string{http.MethodPost},
@@ -383,6 +403,7 @@ func (s *Service) validatorEndpoints(
    name: namespace + ".ProduceBlockV3",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ProduceBlockV3,
    methods: []string{http.MethodGet},
@@ -429,6 +450,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetSyncStatus",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetSyncStatus,
    methods: []string{http.MethodGet},
@@ -438,6 +460,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetIdentity",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetIdentity,
    methods: []string{http.MethodGet},
@@ -447,6 +470,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetPeer",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPeer,
    methods: []string{http.MethodGet},
@@ -456,6 +480,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetPeers",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPeers,
    methods: []string{http.MethodGet},
@@ -465,6 +490,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetPeerCount",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPeerCount,
    methods: []string{http.MethodGet},
@@ -474,6 +500,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetVersion",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetVersion,
    methods: []string{http.MethodGet},
@@ -483,6 +510,7 @@ func (s *Service) nodeEndpoints() []endpoint {
    name: namespace + ".GetHealth",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetHealth,
    methods: []string{http.MethodGet},
@@ -533,6 +561,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetCommittees",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetCommittees,
    methods: []string{http.MethodGet},
@@ -542,6 +571,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetStateFork",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetStateFork,
    methods: []string{http.MethodGet},
@@ -551,6 +581,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetStateRoot",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetStateRoot,
    methods: []string{http.MethodGet},
@@ -560,6 +591,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetSyncCommittees",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetSyncCommittees,
    methods: []string{http.MethodGet},
@@ -569,6 +601,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetRandao",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetRandao,
    methods: []string{http.MethodGet},
@@ -580,6 +613,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.PublishBlock,
    methods: []string{http.MethodPost},
@@ -591,6 +625,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.PublishBlindedBlock,
    methods: []string{http.MethodPost},
@@ -601,6 +636,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.PublishBlockV2,
    methods: []string{http.MethodPost},
@@ -611,6 +647,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.PublishBlindedBlockV2,
    methods: []string{http.MethodPost},
@@ -620,6 +657,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlockV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlockV2,
    methods: []string{http.MethodGet},
@@ -630,6 +668,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlockAttestations",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlockAttestations,
    methods: []string{http.MethodGet},
@@ -639,6 +678,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlockAttestationsV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlockAttestationsV2,
    methods: []string{http.MethodGet},
@@ -648,6 +688,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlindedBlock",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlindedBlock,
    methods: []string{http.MethodGet},
@@ -657,6 +698,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlockRoot",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlockRoot,
    methods: []string{http.MethodGet},
@@ -667,6 +709,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".ListAttestations",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ListAttestations,
    methods: []string{http.MethodGet},
@@ -676,6 +719,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".ListAttestationsV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ListAttestationsV2,
    methods: []string{http.MethodGet},
@@ -686,6 +730,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitAttestations,
    methods: []string{http.MethodPost},
@@ -696,6 +741,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitAttestationsV2,
    methods: []string{http.MethodPost},
@@ -705,6 +751,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".ListVoluntaryExits",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ListVoluntaryExits,
    methods: []string{http.MethodGet},
@@ -715,6 +762,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitVoluntaryExit,
    methods: []string{http.MethodPost},
@@ -725,6 +773,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitSyncCommitteeSignatures,
    methods: []string{http.MethodPost},
@@ -734,6 +783,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".ListBLSToExecutionChanges",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ListBLSToExecutionChanges,
    methods: []string{http.MethodGet},
@@ -744,6 +794,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitBLSToExecutionChanges,
    methods: []string{http.MethodPost},
@@ -754,6 +805,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetAttesterSlashings",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetAttesterSlashings,
    methods: []string{http.MethodGet},
@@ -763,6 +815,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetAttesterSlashingsV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetAttesterSlashingsV2,
    methods: []string{http.MethodGet},
@@ -773,6 +826,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitAttesterSlashings,
    methods: []string{http.MethodPost},
@@ -783,6 +837,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitAttesterSlashingsV2,
    methods: []string{http.MethodPost},
@@ -792,6 +847,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetProposerSlashings",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetProposerSlashings,
    methods: []string{http.MethodGet},
@@ -802,6 +858,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.SubmitProposerSlashing,
    methods: []string{http.MethodPost},
@@ -811,6 +868,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlockHeaders",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlockHeaders,
    methods: []string{http.MethodGet},
@@ -820,6 +878,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetBlockHeader",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBlockHeader,
    methods: []string{http.MethodGet},
@@ -829,6 +888,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetGenesis",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetGenesis,
    methods: []string{http.MethodGet},
@@ -838,6 +898,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetFinalityCheckpoints",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetFinalityCheckpoints,
    methods: []string{http.MethodGet},
@@ -848,6 +909,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetValidators,
    methods: []string{http.MethodGet, http.MethodPost},
@@ -857,6 +919,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetValidator",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetValidator,
    methods: []string{http.MethodGet},
@@ -867,6 +930,7 @@ func (s *Service) beaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetValidatorBalances,
    methods: []string{http.MethodGet, http.MethodPost},
@@ -887,6 +951,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetDepositSnapshot",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetDepositSnapshot,
    methods: []string{http.MethodGet},
@@ -896,6 +961,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetPendingDeposits",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPendingDeposits,
    methods: []string{http.MethodGet},
@@ -914,6 +980,7 @@ func (s *Service) beaconEndpoints(
    name: namespace + ".GetPendingPartialWithdrawals",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPendingPartialWithdrawals,
    methods: []string{http.MethodGet},
@@ -939,6 +1006,7 @@ func (*Service) configEndpoints() []endpoint {
    name: namespace + ".GetDepositContract",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: config.GetDepositContract,
    methods: []string{http.MethodGet},
@@ -948,6 +1016,7 @@ func (*Service) configEndpoints() []endpoint {
    name: namespace + ".GetForkSchedule",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: config.GetForkSchedule,
    methods: []string{http.MethodGet},
@@ -957,6 +1026,7 @@ func (*Service) configEndpoints() []endpoint {
    name: namespace + ".GetSpec",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: config.GetSpec,
    methods: []string{http.MethodGet},
@@ -976,6 +1046,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
    name: namespace + ".GetLightClientBootstrap",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetLightClientBootstrap,
    methods: []string{http.MethodGet},
@@ -985,6 +1056,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
    name: namespace + ".GetLightClientUpdatesByRange",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetLightClientUpdatesByRange,
    methods: []string{http.MethodGet},
@@ -994,6 +1066,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
    name: namespace + ".GetLightClientFinalityUpdate",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetLightClientFinalityUpdate,
    methods: []string{http.MethodGet},
@@ -1003,6 +1076,7 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
    name: namespace + ".GetLightClientOptimisticUpdate",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetLightClientOptimisticUpdate,
    methods: []string{http.MethodGet},
@@ -1029,6 +1103,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
    name: namespace + ".GetBeaconStateV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetBeaconStateV2,
    methods: []string{http.MethodGet},
@@ -1038,6 +1113,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
    name: namespace + ".GetForkChoiceHeadsV2",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetForkChoiceHeadsV2,
    methods: []string{http.MethodGet},
@@ -1047,6 +1123,7 @@ func (s *Service) debugEndpoints(stater lookup.Stater) []endpoint {
    name: namespace + ".GetForkChoice",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetForkChoice,
    methods: []string{http.MethodGet},
@@ -1106,6 +1183,7 @@ func (s *Service) prysmBeaconEndpoints(
    name: namespace + ".GetWeakSubjectivity",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetWeakSubjectivity,
    methods: []string{http.MethodGet},
@@ -1115,6 +1193,7 @@ func (s *Service) prysmBeaconEndpoints(
    name: namespace + ".GetValidatorCount",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetValidatorCount,
    methods: []string{http.MethodGet},
@@ -1124,6 +1203,7 @@ func (s *Service) prysmBeaconEndpoints(
    name: namespace + ".GetValidatorCount",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetValidatorCount,
    methods: []string{http.MethodGet},
@@ -1134,6 +1214,7 @@ func (s *Service) prysmBeaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetIndividualVotes,
    methods: []string{http.MethodPost},
@@ -1143,6 +1224,7 @@ func (s *Service) prysmBeaconEndpoints(
    name: namespace + ".GetChainHead",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetChainHead,
    methods: []string{http.MethodGet},
@@ -1153,6 +1235,7 @@ func (s *Service) prysmBeaconEndpoints(
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.PublishBlobs,
    methods: []string{http.MethodPost},
@@ -1180,6 +1263,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
    name: namespace + ".ListTrustedPeer",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ListTrustedPeer,
    methods: []string{http.MethodGet},
@@ -1189,6 +1273,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
    name: namespace + ".ListTrustedPeer",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.ListTrustedPeer,
    methods: []string{http.MethodGet},
@@ -1199,6 +1284,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.AddTrustedPeer,
    methods: []string{http.MethodPost},
@@ -1209,6 +1295,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.AddTrustedPeer,
    methods: []string{http.MethodPost},
@@ -1218,6 +1305,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
    name: namespace + ".RemoveTrustedPeer",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.RemoveTrustedPeer,
    methods: []string{http.MethodDelete},
@@ -1227,6 +1315,7 @@ func (s *Service) prysmNodeEndpoints() []endpoint {
    name: namespace + ".RemoveTrustedPeer",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.RemoveTrustedPeer,
    methods: []string{http.MethodDelete},
@@ -1249,6 +1338,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPerformance,
    methods: []string{http.MethodPost},
@@ -1259,6 +1349,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
    middleware: []middleware.Middleware{
        middleware.ContentTypeHandler([]string{api.JsonMediaType}),
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetPerformance,
    methods: []string{http.MethodPost},
@@ -1268,6 +1359,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
    name: namespace + ".GetParticipation",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetParticipation,
    methods: []string{http.MethodGet},
@@ -1277,6 +1369,7 @@ func (s *Service) prysmValidatorEndpoints(stater lookup.Stater, coreService *cor
    name: namespace + ".GetActiveSetChanges",
    middleware: []middleware.Middleware{
        middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
        middleware.AcceptEncodingHeaderHandler(),
    },
    handler: server.GetActiveSetChanges,
    methods: []string{http.MethodGet},

@@ -33,9 +33,11 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//api/server/structs:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",

@@ -11,9 +11,11 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -53,7 +55,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
require.NoError(t, err)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(db)
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))

err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
require.NoError(t, err)
@@ -97,7 +99,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
require.NoError(t, err)

db := dbtesting.SetupDB(t)
lcStore := lightclient.NewLightClientStore(db)
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))

err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
require.NoError(t, err)
@@ -141,7 +143,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {

t.Run("no bootstrap found", func(t *testing.T) {
s := &Server{
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t)),
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed)),
}
request := httptest.NewRequest("GET", "http://foo.com/", nil)
request.SetPathValue("block_root", hexutil.Encode([]byte{0x00, 0x01, 0x02}))
@@ -184,7 +186,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
}

s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}

updatePeriod := startPeriod
@@ -325,7 +327,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {

db := dbtesting.SetupDB(t)
s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}

updates := make([]interfaces.LightClientUpdate, 2)
@@ -445,7 +447,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {

db := dbtesting.SetupDB(t)
s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}

updates := make([]interfaces.LightClientUpdate, 3)
@@ -492,7 +494,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {

db := dbtesting.SetupDB(t)
s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}

updates := make([]interfaces.LightClientUpdate, 3)
@@ -536,7 +538,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
t.Run("start period before altair", func(t *testing.T) {
db := dbtesting.SetupDB(t)
s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}
startPeriod := 0
url := fmt.Sprintf("http://foo.com/?count=128&start_period=%d", startPeriod)
@@ -559,7 +561,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
t.Run("missing update in the middle", func(t *testing.T) {
db := dbtesting.SetupDB(t)
s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}

updates := make([]interfaces.LightClientUpdate, 3)
@@ -603,7 +605,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
t.Run("missing update at the beginning", func(t *testing.T) {
db := dbtesting.SetupDB(t)
s := &Server{
LCStore: lightclient.NewLightClientStore(db),
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
}

updates := make([]interfaces.LightClientUpdate, 3)
@@ -663,8 +665,8 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)

s := &Server{LCStore: &lightclient.Store{}}
s.LCStore.SetLastFinalityUpdate(update)
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
s.LCStore.SetLastFinalityUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
writer := httptest.NewRecorder()
@@ -688,8 +690,8 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)

s := &Server{LCStore: &lightclient.Store{}}
s.LCStore.SetLastFinalityUpdate(update)
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
s.LCStore.SetLastFinalityUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
request.Header.Add("Accept", "application/octet-stream")
@@ -727,7 +729,7 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
helpers.ClearCache()

t.Run("no update", func(t *testing.T) {
s := &Server{LCStore: &lightclient.Store{}}
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}

request := httptest.NewRequest("GET", "http://foo.com", nil)
writer := httptest.NewRecorder()
@@ -743,8 +745,8 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)

s := &Server{LCStore: &lightclient.Store{}}
s.LCStore.SetLastOptimisticUpdate(update)
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
s.LCStore.SetLastOptimisticUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
writer := httptest.NewRecorder()
@@ -767,8 +769,8 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)

s := &Server{LCStore: &lightclient.Store{}}
s.LCStore.SetLastOptimisticUpdate(update)
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
s.LCStore.SetLastOptimisticUpdate(update, false)

request := httptest.NewRequest("GET", "http://foo.com", nil)
request.Header.Add("Accept", "application/octet-stream")

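The constructor change threaded through all of the tests above is mechanical but worth seeing in one place. A hedged sketch of the new wiring, using only names that appear in this diff; the roles of the second and third arguments are inferred, not confirmed by the source:

package lightclient_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/async/event"
	lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
	dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
)

func TestNewStoreWiring(t *testing.T) {
	lcStore := lightclient.NewLightClientStore(
		dbtesting.SetupDB(t),  // persistence for bootstraps and updates
		&p2ptesting.FakeP2P{}, // p2p handle (inferred: used to gossip updates)
		new(event.Feed),       // state feed (inferred: head/finality notifications)
	)
	_ = lcStore
	// The tests above pass `false` as the new second argument to the setters,
	// which appears to suppress re-broadcasting when an update is recorded:
	// lcStore.SetLastFinalityUpdate(update, false)
}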
@@ -154,7 +154,6 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
}

// Convert statusV2 into status
// TODO: Should we do it this way or the other way around?
peerStatus := &ethpb.Status{
ForkDigest: pStatus.ForkDigest,
FinalizedRoot: pStatus.FinalizedRoot,

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -300,12 +299,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
}
isPeerDASEnabled := coreTime.PeerDASIsActive(block.Block().Slot())

if block.IsBlinded() {
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block, isPeerDASEnabled)
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block)
} else if block.Version() >= version.Deneb {
blobSidecars, dataColumnSideCars, err = vs.handleUnblindedBlock(block, req, isPeerDASEnabled)
blobSidecars, dataColumnSideCars, err = vs.handleUnblindedBlock(block, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -317,6 +315,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}

slot := block.Block().Slot()
epoch := slots.ToEpoch(slot)

var wg sync.WaitGroup
errChan := make(chan error, 1)
@@ -330,7 +329,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
errChan <- nil
}()

if isPeerDASEnabled {
if epoch >= params.BeaconConfig().FuluForkEpoch {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, slot); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
}
@@ -349,7 +348,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
}

// handleBlindedBlock processes blinded beacon blocks.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock, isPeerDASEnabled bool) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
if block.Version() < version.Bellatrix {
return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
}
@@ -372,7 +371,10 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
return nil, nil, nil, errors.Wrap(err, "unblind")
}

if isPeerDASEnabled {
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)

if blockEpoch >= params.BeaconConfig().FuluForkEpoch {
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, bundle.GetBlobs(), bundle.GetProofs())
if err != nil {
return nil, nil, nil, errors.Wrap(err, "construct data column sidecars")
@@ -392,14 +394,16 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
func (vs *Server) handleUnblindedBlock(
block interfaces.SignedBeaconBlock,
req *ethpb.GenericSignedBeaconBlock,
isPeerDASEnabled bool,
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, nil, err
}

if isPeerDASEnabled {
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)

if blockEpoch >= params.BeaconConfig().FuluForkEpoch {
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "construct data column sidecars")

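The refactor above replaces a precomputed isPeerDASEnabled flag with a per-block comparison of the block's epoch against the Fulu fork epoch, so each path decides locally whether to construct data column sidecars or fall back to blob sidecars. A minimal, self-contained sketch of the gate (32 slots per epoch is the mainnet constant, assumed here for illustration):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; illustrative stand-in for the repo's config

// buildsDataColumns mirrors the `blockEpoch >= FuluForkEpoch` gate in the diff.
func buildsDataColumns(blockSlot, fuluForkEpoch uint64) bool {
	blockEpoch := blockSlot / slotsPerEpoch
	return blockEpoch >= fuluForkEpoch
}

func main() {
	const fuluForkEpoch = 100
	fmt.Println(buildsDataColumns(3199, fuluForkEpoch)) // false: epoch 99, blob sidecars
	fmt.Println(buildsDataColumns(3200, fuluForkEpoch)) // true: epoch 100, data columns
}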
@@ -37,7 +37,7 @@ func TestUnblinder_UnblindBlobSidecars_InvalidBundle(t *testing.T) {
func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
// Test that the function accepts BlobsBundler interface
// This test focuses on the interface change rather than full integration

t.Run("Interface compatibility with BlobsBundle", func(t *testing.T) {
// Create a simple pre-Deneb block that will return nil (no processing needed)
wBlock, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{
@@ -87,7 +87,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
t.Run("Function signature accepts BlobsBundler interface", func(t *testing.T) {
// This test verifies that the function signature has been updated to accept BlobsBundler
// We test this by verifying the code compiles with both types

// Create a simple pre-Deneb block for the interface test
wBlock, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
@@ -106,7 +106,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
_, err = unblindBlobsSidecars(wBlock, regularBundle)
require.NoError(t, err)

// Verify function accepts BlobsBundleV2 through the interface
var bundleV2 enginev1.BlobsBundler = &enginev1.BlobsBundleV2{
KzgCommitments: [][]byte{make([]byte, 48)},
Proofs: [][]byte{make([]byte, 48)},

@@ -169,6 +169,7 @@ go_test(
"block_batcher_test.go",
"broadcast_bls_changes_test.go",
"context_test.go",
"custody_test.go",
"data_columns_reconstruct_test.go",
"data_columns_test.go",
"decode_pubsub_test.go",
@@ -195,6 +196,7 @@ go_test(
"slot_aware_cache_test.go",
"subscriber_beacon_aggregate_proof_test.go",
"subscriber_beacon_blocks_test.go",
"subscriber_data_column_sidecar_trigger_test.go",
"subscriber_test.go",
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",
@@ -216,6 +218,7 @@ go_test(
shard_count = 4,
deps = [
"//async/abool:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",

@@ -38,8 +38,8 @@ func (s batchState) String() string {
return "import_complete"
case batchEndSequence:
return "end_sequence"
case batchSidecarSync:
return "sidecar_sync"
case batchBlobSync:
return "blob_sync"
default:
return "unknown"
}
@@ -50,7 +50,7 @@ const (
batchInit
batchSequenced
batchErrRetryable
batchSidecarSync
batchBlobSync
batchImportable
batchImportComplete
batchEndSequence
@@ -140,7 +140,7 @@ func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
b.results = results
b.bs = bs
if bs.blobsNeeded() > 0 {
return b.withState(batchSidecarSync)
return b.withState(batchBlobSync)
}
return b.withState(batchImportable)
}

@@ -117,7 +117,7 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
case b := <-p.fromWorkers:
pid := b.busy
busy[pid] = false
if b.state == batchSidecarSync {
if b.state == batchBlobSync {
todo = append(todo, b)
sortBatchDesc(todo)
} else {

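The rename from batchSidecarSync to batchBlobSync touches an iota-based enum and its String method together, and the router and worker below switch on the same constant, so all three must stay in sync. A minimal, self-contained sketch of the pattern (state names here are illustrative):

package main

import "fmt"

type batchState int

const (
	batchNil batchState = iota
	batchSequenced
	batchBlobSync // renamed state: sidecar/blob retrieval in flight
	batchImportable
)

func (s batchState) String() string {
	switch s {
	case batchSequenced:
		return "sequenced"
	case batchBlobSync:
		return "blob_sync"
	case batchImportable:
		return "importable"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(batchBlobSync) // "blob_sync"
}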
@@ -31,7 +31,7 @@ func (w *p2pWorker) run(ctx context.Context) {
select {
case b := <-w.todo:
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
if b.state == batchSidecarSync {
if b.state == batchBlobSync {
w.done <- w.handleSidecars(ctx, b)
} else {
w.done <- w.handleBlocks(ctx, b)

@@ -15,6 +15,94 @@ import (

var nilFinalizedStateError = errors.New("finalized state is nil")

func (s *Service) maintainCustodyInfo() {
const interval = 1 * time.Minute

async.RunEvery(s.ctx, interval, func() {
if err := s.updateCustodyInfoIfNeeded(); err != nil {
log.WithError(err).Error("Failed to update custody info")
}
})
}

func (s *Service) updateCustodyInfoIfNeeded() error {
const minimumPeerCount = 1

// Get our actual custody group count.
actualCustodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "p2p custody group count")
}

// Get our target custody group count.
targetCustodyGroupCount, err := s.custodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}

// If the actual custody group count already meets or exceeds the target, skip the update.
if actualCustodyGroupCount >= targetCustodyGroupCount {
return nil
}

// Check that all subscribed data column sidecars topics have at least `minimumPeerCount` peers.
topics := s.cfg.p2p.PubSub().GetTopics()
enoughPeers := true
for _, topic := range topics {
if !strings.Contains(topic, p2p.GossipDataColumnSidecarMessage) {
continue
}

if peers := s.cfg.p2p.PubSub().ListPeers(topic); len(peers) < minimumPeerCount {
// If a topic has fewer than the minimum required peers, log a warning.
log.WithFields(logrus.Fields{
"topic": topic,
"peerCount": len(peers),
"minimumPeerCount": minimumPeerCount,
}).Debug("Insufficient peers for data column sidecar topic to maintain custody count")
enoughPeers = false
}
}

if !enoughPeers {
return nil
}

headROBlock, err := s.cfg.chain.HeadBlock(s.ctx)
if err != nil {
return errors.Wrap(err, "head block")
}
headSlot := headROBlock.Block().Slot()

storedEarliestSlot, storedGroupCount, err := s.cfg.p2p.UpdateCustodyInfo(headSlot, targetCustodyGroupCount)
if err != nil {
return errors.Wrap(err, "p2p update custody info")
}

if _, _, err := s.cfg.beaconDB.UpdateCustodyInfo(s.ctx, storedEarliestSlot, storedGroupCount); err != nil {
return errors.Wrap(err, "beacon db update custody info")
}

return nil
}

// custodyGroupCount computes the custody group count based on the custody requirement,
// the validators custody requirement, and whether the node is subscribed to all data subnets.
func (s *Service) custodyGroupCount() (uint64, error) {
beaconConfig := params.BeaconConfig()

if flags.Get().SubscribeAllDataSubnets {
return beaconConfig.NumberOfCustodyGroups, nil
}

validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
if err != nil {
return 0, errors.Wrap(err, "validators custody requirement")
}

return max(beaconConfig.CustodyRequirement, validatorsCustodyRequirement), nil
}

// validatorsCustodyRequirement computes the custody requirements based on the
// finalized state and the tracked validators.
func (s *Service) validatorsCustodyRequirement() (uint64, error) {
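The target custody group count above is the larger of the spec's base requirement and whatever the node's tracked validators demand, with the subscribe-all flag short-circuiting to the full group count. A self-contained sketch of that decision; the inputs stand in for params.BeaconConfig() and the flags package:

package main

import "fmt"

// targetCustodyGroups mirrors custodyGroupCount() in the diff; the arguments
// are placeholders for the repo's config and flag values.
func targetCustodyGroups(subscribeAll bool, numberOfCustodyGroups, custodyRequirement, validatorsRequirement uint64) uint64 {
	if subscribeAll {
		return numberOfCustodyGroups // supernode: custody everything
	}
	return max(custodyRequirement, validatorsRequirement)
}

func main() {
	fmt.Println(targetCustodyGroups(false, 128, 4, 9)) // 9: validator demand dominates
	fmt.Println(targetCustodyGroups(true, 128, 4, 9))  // 128: subscribe-all flag set
}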
@@ -40,86 +128,3 @@ func (s *Service) validatorsCustodyRequirement() (uint64, error) {

return result, nil
}

// custodyGroupCount computes the custody group count based on the custody requirement,
// the validators custody requirement, and whether the node is subscribed to all data subnets.
func (s *Service) custodyGroupCount() (uint64, error) {
beaconConfig := params.BeaconConfig()

if flags.Get().SubscribeAllDataSubnets {
return beaconConfig.NumberOfCustodyGroups, nil
}

validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
if err != nil {
return 0, errors.Wrap(err, "validators custody requirement")
}

return max(beaconConfig.CustodyRequirement, validatorsCustodyRequirement), nil
}

func (s *Service) maintainCustodyGroupCount() {
const (
interval = 1 * time.Minute
minimumPeerCount = 1
)

async.RunEvery(s.ctx, interval, func() {
custodyGroupCount, err := s.custodyGroupCount()
if err != nil {
log.WithError(err).Error("Could not compute custody group count")
return
}

// Check that all subscribed data column sidecars topics have at least `minimumPeerCount` peers.
topics := s.cfg.p2p.PubSub().GetTopics()
enoughPeers := true
for _, topic := range topics {
if !strings.Contains(topic, p2p.GossipDataColumnSidecarMessage) {
continue
}

if peers := s.cfg.p2p.PubSub().ListPeers(topic); len(peers) < minimumPeerCount {
// If a topic has fewer than the minimum required peers, log a warning.
log.WithFields(logrus.Fields{
"topic": topic,
"peerCount": len(peers),
"minimumPeerCount": minimumPeerCount,
}).Debug("Insufficient peers for data column sidecar topic to maintain custody count")
enoughPeers = false
}
}

if !enoughPeers {
return
}

headROBlock, err := s.cfg.chain.HeadBlock(s.ctx)
if err != nil {
log.WithError(err).Error("Could not retrieve head block")
return
}
headSlot := headROBlock.Block().Slot()

earliestSlot, storedGroupCount, err := s.cfg.p2p.UpdateCustodyInfo(headSlot, custodyGroupCount)
if err != nil {
log.WithError(err).Error("Could not update custody info")
return
}

if storedGroupCount >= custodyGroupCount {
return
}

// Update the custody group count in the P2P service and the database.
if _, _, err := s.cfg.p2p.UpdateCustodyInfo(earliestSlot, storedGroupCount); err != nil {
log.WithError(err).Error("Could not update custody info")
return
}

if err := s.cfg.beaconDB.SaveCustodyGroupCount(s.ctx, custodyGroupCount); err != nil {
log.WithError(err).Error("Could not save custody group count")
return
}
})
}

beacon-chain/sync/custody_test.go (new file, 195 lines)
@@ -0,0 +1,195 @@
package sync

import (
"context"
"strings"
"testing"
"time"

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

type testSetup struct {
service *Service
p2pService *p2ptest.TestP2P
beaconDB db.Database
ctx context.Context
initialSlot primitives.Slot
initialCount uint64
}

func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
ctx := t.Context()
p2pService := p2ptest.NewTestP2P(t)
beaconDB := dbtesting.SetupDB(t)

const (
initialEarliestSlot = primitives.Slot(50)
initialCustodyCount = uint64(5)
)

_, _, err := p2pService.UpdateCustodyInfo(initialEarliestSlot, initialCustodyCount)
require.NoError(t, err)

dbEarliestAvailableSlot, dbCustodyCount, err := beaconDB.UpdateCustodyInfo(ctx, initialEarliestSlot, initialCustodyCount)
require.NoError(t, err)
require.Equal(t, initialEarliestSlot, dbEarliestAvailableSlot)
require.Equal(t, initialCustodyCount, dbCustodyCount)

cfg := &config{
p2p: p2pService,
beaconDB: beaconDB,
}

if withChain {
const headSlot = primitives.Slot(100)
block, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Body: &eth.BeaconBlockBody{},
Slot: headSlot,
},
})
require.NoError(t, err)

cfg.chain = &mock.ChainService{
Genesis: time.Now(),
ValidAttestation: true,
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
},
Block: block,
}
}

service := &Service{
ctx: ctx,
cfg: cfg,
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}

return &testSetup{
service: service,
p2pService: p2pService,
beaconDB: beaconDB,
ctx: ctx,
initialSlot: initialEarliestSlot,
initialCount: initialCustodyCount,
}
}

func (ts *testSetup) assertCustodyInfo(t *testing.T, expectedSlot primitives.Slot, expectedCount uint64) {
p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot()
require.NoError(t, err)
require.Equal(t, expectedSlot, p2pEarliestSlot)

p2pCustodyCount, err := ts.p2pService.CustodyGroupCount()
require.NoError(t, err)
require.Equal(t, expectedCount, p2pCustodyCount)

dbEarliestSlot, dbCustodyCount, err := ts.beaconDB.UpdateCustodyInfo(ts.ctx, 0, 0)
require.NoError(t, err)
require.Equal(t, expectedSlot, dbEarliestSlot)
require.Equal(t, expectedCount, dbCustodyCount)
}

func withSubscribeAllDataSubnets(t *testing.T, fn func()) {
originalFlag := flags.Get().SubscribeAllDataSubnets
defer func() {
flags.Get().SubscribeAllDataSubnets = originalFlag
}()
flags.Get().SubscribeAllDataSubnets = true
fn()
}

func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
params.SetupTestConfigCleanup(t)
beaconConfig := params.BeaconConfig()
beaconConfig.NumberOfCustodyGroups = 128
beaconConfig.CustodyRequirement = 4
beaconConfig.SamplesPerSlot = 8
params.OverrideBeaconConfig(beaconConfig)

t.Run("Skip update when actual custody count >= target", func(t *testing.T) {
setup := setupCustodyTest(t, false)

err := setup.service.updateCustodyInfoIfNeeded()
require.NoError(t, err)

setup.assertCustodyInfo(t, setup.initialSlot, setup.initialCount)
})

t.Run("not enough peers in some subnets", func(t *testing.T) {
const randomTopic = "aTotalRandomTopicName"
require.Equal(t, false, strings.Contains(randomTopic, p2p.GossipDataColumnSidecarMessage))

withSubscribeAllDataSubnets(t, func() {
setup := setupCustodyTest(t, false)

_, err := setup.service.cfg.p2p.SubscribeToTopic(p2p.GossipDataColumnSidecarMessage)
require.NoError(t, err)

_, err = setup.service.cfg.p2p.SubscribeToTopic(randomTopic)
require.NoError(t, err)

err = setup.service.updateCustodyInfoIfNeeded()
require.NoError(t, err)

setup.assertCustodyInfo(t, setup.initialSlot, setup.initialCount)
})
})

t.Run("should update", func(t *testing.T) {
withSubscribeAllDataSubnets(t, func() {
setup := setupCustodyTest(t, true)

err := setup.service.updateCustodyInfoIfNeeded()
require.NoError(t, err)

const expectedSlot = primitives.Slot(100)
setup.assertCustodyInfo(t, expectedSlot, beaconConfig.NumberOfCustodyGroups)
})
})
}

func TestCustodyGroupCount(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.NumberOfCustodyGroups = 10
config.CustodyRequirement = 3
params.OverrideBeaconConfig(config)

t.Run("SubscribeAllDataSubnets enabled returns NumberOfCustodyGroups", func(t *testing.T) {
withSubscribeAllDataSubnets(t, func() {
service := &Service{
ctx: context.Background(),
}

result, err := service.custodyGroupCount()
require.NoError(t, err)
require.Equal(t, config.NumberOfCustodyGroups, result)
})
})

t.Run("No tracked validators returns CustodyRequirement", func(t *testing.T) {
service := &Service{
ctx: context.Background(),
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
}

result, err := service.custodyGroupCount()
require.NoError(t, err)
require.Equal(t, config.CustodyRequirement, result)
})
}
@@ -50,7 +50,11 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(

// Retrieve our local node info.
nodeID := s.cfg.p2p.NodeID()
custodyGroupCount := s.cfg.p2p.CustodyGroupCount()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}

samplingSize := max(custodyGroupCount, samplesPerSlot)
localNodeInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
@@ -158,7 +162,11 @@ func (s *Service) broadcastMissingDataColumnSidecars(
nodeID := s.cfg.p2p.NodeID()

// Retrieve the local node info.
custodyGroupCount := s.cfg.p2p.CustodyGroupCount()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}

localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
return errors.Wrap(err, "peerdas info")

@@ -1,113 +0,0 @@
package sync

import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/pkg/errors"
)

// fetchDataColumnSidecars retrieves data column sidecars from the database for the given map
// of block roots to data column indices. It checks if the requested data columns are available
// in the database and returns the corresponding ReadOnlyDataColumnSidecars.
// If some requested data is not available, it returns an error.
func (s *Service) fetchDataColumnSidecars(blockRootToIndices map[[fieldparams.RootLength]byte][]uint64) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
if s.cfg.dataColumnStorage == nil {
return nil, errors.New("data column storage is nil")
}

if len(blockRootToIndices) == 0 {
return map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{}, nil
}

result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)

minColumnsForReconstruct := peerdas.MinimumColumnsCountToReconstruct()

for blockRoot, indices := range blockRootToIndices {
if len(indices) == 0 {
continue
}

// First check cache to see what data is available
storedDataColumns := s.cfg.dataColumnStorage.Summary(blockRoot)

// Check if all requested indices are present in cache
storedIndices := storedDataColumns.Stored()
allRequestedPresent := true
for _, requestedIndex := range indices {
if !storedIndices[requestedIndex] {
allRequestedPresent = false
break
}
}

if allRequestedPresent {
// All requested data is present, retrieve directly from DB
requestedColumns, err := s.fetchDataColumnSidecarsDirectly(blockRoot, indices)
if err != nil {
return nil, errors.Wrapf(err, "fetch data columns directly for block root %#x", blockRoot)
}
result[blockRoot] = requestedColumns
continue
}

// Not all requested data is present, check if we can reconstruct
if storedDataColumns.Count() < minColumnsForReconstruct {
return nil, errors.New("some requested data columns are not available and insufficient data for reconstruction")
}

// Retrieve data using reconstruction
requestedColumns, err := s.fetchDataColumnSidecarsWithReconstruction(blockRoot, indices)
if err != nil {
return nil, errors.Wrapf(err, "fetch data columns with reconstruction for block root %#x", blockRoot)
}
result[blockRoot] = requestedColumns
}

return result, nil
}

// fetchDataColumnSidecarsDirectly retrieves data column sidecars directly from the database
// when all requested indices are available.
func (s *Service) fetchDataColumnSidecarsDirectly(blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
verifiedRODataColumns, err := s.cfg.dataColumnStorage.Get(blockRoot, indices)
if err != nil {
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
}
return verifiedRODataColumns, nil
}

// fetchDataColumnSidecarsWithReconstruction retrieves data column sidecars by first reconstructing
// all columns from stored data, then extracting the requested indices.
func (s *Service) fetchDataColumnSidecarsWithReconstruction(blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
// Retrieve all stored columns for reconstruction
allStoredColumns, err := s.cfg.dataColumnStorage.Get(blockRoot, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
}

// Attempt reconstruction
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
if err != nil {
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
}

// Health check: ensure we have the expected number of columns
numberOfColumns := params.BeaconConfig().NumberOfColumns
if uint64(len(reconstructedColumns)) != numberOfColumns {
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
}

// Extract only the requested indices from reconstructed data using direct indexing
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
for _, requestedIndex := range indices {
if requestedIndex >= numberOfColumns {
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
}
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
}

return requestedColumns, nil
}
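The deleted helper above encodes the PeerDAS serving rule: return the requested columns straight from storage when they are all present, otherwise reconstruct, which is only possible once enough extended columns are stored. A self-contained sketch of that branch; the 50% bound below is the usual erasure-coding threshold, while the repo's exact value comes from peerdas.MinimumColumnsCountToReconstruct:

package main

import (
	"errors"
	"fmt"
)

const numberOfColumns = 128 // illustrative; the repo reads this from config

// serveColumns mirrors the decision structure of the deleted helper.
func serveColumns(stored map[uint64]bool, requested []uint64) (string, error) {
	allPresent := true
	for _, idx := range requested {
		if !stored[idx] {
			allPresent = false
			break
		}
	}
	if allPresent {
		return "fetch directly from storage", nil
	}
	if len(stored) < numberOfColumns/2 { // reconstruction bound
		return "", errors.New("not enough columns to reconstruct")
	}
	return "reconstruct all columns, then extract requested indices", nil
}

func main() {
	stored := map[uint64]bool{}
	for i := uint64(0); i < 64; i++ { // exactly half of the columns stored
		stored[i] = true
	}
	plan, err := serveColumns(stored, []uint64{3, 100})
	fmt.Println(plan, err) // reconstruction path: column 100 is missing but recoverable
}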
@@ -392,7 +392,11 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
}

// Fetch data column sidecars.
custodyGroupCount := f.p2p.CustodyGroupCount()
custodyGroupCount, err := f.p2p.CustodyGroupCount()
if err != nil {
return blobsPid, errors.Wrap(err, "fetch custody group count from peer")
}

samplingSize := max(custodyGroupCount, samplesPerSlot)
fetchedDataColumnsByRoot, err := prysmsync.RequestMissingDataColumnsByRange(ctx, f.clock, f.ctxMap, f.p2p, f.rateLimiter, samplingSize, f.dcs, dataColumnBlocks, batchSize)
if err != nil {

@@ -179,7 +179,6 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue

blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)
lazilyPersistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, nodeID, s.newDataColumnsVerifier, s.cfg.P2P.CustodyGroupCount())

log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
logBlobs, logDataColumns := log, log
@@ -208,6 +207,16 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
}
}

if len(blocksWithDataColumns) == 0 {
return uint64(len(bwb)), nil
}

custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return 0, errors.Wrap(err, "fetch custody group count from peer")
}

lazilyPersistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, nodeID, s.newDataColumnsVerifier, custodyGroupCount)
for i, b := range blocksWithDataColumns {
logDataColumns := logDataColumns.WithFields(syncFields(b.Block))

@@ -428,7 +437,12 @@ func (s *Service) processBlocksWithDataColumns(ctx context.Context, bwbs []block
}

samplesPerSlot := params.BeaconConfig().SamplesPerSlot
custodyGroupCount := s.cfg.P2P.CustodyGroupCount()

custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "fetch custody group count from peer")
}

samplingSize := max(custodyGroupCount, samplesPerSlot)

persistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, s.cfg.P2P.NodeID(), s.newDataColumnsVerifier, samplingSize)

@@ -394,7 +394,12 @@ func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) err
nodeID := s.cfg.P2P.NodeID()
storage := s.cfg.DataColumnStorage
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
custodyGroupCount := s.cfg.P2P.CustodyGroupCount()

custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "fetch custody group count from peer")
}

samplingSize := max(custodyGroupCount, samplesPerSlot)

missingColumns, err := sync.MissingDataColumns(roBlock, nodeID, samplingSize, storage)

@@ -43,14 +43,14 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
// Fulu: https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#messages
if forkIndex >= version.Fulu {
return map[string]rpcHandler{
p2p.RPCStatusTopicV2: s.statusRPCHandler, // Modified in Fulu
p2p.RPCStatusTopicV2: s.statusRPCHandler, // Updated in Fulu
p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler,
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler,
p2p.RPCPingTopicV1: s.pingHandler,
p2p.RPCMetaDataTopicV3: s.metaDataHandler, // Modified in Fulu
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler,
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler,
p2p.RPCMetaDataTopicV3: s.metaDataHandler, // Updated in Fulu
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler, // Modified in Fulu
p2p.RPCBlobSidecarsByRangeTopicV1: s.blobSidecarsByRangeRPCHandler, // Modified in Fulu
p2p.RPCDataColumnSidecarsByRootTopicV1: s.dataColumnSidecarByRootRPCHandler, // Added in Fulu
p2p.RPCDataColumnSidecarsByRangeTopicV1: s.dataColumnSidecarsByRangeRPCHandler, // Added in Fulu
}, nil
@@ -61,8 +61,8 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
return map[string]rpcHandler{
p2p.RPCStatusTopicV1: s.statusRPCHandler,
p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler,
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler,
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler, // Modified in Electra
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler, // Modified in Electra
p2p.RPCPingTopicV1: s.pingHandler,
p2p.RPCMetaDataTopicV2: s.metaDataHandler,
p2p.RPCBlobSidecarsByRootTopicV1: s.blobSidecarByRootRPCHandler, // Modified in Electra
@@ -91,8 +91,8 @@ func (s *Service) rpcHandlerByTopicFromFork(forkIndex int) (map[string]rpcHandle
handler := map[string]rpcHandler{
p2p.RPCStatusTopicV1: s.statusRPCHandler,
p2p.RPCGoodByeTopicV1: s.goodbyeRPCHandler,
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler, // Updated in Altair and modified in Capella
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler, // Updated in Altair and modified in Capella
p2p.RPCBlocksByRangeTopicV2: s.beaconBlocksByRangeRPCHandler, // Updated in Altair and modified in Bellatrix and Capella
p2p.RPCBlocksByRootTopicV2: s.beaconBlocksRootRPCHandler, // Updated in Altair and modified in Bellatrix and Capella
p2p.RPCPingTopicV1: s.pingHandler,
p2p.RPCMetaDataTopicV2: s.metaDataHandler, // Updated in Altair
}

@@ -93,7 +93,12 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
// If so, requests them and saves them to the storage.
func (s *Service) requestAndSaveMissingDataColumnSidecars(block blocks.ROBlock) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
custodyGroupCount := s.cfg.p2p.CustodyGroupCount()

custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "fetch custody group count from peer")
}

samplingSize := max(custodyGroupCount, samplesPerSlot)

nodeID := s.cfg.p2p.NodeID()

@@ -6,6 +6,7 @@ import (
"time"

"github.com/OffchainLabs/prysm/v6/async/abool"
"github.com/OffchainLabs/prysm/v6/async/event"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
db "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
@@ -54,7 +55,7 @@ func TestRPC_LightClientBootstrap(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lightClient.NewLightClientStore(d),
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -176,7 +177,7 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: &lightClient.Store{},
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -202,7 +203,7 @@ func TestRPC_LightClientOptimisticUpdate(t *testing.T) {
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
require.NoError(t, err)

r.lcStore.SetLastOptimisticUpdate(update)
r.lcStore.SetLastOptimisticUpdate(update, false)

var wg sync.WaitGroup
wg.Add(1)
@@ -296,7 +297,7 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: &lightClient.Store{},
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}
@@ -322,7 +323,7 @@ func TestRPC_LightClientFinalityUpdate(t *testing.T) {
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
require.NoError(t, err)

r.lcStore.SetLastFinalityUpdate(update)
r.lcStore.SetLastFinalityUpdate(update, false)

var wg sync.WaitGroup
wg.Add(1)
@@ -416,7 +417,7 @@ func TestRPC_LightClientUpdatesByRange(t *testing.T) {
stateNotifier: &mockChain.MockStateNotifier{},
},
chainStarted: abool.New(),
lcStore: lightClient.NewLightClientStore(d),
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
subHandler: newSubTopicHandler(),
rateLimiter: newRateLimiter(p1),
}

@@ -343,7 +343,7 @@ func (s *Service) buildStatusFromStream(
return nil, err
}

if _, err2 := stream.Write(resp); err != nil {
if _, err2 := stream.Write(resp); err2 != nil {
log.WithError(err2).Debug("Could not write to stream")
}

@@ -351,13 +351,18 @@ func (s *Service) buildStatusFromStream(
}

if streamVersion == p2p.SchemaVersionV2 {
earliestAvailableSlot, err := s.cfg.p2p.EarliestAvailableSlot()
if err != nil {
return nil, errors.Wrap(err, "earliest available slot")
}

status := &pb.StatusV2{
ForkDigest: forkDigest[:],
FinalizedRoot: finalizedRoot,
FinalizedEpoch: FinalizedEpoch,
HeadRoot: headRoot,
HeadSlot: s.cfg.chain.HeadSlot(),
EarliestAvailableSlot: s.cfg.p2p.EarliestAvailableSlot(),
EarliestAvailableSlot: earliestAvailableSlot,
}

return status, nil

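The one-character fix in the first hunk above is easy to miss: the old code tested the outer err after writing to the stream, so a failed write (err2) was silently ignored. A self-contained sketch of the shadowing pattern:

package main

import (
	"errors"
	"fmt"
)

func write() (int, error) { return 0, errors.New("write failed") }

func main() {
	err := error(nil) // outer error, already handled earlier in the real code

	// Buggy form: the condition tests the outer err, so err2 is never inspected.
	if _, err2 := write(); err != nil {
		fmt.Println("never reached:", err2)
	}

	// Fixed form, as in the diff: test the error actually returned by Write.
	if _, err2 := write(); err2 != nil {
		fmt.Println("write error surfaced:", err2)
	}
}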
@@ -124,6 +124,7 @@ type blockchainService interface {
blockchain.OptimisticModeFetcher
blockchain.SlashingReceiver
blockchain.ForkchoiceFetcher
blockchain.DataAvailabilityChecker
}

// Service is responsible for handling all run time p2p related operations as the
@@ -267,7 +268,11 @@ func (s *Service) Start() {
s.processPendingBlocksQueue()
s.processPendingAttsQueue()
s.maintainPeerStatuses()
s.maintainCustodyGroupCount()

if params.FuluEnabled() {
s.maintainCustodyInfo()
}

s.resyncIfBehind()

// Update sync metrics.

@@ -645,7 +645,7 @@ func (s *Service) dataColumnSubnetIndices(primitives.Slot) map[uint64]bool {

// samplingSize computes the sampling size based on the samples per slot value,
// the validators custody requirement, and whether the node is subscribed to all data subnets.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/das-core.md#custody-sampling
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
func (s *Service) samplingSize() (uint64, error) {
beaconConfig := params.BeaconConfig()

@@ -659,7 +659,12 @@ func (s *Service) samplingSize() (uint64, error) {
return 0, errors.Wrap(err, "validators custody requirement")
}

return max(beaconConfig.SamplesPerSlot, validatorsCustodyRequirement), nil
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return 0, errors.Wrap(err, "custody group count")
}

return max(beaconConfig.SamplesPerSlot, validatorsCustodyRequirement, custodyGroupCount), nil
}

func (s *Service) persistentAndAggregatorSubnetIndices(currentSlot primitives.Slot) map[uint64]bool {

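After this change the sampling size is a three-way maximum: the spec's samples-per-slot constant, the custody requirement implied by tracked validators, and the custody group count the node already advertises. A self-contained sketch of the arithmetic; the inputs are placeholders, not the repo's config values:

package main

import "fmt"

// samplingSize mirrors the max() logic in the diff.
func samplingSize(samplesPerSlot, validatorsCustodyRequirement, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, validatorsCustodyRequirement, custodyGroupCount)
}

func main() {
	// e.g. 8 samples per slot, 16 groups required by attached validators,
	// 4 groups currently advertised: the validator requirement dominates.
	fmt.Println(samplingSize(8, 16, 4)) // 16
}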
@@ -15,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v6/io/file"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)
@@ -108,6 +109,18 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
log.Warning("Data column storage is not enabled, skip saving data column, but continue to reconstruct and broadcast data column")
}

// Check if data is already available to avoid unnecessary execution client calls
switch err := s.cfg.chain.IsDataAvailable(ctx, blockRoot, roSignedBlock); {
case err == nil:
log.Debug("Data already available – skipping execution-client call")
return
case errors.Is(err, blockchain.ErrDataNotAvailable):
// continue
default:
log.WithError(err).Error("Failed to check data availability")
return
}

// When this function is called, it's from the time when the block is received, so in almost all situations we need to get the data column from EL instead of the blob storage.
sidecars, err := s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, roSignedBlock, blockRoot)
if err != nil {
@@ -121,7 +134,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
}

nodeID := s.cfg.p2p.NodeID()
custodyGroupCount := s.cfg.p2p.CustodyGroupCount()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
log.WithError(err).Error("Failed to get custody group count")
return
}

info, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
log.WithError(err).Error("Failed to get peer info")

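The availability check added above is a three-way switch: nil means the data is already retrievable and the engine-API call can be skipped, the sentinel blockchain.ErrDataNotAvailable means proceed to reconstruct, and anything else is a hard failure. A self-contained sketch of that control flow with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

// errDataNotAvailable stands in for blockchain.ErrDataNotAvailable.
var errDataNotAvailable = errors.New("data not available")

func process(check func() error) string {
	switch err := check(); {
	case err == nil:
		return "skip: already available"
	case errors.Is(err, errDataNotAvailable):
		return "reconstruct from execution client"
	default:
		return "abort: " + err.Error()
	}
}

func main() {
	fmt.Println(process(func() error { return nil }))
	fmt.Println(process(func() error { return errDataNotAvailable }))
	fmt.Println(process(func() error { return errors.New("db closed") }))
}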
@@ -17,6 +17,7 @@ import (
|
||||
lruwrpr "github.com/OffchainLabs/prysm/v6/cache/lru"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -214,8 +215,11 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
chainService := &chainMock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
// Create a chain service that returns ErrDataNotAvailable to trigger execution service calls
|
||||
chainService := &ChainServiceDataNotAvailable{
|
||||
ChainService: &chainMock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
allColumns := make([]blocks.VerifiedRODataColumn, 128)
|
||||
@@ -295,3 +299,193 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// TestProcessDataColumnSidecarsFromExecution_DataAvailabilityCheck tests the data availability optimization
|
||||
func TestProcessDataColumnSidecarsFromExecution_DataAvailabilityCheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a test block with KZG commitments
|
||||
block := util.NewBeaconBlockDeneb()
|
||||
block.Block.Slot = 100
|
||||
commitment := [48]byte{1, 2, 3}
|
||||
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("skips execution call when data is available", func(t *testing.T) {
|
||||
mockChain := &MockChainServiceTrackingCalls{
|
||||
ChainService: &chainMock.ChainService{},
|
||||
dataAvailable: true, // Data is available
|
||||
availabilityError: nil,
|
||||
isDataAvailableCalled: false,
|
||||
}
|
||||
|
||||
mockExecutionClient := &MockExecutionClientTrackingCalls{
|
||||
EngineClient: &mockExecution.EngineClient{},
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
executionReconstructor: mockExecutionClient,
|
||||
},
|
||||
}
|
||||
|
||||
// This should call IsDataAvailable and return early without calling execution client
|
||||
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
|
||||
|
||||
// Verify the expected call pattern
|
||||
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
|
||||
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when data is available")
|
||||
})
|
||||
|
||||
t.Run("returns early when IsDataAvailable returns error", func(t *testing.T) {
|
||||
mockChain := &MockChainServiceTrackingCalls{
|
||||
ChainService: &chainMock.ChainService{},
|
||||
            dataAvailable:         false, // This should be ignored due to error
            availabilityError:     errors.New("test error from IsDataAvailable"),
            isDataAvailableCalled: false,
        }

        mockExecutionClient := &MockExecutionClientTrackingCalls{
            EngineClient:      &mockExecution.EngineClient{},
            reconstructCalled: false,
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                executionReconstructor: mockExecutionClient,
            },
        }

        // This should call IsDataAvailable, get an error, and return early without calling the execution client.
        s.processDataColumnSidecarsFromExecution(ctx, signedBlock)

        // Verify the expected call pattern.
        assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
        assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when IsDataAvailable returns error")
    })

    t.Run("calls execution client when data not available", func(t *testing.T) {
        mockChain := &MockChainServiceTrackingCalls{
            ChainService:          &chainMock.ChainService{},
            dataAvailable:         false, // Data not available
            availabilityError:     nil,
            isDataAvailableCalled: false,
        }

        mockExecutionClient := &MockExecutionClientTrackingCalls{
            EngineClient: &mockExecution.EngineClient{
                DataColumnSidecars: []blocks.VerifiedRODataColumn{}, // Empty response is fine for this test
            },
            reconstructCalled: false,
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                executionReconstructor: mockExecutionClient,
            },
        }

        // This should call IsDataAvailable, get false, and proceed to call the execution client.
        s.processDataColumnSidecarsFromExecution(ctx, signedBlock)

        // Verify the expected call pattern.
        assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
        assert.Equal(t, true, mockExecutionClient.reconstructCalled, "Expected execution client to be called when data is not available")
    })

    t.Run("returns early when block has no KZG commitments", func(t *testing.T) {
        // Create a block without KZG commitments.
        blockNoCommitments := util.NewBeaconBlockDeneb()
        blockNoCommitments.Block.Slot = 100
        blockNoCommitments.Block.Body.BlobKzgCommitments = [][]byte{} // No commitments

        signedBlockNoCommitments, err := blocks.NewSignedBeaconBlock(blockNoCommitments)
        require.NoError(t, err)

        mockChain := &MockChainServiceTrackingCalls{
            ChainService:          &chainMock.ChainService{},
            dataAvailable:         false,
            availabilityError:     nil,
            isDataAvailableCalled: false,
        }

        mockExecutionClient := &MockExecutionClientTrackingCalls{
            EngineClient:      &mockExecution.EngineClient{},
            reconstructCalled: false,
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                executionReconstructor: mockExecutionClient,
            },
        }

        // This should return early before checking data availability or calling the execution client.
        s.processDataColumnSidecarsFromExecution(ctx, signedBlockNoCommitments)

        // Verify neither method was called since there are no commitments.
        assert.Equal(t, false, mockChain.isDataAvailableCalled, "Expected IsDataAvailable NOT to be called when no KZG commitments")
        assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when no KZG commitments")
    })
}

// MockChainServiceTrackingCalls tracks calls to IsDataAvailable for testing.
type MockChainServiceTrackingCalls struct {
    *chainMock.ChainService
    isDataAvailableCalled bool
    dataAvailable         bool
    availabilityError     error
}

func (m *MockChainServiceTrackingCalls) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
    m.isDataAvailableCalled = true
    if m.availabilityError != nil {
        return m.availabilityError
    }
    if !m.dataAvailable {
        return blockchain.ErrDataNotAvailable
    }
    return nil
}

// MockExecutionClientTrackingCalls tracks calls to ReconstructDataColumnSidecars for testing.
type MockExecutionClientTrackingCalls struct {
    *mockExecution.EngineClient
    reconstructCalled bool
}

func (m *MockExecutionClientTrackingCalls) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
    m.reconstructCalled = true
    return m.EngineClient.DataColumnSidecars, m.EngineClient.ErrorDataColumnSidecars
}

func (m *MockExecutionClientTrackingCalls) ReconstructFullBlock(ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
    return m.EngineClient.ReconstructFullBlock(ctx, blindedBlock)
}

func (m *MockExecutionClientTrackingCalls) ReconstructFullBellatrixBlockBatch(ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
    return m.EngineClient.ReconstructFullBellatrixBlockBatch(ctx, blindedBlocks)
}

func (m *MockExecutionClientTrackingCalls) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hasIndex func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
    return m.EngineClient.ReconstructBlobSidecars(ctx, block, blockRoot, hasIndex)
}

// ChainServiceDataNotAvailable wraps ChainService and overrides IsDataAvailable to return ErrDataNotAvailable.
type ChainServiceDataNotAvailable struct {
    *chainMock.ChainService
}

func (c *ChainServiceDataNotAvailable) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
    return blockchain.ErrDataNotAvailable
}

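The helpers above all rely on the same Go testing idiom: embed the real mock and shadow exactly one method. A minimal self-contained sketch of that embed-and-override pattern (the Base and NotAvailable names are illustrative, not from this diff):

package main

import (
    "errors"
    "fmt"
)

var errDataNotAvailable = errors.New("data not available")

// Base stands in for chainMock.ChainService in this sketch.
type Base struct{}

func (Base) IsDataAvailable() error { return nil }

// NotAvailable embeds Base and shadows only IsDataAvailable;
// every other Base method is still promoted unchanged.
type NotAvailable struct {
    Base
}

func (NotAvailable) IsDataAvailable() error { return errDataNotAvailable }

func main() {
    var c interface{ IsDataAvailable() error } = NotAvailable{}
    fmt.Println(c.IsDataAvailable()) // data not available
}
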
@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
    opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"

@@ -29,6 +30,11 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
        return errors.Wrap(err, "reconstruct data columns")
    }

    // Trigger getBlobsV2 when receiving a data column sidecar.
    if err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, root); err != nil {
        return errors.Wrap(err, "failed to trigger getBlobsV2 for data column sidecar")
    }

    return nil
}

@@ -52,3 +58,55 @@ func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.V

    return nil
}

// triggerGetBlobsV2ForDataColumnSidecar triggers a getBlobsV2 retry when a data column sidecar is received.
// It fetches the block by root and, if the block still needs data, invokes the execution service's retry mechanism.
func (s *Service) triggerGetBlobsV2ForDataColumnSidecar(ctx context.Context, blockRoot [32]byte) error {
    // Get the specific block by root from the database.
    signedBlock, err := s.cfg.beaconDB.Block(ctx, blockRoot)
    if err != nil {
        log.WithError(err).Debug("Could not fetch block from database for getBlobsV2 retry trigger")
        return nil
    }
    if signedBlock == nil || signedBlock.IsNil() {
        log.Debug("Block not found in database for getBlobsV2 retry trigger")
        return nil
    }

    // Check if this block has blob commitments that would need getBlobsV2.
    blockBody := signedBlock.Block().Body()
    commitments, err := blockBody.BlobKzgCommitments()
    if err != nil {
        return err
    }
    if len(commitments) == 0 {
        return nil
    }

    // Check if the data is already available.
    switch err := s.cfg.chain.IsDataAvailable(ctx, blockRoot, signedBlock); {
    case err == nil:
        log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Data already available, skipping getBlobsV2 retry")
        return nil
    case errors.Is(err, blockchain.ErrDataNotAvailable):
        // Fall through and trigger getBlobsV2.
    default:
        return errors.Wrap(err, "check data availability during getBlobsV2 trigger")
    }

    // Trigger the retry by calling the execution service's reconstruct method.
    // ReconstructDataColumnSidecars handles concurrent calls internally.
    log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Triggering getBlobsV2 retry for data column sidecar")

    if s.cfg.executionReconstructor == nil {
        return nil
    }

    _, err = s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
    if err != nil {
        return errors.Wrap(err, "getBlobsV2 retry triggered by data column sidecar failed")
    }

    return nil
}

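The switch above is the load-bearing piece of control flow: a nil error means there is nothing to do, the ErrDataNotAvailable sentinel means "retry", and any other error is a genuine failure. A minimal self-contained sketch of the same three-way pattern (the sentinel and helper names here are illustrative):

package main

import (
    "errors"
    "fmt"
)

var errNotAvailable = errors.New("data not available")

// checkAvailability stands in for chain.IsDataAvailable in this sketch.
func checkAvailability(available bool) error {
    if !available {
        return errNotAvailable
    }
    return nil
}

// trigger mirrors the switch above: nil means done, the sentinel means
// run the retry, anything else is propagated as a real error.
func trigger(available bool) error {
    switch err := checkAvailability(available); {
    case err == nil:
        return nil // data already available, nothing to do
    case errors.Is(err, errNotAvailable):
        // fall through and run the retry
    default:
        return fmt.Errorf("check availability: %w", err)
    }
    fmt.Println("running getBlobsV2 retry")
    return nil
}

func main() {
    _ = trigger(false) // prints the retry message
    _ = trigger(true)  // no-op
}
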
beacon-chain/sync/subscriber_data_column_sidecar_trigger_test.go (new file, 326 lines)
@@ -0,0 +1,326 @@
package sync

import (
    "context"
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
    blockchaintesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/pkg/errors"
)

// TestDataColumnSubscriber_InvalidMessage tests error handling for invalid messages.
func TestDataColumnSubscriber_InvalidMessage(t *testing.T) {
    s := &Service{}

    // Use a proto message that is not a blocks.VerifiedRODataColumn.
    invalidMsg := &ethpb.SignedBeaconBlock{}
    err := s.dataColumnSubscriber(context.Background(), invalidMsg)
    require.ErrorContains(t, "message was not type blocks.VerifiedRODataColumn", err)
}

// TestTriggerGetBlobsV2ForDataColumnSidecar_BlockAvailability tests block availability checking.
func TestTriggerGetBlobsV2ForDataColumnSidecar_BlockAvailability(t *testing.T) {
    ctx := context.Background()
    blockRoot := [32]byte{1, 2, 3}

    // Test when the block is not available.
    t.Run("block not available", func(t *testing.T) {
        mockChain := &blockchaintesting.ChainService{}
        db := dbtesting.SetupDB(t)

        s := &Service{
            cfg: &config{
                chain:    mockChain,
                beaconDB: db,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err)
    })

    // Test when HasBlock returns true but the block is not in the database.
    t.Run("HasBlock true but not in database", func(t *testing.T) {
        mockChain := &blockchaintesting.ChainService{}
        // Mock HasBlock to return true.
        mockChain.CanonicalRoots = map[[32]byte]bool{blockRoot: true}

        db := dbtesting.SetupDB(t)

        s := &Service{
            cfg: &config{
                chain:    mockChain,
                beaconDB: db,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err)
    })
}

// TestTriggerGetBlobsV2ForDataColumnSidecar_WithValidBlock tests with a valid block.
func TestTriggerGetBlobsV2ForDataColumnSidecar_WithValidBlock(t *testing.T) {
    ctx := context.Background()

    // Create a test block with KZG commitments.
    slot := primitives.Slot(100)
    block := util.NewBeaconBlockDeneb()
    block.Block.Slot = slot

    // Add KZG commitments to trigger the getBlobsV2 retry logic.
    commitment := [48]byte{1, 2, 3}
    block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}

    signedBlock, err := blocks.NewSignedBeaconBlock(block)
    require.NoError(t, err)

    blockRoot, err := signedBlock.Block().HashTreeRoot()
    require.NoError(t, err)

    t.Run("block with KZG commitments triggers retry", func(t *testing.T) {
        // Mock execution reconstructor to track calls.
        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save the block to the database.
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that reports data is NOT available (to trigger the execution service).
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     false, // Data not available, should trigger execution service
            availabilityError: nil,
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err)

        // The trigger call is synchronous; this short sleep only guards against any future asynchronous behavior.
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was called.
        if !mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars to be called")
        }
    })

    t.Run("does not start retry if data already available", func(t *testing.T) {
        // Mock execution reconstructor to track calls.
        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save the block to the database.
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that reports data is already available.
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     true,
            availabilityError: nil,
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err)

        // Brief pause to make sure nothing ran asynchronously before asserting.
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was NOT called since data is already available.
        if mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called when data is already available")
        }
    })

    t.Run("calls execution service when data not available", func(t *testing.T) {
        // Mock execution reconstructor to track calls.
        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save the block to the database.
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that returns ErrDataNotAvailable.
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     false,                          // Data not available
            availabilityError: blockchain.ErrDataNotAvailable, // Should trigger the execution service call
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err) // The function should succeed and call the execution service.

        // The trigger call is synchronous; this short sleep only guards against any future asynchronous behavior.
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was called.
        if !mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars to be called when data is not available")
        }
    })

    t.Run("returns error when availability check returns error", func(t *testing.T) {
        // Mock execution reconstructor to track calls.
        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save the block to the database.
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that returns an error for the availability check.
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     false,                                  // This should be ignored due to the error
            availabilityError: errors.New("availability check error"), // The error should cause the function to return an error
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.ErrorContains(t, "availability check error", err) // The function should return the availability check error.

        // Verify that the execution reconstructor was NOT called since the function returned early with an error.
        if mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called when availability check returns error")
        }
    })

    t.Run("block without KZG commitments does not trigger retry", func(t *testing.T) {
        // Create a block without KZG commitments.
        blockNoCommitments := util.NewBeaconBlockDeneb()
        blockNoCommitments.Block.Slot = slot
        blockNoCommitments.Block.Body.BlobKzgCommitments = [][]byte{} // No commitments

        signedBlockNoCommitments, err := blocks.NewSignedBeaconBlock(blockNoCommitments)
        require.NoError(t, err)

        blockRootNoCommitments, err := signedBlockNoCommitments.Block().HashTreeRoot()
        require.NoError(t, err)

        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save the block to the database.
        require.NoError(t, db.SaveBlock(ctx, signedBlockNoCommitments))

        mockChain := &blockchaintesting.ChainService{
            DB: db, // Set the DB so HasBlock can find the block
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err = s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRootNoCommitments)
        require.NoError(t, err)

        // Brief pause to make sure nothing ran asynchronously before asserting.
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was NOT called.
        if mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called for block without commitments")
        }
    })
}

// MockExecutionReconstructor is a mock implementation for testing.
type MockExecutionReconstructor struct {
    reconstructCalled bool
    reconstructError  error
    reconstructResult []blocks.VerifiedRODataColumn
}

func (m *MockExecutionReconstructor) ReconstructFullBlock(ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
    return nil, nil
}

func (m *MockExecutionReconstructor) ReconstructFullBellatrixBlockBatch(ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
    return nil, nil
}

func (m *MockExecutionReconstructor) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
    return nil, nil
}

func (m *MockExecutionReconstructor) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
    m.reconstructCalled = true
    return m.reconstructResult, m.reconstructError
}

// MockChainServiceWithAvailability wraps the testing ChainService to allow configuring IsDataAvailable.
type MockChainServiceWithAvailability struct {
    *blockchaintesting.ChainService
    dataAvailable     bool
    availabilityError error
}

// IsDataAvailable overrides the default implementation to return configurable values for testing.
func (m *MockChainServiceWithAvailability) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
    if m.availabilityError != nil {
        return m.availabilityError
    }
    if !m.dataAvailable {
        return blockchain.ErrDataNotAvailable
    }
    return nil
}

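Since the trigger shown earlier runs synchronously, the fixed 10ms sleeps in these tests are purely defensive. If the reconstruction ever does move onto a goroutine, polling a condition with a deadline is less flaky than a fixed sleep; a minimal sketch of such a helper (waitFor is illustrative, not part of this diff):

package sync_test

import (
    "testing"
    "time"
)

// waitFor polls cond every millisecond until it returns true or the timeout
// expires, failing the test when the deadline passes.
func waitFor(t *testing.T, timeout time.Duration, cond func() bool) {
    t.Helper()
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if cond() {
            return
        }
        time.Sleep(time.Millisecond)
    }
    t.Fatal("condition not met before timeout")
}

A hypothetical call site would then read: waitFor(t, time.Second, func() bool { return mockReconstructor.reconstructCalled }).
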
@@ -28,7 +28,7 @@ func (s *Service) lightClientOptimisticUpdateSubscriber(_ context.Context, msg p
        "attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
    }).Debug("Saving newly received light client optimistic update.")

    s.lcStore.SetLastOptimisticUpdate(update)
    s.lcStore.SetLastOptimisticUpdate(update, false)

    s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.LightClientOptimisticUpdate,
@@ -55,7 +55,7 @@ func (s *Service) lightClientFinalityUpdateSubscriber(_ context.Context, msg pro
        "attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
    }).Debug("Saving newly received light client finality update.")

    s.lcStore.SetLastFinalityUpdate(update)
    s.lcStore.SetLastFinalityUpdate(update, false)

    s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.LightClientFinalityUpdate,

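The diff replaces SetLastOptimisticUpdate(update) with SetLastOptimisticUpdate(update, false). Together with the changelog entry below ("Moved the broadcast and event notifier logic for saving LC updates to the store function"), the new boolean most plausibly tells the store whether to also broadcast the update; the gossip subscribers pass false because an update that just arrived from gossip must not be echoed back. A minimal sketch of that shape, assuming that reading (the Store fields and callbacks here are illustrative):

package lcstore

import "sync"

// Store is a sketch of a last-update store whose setter can also broadcast.
// broadcastFn and notifyFn stand in for the p2p broadcaster and state feed
// that NewLightClientStore now receives.
type Store struct {
    mu          sync.Mutex
    last        any
    broadcastFn func(update any)
    notifyFn    func(update any)
}

// SetLastOptimisticUpdate saves the update; when broadcast is true it also
// re-publishes it to gossip. The event notification fires either way.
func (s *Store) SetLastOptimisticUpdate(update any, broadcast bool) {
    s.mu.Lock()
    s.last = update
    s.mu.Unlock()
    if broadcast {
        s.broadcastFn(update)
    }
    s.notifyFn(update)
}
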
@@ -9,6 +9,7 @@ import (
    "time"

    "github.com/OffchainLabs/prysm/v6/async/abool"
    "github.com/OffchainLabs/prysm/v6/async/event"
    mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
@@ -684,7 +685,7 @@ func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) {
            stateNotifier: &mockChain.MockStateNotifier{},
        },
        chainStarted: abool.New(),
        lcStore:      &lightClient.Store{},
        lcStore:      lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
        subHandler:   newSubTopicHandler(),
    }
    topic := p2p.LightClientOptimisticUpdateTopicFormat
@@ -751,7 +752,7 @@ func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) {
            stateNotifier: &mockChain.MockStateNotifier{},
        },
        chainStarted: abool.New(),
        lcStore:      &lightClient.Store{},
        lcStore:      lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
        subHandler:   newSubTopicHandler(),
    }
    topic := p2p.LightClientFinalityUpdateTopicFormat

@@ -131,7 +131,7 @@ func (s *Service) validateLightClientFinalityUpdate(ctx context.Context, pid pee
        return pubsub.ValidationIgnore, nil
    }

    if !lightclient.IsBetterFinalityUpdate(newUpdate, s.lcStore.LastFinalityUpdate()) {
    if !lightclient.IsFinalityUpdateValidForBroadcast(newUpdate, s.lcStore.LastFinalityUpdate()) {
        log.WithFields(logrus.Fields{
            "attestedSlot":  fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
            "signatureSlot": fmt.Sprintf("%d", newUpdate.SignatureSlot()),

@@ -5,6 +5,7 @@ import (
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/async/event"
    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -102,7 +103,7 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    // drift back appropriate number of epochs based on fork + 2 slots for signature slot + time for gossip propagation + any extra drift
    genesisDrift := v*slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals + test.genesisDrift
    chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0)}
    s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}, lcStore: &lightClient.Store{}}
    s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}, lcStore: lightClient.NewLightClientStore(nil, &p2ptest.FakeP2P{}, new(event.Feed))}

    var oldUpdate interfaces.LightClientOptimisticUpdate
    var err error
@@ -111,7 +112,7 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
        oldUpdate, err = lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
        require.NoError(t, err)

        s.lcStore.SetLastOptimisticUpdate(oldUpdate)
        s.lcStore.SetLastOptimisticUpdate(oldUpdate, false)
    }

    l := util.NewTestLightClient(t, v, test.newUpdateOptions...)
@@ -242,7 +243,7 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    // drift back appropriate number of epochs based on fork + 2 slots for signature slot + time for gossip propagation + any extra drift
    genesisDrift := v*slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals + test.genesisDrift
    chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0)}
    s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}, lcStore: &lightClient.Store{}}
    s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}, clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)}, lcStore: lightClient.NewLightClientStore(nil, &p2ptest.FakeP2P{}, new(event.Feed))}

    var oldUpdate interfaces.LightClientFinalityUpdate
    var err error
@@ -251,7 +252,7 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
        oldUpdate, err = lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
        require.NoError(t, err)

        s.lcStore.SetLastFinalityUpdate(oldUpdate)
        s.lcStore.SetLastFinalityUpdate(oldUpdate, false)
    }

    l := util.NewTestLightClient(t, v, test.newUpdateOptions...)

changelog/bastin_move_broadcast_to_store.md (new file, 5 lines)
@@ -0,0 +1,5 @@
### Changed

- Moved the broadcast and event notifier logic for saving LC updates to the store function.
- Fixed an issue where an LC finality update could be broadcast more than twice, along with a faulty if-condition.
- Separated the finality update validation rules for saving and broadcasting.

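The last changelog item pairs with the rename to IsFinalityUpdateValidForBroadcast in the validation hunk above: the rule for forwarding an update to peers is no longer the same as the rule for replacing the stored copy. A hedged sketch of why two predicates can differ (the bodies below are illustrative only, not Prysm's actual rules):

package lcvalidation

// update is a pared-down stand-in for a light client finality update.
type update struct {
    attestedSlot  uint64
    signatureSlot uint64
}

// isBetterFinalityUpdate decides whether to replace the stored update:
// in this sketch, only a strictly newer attested header qualifies.
func isBetterFinalityUpdate(newU, oldU update) bool {
    return newU.attestedSlot > oldU.attestedSlot
}

// isFinalityUpdateValidForBroadcast is deliberately looser: an update for
// the same attested header but a newer signature slot may still be worth
// forwarding to peers even though it does not replace the stored one.
func isFinalityUpdateValidForBroadcast(newU, oldU update) bool {
    if newU.attestedSlot != oldU.attestedSlot {
        return newU.attestedSlot > oldU.attestedSlot
    }
    return newU.signatureSlot > oldU.signatureSlot
}
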
changelog/manu-cgc.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Updated validator custody to the latest specification, including the new status message.

changelog/radek_do-not-compare-liveness.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Do not compare the liveness response with LH in the e2e Beacon API evaluator.

changelog/rose2221-develop.md (new file, 5 lines)
@@ -0,0 +1,5 @@
### Added

- **Gzip Compression for Beacon API:**
  Fixed an issue where the beacon chain server ignored the `Accept-Encoding: gzip` header and returned uncompressed JSON responses. With this change, endpoints that use the `AcceptHeaderHandler` now also compress responses when a client requests gzip encoding.
  Fixes [#14593](https://github.com/prysmaticlabs/prysm/issues/14593).

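For reference, transparent gzip encoding in Go is usually done with a small HTTP middleware that checks Accept-Encoding and swaps the ResponseWriter. A minimal self-contained sketch of the general mechanism (this is not Prysm's AcceptHeaderHandler, just an illustration):

package main

import (
    "compress/gzip"
    "io"
    "net/http"
    "strings"
)

// gzipWriter forwards response writes through a gzip.Writer.
type gzipWriter struct {
    http.ResponseWriter
    gz io.Writer
}

func (w gzipWriter) Write(b []byte) (int, error) { return w.gz.Write(b) }

// withGzip compresses the response only when the client asks for it.
func withGzip(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
            next.ServeHTTP(w, r)
            return
        }
        w.Header().Set("Content-Encoding", "gzip")
        gz := gzip.NewWriter(w)
        defer gz.Close()
        next.ServeHTTP(gzipWriter{ResponseWriter: w, gz: gz}, r)
    })
}

func main() {
    http.Handle("/", withGzip(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
        _, _ = io.WriteString(w, `{"ok":true}`)
    })))
    _ = http.ListenAndServe(":8080", nil)
}
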
@@ -631,7 +631,7 @@ func TestJsonMarshalUnmarshal(t *testing.T) {
        BlobGasUsed:   1024,
        ExcessBlobGas: 2048,
    }

    bundleV2 := &enginev1.BlobsBundleV2{
        KzgCommitments: [][]byte{make([]byte, 48), make([]byte, 48)},
        Proofs:         [][]byte{make([]byte, 48), make([]byte, 48)},

@@ -322,6 +322,7 @@ var (
    }())),
    "/validator/liveness/{param1}": newMetadata[structs.GetLivenessResponse](
        v1PathTemplate,
        withSanityCheckOnly(),
        withParams(func(currentEpoch primitives.Epoch) []string {
            return []string{fmt.Sprintf("%v", currentEpoch)}
        }),

@@ -53,6 +53,7 @@ go_library(
        "//consensus-types:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",

@@ -10,6 +10,7 @@ import (
    consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    lightclienttypes "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/ssz"
    v11 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -28,6 +29,7 @@ type TestLightClient struct {
    version                 int
    increaseAttestedSlotBy  uint64
    increaseFinalizedSlotBy uint64
    increaseSignatureSlotBy uint64

    T   *testing.T
    Ctx context.Context
@@ -112,6 +114,13 @@ func WithIncreasedFinalizedSlot(increaseBy uint64) LightClientOption {
    }
}

// WithIncreasedSignatureSlot specifies the number of slots to increase the signature slot by. This does not affect the attested/finalized block's slot.
func WithIncreasedSignatureSlot(increaseBy uint64) LightClientOption {
    return func(l *TestLightClient) {
        l.increaseSignatureSlotBy = increaseBy
    }
}

func (l *TestLightClient) setupTestAltair() *TestLightClient {
    ctx := context.Background()

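WithIncreasedSignatureSlot follows the functional-options pattern this test helper already uses: each option is a closure that mutates the TestLightClient before setup runs. A minimal self-contained sketch of the pattern itself (the Config and Option names are illustrative):

package main

import "fmt"

// Config is the object being built; an Option mutates it.
type Config struct {
    signatureSlotOffset uint64
}

type Option func(*Config)

// WithSignatureSlotOffset mirrors the shape of WithIncreasedSignatureSlot.
func WithSignatureSlotOffset(n uint64) Option {
    return func(c *Config) { c.signatureSlotOffset = n }
}

// New applies each option in order over the defaults.
func New(opts ...Option) *Config {
    c := &Config{}
    for _, opt := range opts {
        opt(c)
    }
    return c
}

func main() {
    c := New(WithSignatureSlotOffset(3))
    fmt.Println(c.signatureSlotOffset) // 3
}
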
@@ -121,6 +130,9 @@ func (l *TestLightClient) setupTestAltair() *TestLightClient {
    }

    signatureSlot := attestedSlot.Add(1)
    if l.increaseSignatureSlotBy > 0 {
        signatureSlot = signatureSlot.Add(l.increaseSignatureSlotBy)
    }

    // Attested State
    attestedState, err := NewBeaconStateAltair()
@@ -232,6 +244,9 @@ func (l *TestLightClient) setupTestBellatrix() *TestLightClient {
    }

    signatureSlot := attestedSlot.Add(1)
    if l.increaseSignatureSlotBy > 0 {
        signatureSlot = signatureSlot.Add(l.increaseSignatureSlotBy)
    }

    // Attested State & Block
    attestedState, err := NewBeaconStateBellatrix()
@@ -404,6 +419,9 @@ func (l *TestLightClient) setupTestCapella() *TestLightClient {
    }

    signatureSlot := attestedSlot.Add(1)
    if l.increaseSignatureSlotBy > 0 {
        signatureSlot = signatureSlot.Add(l.increaseSignatureSlotBy)
    }

    // Attested State
    attestedState, err := NewBeaconStateCapella()
@@ -577,6 +595,9 @@ func (l *TestLightClient) setupTestDeneb() *TestLightClient {
    }

    signatureSlot := attestedSlot.Add(1)
    if l.increaseSignatureSlotBy > 0 {
        signatureSlot = signatureSlot.Add(l.increaseSignatureSlotBy)
    }

    // Attested State
    attestedState, err := NewBeaconStateDeneb()
@@ -751,6 +772,9 @@ func (l *TestLightClient) setupTestElectra() *TestLightClient {
    }

    signatureSlot := attestedSlot.Add(1)
    if l.increaseSignatureSlotBy > 0 {
        signatureSlot = signatureSlot.Add(l.increaseSignatureSlotBy)
    }

    // Attested State & Block
    attestedState, err := NewBeaconStateElectra()
@@ -1044,3 +1068,55 @@ func (l *TestLightClient) CheckSyncAggregate(sa *ethpb.SyncAggregate) {
    require.DeepSSZEqual(l.T, syncAggregate.SyncCommitteeBits, sa.SyncCommitteeBits, "SyncAggregate bits is not equal")
    require.DeepSSZEqual(l.T, syncAggregate.SyncCommitteeSignature, sa.SyncCommitteeSignature, "SyncAggregate signature is not equal")
}

func MockOptimisticUpdate() (interfaces.LightClientOptimisticUpdate, error) {
    pbUpdate := &ethpb.LightClientOptimisticUpdateAltair{
        AttestedHeader: &ethpb.LightClientHeaderAltair{
            Beacon: &ethpb.BeaconBlockHeader{
                Slot:       primitives.Slot(32),
                ParentRoot: make([]byte, 32),
                StateRoot:  make([]byte, 32),
                BodyRoot:   make([]byte, 32),
            },
        },
        SyncAggregate: &ethpb.SyncAggregate{
            SyncCommitteeBits:      make([]byte, 64),
            SyncCommitteeSignature: make([]byte, 96),
        },
        SignatureSlot: primitives.Slot(33),
    }
    return lightclienttypes.NewWrappedOptimisticUpdateAltair(pbUpdate)
}

func MockFinalityUpdate() (interfaces.LightClientFinalityUpdate, error) {
    finalityBranch := make([][]byte, fieldparams.FinalityBranchDepth)
    for i := 0; i < len(finalityBranch); i++ {
        finalityBranch[i] = make([]byte, 32)
    }

    pbUpdate := &ethpb.LightClientFinalityUpdateAltair{
        FinalizedHeader: &ethpb.LightClientHeaderAltair{
            Beacon: &ethpb.BeaconBlockHeader{
                Slot:       primitives.Slot(31),
                ParentRoot: make([]byte, 32),
                StateRoot:  make([]byte, 32),
                BodyRoot:   make([]byte, 32),
            },
        },
        FinalityBranch: finalityBranch,
        AttestedHeader: &ethpb.LightClientHeaderAltair{
            Beacon: &ethpb.BeaconBlockHeader{
                Slot:       primitives.Slot(32),
                ParentRoot: make([]byte, 32),
                StateRoot:  make([]byte, 32),
                BodyRoot:   make([]byte, 32),
            },
        },
        SyncAggregate: &ethpb.SyncAggregate{
            SyncCommitteeBits:      make([]byte, 64),
            SyncCommitteeSignature: make([]byte, 96),
        },
        SignatureSlot: primitives.Slot(33),
    }
    return lightclienttypes.NewWrappedFinalityUpdateAltair(pbUpdate)
}

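A short usage example tying these helpers to the store API from the earlier hunks — roughly how a test that already has a Service s might seed the last-seen finality update (sketch only; the surrounding test scaffolding is assumed):

update, err := util.MockFinalityUpdate()
require.NoError(t, err)
// Seed the store without re-broadcasting, matching the gossip subscriber's usage.
s.lcStore.SetLastFinalityUpdate(update, false)
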
@@ -18,4 +18,4 @@ func init() {
func TestAnalyzer(t *testing.T) {
    testdata := analysistest.TestData()
    analysistest.RunWithSuggestedFixes(t, testdata, logcapitalization.Analyzer, "a")
}