Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: hashtree-i...peerdas-ge (251 commits)
Commits in this range (SHA1):

f8716d8f77 58795d5ce3 c558798fe8 ba1699fdee adf62a6b45 9e5b3fb599 eaf4b4f9bf
0b0b7ff0a9 f1be39f7f1 3815ff4c28 76a0759e13 5cd2d99606 1a2a0688e1 6d0524dcf5
8ec9da81c0 facb70e12c 3d91b35f4e dc70dae9d0 9e2c04400c 60058266e8 291c4ac9b5
045776ff75 0a386cbdfd 4f02e44446 41600b67e3 cec236ff7d 62dac40734 d3763d56cf
461fa50c34 149e220b61 7b059560f6 ae4b982a6c 111e5c462f f330021785 6d4e1d5f7a
bd6b4ecd5b 415622ec49 df65458834 2005d5c6f2 7d72fbebe7 43c111bca2 d7d8764a91
9b7f91d947 57e27199bd 685761666d 41c2f1d802 a75974b5f5 0725dff5e8 0d95d3d022
384270f9a7 8e9d3f5f4f d6d542889c f8e6b9d1a8 8f25d1e986 81e9fda34b ede560bee1
34a1bf835a b0bceac9c0 0ff2d2fa21 8477a84454 e95d1c54cf 4af3763013 a520db7276
f8abf0565f 11a6af9bf9 6f8a654874 f0c01fdb4b a015ae6a29 457aa117f3 d302b494df
b3db1b6b74 66e4d5e816 41f109aa5b cfd4ceb4dd df211c3384 89e78d7da3 e76ea84596
f10d6e8e16 91eb43b595 90710ec57d 3dc65f991e 4d9789401b f72d59b004 e25497be3e
8897a26f84 b2a26f2b62 09659010f8 589042df20 312b93e9b1 f86f76e447 c311e652eb
6a5d78a331 a2fd30497e a94561f8dc af875b78c9 61207bd3ac 0b6fcd7d17 fe2766e716
9135d765e1 eca87f29d1 00821c8f55 4b9e92bcd7 b01d9005b8 8d812d5f0e 24a3cb2a8b
66d1d3e248 99933678ea 34f8e1e92b a6a41a8755 f110b94fac 33023aa282 eeb3cdc99e
1e7147f060 8936beaff3 c00283f247 a4269cf308 91f3c8a4d0 30c7ee9c7b 456d8b9eb9
4fe3e6d31a 01ee1c80b4 c14fe47a81 b9deabbf0a 5d66a98e78 2d46d6ffae 57107e50a7
47271254f6 f304028874 8abc5e159a b1ac53c4dd 27ab68c856 ddf5a3953b 92d2fc101d
8996000d2b a2fcba2349 abe8638991 0b5064b474 da9d4cf5b9 a62cca15dd ac04246a2a
0923145bd7 a216cb4105 01705d1f3d 14f93b4e9d ad11036c36 632a06076b 242c2b0268
19662da905 7faee5af35 805ee1bf31 bea46fdfa1 f6b1fb1c88 6fb349ea76 e5a425f5c7
f157d37e4c 5f08559bef a082d2aecd bcfaff8504 d8e09c346f 876519731b de05b83aca
56c73e7193 859ac008a8 f882bd27c8 361e5759c1 34ef0da896 726e8b962f 453ea01deb
6537f8011e 5f17317c1c 3432ffa4a3 9dac67635b 9be69fbd07 e21261e893 da53a8fc48
a14634e656 43761a8066 01dbc337c0 92f9b55fcb f65f12f58b f2b61a3dcf 77a6d29a2e
31d16da3a0 19221b77bd 83df293647 c20c09ce36 2191faaa3f 2de1e6f3e4 db44df3964
f92eb44c89 a26980b64d f58cf7e626 68da7dabe2 d1e43a2c02 3652bec2f8 81b7a1725f
0c917079c4 a732fe7021 d75a7aae6a e788a46e82 199543125a ca63efa770 345e6edd9c
6403064126 0517d76631 000d480f77 b40a8ed37e d21c2bd63e 7a256e93f7 07fe76c2da
54affa897f ac4c5fae3c 2845d87077 dc2c90b8ed b469157e1f 2697794e58 48cf24edb4
78f90db90b d0a3b9bc1d bfdb6dab86 7dd2fd52af b6bad9331b 6e2122085d 7a847292aa
81f4db0afa a7dc2e6c8b 0a010b5088 1e335e2cf2 42f4c0f14e d3c12abe25 b0ba05b4f4
e206506489 013cb28663 496914cb39 c032e78888 5e4deff6fd 6daa91c465 32ce6423eb
b0ea450df5 8bd10df423 dcbb543be2 be0580e1a9 1355178115 b78c3485b9 f503efc6ed
1bfbd3980e 3e722ea1bc d844026433 9ffc19d5ef 3e23f6e879 c688c84393
@@ -1727,7 +1727,7 @@ func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
t.Run("Interface signature verification", func(t *testing.T) {
// This test verifies that the SubmitBlindedBlock method signature
// has been updated to return BlobsBundler interface

client := &Client{}

// Verify the method exists with the correct signature
@@ -36,6 +36,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//container/slice:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/validator"
"github.com/OffchainLabs/prysm/v6/container/slice"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -699,6 +700,11 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
// Add validation to check if the signature is valid BLS format
_, err = bls.SignatureFromBytes(sig)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}

return &eth.SyncCommitteeMessage{
Slot: primitives.Slot(slot),
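The hunk above adds an early well-formedness check: decoded signature bytes must parse as a BLS signature before the message is converted. A minimal, self-contained sketch of the same decode-then-parse pattern, using the `bls.SignatureFromBytes` call shown in the diff (the function name and sample input below are illustrative, not from this PR):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// validateSignature decodes a hex string and checks that the bytes parse as a
// valid BLS signature, mirroring the check the hunk adds to ToConsensus.
func validateSignature(sigHex string) error {
	sig, err := hexutil.Decode(sigHex)
	if err != nil {
		return fmt.Errorf("decode signature: %w", err)
	}
	if _, err := bls.SignatureFromBytes(sig); err != nil {
		return fmt.Errorf("not a valid BLS signature: %w", err)
	}
	return nil
}

func main() {
	// An all-zero 96-byte blob is hex-decodable but should not parse as a BLS signature.
	fmt.Println(validateSignature("0x" + strings.Repeat("00", 96)))
}
```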
@@ -73,6 +73,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -194,6 +195,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -174,6 +174,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), arg.headBlock, arg.headRoot, nextSlot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),

@@ -102,8 +102,6 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
log.WithError(err).Error("Could not save head")
}

go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), args.headBlock, args.headRoot, s.CurrentSlot()+1)

// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
@@ -36,7 +35,7 @@ func WithMaxGoroutines(x int) Option {
// WithLCStore for light client store access.
func WithLCStore() Option {
return func(s *Service) error {
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB)
s.lcStore = lightclient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
return nil
}
}
@@ -235,14 +234,6 @@ func WithSyncChecker(checker Checker) Option {
}
}

// WithCustodyInfo sets the custody info for the blockchain service.
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) Option {
return func(s *Service) error {
s.cfg.CustodyInfo = custodyInfo
return nil
}
}

// WithSlasherEnabled sets whether the slasher is enabled or not.
func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
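These hunks all use the blockchain service's functional-option style: each `WithX` helper returns an `Option` closure that mutates the service while it is being constructed. A generic, runnable sketch of that pattern with made-up field names (not Prysm's actual types):

```go
package main

import "fmt"

type Service struct{ maxGoroutines int }

// Option mutates a Service during construction and may fail.
type Option func(*Service) error

// WithMaxGoroutines mirrors the shape of the options shown in the diff.
func WithMaxGoroutines(x int) Option {
	return func(s *Service) error {
		s.maxGoroutines = x
		return nil
	}
}

// NewService applies each option in order, stopping at the first error.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, _ := NewService(WithMaxGoroutines(8))
	fmt.Println(s.maxGoroutines) // 8
}
```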
@@ -666,7 +666,9 @@ func (s *Service) areDataColumnsAvailable(
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS.
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
@@ -689,16 +691,20 @@ func (s *Service) areDataColumnsAvailable(
}

// All columns to sample need to be available for the block to be considered available.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/das-core.md#custody-sampling
nodeID := s.cfg.P2P.NodeID()

// Prevent custody group count to change during the rest of the function.
s.cfg.CustodyInfo.Mut.RLock()
defer s.cfg.CustodyInfo.Mut.RUnlock()

// Get the custody group sampling size for the node.
custodyGroupSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}

// Compute the sampling size.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
samplingSize := max(samplesPerSlot, custodyGroupCount)

// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}
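The change above derives the sampling size as `max(SamplesPerSlot, CustodyGroupCount)` taken from the P2P layer, instead of reading a sampling size off the custody-info object. A tiny standalone illustration of that computation (the numbers are examples only, not the real config values):

```go
package main

import "fmt"

// samplingSize returns the number of custody groups a node must sample:
// at least samplesPerSlot, and never fewer than the groups it custodies.
func samplingSize(samplesPerSlot, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount) // Go 1.21+ builtin max
}

func main() {
	// A node custodying 4 groups with 8 samples per slot still samples 8;
	// a node custodying 128 groups samples all 128.
	fmt.Println(samplingSize(8, 4))   // 8
	fmt.Println(samplingSize(8, 128)) // 128
}
```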
@@ -895,6 +901,118 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
}
}

// areDataColumnsImmediatelyAvailable checks if all required data columns are currently
// available in the database without waiting for missing ones.
func (s *Service) areDataColumnsImmediatelyAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}

body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}

kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}

// If block has no commitments there is nothing to check.
if len(kzgCommitments) == 0 {
return nil
}

// All columns to sample need to be available for the block to be considered available.
nodeID := s.cfg.P2P.NodeID()

// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count error")
}

samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Compute the sampling size.
samplingSize := max(samplesPerSlot, custodyGroupCount)

// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return errors.Wrap(err, "peer info")
}

// Get the count of data columns we already have in the store.
summary := s.dataColumnStorage.Summary(root)
storedDataColumnsCount := summary.Count()

minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()

// As soon as we have enough data column sidecars, we can reconstruct the missing ones.
// We don't need to wait for the rest of the data columns to declare the block as available.
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
return nil
}

// Get a map of data column indices that are not currently available.
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}

// If there are no missing indices, all data column sidecars are available.
if len(missingMap) == 0 {
return nil
}

// If any data is missing, return error immediately (don't wait)
missingIndices := uint64MapToSortedSlice(missingMap)
return fmt.Errorf("data columns not immediately available, missing %v", missingIndices)
}

// areBlobsImmediatelyAvailable checks if all required blobs are currently
// available in the database without waiting for missing ones.
func (s *Service) areBlobsImmediatelyAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
}

body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// expected is the number of kzg commitments observed in the block.
expected := len(kzgCommitments)
if expected == 0 {
return nil
}
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingBlobIndices(s.blobStorage, root, kzgCommitments, block.Slot())
if err != nil {
return errors.Wrap(err, "missing indices")
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
return nil
}

// If any blobs are missing, return error immediately (don't wait)
missingIndices := uint64MapToSortedSlice(missing)
return fmt.Errorf("blobs not immediately available, missing %v", missingIndices)
}

// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
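The hunk is cut off inside `uint64MapToSortedSlice`. A plausible completion of the helper, assuming only the standard library (`slices`, Go 1.21+); the PR's actual body may differ:

```go
package main

import (
	"fmt"
	"slices"
)

// uint64MapToSortedSlice produces a sorted uint64 slice from a map's keys.
// This is a guess at the part of the helper truncated by the hunk above.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
	output := make([]uint64, 0, len(input))
	for idx := range input {
		output = append(output, idx)
	}
	slices.Sort(output)
	return output
}

func main() {
	fmt.Println(uint64MapToSortedSlice(map[uint64]bool{42: true, 7: true, 19: true})) // [7 19 42]
}
```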
@@ -1,7 +1,6 @@
package blockchain

import (
"bytes"
"context"
"fmt"
"strings"
@@ -198,8 +197,7 @@ func (s *Service) processLightClientFinalityUpdate(

finalizedCheckpoint := attestedState.FinalizedCheckpoint()

// Check if the finalized checkpoint has changed
if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
if finalizedCheckpoint == nil {
return nil
}

@@ -224,17 +222,7 @@ func (s *Service) processLightClientFinalityUpdate(
return nil
}

log.Debug("Saving new light client finality update")
s.lcStore.SetLastFinalityUpdate(newUpdate)

s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: newUpdate,
})

if err = s.cfg.P2P.BroadcastLightClientFinalityUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client finality update")
}
s.lcStore.SetLastFinalityUpdate(newUpdate, true)

return nil
}
@@ -266,17 +254,7 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
return nil
}

log.Debug("Saving new light client optimistic update")
s.lcStore.SetLastOptimisticUpdate(newUpdate)

s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: newUpdate,
})

if err = s.cfg.P2P.BroadcastLightClientOptimisticUpdate(ctx, newUpdate); err != nil {
return errors.Wrap(err, "could not broadcast light client optimistic update")
}
s.lcStore.SetLastOptimisticUpdate(newUpdate, true)

return nil
}
@@ -2894,7 +2894,6 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
}
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
@@ -2907,7 +2906,6 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
|
||||
t.Run("Fulu - no missing data columns", func(t *testing.T) {
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
columnsToSave: []uint64{1, 17, 19, 42, 75, 87, 102, 117, 119}, // 119 is not needed
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
@@ -2922,7 +2920,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
|
||||
|
||||
blobKzgCommitmentsCount: 3,
|
||||
@@ -2959,6 +2957,9 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
err = service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
@@ -2971,10 +2972,6 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(cgc)
|
||||
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
|
||||
indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
|
||||
|
||||
@@ -2983,12 +2980,14 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
_, _, err := service.cfg.P2P.UpdateCustodyInfo(0, cgc)
|
||||
require.NoError(t, err)
|
||||
block := signed.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
@@ -3020,6 +3019,9 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
|
||||
defer cancel()
|
||||
|
||||
err = service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
@@ -3028,7 +3030,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
options: []Option{WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
@@ -3170,7 +3172,7 @@ func TestProcessLightClientOptimisticUpdate(t *testing.T) {
|
||||
|
||||
t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
s.lcStore = &lightClient.Store{}
|
||||
s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
|
||||
|
||||
var oldActualUpdate interfaces.LightClientOptimisticUpdate
|
||||
var err error
|
||||
@@ -3246,39 +3248,39 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
expectReplace: true,
|
||||
},
|
||||
{
|
||||
name: "Old update is better - age - no supermajority",
|
||||
name: "Old update is better - finalized slot is higher",
|
||||
oldOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
|
||||
newOptions: []util.LightClientOption{},
|
||||
expectReplace: false,
|
||||
},
|
||||
{
|
||||
name: "Old update is better - age - both supermajority",
|
||||
oldOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
|
||||
newOptions: []util.LightClientOption{util.WithSupermajority()},
|
||||
expectReplace: false,
|
||||
},
|
||||
{
|
||||
name: "Old update is better - supermajority",
|
||||
oldOptions: []util.LightClientOption{util.WithSupermajority()},
|
||||
name: "Old update is better - attested slot is higher",
|
||||
oldOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
|
||||
newOptions: []util.LightClientOption{},
|
||||
expectReplace: false,
|
||||
},
|
||||
{
|
||||
name: "New update is better - age - both supermajority",
|
||||
oldOptions: []util.LightClientOption{util.WithSupermajority()},
|
||||
newOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1), util.WithSupermajority()},
|
||||
name: "Old update is better - signature slot is higher",
|
||||
oldOptions: []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
|
||||
newOptions: []util.LightClientOption{},
|
||||
expectReplace: false,
|
||||
},
|
||||
{
|
||||
name: "New update is better - finalized slot is higher",
|
||||
oldOptions: []util.LightClientOption{},
|
||||
newOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
|
||||
expectReplace: true,
|
||||
},
|
||||
{
|
||||
name: "New update is better - age - no supermajority",
|
||||
name: "New update is better - attested slot is higher",
|
||||
oldOptions: []util.LightClientOption{},
|
||||
newOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
|
||||
newOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
|
||||
expectReplace: true,
|
||||
},
|
||||
{
|
||||
name: "New update is better - supermajority",
|
||||
name: "New update is better - signature slot is higher",
|
||||
oldOptions: []util.LightClientOption{},
|
||||
newOptions: []util.LightClientOption{util.WithSupermajority()},
|
||||
newOptions: []util.LightClientOption{util.WithIncreasedSignatureSlot(1)},
|
||||
expectReplace: true,
|
||||
},
|
||||
}
|
||||
@@ -3310,7 +3312,7 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
|
||||
t.Run(version.String(testVersion)+"_"+tc.name, func(t *testing.T) {
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(forkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
s.lcStore = &lightClient.Store{}
|
||||
s.lcStore = lightClient.NewLightClientStore(s.cfg.BeaconDB, s.cfg.P2P, s.cfg.StateNotifier.StateFeed())
|
||||
|
||||
var actualOldUpdate, actualNewUpdate interfaces.LightClientFinalityUpdate
|
||||
var err error
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
@@ -31,6 +30,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -38,12 +39,22 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DataAvailabilityChecker defines an interface for checking if data is available
|
||||
// for a given block root. This interface is implemented by the blockchain service
|
||||
// which has knowledge of the beacon chain's data availability requirements.
|
||||
// Returns nil if data is available, ErrDataNotAvailable if data is not available,
|
||||
// or another error for other failures.
|
||||
type DataAvailabilityChecker interface {
|
||||
IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error
|
||||
}
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
@@ -97,7 +108,6 @@ type config struct {
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
SyncChecker Checker
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// Checker is an interface used to determine if a node is in initial sync
|
||||
@@ -108,25 +118,32 @@ type Checker interface {
|
||||
|
||||
var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")
|
||||
|
||||
// ErrDataNotAvailable is returned when block data is not immediately available for processing.
|
||||
var ErrDataNotAvailable = errors.New("block data is not available")
|
||||
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][]bool
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex map[[32]byte][]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if idx >= uint64(maxBlobsPerBlock) {
|
||||
return
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// if idx >= uint64(maxBlobsPerBlock) {
|
||||
// return
|
||||
// }
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
if seen == nil {
|
||||
seen = make([]bool, maxBlobsPerBlock)
|
||||
}
|
||||
// TODO: Separate blobs from data columns
|
||||
// if seen == nil {
|
||||
// seen = make([]bool, maxBlobsPerBlock)
|
||||
// }
|
||||
if seen[idx] {
|
||||
bn.Unlock()
|
||||
return
|
||||
@@ -137,7 +154,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -147,12 +166,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
|
||||
}
|
||||
|
||||
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
// TODO: Separate blobs from data columns
|
||||
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
bn.Lock()
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, maxBlobsPerBlock)
|
||||
// TODO: Separate blobs from data columns
|
||||
// c = make(chan uint64, maxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
@@ -178,7 +200,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
@@ -296,6 +320,20 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get and save custody group count")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
|
||||
return errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -516,6 +554,83 @@ func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}

// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets

beaconConfig := params.BeaconConfig()
custodyRequirement := beaconConfig.CustodyRequirement

// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
}

// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
}

// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = beaconConfig.NumberOfColumns
}

// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}

// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}

earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}

return earliestAvailableSlot, custodyGroupCount, nil
}

// IsDataAvailable implements the DataAvailabilityChecker interface for use by the execution service.
// It checks if all required blob and data column data is immediately available in the database without waiting.
func (s *Service) IsDataAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
block := signedBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}

blockVersion := block.Version()

if blockVersion >= version.Fulu {
if err := s.areDataColumnsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
return errors.Wrap(ErrDataNotAvailable, err.Error())
}
return nil
}

if blockVersion >= version.Deneb {
if err := s.areBlobsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
return errors.Wrap(ErrDataNotAvailable, err.Error())
}
}

return nil
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
@@ -532,3 +647,19 @@
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}

func fuluForkSlot() (primitives.Slot, error) {
beaconConfig := params.BeaconConfig()

fuluForkEpoch := beaconConfig.FuluForkEpoch
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
return beaconConfig.FarFutureSlot, nil
}

forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}

return forkFuluSlot, nil
}
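`IsDataAvailable` wraps the `ErrDataNotAvailable` sentinel declared elsewhere in this diff, so a caller can tell "not stored yet" apart from a hard failure. A hedged sketch of how a consumer might branch on it (this caller is illustrative, not code from the PR, and it assumes `github.com/pkg/errors` wrapping remains compatible with `errors.Is`, which holds for pkg/errors v0.9.0 and later):

```go
package consumer

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/pkg/errors"
)

// processIfAvailable treats ErrDataNotAvailable as "retry later" and any
// other error as fatal. Purely a usage sketch of the API added above.
func processIfAvailable(ctx context.Context, chain *blockchain.Service, root [32]byte, blk interfaces.ReadOnlySignedBeaconBlock) (retry bool, err error) {
	if err := chain.IsDataAvailable(ctx, root, blk); err != nil {
		if errors.Is(err, blockchain.ErrDataNotAvailable) {
			return true, nil // columns/blobs not stored yet; try again later
		}
		return false, err
	}
	return false, nil
}
```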
@@ -568,7 +568,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][]bool),
|
||||
// TODO: Separate blobs from data columns
|
||||
// seenIndex: make(map[[32]byte][]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -54,6 +55,7 @@ type mockBroadcaster struct {
|
||||
|
||||
type mockAccessor struct {
|
||||
mockBroadcaster
|
||||
mockCustodyManager
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
@@ -97,6 +99,43 @@ func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.Sig
|
||||
|
||||
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
|
||||
|
||||
// mockCustodyManager is a mock implementation of p2p.CustodyManager
|
||||
type mockCustodyManager struct {
|
||||
mut sync.RWMutex
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
dch.mut.RLock()
|
||||
defer dch.mut.RUnlock()
|
||||
|
||||
return dch.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
|
||||
dch.mut.RLock()
|
||||
defer dch.mut.RUnlock()
|
||||
|
||||
return dch.custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
dch.mut.Lock()
|
||||
defer dch.mut.Unlock()
|
||||
|
||||
dch.earliestAvailableSlot = earliestAvailableSlot
|
||||
dch.custodyGroupCount = custodyGroupCount
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
func (dch *mockCustodyManager) CustodyGroupCountFromPeer(peer.ID) uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var _ p2p.CustodyManager = (*mockCustodyManager)(nil)
|
||||
|
||||
type testServiceRequirements struct {
|
||||
ctx context.Context
|
||||
db db.Database
|
||||
|
||||
@@ -732,6 +732,11 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
|
||||
return c.TargetRoot, nil
|
||||
}
|
||||
|
||||
// IsDataAvailable implements the data availability checker interface for testing
|
||||
func (c *ChainService) IsDataAvailable(_ context.Context, _ [32]byte, _ interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MockSyncChecker is a mock implementation of blockchain.Checker.
|
||||
// We can't make an assertion here that this is true because that would create a circular dependency.
|
||||
type MockSyncChecker struct {
|
||||
|
||||
@@ -102,13 +102,13 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
return nil, err
|
||||
} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
|
||||
// if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
|
||||
log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
log.Debug("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
continue
|
||||
}
|
||||
|
||||
vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
|
||||
if !exists {
|
||||
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
|
||||
log.WithField("validator", hexutil.Encode(wr.ValidatorPubkey)).Debug("Skipping execution layer withdrawal request, validator index not found")
|
||||
continue
|
||||
}
|
||||
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
|
||||
@@ -120,23 +120,23 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
wc := validator.GetWithdrawalCredentials()
|
||||
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
log.Debug("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
log.Debug("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
log.Debug("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
log.Debug("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// UpgradeToFulu updates inputs a generic state to return the version Fulu state.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
|
||||
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := &ethpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -10,8 +10,12 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -39,6 +43,9 @@ go_test(
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
|
||||
@@ -750,7 +750,9 @@ func UpdateHasSupermajority(syncAggregate *pb.SyncAggregate) bool {
return numActiveParticipants*3 >= maxActiveParticipants*2
}

func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
// IsFinalityUpdateValidForBroadcast checks if a finality update needs to be broadcasted.
// It is also used to check if an incoming gossiped finality update is valid for forwarding and saving.
func IsFinalityUpdateValidForBroadcast(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
return true
}
@@ -772,6 +774,35 @@ func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityU
return true
}

// IsBetterFinalityUpdate checks if the new finality update is better than the old one for saving.
// This does not concern broadcasting, but rather the decision of whether to save the new update.
// For broadcasting checks, use IsFinalityUpdateValidForBroadcast.
func IsBetterFinalityUpdate(newUpdate, oldUpdate interfaces.LightClientFinalityUpdate) bool {
if oldUpdate == nil {
return true
}

// Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
newFinalizedSlot := newUpdate.FinalizedHeader().Beacon().Slot
newAttestedSlot := newUpdate.AttestedHeader().Beacon().Slot

oldFinalizedSlot := oldUpdate.FinalizedHeader().Beacon().Slot
oldAttestedSlot := oldUpdate.AttestedHeader().Beacon().Slot

if newFinalizedSlot < oldFinalizedSlot {
return false
}
if newFinalizedSlot == oldFinalizedSlot {
if newAttestedSlot < oldAttestedSlot {
return false
}
if newAttestedSlot == oldAttestedSlot && newUpdate.SignatureSlot() <= oldUpdate.SignatureSlot() {
return false
}
}
return true
}

func IsBetterOptimisticUpdate(newUpdate, oldUpdate interfaces.LightClientOptimisticUpdate) bool {
if oldUpdate == nil {
return true
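The new `IsBetterFinalityUpdate` orders updates lexicographically by finalized slot, then attested slot, then signature slot. A compact illustration of that ordering on bare slot numbers (not the real update interface types):

```go
package main

import "fmt"

// better reproduces, on plain slot numbers, the ordering IsBetterFinalityUpdate
// applies to update objects: finalized slot first, then attested slot, then
// signature slot. Purely illustrative.
func better(newFin, newAtt, newSig, oldFin, oldAtt, oldSig uint64) bool {
	if newFin != oldFin {
		return newFin > oldFin
	}
	if newAtt != oldAtt {
		return newAtt > oldAtt
	}
	return newSig > oldSig
}

func main() {
	fmt.Println(better(10, 20, 31, 10, 20, 30)) // true: only the signature slot advanced
	fmt.Println(better(10, 21, 30, 11, 20, 30)) // false: the old finalized slot is higher
}
```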
@@ -4,7 +4,11 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/iface"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -16,13 +20,17 @@ type Store struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
beaconDB iface.HeadAccessDatabase
|
||||
lastFinalityUpdate interfaces.LightClientFinalityUpdate
|
||||
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate
|
||||
lastFinalityUpdate interfaces.LightClientFinalityUpdate // tracks the best finality update seen so far
|
||||
lastOptimisticUpdate interfaces.LightClientOptimisticUpdate // tracks the best optimistic update seen so far
|
||||
p2p p2p.Accessor
|
||||
stateFeed event.SubscriberSender
|
||||
}
|
||||
|
||||
func NewLightClientStore(db iface.HeadAccessDatabase) *Store {
|
||||
func NewLightClientStore(db iface.HeadAccessDatabase, p p2p.Accessor, e event.SubscriberSender) *Store {
|
||||
return &Store{
|
||||
beaconDB: db,
|
||||
beaconDB: db,
|
||||
p2p: p,
|
||||
stateFeed: e,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,10 +151,23 @@ func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update
return nil
}

func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate) {
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
}

s.lastFinalityUpdate = update
log.Debug("Saved new light client finality update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientFinalityUpdate,
Data: update,
})
}

func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
@@ -155,10 +176,23 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
return s.lastFinalityUpdate
}

func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate) {
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()

if broadcast {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
}

s.lastOptimisticUpdate = update
log.Debug("Saved new light client optimistic update")

s.stateFeed.Send(&feed.Event{
Type: statefeed.LightClientOptimisticUpdate,
Data: update,
})
}

func (s *Store) LastOptimisticUpdate() interfaces.LightClientOptimisticUpdate {
@@ -3,7 +3,10 @@ package light_client_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -21,7 +24,7 @@ func TestLightClientStore(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Initialize the light client store
|
||||
lcStore := &lightClient.Store{}
|
||||
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), &p2pTesting.FakeP2P{}, new(event.Feed))
|
||||
|
||||
// Create test light client updates for Capella and Deneb
|
||||
lCapella := util.NewTestLightClient(t, version.Capella)
|
||||
@@ -45,24 +48,118 @@ func TestLightClientStore(t *testing.T) {
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get finality with Capella update. Optimistic update should be nil
|
||||
lcStore.SetLastFinalityUpdate(finUpdateCapella)
|
||||
lcStore.SetLastFinalityUpdate(finUpdateCapella, false)
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
|
||||
// Set and get optimistic with Capella update. Finality update should be Capella
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateCapella)
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateCapella, false)
|
||||
require.Equal(t, opUpdateCapella, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
require.Equal(t, finUpdateCapella, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
|
||||
// Set and get finality and optimistic with Deneb update
|
||||
lcStore.SetLastFinalityUpdate(finUpdateDeneb)
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateDeneb)
|
||||
lcStore.SetLastFinalityUpdate(finUpdateDeneb, false)
|
||||
lcStore.SetLastOptimisticUpdate(opUpdateDeneb, false)
|
||||
require.Equal(t, finUpdateDeneb, lcStore.LastFinalityUpdate(), "lastFinalityUpdate is wrong")
|
||||
require.Equal(t, opUpdateDeneb, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate is wrong")
|
||||
|
||||
// Set and get finality and optimistic with nil update
|
||||
lcStore.SetLastFinalityUpdate(nil)
|
||||
lcStore.SetLastOptimisticUpdate(nil)
|
||||
require.IsNil(t, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should be nil")
|
||||
require.IsNil(t, lcStore.LastOptimisticUpdate(), "lastOptimisticUpdate should be nil")
|
||||
}
|
||||
|
||||
func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := lightClient.NewLightClientStore(testDB.SetupDB(t), p2p, new(event.Feed))
|
||||
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update0, lcStore.LastFinalityUpdate()), "update0 should be better than nil")
|
||||
// update0 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update0, lcStore.LastFinalityUpdate()), "update0 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
l1 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(1))
|
||||
update1, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l1.Ctx, l1.State, l1.Block, l1.AttestedState, l1.AttestedBlock, l1.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update1, update0), "update1 should be better than update0")
|
||||
// update1 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update1, lcStore.LastFinalityUpdate()), "update1 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 2 with same finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
l2 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
|
||||
update2, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l2.Ctx, l2.State, l2.Block, l2.AttestedState, l2.AttestedBlock, l2.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update2, update1), "update2 should be better than update1")
|
||||
// update2 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update2, lcStore.LastFinalityUpdate()), "update2 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
l3 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedAttestedSlot(3), util.WithSupermajority())
|
||||
update3, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l3.Ctx, l3.State, l3.Block, l3.AttestedState, l3.AttestedBlock, l3.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update3, update2), "update3 should be better than update2")
|
||||
// update3 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update3, lcStore.LastFinalityUpdate()), "update3 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
l4 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(1), util.WithSupermajority())
|
||||
update4, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l4.Ctx, l4.State, l4.Block, l4.AttestedState, l4.AttestedBlock, l4.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update4, update3), "update4 should be better than update3")
|
||||
// update4 should be valid for broadcast - meaning it should be broadcasted
|
||||
require.Equal(t, true, lightClient.IsFinalityUpdateValidForBroadcast(update4, lcStore.LastFinalityUpdate()), "update4 should be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
l5 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(2), util.WithSupermajority())
|
||||
update5, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l5.Ctx, l5.State, l5.Block, l5.AttestedState, l5.AttestedBlock, l5.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update5, update4), "update5 should be better than update4")
|
||||
// update5 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update5, lcStore.LastFinalityUpdate()), "update5 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
l6 := util.NewTestLightClient(t, version.Altair, util.WithIncreasedFinalizedSlot(1), util.WithIncreasedAttestedSlot(3))
|
||||
update6, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l6.Ctx, l6.State, l6.Block, l6.AttestedState, l6.AttestedBlock, l6.FinalizedBlock)
|
||||
require.NoError(t, err, "Failed to create light client finality update")
|
||||
|
||||
require.Equal(t, true, lightClient.IsBetterFinalityUpdate(update6, update5), "update6 should be better than update5")
|
||||
// update6 should not be valid for broadcast - meaning it should not be broadcasted
|
||||
require.Equal(t, false, lightClient.IsFinalityUpdateValidForBroadcast(update6, lcStore.LastFinalityUpdate()), "update6 should not be valid for broadcast")
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
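
The following is a compact, hypothetical summary of the behavior the test above exercises; it is not the actual store implementation. The predicate names are the ones used in the test, and the interfaces.LightClientFinalityUpdate parameter type is an assumption.

// decideFinalityUpdate sketches the save/broadcast rule exercised by
// TestLightClientStore_SetLastFinalityUpdate: a better update is always stored,
// but it is broadcast only when it is also valid for broadcast (first update,
// a newer finalized slot, or the first supermajority for the current finalized slot).
func decideFinalityUpdate(newUpdate, current interfaces.LightClientFinalityUpdate) (save, broadcast bool) {
	if !lightClient.IsBetterFinalityUpdate(newUpdate, current) {
		return false, false
	}
	return true, lightClient.IsFinalityUpdateValidForBroadcast(newUpdate, current)
}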
|
||||
|
||||
@@ -16,7 +16,6 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -53,7 +52,6 @@ go_test(
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -31,15 +31,8 @@ var (
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
type CustodyType int
|
||||
|
||||
const (
|
||||
Target CustodyType = iota
|
||||
Actual
|
||||
)
|
||||
|
||||
// CustodyGroups computes the custody groups the node should participate in for custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#get_custody_groups
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#get_custody_groups
|
||||
func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
@@ -102,7 +95,7 @@ func CustodyGroups(nodeId enode.ID, custodyGroupCount uint64) ([]uint64, error)
|
||||
}
|
||||
|
||||
// ComputeColumnsForCustodyGroup computes the columns for a given custody group.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#compute_columns_for_custody_group
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#compute_columns_for_custody_group
|
||||
func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfCustodyGroups := beaconConfig.NumberOfCustodyGroups
|
||||
@@ -127,7 +120,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
|
||||
// The returned value contains pointers to function parameters.
|
||||
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars_from_block
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
|
||||
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
|
||||
return nil, nil
|
||||
@@ -176,19 +169,6 @@ func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
return columnIndex % numberOfCustodyGroups, nil
|
||||
}
|
||||
|
||||
// CustodyGroupSamplingSize returns the number of custody groups the node should sample from.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/das-core.md#custody-sampling
|
||||
func (custodyInfo *CustodyInfo) CustodyGroupSamplingSize(ct CustodyType) uint64 {
|
||||
custodyGroupCount := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
if ct == Actual {
|
||||
custodyGroupCount = custodyInfo.ActualGroupCount()
|
||||
}
|
||||
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
return max(samplesPerSlot, custodyGroupCount)
|
||||
}
|
||||
|
||||
// CustodyColumns computes the custody columns from the custody groups.
|
||||
func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
@@ -219,7 +199,7 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
|
||||
// the KZG commitment inclusion proofs and cells and cell proofs.
|
||||
// The returned value contains pointers to function parameters.
|
||||
// (If the caller alters input parameters afterwards, the returned value will be modified as well.)
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.3/specs/fulu/validator.md#get_data_column_sidecars
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
|
||||
func dataColumnsSidecars(
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
blobKzgCommitments [][]byte,
|
||||
|
||||
@@ -104,62 +104,6 @@ func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupSamplingSize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
custodyType peerdas.CustodyType
|
||||
validatorsCustodyRequirement uint64
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "target, lower than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 2,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "target, higher than samples per slot",
|
||||
custodyType: peerdas.Target,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "actual, lower than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 3,
|
||||
toAdvertiseCustodyGroupCount: 4,
|
||||
expected: 8,
|
||||
},
|
||||
{
|
||||
name: "actual, higher than samples per slot",
|
||||
custodyType: peerdas.Actual,
|
||||
validatorsCustodyRequirement: 100,
|
||||
toAdvertiseCustodyGroupCount: 101,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a custody info.
|
||||
custodyInfo := peerdas.CustodyInfo{}
|
||||
|
||||
// Set the validators custody requirement for target custody group count.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Compute the custody group sampling size.
|
||||
actual := custodyInfo.CustodyGroupSamplingSize(tc.custodyType)
|
||||
|
||||
// Check the result.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyColumns(t *testing.T) {
|
||||
t.Run("group too large", func(t *testing.T) {
|
||||
_, err := peerdas.CustodyColumns([]uint64{1_000_000})
|
||||
|
||||
@@ -4,45 +4,17 @@ import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// info contains all useful peerDAS related information regarding a peer.
|
||||
type (
|
||||
info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
targetCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
validatorsCustodyRequirement uint64
|
||||
}
|
||||
|
||||
toAdverstiseCustodyGroupCount struct {
|
||||
mut sync.RWMutex
|
||||
value uint64
|
||||
}
|
||||
|
||||
CustodyInfo struct {
|
||||
// Mut is a mutex the caller can use to ensure that neither
// TargetCustodyGroupCount nor ToAdvertiseCustodyGroupCount is being modified.
// (Holding this mutex is not required for data protection.)
|
||||
Mut sync.RWMutex
|
||||
|
||||
// TargetGroupCount represents the target number of custody groups we should custody,
// given the validators we are tracking.
|
||||
TargetGroupCount targetCustodyGroupCount
|
||||
|
||||
// ToAdvertiseGroupCount represents the number of custody groups to advertise to the network.
|
||||
ToAdvertiseGroupCount toAdverstiseCustodyGroupCount
|
||||
}
|
||||
)
|
||||
type info struct {
|
||||
CustodyGroups map[uint64]bool
|
||||
CustodyColumns map[uint64]bool
|
||||
DataColumnsSubnets map[uint64]bool
|
||||
}
|
||||
|
||||
const (
|
||||
nodeInfoCacheSize = 200
|
||||
@@ -109,61 +81,6 @@ func Info(nodeID enode.ID, custodyGroupCount uint64) (*info, bool, error) {
|
||||
return result, false, nil
|
||||
}
|
||||
|
||||
// ActualGroupCount returns the actual custody group count.
|
||||
func (custodyInfo *CustodyInfo) ActualGroupCount() uint64 {
|
||||
return min(custodyInfo.TargetGroupCount.Get(), custodyInfo.ToAdvertiseGroupCount.Get())
|
||||
}
|
||||
|
||||
// Get returns the number of custody groups we should participate in for custody.
|
||||
func (tcgc *targetCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
tcgc.mut.RLock()
|
||||
defer tcgc.mut.RUnlock()
|
||||
|
||||
// If no validators are tracked, return the default custody requirement.
|
||||
if tcgc.validatorsCustodyRequirement == 0 {
|
||||
return params.BeaconConfig().CustodyRequirement
|
||||
}
|
||||
|
||||
// Return the validators custody requirement.
|
||||
return tcgc.validatorsCustodyRequirement
|
||||
}
|
||||
|
||||
// SetValidatorsCustodyRequirement sets the validators custody requirement.
|
||||
func (tcgc *targetCustodyGroupCount) SetValidatorsCustodyRequirement(value uint64) {
|
||||
tcgc.mut.Lock()
|
||||
defer tcgc.mut.Unlock()
|
||||
|
||||
tcgc.validatorsCustodyRequirement = value
|
||||
}
|
||||
|
||||
// Get returns the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Get() uint64 {
|
||||
// If subscribed to all subnets, return the number of custody groups.
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return params.BeaconConfig().NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
tacgc.mut.RLock()
|
||||
defer tacgc.mut.RUnlock()
|
||||
|
||||
return max(tacgc.value, custodyRequirement)
|
||||
}
|
||||
|
||||
// Set sets the to advertise custody group count.
|
||||
func (tacgc *toAdverstiseCustodyGroupCount) Set(value uint64) {
|
||||
tacgc.mut.Lock()
|
||||
defer tacgc.mut.Unlock()
|
||||
|
||||
tacgc.value = value
|
||||
}
|
||||
|
||||
// createInfoCacheIfNeeded creates a new cache if it doesn't exist.
|
||||
func createInfoCacheIfNeeded() error {
|
||||
nodeInfoCacheMut.Lock()
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
@@ -26,108 +25,3 @@ func TestInfo(t *testing.T) {
|
||||
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTargetCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllColumns bool
|
||||
validatorsCustodyRequirement uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all data subnets",
|
||||
subscribeToAllColumns: true,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "no validators attached",
|
||||
subscribeToAllColumns: false,
|
||||
validatorsCustodyRequirement: 0,
|
||||
expected: 4,
|
||||
},
|
||||
{
|
||||
name: "some validators attached",
|
||||
subscribeToAllColumns: false,
|
||||
validatorsCustodyRequirement: 100,
|
||||
expected: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllColumns {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the validators custody requirement.
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(tc.validatorsCustodyRequirement)
|
||||
|
||||
// Get the target custody group count.
|
||||
actual := custodyInfo.TargetGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToAdvertiseCustodyGroupCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllColumns bool
|
||||
toAdvertiseCustodyGroupCount uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribed to all subnets",
|
||||
subscribeToAllColumns: true,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 128,
|
||||
},
|
||||
{
|
||||
name: "higher than custody requirement",
|
||||
subscribeToAllColumns: false,
|
||||
toAdvertiseCustodyGroupCount: 100,
|
||||
expected: 100,
|
||||
},
|
||||
{
|
||||
name: "lower than custody requirement",
|
||||
subscribeToAllColumns: false,
|
||||
toAdvertiseCustodyGroupCount: 1,
|
||||
expected: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Subscribe to all subnets if needed.
|
||||
if tc.subscribeToAllColumns {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
}
|
||||
|
||||
// Create a custody info.
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
|
||||
// Set the to advertise custody group count.
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(tc.toAdvertiseCustodyGroupCount)
|
||||
|
||||
// Get the to advertise custody group count.
|
||||
actual := custodyInfo.ToAdvertiseGroupCount.Get()
|
||||
|
||||
// Compare the expected and actual values.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,10 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodyGroupCountEnrKey = "cgc"
|
||||
kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
)
|
||||
const kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
|
||||
var (
|
||||
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
@@ -27,13 +24,13 @@ var (
|
||||
ErrCannotLoadCustodyGroupCount = errors.New("cannot load the custody group count from peer")
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#custody-group-count
|
||||
type Cgc uint64
|
||||
|
||||
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
|
||||
func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }
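// Illustrative sketch (an assumption, not part of this change): because Cgc
// implements enr.Entry through ENRKey, a node can write and read the custody
// group count on its record using go-ethereum's enr package
// ("github.com/ethereum/go-ethereum/p2p/enr"):
//
//	var record enr.Record
//	record.Set(Cgc(8)) // advertise a custody group count of 8
//
//	var cgc Cgc
//	if err := record.Load(&cgc); err != nil {
//		// the entry is missing or could not be decoded
//	}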
|
||||
|
||||
// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// The sidecar index must be within the valid range.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
@@ -60,7 +57,7 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
|
||||
// This is done to improve performance since the internal KZG library is way more
|
||||
// efficient when verifying in batch.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
@@ -96,7 +93,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarInclusionProof verifies that the given KZG commitments are included in the given beacon block.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
|
||||
return ErrNilBlockHeader
|
||||
@@ -128,7 +125,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
}
|
||||
|
||||
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
return columnIndex % dataColumnSidecarSubnetCount
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
)
|
||||
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups to custody, based on the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/validator.md#validator-custody
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
totalNodeBalance := uint64(0)
|
||||
for index := range validatorsIndex {
|
||||
|
||||
@@ -39,10 +39,8 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -22,8 +22,8 @@ type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
nodeID enode.ID
|
||||
cache *dataColumnCache
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
|
||||
@@ -38,13 +38,18 @@ type DataColumnsVerifier interface {
|
||||
|
||||
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
|
||||
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
|
||||
func NewLazilyPersistentStoreColumn(
|
||||
store *filesystem.DataColumnStorage,
|
||||
nodeID enode.ID,
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
custodyGroupCount uint64,
|
||||
) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
nodeID: nodeID,
|
||||
cache: newDataColumnCache(),
|
||||
custodyInfo: custodyInfo,
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
custodyGroupCount: custodyGroupCount,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,6 +160,8 @@ func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, curre
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
@@ -183,11 +190,9 @@ func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, bl
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the groups count.
|
||||
custodyGroupCount := s.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
@@ -4,10 +4,8 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -29,7 +27,7 @@ var commitments = [][]byte{
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
@@ -44,7 +42,7 @@ func TestPersist(t *testing.T) {
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
@@ -59,7 +57,7 @@ func TestPersist(t *testing.T) {
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
@@ -76,7 +74,7 @@ func TestPersist(t *testing.T) {
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
@@ -114,7 +112,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
@@ -135,9 +133,9 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
indices := [...]uint64{1, 17, 87, 102}
|
||||
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
@@ -221,14 +219,10 @@ func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
|
||||
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -33,6 +33,7 @@ type ReadOnlyDatabase interface {
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
|
||||
EarliestSlot(ctx context.Context) (primitives.Slot, error)
|
||||
// State related methods.
|
||||
State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
StateOrError(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
|
||||
@@ -56,12 +57,11 @@ type ReadOnlyDatabase interface {
|
||||
// Fee recipients operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
// light client operations
|
||||
// Light client operations
|
||||
LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]interfaces.LightClientUpdate, error)
|
||||
LightClientUpdate(ctx context.Context, period uint64) (interfaces.LightClientUpdate, error)
|
||||
LightClientBootstrap(ctx context.Context, blockRoot []byte) (interfaces.LightClientBootstrap, error)
|
||||
|
||||
// origin checkpoint sync support
|
||||
// Origin checkpoint sync support
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillStatus(context.Context) (*dbval.BackfillStatus, error)
|
||||
}
|
||||
@@ -102,6 +102,10 @@ type NoHeadAccessDatabase interface {
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
}
|
||||
|
||||
// HeadAccessDatabase defines a struct with access to reading chain head data.
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"backup.go",
|
||||
"blocks.go",
|
||||
"checkpoint.go",
|
||||
"custody.go",
|
||||
"deposit_contract.go",
|
||||
"encoding.go",
|
||||
"error.go",
|
||||
@@ -83,6 +84,7 @@ go_test(
|
||||
"backup_test.go",
|
||||
"blocks_test.go",
|
||||
"checkpoint_test.go",
|
||||
"custody_test.go",
|
||||
"deposit_contract_test.go",
|
||||
"encoding_test.go",
|
||||
"execution_chain_test.go",
|
||||
|
||||
@@ -860,6 +860,47 @@ func (s *Store) SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primi
|
||||
})
|
||||
}
|
||||
|
||||
// EarliestSlot returns the earliest block slot stored in the database.
|
||||
func (s *Store) EarliestSlot(ctx context.Context) (primitives.Slot, error) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.EarliestSlot")
|
||||
defer span.End()
|
||||
|
||||
earliestAvailableSlot := primitives.Slot(0)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the root corresponding to the earliest available block.
|
||||
c := tx.Bucket(blockSlotIndicesBucket).Cursor()
|
||||
k, v := c.First()
|
||||
if k == nil || v == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
slot := bytesutil.BytesToSlotBigEndian(k)
|
||||
|
||||
// The genesis block may be indexed in this bucket, even if we started from a checkpoint.
|
||||
// Because of this, we check the next block. If the next block is still in the genesis epoch,
|
||||
// then we consider we have the whole chain.
|
||||
if slot != 0 {
|
||||
earliestAvailableSlot = slot
|
||||
}
|
||||
|
||||
k, v = c.Next()
|
||||
if k == nil || v == nil {
|
||||
// Only the genesis block is available.
|
||||
return nil
|
||||
}
|
||||
slot = bytesutil.BytesToSlotBigEndian(k)
|
||||
if slot < slotsPerEpoch {
|
||||
// We are still in the genesis epoch, so we consider we have the whole chain.
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot = slot
|
||||
return nil
|
||||
})
|
||||
|
||||
return earliestAvailableSlot, err
|
||||
}
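// Illustrative usage (hypothetical caller, not part of this change): an empty
// database yields ErrNotFound; otherwise the returned slot marks where locally
// available block history begins (see TestStore_EarliestSlot below for the
// exact edge cases around the genesis block and checkpoint sync).
//
//	slot, err := s.EarliestSlot(ctx)
//	if err != nil {
//		return err // ErrNotFound: no block is indexed yet
//	}
//	log.WithField("earliestSlot", slot).Debug("Earliest available block slot")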
|
||||
|
||||
type slotRoot struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
@@ -883,7 +924,7 @@ func (s *Store) slotRootsInRange(ctx context.Context, start, end primitives.Slot
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(key); ; /* rely on internal checks to exit */ k, v = c.Prev() {
|
||||
if len(k) == 0 && len(v) == 0 {
|
||||
// The `edge`` variable and this `if` deal with 2 edge cases:
|
||||
// The `edge` variable and this `if` deal with 2 edge cases:
|
||||
// - Seeking past the end of the bucket (the `end` param is higher than the highest slot).
|
||||
// - Seeking before the beginning of the bucket (the `start` param is lower than the lowest slot).
|
||||
// In both of these cases k,v will be nil and we can handle the same way using `edge` to
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -1327,3 +1328,86 @@ func TestStore_RegistrationsByValidatorID(t *testing.T) {
|
||||
want := errors.Wrap(ErrNotFoundFeeRecipient, "validator id 3")
|
||||
require.Equal(t, want.Error(), err.Error())
|
||||
}
|
||||
|
||||
// createAndSaveBlock creates a phase0 beacon block at the specified slot and saves it to the database.
|
||||
func createAndSaveBlock(t *testing.T, ctx context.Context, db *Store, slot primitives.Slot) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = slot
|
||||
|
||||
wrappedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrappedBlock))
|
||||
}
|
||||
|
||||
func TestStore_EarliestSlot(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("empty database returns ErrNotFound", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with only genesis block", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with genesis and blocks in genesis epoch", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
// Create and save a block in the genesis epoch
|
||||
createAndSaveBlock(t, ctx, db, primitives.Slot(slotsPerEpoch-1))
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), slot)
|
||||
})
|
||||
|
||||
t.Run("database with genesis and blocks beyond genesis epoch", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Create and save genesis block (slot 0)
|
||||
createAndSaveBlock(t, ctx, db, 0)
|
||||
|
||||
// Create and save a block beyond the genesis epoch
|
||||
nextEpochSlot := primitives.Slot(slotsPerEpoch)
|
||||
createAndSaveBlock(t, ctx, db, nextEpochSlot)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nextEpochSlot, slot)
|
||||
})
|
||||
|
||||
t.Run("database starting from checkpoint (non-zero earliest slot)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Simulate starting from a checkpoint by creating blocks starting from a later slot
|
||||
checkpointSlot := primitives.Slot(slotsPerEpoch * 10) // 10 epochs later
|
||||
nextEpochSlot := checkpointSlot + slotsPerEpoch
|
||||
|
||||
// Create and save first block at checkpoint slot
|
||||
createAndSaveBlock(t, ctx, db, checkpointSlot)
|
||||
|
||||
// Create and save another block in the next epoch
|
||||
createAndSaveBlock(t, ctx, db, nextEpochSlot)
|
||||
|
||||
slot, err := db.EarliestSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nextEpochSlot, slot)
|
||||
})
|
||||
}
|
||||
|
||||
129
beacon-chain/db/kv/custody.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// UpdateCustodyInfo atomically updates the custody group count, but only if it is greater than the stored one.
// In that case, it also updates the earliest available slot with the provided value.
// It returns the (potentially updated) earliest available slot and custody group count.
|
||||
func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateCustodyInfo")
|
||||
defer span.End()
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot := uint64(0), primitives.Slot(0)
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
// Retrieve the stored custody group count.
|
||||
storedGroupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(storedGroupCountBytes) != 0 {
|
||||
storedGroupCount = bytesutil.BytesToUint64BigEndian(storedGroupCountBytes)
|
||||
}
|
||||
|
||||
// Retrieve the stored earliest available slot.
|
||||
storedEarliestAvailableSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(storedEarliestAvailableSlotBytes) != 0 {
|
||||
storedEarliestAvailableSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(storedEarliestAvailableSlotBytes))
|
||||
}
|
||||
|
||||
// Exit early if the new custody group count is lower than or equal to the stored one.
|
||||
if custodyGroupCount <= storedGroupCount {
|
||||
return nil
|
||||
}
|
||||
|
||||
storedGroupCount, storedEarliestAvailableSlot = custodyGroupCount, earliestAvailableSlot
|
||||
|
||||
// Store the earliest available slot.
|
||||
bytes := bytesutil.Uint64ToBytesBigEndian(uint64(earliestAvailableSlot))
|
||||
if err := bucket.Put(earliestAvailableSlotKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put earliest available slot")
|
||||
}
|
||||
|
||||
// Store the custody group count.
|
||||
bytes = bytesutil.Uint64ToBytesBigEndian(custodyGroupCount)
|
||||
if err := bucket.Put(groupCountKey, bytes); err != nil {
|
||||
return errors.Wrap(err, "put custody group count")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"earliestAvailableSlot": storedEarliestAvailableSlot,
|
||||
"groupCount": storedGroupCount,
|
||||
}).Debug("Custody info")
|
||||
|
||||
return storedEarliestAvailableSlot, storedGroupCount, nil
|
||||
}
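// Illustrative usage (hypothetical caller, not part of this change): the update
// is monotonic in the group count, so a later call with a lower count leaves the
// stored values untouched and simply returns them.
//
//	// Stores (slot=100, count=8) on an empty database and returns that pair.
//	slot, count, err := store.UpdateCustodyInfo(ctx, 100, 8)
//
//	// count 4 < 8: nothing is written; (100, 8) is returned again.
//	slot, count, err = store.UpdateCustodyInfo(ctx, 200, 4)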
|
||||
|
||||
// UpdateSubscribedToAllDataSubnets updates the "subscribed to all data subnets" status in the database
|
||||
// only if `subscribed` is `true`.
|
||||
// It returns the previous subscription status.
|
||||
func (s *Store) UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.UpdateSubscribedToAllDataSubnets")
|
||||
defer span.End()
|
||||
|
||||
result := false
|
||||
if !subscribed {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the subscribe all data subnets flag.
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if bytes[0] == 1 {
|
||||
result = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the custody bucket.
|
||||
bucket, err := tx.CreateBucketIfNotExists(custodyBucket)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "create custody bucket")
|
||||
}
|
||||
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) != 0 && bytes[0] == 1 {
|
||||
result = true
|
||||
}
|
||||
|
||||
if err := bucket.Put(subscribeAllDataSubnetsKey, []byte{1}); err != nil {
|
||||
return errors.Wrap(err, "put subscribe all data subnets")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
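// Illustrative usage (hypothetical caller, not part of this change): the flag is
// a one-way latch. Passing `false` only reads the stored value; only `true` is
// ever written.
//
//	// Records the subscription; returns the previous value (false on first call).
//	prev, err := store.UpdateSubscribedToAllDataSubnets(ctx, true)
//
//	// Does not clear the flag; still returns true.
//	prev, err = store.UpdateSubscribedToAllDataSubnets(ctx, false)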
|
||||
176
beacon-chain/db/kv/custody_test.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// getCustodyInfoFromDB reads the custody info directly from the database for testing purposes.
|
||||
func getCustodyInfoFromDB(t *testing.T, db *Store) (primitives.Slot, uint64) {
|
||||
t.Helper()
|
||||
var earliestSlot primitives.Slot
|
||||
var groupCount uint64
|
||||
|
||||
err := db.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read group count
|
||||
groupCountBytes := bucket.Get(groupCountKey)
|
||||
if len(groupCountBytes) != 0 {
|
||||
groupCount = bytesutil.BytesToUint64BigEndian(groupCountBytes)
|
||||
}
|
||||
|
||||
// Read earliest available slot
|
||||
earliestSlotBytes := bucket.Get(earliestAvailableSlotKey)
|
||||
if len(earliestSlotBytes) != 0 {
|
||||
earliestSlot = primitives.Slot(bytesutil.BytesToUint64BigEndian(earliestSlotBytes))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return earliestSlot, groupCount
|
||||
}
|
||||
|
||||
// getSubscriptionStatusFromDB reads the subscription status directly from the database for testing purposes.
|
||||
func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
t.Helper()
|
||||
var subscribed bool
|
||||
|
||||
err := db.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(custodyBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bytes := bucket.Get(subscribeAllDataSubnetsKey)
|
||||
if len(bytes) != 0 && bytes[0] == 1 {
|
||||
subscribed = true
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return subscribed
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("initial update with empty database", func(t *testing.T) {
|
||||
const (
|
||||
earliestSlot = primitives.Slot(100)
|
||||
groupCount = uint64(5)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestSlot, slot)
|
||||
require.Equal(t, groupCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, earliestSlot, storedSlot)
|
||||
require.Equal(t, groupCount, storedCount)
|
||||
})
|
||||
|
||||
t.Run("update with higher group count", func(t *testing.T) {
|
||||
const (
|
||||
initialSlot = primitives.Slot(100)
|
||||
initialCount = uint64(5)
|
||||
earliestSlot = primitives.Slot(200)
|
||||
groupCount = uint64(10)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestSlot, slot)
|
||||
require.Equal(t, groupCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, earliestSlot, storedSlot)
|
||||
require.Equal(t, groupCount, storedCount)
|
||||
})
|
||||
|
||||
t.Run("update with lower group count should not update", func(t *testing.T) {
|
||||
const (
|
||||
initialSlot = primitives.Slot(200)
|
||||
initialCount = uint64(10)
|
||||
earliestSlot = primitives.Slot(300)
|
||||
groupCount = uint64(8)
|
||||
)
|
||||
|
||||
db := setupDB(t)
|
||||
|
||||
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
slot, count, err := db.UpdateCustodyInfo(ctx, earliestSlot, groupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, initialSlot, slot)
|
||||
require.Equal(t, initialCount, count)
|
||||
|
||||
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
|
||||
require.Equal(t, initialSlot, storedSlot)
|
||||
require.Equal(t, initialCount, storedCount)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateSubscribedToAllDataSubnets(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("initial update with empty database - set to false", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, false, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
|
||||
t.Run("attempt to update from true to false (should not change)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
_, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
prev, err := db.UpdateSubscribedToAllDataSubnets(ctx, true)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, prev)
|
||||
|
||||
stored := getSubscriptionStatusFromDB(t, db)
|
||||
require.Equal(t, true, stored)
|
||||
})
|
||||
}
|
||||
@@ -123,6 +123,7 @@ var Buckets = [][]byte{
|
||||
|
||||
feeRecipientBucket,
|
||||
registrationBucket,
|
||||
custodyBucket,
|
||||
}
|
||||
|
||||
// KVStoreOption is a functional option that modifies a kv.Store.
|
||||
|
||||
@@ -70,4 +70,10 @@ var (
|
||||
|
||||
// Migrations
|
||||
migrationsBucket = []byte("migrations")
|
||||
|
||||
// Custody
|
||||
custodyBucket = []byte("custody")
|
||||
groupCountKey = []byte("group-count")
|
||||
earliestAvailableSlotKey = []byte("earliest-available-slot")
|
||||
subscribeAllDataSubnetsKey = []byte("subscribe-all-data-subnets")
|
||||
)
|
||||
|
||||
@@ -43,6 +43,7 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
|
||||
}
|
||||
blk := wblk.Block()
|
||||
slot := blk.Slot()
|
||||
|
||||
blockRoot, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -51,43 +52,43 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
|
||||
pr := blk.ParentRoot()
|
||||
bf := &dbval.BackfillStatus{
|
||||
LowSlot: uint64(wblk.Block().Slot()),
|
||||
LowSlot: uint64(slot),
|
||||
LowRoot: blockRoot[:],
|
||||
LowParentRoot: pr[:],
|
||||
OriginRoot: blockRoot[:],
|
||||
OriginSlot: uint64(wblk.Block().Slot()),
|
||||
OriginSlot: uint64(slot),
|
||||
}
|
||||
|
||||
if err = s.SaveBackfillStatus(ctx, bf); err != nil {
|
||||
return errors.Wrap(err, "unable to save backfill status data to db for checkpoint sync")
|
||||
}
|
||||
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint block to db")
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Info("Saving checkpoint data into database")
|
||||
if err := s.SaveBlock(ctx, wblk); err != nil {
|
||||
return errors.Wrap(err, "could not save checkpoint block")
|
||||
return errors.Wrap(err, "save block")
|
||||
}
|
||||
|
||||
// save state
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Info("Calling SaveState")
|
||||
if err = s.SaveState(ctx, state, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
return errors.Wrap(err, "save state")
|
||||
}
|
||||
|
||||
if err = s.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: state.Slot(),
|
||||
Root: blockRoot[:],
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not save state summary")
|
||||
return errors.Wrap(err, "save state summary")
|
||||
}
|
||||
|
||||
// mark block as head of chain, so that processing will pick up from this point
|
||||
if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head block root")
|
||||
return errors.Wrap(err, "save head block root")
|
||||
}
|
||||
|
||||
// save origin block root in a special key, to be used when the canonical
|
||||
// origin (start of chain, ie alternative to genesis) block or state is needed
|
||||
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save origin block root")
|
||||
return errors.Wrap(err, "save origin checkpoint block root")
|
||||
}
|
||||
|
||||
// rebuild the checkpoint from the block
|
||||
@@ -96,15 +97,18 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
chkpt := ðpb.Checkpoint{
|
||||
Epoch: primitives.Epoch(slotEpoch),
|
||||
Root: blockRoot[:],
|
||||
}
|
||||
|
||||
if err = s.SaveJustifiedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "could not mark checkpoint sync block as justified")
|
||||
return errors.Wrap(err, "save justified checkpoint")
|
||||
}
|
||||
|
||||
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "could not mark checkpoint sync block as finalized")
|
||||
return errors.Wrap(err, "save finalized checkpoint")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -74,6 +74,7 @@ go_library(
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_sync//singleflight:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -84,6 +85,7 @@ go_test(
|
||||
"block_cache_test.go",
|
||||
"block_reader_test.go",
|
||||
"deposit_test.go",
|
||||
"engine_client_broadcast_test.go",
|
||||
"engine_client_fuzz_test.go",
|
||||
"engine_client_test.go",
|
||||
"execution_chain_test.go",
|
||||
|
||||
@@ -99,6 +99,8 @@ const (
|
||||
GetBlobsV2 = "engine_getBlobsV2"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
// defaultGetBlobsRetryInterval is the default retry interval for getBlobsV2 calls.
|
||||
defaultGetBlobsRetryInterval = 200 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -652,9 +654,94 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
|
||||
// and constructs the corresponding verified read-only data column sidecars.
|
||||
// It uses singleflight to ensure only one reconstruction per blockRoot.
|
||||
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Use singleflight to ensure only one reconstruction per blockRoot
|
||||
v, err, _ := s.reconstructSingleflight.Do(fmt.Sprintf("%x", blockRoot), func() (interface{}, error) {
|
||||
// Try reconstruction once
|
||||
result, err := s.reconstructDataColumnSidecarsOnce(ctx, signedROBlock, blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to reconstruct data column sidecars")
|
||||
}
|
||||
if len(result) > 0 {
|
||||
return result, nil // Success - return data
|
||||
}
|
||||
|
||||
// Empty result - initiate retry mechanism
|
||||
|
||||
// Create a new context with a timeout for the retry goroutine.
|
||||
retryCtx, cancel := context.WithTimeout(s.ctx, time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)
|
||||
|
||||
// LoadOrStore atomically checks for an existing retry and stores
|
||||
// a new one if none exists. This prevents a race condition.
|
||||
// The stored value is the cancel function for the new context.
|
||||
_, loaded := s.activeRetries.LoadOrStore(blockRoot, cancel)
|
||||
|
||||
if loaded {
|
||||
// Another goroutine already started the retry process. The current one can exit.
|
||||
cancel() // Cancel the context we just created as it won't be used.
|
||||
return []blocks.VerifiedRODataColumn{}, nil
|
||||
}
|
||||
|
||||
// This goroutine is now responsible for starting the retry.
|
||||
// Perform periodic retry attempts for data column reconstruction inline.
|
||||
go func() {
|
||||
startTime := time.Now()
|
||||
// Defer the cancellation of the context and the removal of the active retry tracker.
|
||||
defer func() {
|
||||
cancel()
|
||||
s.activeRetries.Delete(blockRoot)
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(defaultGetBlobsRetryInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
attemptCount := 0
|
||||
retryLog := log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot))
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
attemptCount++
|
||||
getBlobsRetryAttempts.WithLabelValues("attempt").Inc()
|
||||
|
||||
// Retry reconstruction
|
||||
retryLog.WithField("attempt", attemptCount).Debug("Retrying data column reconstruction")
|
||||
result, err := s.reconstructDataColumnSidecarsOnce(retryCtx, signedROBlock, blockRoot)
|
||||
if err != nil {
|
||||
retryLog.WithError(err).Debug("Reconstruction attempt failed, will retry")
|
||||
continue
|
||||
}
|
||||
if len(result) > 0 {
|
||||
retryLog.WithField("attempts", attemptCount).Debug("Retry succeeded")
|
||||
getBlobsRetryAttempts.WithLabelValues("success_reconstructed").Inc()
|
||||
getBlobsRetryDuration.WithLabelValues("success").Observe(time.Since(startTime).Seconds())
|
||||
// Clean up active retry tracker immediately on success
|
||||
s.activeRetries.Delete(blockRoot)
|
||||
return
|
||||
}
|
||||
|
||||
case <-retryCtx.Done():
|
||||
retryLog.WithField("attempts", attemptCount).Debug("Retry timeout")
|
||||
getBlobsRetryAttempts.WithLabelValues("timeout").Inc()
|
||||
getBlobsRetryDuration.WithLabelValues("timeout").Observe(time.Since(startTime).Seconds())
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Return empty result for now; the background retry will handle it.
|
||||
return []blocks.VerifiedRODataColumn{}, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v.([]blocks.VerifiedRODataColumn), nil
|
||||
}
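
For readers unfamiliar with the x/sync singleflight package used above, the deduplication it provides boils down to the following minimal, self-contained sketch; the names here are illustrative and not part of the Prysm API.

package main

import (
    "fmt"
    "sync"

    "golang.org/x/sync/singleflight"
)

// fetch runs fn at most once per key for concurrent callers: callers that
// arrive while a call for the same key is in flight block on it and receive
// the shared result, which is what avoids duplicate getBlobsV2 requests above.
func fetch(g *singleflight.Group, key string, fn func() (int, error)) (int, error) {
    v, err, _ := g.Do(key, func() (interface{}, error) {
        return fn()
    })
    if err != nil {
        return 0, err
    }
    return v.(int), nil
}

func main() {
    var g singleflight.Group
    var wg sync.WaitGroup
    for i := 0; i < 5; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            n, _ := fetch(&g, "0xabc", func() (int, error) {
                // In the real service this is the expensive EL round trip.
                return 42, nil
            })
            fmt.Println(n)
        }()
    }
    wg.Wait()
}
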
|
||||
|
||||
// reconstructDataColumnSidecarsOnce performs a single attempt to reconstruct data column sidecars.
|
||||
func (s *Service) reconstructDataColumnSidecarsOnce(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
block := signedROBlock.Block()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
@@ -1008,6 +1095,12 @@ func toBlockNumArg(number *big.Int) string {
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
// hasActiveRetry checks if there's an active retry for the given block root.
|
||||
func (s *Service) hasActiveRetry(blockRoot [fieldparams.RootLength]byte) bool {
|
||||
_, exists := s.activeRetries.Load(blockRoot)
|
||||
return exists
|
||||
}
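
The activeRetries bookkeeping used by the retry goroutine reduces to an atomic claim/release pattern; a hedged sketch follows, with helper names invented for illustration only.

package main

import (
    "context"
    "fmt"
    "sync"
)

// claimRetry returns true only for the single caller that wins the race for
// root; sync.Map.LoadOrStore is atomic, so at most one retry runs per root.
func claimRetry(m *sync.Map, root [32]byte, cancel context.CancelFunc) bool {
    _, loaded := m.LoadOrStore(root, cancel)
    return !loaded
}

// releaseRetry removes the marker so a later attempt may claim the root again.
func releaseRetry(m *sync.Map, root [32]byte) {
    m.Delete(root)
}

func main() {
    var m sync.Map
    root := [32]byte{1}
    _, cancel := context.WithCancel(context.Background())
    defer cancel()
    fmt.Println(claimRetry(&m, root, cancel)) // true: first claim wins
    fmt.Println(claimRetry(&m, root, cancel)) // false: already claimed
    releaseRetry(&m, root)
    fmt.Println(claimRetry(&m, root, cancel)) // true again after release
}
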
|
||||
|
||||
// wrapWithBlockRoot returns a new error with the given block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
|
||||
92  beacon-chain/execution/engine_client_broadcast_test.go  Normal file
@@ -0,0 +1,92 @@
|
||||
package execution
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
// TestStartRetryIfNeeded_AtomicBehavior tests that the atomic retry start behavior
|
||||
// prevents race conditions by ensuring only one retry can be active per blockRoot.
|
||||
func TestStartRetryIfNeeded_AtomicBehavior(t *testing.T) {
|
||||
t.Run("prevents multiple concurrent retry claims", func(t *testing.T) {
|
||||
service := &Service{
|
||||
activeRetries: sync.Map{},
|
||||
}
|
||||
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
claimCount := int64(0)
|
||||
|
||||
numConcurrentCalls := 20
|
||||
var wg sync.WaitGroup
|
||||
startSignal := make(chan struct{})
|
||||
|
||||
// Launch multiple goroutines that try to claim retry slot simultaneously
|
||||
for i := 0; i < numConcurrentCalls; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
<-startSignal // Wait for signal to maximize race contention
|
||||
|
||||
// Simulate the atomic claim logic from startRetryIfNeeded
|
||||
cancelFunc := func() {}
|
||||
if _, loaded := service.activeRetries.LoadOrStore(blockRoot, cancelFunc); !loaded {
|
||||
// We won the race - count successful claims
|
||||
atomic.AddInt64(&claimCount, 1)
|
||||
|
||||
// Simulate some work before cleaning up
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
service.activeRetries.Delete(blockRoot)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Start all goroutines simultaneously to maximize race condition
|
||||
close(startSignal)
|
||||
wg.Wait()
|
||||
|
||||
// Verify only one goroutine successfully claimed the retry slot
|
||||
actualClaimCount := atomic.LoadInt64(&claimCount)
|
||||
require.Equal(t, int64(1), actualClaimCount, "Only one goroutine should successfully claim retry slot despite %d concurrent attempts", numConcurrentCalls)
|
||||
|
||||
t.Logf("Success: %d concurrent attempts resulted in only 1 successful claim (atomic behavior verified)", numConcurrentCalls)
|
||||
})
|
||||
|
||||
t.Run("hasActiveRetry correctly detects active retries", func(t *testing.T) {
|
||||
service := &Service{
|
||||
activeRetries: sync.Map{},
|
||||
}
|
||||
|
||||
blockRoot1 := [32]byte{1, 2, 3}
|
||||
blockRoot2 := [32]byte{4, 5, 6}
|
||||
|
||||
// Initially no active retries
|
||||
if service.hasActiveRetry(blockRoot1) {
|
||||
t.Error("Should not have active retry initially")
|
||||
}
|
||||
|
||||
// Add active retry for blockRoot1
|
||||
service.activeRetries.Store(blockRoot1, func() {})
|
||||
|
||||
// Verify detection
|
||||
if !service.hasActiveRetry(blockRoot1) {
|
||||
t.Error("Should detect active retry for blockRoot1")
|
||||
}
|
||||
if service.hasActiveRetry(blockRoot2) {
|
||||
t.Error("Should not detect active retry for blockRoot2")
|
||||
}
|
||||
|
||||
// Remove active retry
|
||||
service.activeRetries.Delete(blockRoot1)
|
||||
|
||||
// Verify removal
|
||||
if service.hasActiveRetry(blockRoot1) {
|
||||
t.Error("Should not detect active retry after deletion")
|
||||
}
|
||||
|
||||
t.Logf("Success: hasActiveRetry correctly tracks retry state")
|
||||
})
|
||||
}
|
||||
@@ -11,7 +11,10 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
@@ -2723,3 +2726,412 @@ func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test retry helper methods
|
||||
func TestRetryHelperMethods(t *testing.T) {
|
||||
client := &Service{}
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("hasActiveRetry returns false initially", func(t *testing.T) {
|
||||
hasActive := client.hasActiveRetry(blockRoot)
|
||||
require.Equal(t, false, hasActive)
|
||||
})
|
||||
|
||||
t.Run("hasActiveRetry returns true after storing cancel function", func(t *testing.T) {
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
client.activeRetries.Store(blockRoot, cancel)
|
||||
|
||||
hasActive := client.hasActiveRetry(blockRoot)
|
||||
require.Equal(t, true, hasActive)
|
||||
|
||||
// Clean up
|
||||
client.activeRetries.Delete(blockRoot)
|
||||
})
|
||||
}
|
||||
|
||||
// Test ReconstructDataColumnSidecars with retry logic
|
||||
func TestReconstructDataColumnSidecars_WithRetry(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create test block
|
||||
kzgCommitments := createRandomKzgCommitments(t, 3)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("successful initial call does not trigger retry", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Setup server that returns all blobs
|
||||
blobMasks := []bool{true, true, true}
|
||||
srv := createBlobServerV2(t, 3, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
|
||||
// Should not have any active retries since initial call succeeded
|
||||
require.Equal(t, false, client.hasActiveRetry(r))
|
||||
})
|
||||
|
||||
t.Run("failed initial call triggers retry", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Should have active retry since initial call returned empty
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Clean up
|
||||
if cancel, ok := client.activeRetries.Load(r); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
t.Run("does not start duplicate retry", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// First call should start retry
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Second call should not start another retry
|
||||
dataColumns, err = client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Clean up
|
||||
if cancel, ok := client.activeRetries.Load(r); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test timeout and cleanup behavior
|
||||
func TestRetryTimeout(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create test block
|
||||
kzgCommitments := createRandomKzgCommitments(t, 1)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("retry cleans up after timeout", func(t *testing.T) {
|
||||
// Setup server that always returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Modify config to have very short slot time for testing
|
||||
originalConfig := params.BeaconConfig()
|
||||
cfg := originalConfig.Copy()
|
||||
cfg.SecondsPerSlot = 1 // 1 second timeout for retry
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
defer params.OverrideBeaconConfig(originalConfig)
|
||||
|
||||
// Call ReconstructDataColumnSidecars which will start retry internally
|
||||
ctx := context.Background()
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err) // Should not error, just return empty result
|
||||
|
||||
// Wait a bit for the retry goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Should have active retry initially
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Wait for timeout (longer than the 1 second timeout we set)
|
||||
time.Sleep(1200 * time.Millisecond)
|
||||
|
||||
// Should be cleaned up after timeout
|
||||
require.Equal(t, false, client.hasActiveRetry(r))
|
||||
})
|
||||
}
|
||||
|
||||
// Test concurrent retry scenarios
|
||||
func TestConcurrentRetries(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run("multiple blocks can have concurrent retries", func(t *testing.T) {
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Create multiple test blocks
|
||||
testBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 3)
|
||||
roots := make([][32]byte, 3)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
kzgCommitments := createRandomKzgCommitments(t, 1)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
testBlocks[i] = signedB
|
||||
roots[i] = [32]byte{byte(i), byte(i), byte(i)}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Start retries for all blocks
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, testBlocks[i], roots[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Wait a bit for the goroutines to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// All should have active retries
|
||||
for i := 0; i < 3; i++ {
|
||||
require.Equal(t, true, client.hasActiveRetry(roots[i]))
|
||||
}
|
||||
|
||||
// Clean up
|
||||
for i := 0; i < 3; i++ {
|
||||
if cancel, ok := client.activeRetries.Load(roots[i]); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test end-to-end retry behavior with data availability changes
|
||||
func TestRetryBehaviorWithDataAvailability(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create test block
|
||||
kzgCommitments := createRandomKzgCommitments(t, 1)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("retry stops when data becomes available", func(t *testing.T) {
|
||||
// Setup server that returns no blobs initially
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Start the initial reconstruction which should trigger retry
|
||||
ctx := context.Background()
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify retry started
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Wait for retry timeout (the retry will continue since there's no way to stop it now)
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
// Retry should still be active since there's no availability check to stop it
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
})
|
||||
|
||||
t.Run("retry continues when data is not available", func(t *testing.T) {
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Start the initial reconstruction which should trigger retry
|
||||
ctx := context.Background()
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify retry started
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Wait a bit - retry should still be active
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Clean up
|
||||
if cancel, ok := client.activeRetries.Load(r); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
|
||||
// Wait for cleanup
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
require.Equal(t, false, client.hasActiveRetry(r))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
// TestConcurrentReconstructDataColumnSidecars tests that concurrent calls to ReconstructDataColumnSidecars
|
||||
// don't result in multiple getBlobsV2 calls for the same block root
|
||||
func TestConcurrentReconstructDataColumnSidecars(t *testing.T) {
|
||||
t.Run("concurrent calls share result", func(t *testing.T) {
|
||||
// Setup server that tracks call count
|
||||
callCount := int32(0)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
atomic.AddInt32(&callCount, 1)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
// Simulate some processing time
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
if strings.Contains(r.URL.RequestURI(), GetBlobsV2) {
|
||||
// Return empty result - simulating EL doesn't have the data yet
|
||||
resp := []interface{}{nil}
|
||||
respJSON, _ := json.Marshal(map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
})
|
||||
_, _ = w.Write(respJSON)
|
||||
return
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
// Setup client
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Create test block with KZG commitments
|
||||
slot := primitives.Slot(100)
|
||||
block := util.NewBeaconBlockDeneb()
|
||||
block.Block.Slot = slot
|
||||
commitment := [48]byte{1, 2, 3}
|
||||
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot, err := signedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Start multiple concurrent calls
|
||||
numCalls := 5
|
||||
var wg sync.WaitGroup
|
||||
results := make([][]blocks.VerifiedRODataColumn, numCalls)
|
||||
errors := make([]error, numCalls)
|
||||
|
||||
for i := 0; i < numCalls; i++ {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
result, err := client.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
|
||||
results[index] = result
|
||||
errors[index] = err
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all calls to complete
|
||||
wg.Wait()
|
||||
|
||||
// Verify that GetBlobsV2 was called only once, not numCalls times
|
||||
finalCallCount := atomic.LoadInt32(&callCount)
|
||||
require.Equal(t, int32(1), finalCallCount, "Expected GetBlobsV2 to be called only once, but was called %d times", finalCallCount)
|
||||
|
||||
// Verify all calls got the same result length
|
||||
for i := 1; i < numCalls; i++ {
|
||||
require.Equal(t, len(results[0]), len(results[i]), "All concurrent calls should return same result length")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -71,4 +71,19 @@ var (
|
||||
Name: "execution_payload_bodies_count",
|
||||
Help: "The number of requested payload bodies is too large",
|
||||
})
|
||||
getBlobsRetryAttempts = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "getblobs_retry_attempts_total",
|
||||
Help: "Total number of getBlobsV2 retry attempts",
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
getBlobsRetryDuration = promauto.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "getblobs_retry_duration_seconds",
|
||||
Help: "Duration of getBlobsV2 retry cycles",
|
||||
Buckets: []float64{0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0},
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
)
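
If needed, these collectors can be exercised directly in tests with the client_golang testutil helpers. The snippet below is a sketch only, assuming the standard prometheus/client_golang/prometheus/testutil package and the same package as the metrics; the label value mirrors the one incremented in the retry loop.

package execution

import (
    "testing"

    "github.com/prometheus/client_golang/prometheus/testutil"
)

// TestGetBlobsRetryCounterSketch reads the retry counter before and after an
// increment; it is illustrative only and not part of the change set above.
func TestGetBlobsRetryCounterSketch(t *testing.T) {
    c := getBlobsRetryAttempts.WithLabelValues("attempt")
    before := testutil.ToFloat64(c)
    c.Inc()
    if got := testutil.ToFloat64(c); got != before+1 {
        t.Fatalf("expected counter to advance by 1, got %v -> %v", before, got)
    }
}
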
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/singleflight"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
@@ -162,6 +164,8 @@ type Service struct {
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
blobVerifier verification.NewBlobVerifier
|
||||
capabilityCache *capabilityCache
|
||||
activeRetries sync.Map // map[blockRoot]context.CancelFunc for tracking active retries
|
||||
reconstructSingleflight singleflight.Group
|
||||
}
|
||||
|
||||
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
|
||||
|
||||
@@ -23,7 +23,6 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -26,7 +26,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
@@ -124,7 +123,6 @@ type BeaconNode struct {
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
@@ -166,7 +164,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
}
|
||||
|
||||
@@ -236,7 +233,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.finalizedStateAtStartUp = nil
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
beacon.lcStore = lightclient.NewLightClientStore(beacon.db)
|
||||
beacon.lcStore = lightclient.NewLightClientStore(beacon.db, beacon.fetchP2P(), beacon.StateFeed())
|
||||
}
|
||||
|
||||
return beacon, nil
|
||||
@@ -300,6 +297,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
@@ -510,6 +508,10 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
|
||||
return nil, errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
if err := b.DataColumnStorage.Clear(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear data column storage")
|
||||
}
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new database")
|
||||
@@ -717,7 +719,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -800,7 +801,6 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithCustodyInfo(b.custodyInfo),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -888,7 +888,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithCustodyInfo(b.custodyInfo),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
regularsync.WithLightClientStore(b.lcStore),
|
||||
regularsync.WithBatchVerifierLimit(b.cliCtx.Int(flags.BatchVerifierLimit.Name)),
|
||||
@@ -915,6 +915,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -1009,6 +1010,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -1036,6 +1038,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
@@ -1177,6 +1180,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
// TODO: Add backfill for data column storage
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
|
||||
@@ -147,7 +147,6 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
@@ -174,7 +173,6 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//proto/testing:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
@@ -24,7 +23,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -229,6 +227,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -257,6 +256,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
// Set for 2nd peer
|
||||
@@ -546,8 +546,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
msg, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
msg, err := util.MockOptimisticUpdate()
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
|
||||
@@ -613,8 +612,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair)
|
||||
msg, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
msg, err := util.MockFinalityUpdate()
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
|
||||
@@ -699,6 +697,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
// Create a listener.
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"time"
|
||||
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
)
|
||||
@@ -40,7 +39,6 @@ type Config struct {
|
||||
StateNotifier statefeed.Notifier
|
||||
DB db.ReadOnlyDatabase
|
||||
ClockWaiter startup.ClockWaiter
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// validateConfig validates whether the values provided are accurate and will set
|
||||
|
||||
@@ -3,11 +3,114 @@ package p2p
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var _ DataColumnsHandler = (*Service)(nil)
|
||||
var _ CustodyManager = (*Service)(nil)
|
||||
|
||||
// EarliestAvailableSlot returns the earliest available slot.
|
||||
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
s.custodyInfoLock.RLock()
|
||||
defer s.custodyInfoLock.RUnlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errors.New("no custody info available")
|
||||
}
|
||||
|
||||
return s.custodyInfo.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount returns the custody group count.
|
||||
func (s *Service) CustodyGroupCount() (uint64, error) {
|
||||
s.custodyInfoLock.Lock()
|
||||
defer s.custodyInfoLock.Unlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errors.New("no custody info available")
|
||||
}
|
||||
|
||||
return s.custodyInfo.groupCount, nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfo updates the stored custody group count to the incoming one
|
||||
// if the incoming one is greater than the stored one. In this case, the
|
||||
// incoming earliest available slot should be greater than or equal to the
|
||||
// stored one or an error is returned.
|
||||
//
|
||||
// - If there is no stored custody info, or
|
||||
// - If the incoming earliest available slot is greater than or equal to the
|
||||
// fulu fork slot and the incoming custody group count is greater than the
|
||||
// number of samples per slot
|
||||
//
|
||||
// then the stored earliest available slot is updated to the incoming one.
|
||||
//
|
||||
// This function returns the (possibly updated) earliest available slot and
// custody group count, and an error if the incoming values are inconsistent.
|
||||
//
|
||||
// Rationale:
|
||||
// - The custody group count can only be increased (specification)
|
||||
// - If the custody group count is increased before Fulu, we can still serve
|
||||
// all the data, since there is no sharding before Fulu. As a consequence
|
||||
// we do not need to update the earliest available slot in this case.
|
||||
// - If the custody group count is increased after Fulu, but to a value less
|
||||
// than or equal to the number of samples per slot, we can still serve all
|
||||
// the data, since we store all sampled data column sidecars in all cases.
|
||||
// As a consequence, we do not need to update the earliest available slot
|
||||
// - If the custody group count is increased after Fulu to a value higher than
|
||||
// the number of samples per slot, then, until the backfill is complete, we
|
||||
// are unable to serve the data column sidecars corresponding to the new
|
||||
// custody groups. As a consequence, we need to update the earliest
|
||||
// available slot to inform the peers that we are not able to serve data
|
||||
// column sidecars before this point.
|
||||
func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
s.custodyInfoLock.Lock()
|
||||
defer s.custodyInfoLock.Unlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
s.custodyInfo = &custodyInfo{
|
||||
earliestAvailableSlot: earliestAvailableSlot,
|
||||
groupCount: custodyGroupCount,
|
||||
}
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
inMemory := s.custodyInfo
|
||||
if custodyGroupCount <= inMemory.groupCount {
|
||||
return inMemory.earliestAvailableSlot, inMemory.groupCount, nil
|
||||
}
|
||||
|
||||
if earliestAvailableSlot < inMemory.earliestAvailableSlot {
|
||||
return 0, 0, errors.Errorf(
|
||||
"earliest available slot %d is less than the current one %d. (custody group count: %d, current one: %d)",
|
||||
earliestAvailableSlot, inMemory.earliestAvailableSlot, custodyGroupCount, inMemory.groupCount,
|
||||
)
|
||||
}
|
||||
|
||||
if custodyGroupCount <= samplesPerSlot {
|
||||
inMemory.groupCount = custodyGroupCount
|
||||
return inMemory.earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
fuluForkSlot, err := fuluForkSlot()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
|
||||
if earliestAvailableSlot < fuluForkSlot {
|
||||
inMemory.groupCount = custodyGroupCount
|
||||
return inMemory.earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
inMemory.earliestAvailableSlot = earliestAvailableSlot
|
||||
inMemory.groupCount = custodyGroupCount
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
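
A condensed, standalone re-statement of the policy implemented above may help when reading the test cases further down. It is a sketch only: locking, Prysm types, and the real fuluForkSlot lookup are stripped out, and samplesPerSlot / fuluForkSlot are assumed constants.

package main

import "fmt"

// Assumed constants for illustration: SamplesPerSlot = 8 and FuluForkEpoch = 10
// with 32 slots per epoch, i.e. a Fulu fork slot of 320.
const (
    samplesPerSlot        = 8
    fuluForkSlot   uint64 = 320
)

type info struct{ earliestSlot, groupCount uint64 }

// update re-states the decision rules of UpdateCustodyInfo without locking or
// Prysm types; it returns the values that would be stored.
func update(cur *info, slot, count uint64) (info, error) {
    if cur == nil {
        return info{slot, count}, nil // first time: store as-is
    }
    if count <= cur.groupCount {
        return *cur, nil // counts never decrease; keep stored values
    }
    if slot < cur.earliestSlot {
        return info{}, fmt.Errorf("earliest available slot %d < stored %d", slot, cur.earliestSlot)
    }
    if count <= samplesPerSlot || slot < fuluForkSlot {
        return info{cur.earliestSlot, count}, nil // can still serve all data
    }
    return info{slot, count}, nil // backfill needed: advance earliest slot too
}

func main() {
    cur := info{earliestSlot: 50, groupCount: 5}
    fmt.Println(update(&cur, 60, 8))   // {50 8} <nil>: count within samplesPerSlot
    fmt.Println(update(&cur, 500, 15)) // {500 15} <nil>: past Fulu and above samplesPerSlot
}
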
|
||||
|
||||
// CustodyGroupCountFromPeer retrieves custody group count from a peer.
|
||||
// It first tries to get the custody group count from the peer's metadata,
|
||||
@@ -72,3 +175,19 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
|
||||
return custodyGroupCount
|
||||
}
|
||||
|
||||
func fuluForkSlot() (primitives.Slot, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
|
||||
fuluForkEpoch := beaconConfig.FuluForkEpoch
|
||||
if fuluForkEpoch == beaconConfig.FarFutureEpoch {
|
||||
return beaconConfig.FarFutureSlot, nil
|
||||
}
|
||||
|
||||
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "epoch start")
|
||||
}
|
||||
|
||||
return forkFuluSlot, nil
|
||||
}
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
|
||||
@@ -15,6 +18,174 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
func TestEarliestAvailableSlot(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
|
||||
_, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected primitives.Slot = 100
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: expected,
|
||||
},
|
||||
}
|
||||
|
||||
slot, err := service.EarliestAvailableSlot()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, slot)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupCount(t *testing.T) {
|
||||
t.Run("No custody info available", func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: nil,
|
||||
}
|
||||
|
||||
_, err := service.CustodyGroupCount()
|
||||
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
|
||||
})
|
||||
|
||||
t.Run("Valid custody info", func(t *testing.T) {
|
||||
const expected uint64 = 5
|
||||
|
||||
service := &Service{
|
||||
custodyInfo: &custodyInfo{
|
||||
groupCount: expected,
|
||||
},
|
||||
}
|
||||
|
||||
count, err := service.CustodyGroupCount()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, count)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SamplesPerSlot = 8
|
||||
config.FuluForkEpoch = 10
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
initialCustodyInfo *custodyInfo
|
||||
inputSlot primitives.Slot
|
||||
inputGroupCount uint64
|
||||
expectedUpdated bool
|
||||
expectedSlot primitives.Slot
|
||||
expectedGroupCount uint64
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "First time setting custody info",
|
||||
initialCustodyInfo: nil,
|
||||
inputSlot: 100,
|
||||
inputGroupCount: 5,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 100,
|
||||
expectedGroupCount: 5,
|
||||
},
|
||||
{
|
||||
name: "Group count decrease - no update",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 10,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 8,
|
||||
expectedUpdated: false,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 10,
|
||||
},
|
||||
{
|
||||
name: "Earliest slot decrease - error",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 100,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 50,
|
||||
inputGroupCount: 10,
|
||||
expectedErr: "earliest available slot 50 is less than the current one 100",
|
||||
},
|
||||
{
|
||||
name: "Group count increase but <= samples per slot",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 8,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 8,
|
||||
},
|
||||
{
|
||||
name: "Group count increase > samples per slot, before Fulu fork",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 60,
|
||||
inputGroupCount: 15,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 50,
|
||||
expectedGroupCount: 15,
|
||||
},
|
||||
{
|
||||
name: "Group count increase > samples per slot, after Fulu fork",
|
||||
initialCustodyInfo: &custodyInfo{
|
||||
earliestAvailableSlot: 50,
|
||||
groupCount: 5,
|
||||
},
|
||||
inputSlot: 500,
|
||||
inputGroupCount: 15,
|
||||
expectedUpdated: true,
|
||||
expectedSlot: 500,
|
||||
expectedGroupCount: 15,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
service := &Service{
|
||||
custodyInfo: tc.initialCustodyInfo,
|
||||
}
|
||||
|
||||
slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)
|
||||
|
||||
if tc.expectedErr != "" {
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, true, strings.Contains(err.Error(), tc.expectedErr))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSlot, slot)
|
||||
require.Equal(t, tc.expectedGroupCount, groupCount)
|
||||
|
||||
if tc.expectedUpdated {
|
||||
require.NotNil(t, service.custodyInfo)
|
||||
require.Equal(t, tc.expectedSlot, service.custodyInfo.earliestAvailableSlot)
|
||||
require.Equal(t, tc.expectedGroupCount, service.custodyInfo.groupCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
const (
|
||||
expectedENR uint64 = 7
|
||||
@@ -109,3 +280,59 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCustodyGroupCountFromPeerENR(t *testing.T) {
|
||||
const (
|
||||
expectedENR uint64 = 7
|
||||
pid = "test-id"
|
||||
)
|
||||
|
||||
cgc := peerdas.Cgc(expectedENR)
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
record *enr.Record
|
||||
expected uint64
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "No ENR record",
|
||||
record: nil,
|
||||
expected: custodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "Empty ENR record",
|
||||
record: &enr.Record{},
|
||||
expected: custodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "Valid ENR with custody group count",
|
||||
record: func() *enr.Record {
|
||||
record := &enr.Record{}
|
||||
record.Set(cgc)
|
||||
return record
|
||||
}(),
|
||||
expected: expectedENR,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
|
||||
if tc.record != nil {
|
||||
peers.Add(tc.record, pid, nil, network.DirOutbound)
|
||||
}
|
||||
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
}
|
||||
|
||||
actual := service.custodyGroupCountFromPeerENR(pid)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,22 +236,40 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
// Get the sync subnet bitfield in our metadata.
|
||||
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
|
||||
|
||||
// Is our sync bitvector record up to date?
|
||||
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
|
||||
|
||||
// Compare current epoch with the Fulu fork epoch.
|
||||
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
|
||||
|
||||
custodyGroupCount, inRecordCustodyGroupCount := uint64(0), uint64(0)
|
||||
if params.FuluEnabled() {
|
||||
// Get the custody group count we store in our record.
|
||||
inRecordCustodyGroupCount, err = peerdas.CustodyGroupCountFromRecord(record)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve custody group count")
|
||||
return
|
||||
}
|
||||
|
||||
custodyGroupCount, err = s.CustodyGroupCount()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve custody group count")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// We add `1` to the current epoch because we want to prepare one epoch before the Fulu fork.
|
||||
if currentEpoch+1 < fuluForkEpoch {
|
||||
// Is our custody group count record up to date?
|
||||
isCustodyGroupCountUpToDate := custodyGroupCount == inRecordCustodyGroupCount
|
||||
|
||||
// Altair behaviour.
|
||||
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
|
||||
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate && (!params.FuluEnabled() || isCustodyGroupCountUpToDate) {
|
||||
// Nothing to do, return early.
|
||||
return
|
||||
}
|
||||
|
||||
// Some data have changed, update our record and metadata.
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS)
|
||||
s.updateSubnetRecordWithMetadataV2(bitV, bitS, custodyGroupCount)
|
||||
|
||||
// Ping all peers to inform them of new metadata
|
||||
s.pingPeersAndLogEnr()
|
||||
@@ -259,16 +277,6 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
return
|
||||
}
|
||||
|
||||
// Get the current custody group count.
|
||||
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
|
||||
|
||||
// Get the custody group count we store in our record.
|
||||
inRecordCustodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve custody subnet count")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the custody group count in our metadata.
|
||||
inMetadataCustodyGroupCount := s.Metadata().CustodyGroupCount()
|
||||
|
||||
@@ -565,11 +573,6 @@ func (s *Service) createLocalNode(
|
||||
localNode.Set(quicEntry)
|
||||
}
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCount := s.cfg.CustodyInfo.ActualGroupCount()
|
||||
localNode.Set(peerdas.Cgc(custodyGroupCount))
|
||||
}
|
||||
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
@@ -581,6 +584,16 @@ func (s *Service) createLocalNode(
|
||||
localNode = initializeAttSubnets(localNode)
|
||||
localNode = initializeSyncCommSubnets(localNode)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCount, err := s.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
|
||||
if s.cfg != nil && s.cfg.HostAddress != "" {
|
||||
hostIP := net.ParseIP(s.cfg.HostAddress)
|
||||
if hostIP.To4() == nil && hostIP.To16() == nil {
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
@@ -65,6 +64,7 @@ func TestCreateListener(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -91,6 +91,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -116,6 +117,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
@@ -157,27 +159,27 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "valid config",
|
||||
cfg: &Config{CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid host address",
|
||||
cfg: &Config{HostAddress: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostAddress: "invalid"},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "valid host address",
|
||||
cfg: &Config{HostAddress: "192.168.0.1", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostAddress: "192.168.0.1"},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid host DNS",
|
||||
cfg: &Config{HostDNS: "invalid", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostDNS: "invalid"},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "valid host DNS",
|
||||
cfg: &Config{HostDNS: "www.google.com", CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{HostDNS: "www.google.com"},
|
||||
expectedError: false,
|
||||
},
|
||||
}
|
||||
@@ -191,6 +193,8 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
quicPort = 3000
|
||||
)
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
// Create a private key.
|
||||
address, privKey := createAddrAndPrivKey(t)
|
||||
|
||||
@@ -199,6 +203,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: tt.cfg,
|
||||
custodyInfo: &custodyInfo{groupCount: custodyRequirement},
|
||||
}
|
||||
|
||||
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
|
||||
@@ -210,7 +215,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedAddress := address
|
||||
if tt.cfg.HostAddress != "" {
|
||||
if tt.cfg != nil && tt.cfg.HostAddress != "" {
|
||||
expectedAddress = net.ParseIP(tt.cfg.HostAddress)
|
||||
}
|
||||
|
||||
@@ -250,8 +255,8 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
|
||||
// Check cgc config.
|
||||
custodyGroupCount := new(uint64)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, custodyGroupCount)))
|
||||
require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodyGroupCount)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
|
||||
require.Equal(t, custodyRequirement, *custodyGroupCount)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -263,6 +268,7 @@ func TestRebootDiscoveryListener(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
@@ -295,6 +301,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{},
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
|
||||
require.NoError(t, err)
|
||||
@@ -313,6 +320,7 @@ func TestMultiAddrConversion_OK(t *testing.T) {
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -386,6 +394,7 @@ func TestHostIsResolved(t *testing.T) {
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
ip, key := createAddrAndPrivKey(t)
|
||||
list, err := s.createListener(ip, key)
|
||||
@@ -455,6 +464,7 @@ func TestUDPMultiAddress(t *testing.T) {
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
@@ -655,7 +665,7 @@ func checkPingCountCacheMetadataRecord(
|
||||
if expected.custodyGroupCount != nil {
|
||||
// Check custody subnet count in ENR.
|
||||
var actualCustodyGroupCount uint64
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodyGroupCountEnrKey, &actualCustodyGroupCount))
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, &actualCustodyGroupCount))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, *expected.custodyGroupCount, actualCustodyGroupCount)
|
||||
|
||||
@@ -818,10 +828,11 @@ func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
actualPingCount++
|
||||
return nil
|
||||
},
|
||||
cfg: &Config{UDPPort: 2000, CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
cfg: &Config{UDPPort: 2000},
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
custodyInfo: &custodyInfo{groupCount: custodyGroupCount},
|
||||
}
|
||||
|
||||
// Set the listener and the metadata.
|
||||
|
||||
@@ -40,6 +40,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -73,6 +74,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: root,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
@@ -134,6 +136,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
@@ -168,6 +171,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
@@ -32,13 +33,14 @@ type (
|
||||
ConnectionHandler
|
||||
PeersProvider
|
||||
MetadataProvider
|
||||
DataColumnsHandler
|
||||
CustodyManager
|
||||
}
|
||||
|
||||
// Accessor provides access to the Broadcaster and PeerManager interfaces.
|
||||
// Accessor provides access to the Broadcaster, PeerManager and CustodyManager interfaces.
|
||||
Accessor interface {
|
||||
Broadcaster
|
||||
PeerManager
|
||||
CustodyManager
|
||||
}
|
||||
|
||||
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
|
||||
@@ -118,8 +120,11 @@ type (
|
||||
MetadataSeq() uint64
|
||||
}
|
||||
|
||||
// DataColumnsHandler abstracts some data columns related methods.
|
||||
DataColumnsHandler interface {
|
||||
// CustodyManager abstracts custody-related methods for data columns.
|
||||
CustodyManager interface {
|
||||
EarliestAvailableSlot() (primitives.Slot, error)
|
||||
CustodyGroupCount() (uint64, error)
|
||||
UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
CustodyGroupCountFromPeer(peer.ID) uint64
|
||||
}
|
||||
)
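CustodyManager is only an interface at this layer; the concrete state lives on the p2p Service. A hypothetical in-memory implementation satisfying the same four-method shape, a sketch rather than Prysm's actual code, with local stand-ins for primitives.Slot and peer.ID so it compiles on its own:

package main

import (
	"fmt"
	"sync"
)

// Stand-in types for primitives.Slot and peer.ID.
type Slot uint64
type PeerID string

// memoryCustodyManager is an illustrative in-memory custody tracker.
type memoryCustodyManager struct {
	mu                    sync.RWMutex
	earliestAvailableSlot Slot
	groupCount            uint64
	peerGroupCounts       map[PeerID]uint64
}

func (m *memoryCustodyManager) EarliestAvailableSlot() (Slot, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.earliestAvailableSlot, nil
}

func (m *memoryCustodyManager) CustodyGroupCount() (uint64, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.groupCount, nil
}

// UpdateCustodyInfo stores the new values and returns what is now in effect.
func (m *memoryCustodyManager) UpdateCustodyInfo(slot Slot, count uint64) (Slot, uint64, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.earliestAvailableSlot, m.groupCount = slot, count
	return m.earliestAvailableSlot, m.groupCount, nil
}

func (m *memoryCustodyManager) CustodyGroupCountFromPeer(pid PeerID) uint64 {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.peerGroupCounts[pid]
}

func main() {
	m := &memoryCustodyManager{peerGroupCounts: map[PeerID]uint64{"peerA": 8}}
	_, _, _ = m.UpdateCustodyInfo(128, 4)
	count, _ := m.CustodyGroupCount()
	fmt.Println(count, m.CustodyGroupCountFromPeer("peerA")) // 4 8
}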
|
||||
|
||||
@@ -54,7 +54,7 @@ type PeerData struct {
|
||||
NextValidTime time.Time
|
||||
// Chain related data.
|
||||
MetaData metadata.Metadata
|
||||
ChainState *ethpb.Status
|
||||
ChainState *ethpb.StatusV2
|
||||
ChainStateLastUpdated time.Time
|
||||
ChainStateValidationError error
|
||||
// Scorers internal data.
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var _ Scorer = (*BadResponsesScorer)(nil)
|
||||
@@ -132,13 +131,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
|
||||
|
||||
// isBadPeerNoLock is lock-free version of IsBadPeer.
|
||||
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
if peerData.BadResponses >= s.config.Threshold {
|
||||
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
}
|
||||
// if peerData, ok := s.store.PeerData(pid); ok {
|
||||
// TODO: Remove this when out of devnet
|
||||
// if peerData.BadResponses >= s.config.Threshold {
|
||||
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
// return nil
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
@@ -13,39 +12,41 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
const pid = "peer1"
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
// const pid = "peer1"
|
||||
|
||||
ctx := t.Context()
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 4,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
|
||||
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
}
|
||||
// scorer.Increment(pid)
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
@@ -137,56 +138,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
|
||||
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
|
||||
}
|
||||
|
||||
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pid := peer.ID("peer1")
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pid := peer.ID("peer1")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pid)
|
||||
if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
} else {
|
||||
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
}
|
||||
}
|
||||
}
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pid)
|
||||
// if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// } else {
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
for i := 0; i < len(pids); i++ {
|
||||
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
}
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pids[1])
|
||||
scorer.Increment(pids[2])
|
||||
scorer.Increment(pids[4])
|
||||
}
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
badPeers := scorer.BadPeers()
|
||||
sort.Slice(badPeers, func(i, j int) bool {
|
||||
return badPeers[i] < badPeers[j]
|
||||
})
|
||||
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
}
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
// for i := 0; i < len(pids); i++ {
|
||||
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
// }
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pids[1])
|
||||
// scorer.Increment(pids[2])
|
||||
// scorer.Increment(pids[4])
|
||||
// }
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
// want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
// badPeers := scorer.BadPeers()
|
||||
// sort.Slice(badPeers, func(i, j int) bool {
|
||||
// return badPeers[i] < badPeers[j]
|
||||
// })
|
||||
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
// }
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -112,7 +112,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
|
||||
}
|
||||
|
||||
// SetPeerStatus sets chain state data for a given peer.
|
||||
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
|
||||
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, validationError error) {
|
||||
s.store.Lock()
|
||||
defer s.store.Unlock()
|
||||
|
||||
@@ -130,14 +130,14 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
|
||||
// PeerStatus gets the chain state of the given remote peer.
|
||||
// This can return nil if there is no known chain state for the peer.
|
||||
// This will error if the peer does not exist.
|
||||
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
|
||||
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.StatusV2, error) {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
return s.peerStatusNoLock(pid)
|
||||
}
|
||||
|
||||
// peerStatusNoLock lock-free version of PeerStatus.
|
||||
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
|
||||
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.StatusV2, error) {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
if peerData.ChainState == nil {
|
||||
return nil, peerdata.ErrNoPeerStatus
|
||||
|
||||
@@ -35,7 +35,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent bad peer",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
@@ -48,7 +48,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent peer no head slot for the host node is known",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, nil)
|
||||
@@ -61,7 +61,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent peer head is before ours",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(128)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, nil)
|
||||
@@ -75,12 +75,12 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := primitives.Slot(128)
|
||||
scorer.SetHeadSlot(headSlot)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 64,
|
||||
}, nil)
|
||||
// Set another peer to a higher score.
|
||||
scorer.SetPeerStatus("peer2", &pb.Status{
|
||||
scorer.SetPeerStatus("peer2", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 128,
|
||||
}, nil)
|
||||
@@ -95,7 +95,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := primitives.Slot(128)
|
||||
scorer.SetHeadSlot(headSlot)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 64,
|
||||
}, nil)
|
||||
@@ -108,7 +108,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent peer no max known slot",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 0,
|
||||
}, nil)
|
||||
@@ -141,7 +141,7 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
|
||||
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
|
||||
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
|
||||
}
|
||||
@@ -160,9 +160,9 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
|
||||
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.StatusV2{}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
|
||||
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
|
||||
@@ -179,12 +179,12 @@ func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
|
||||
})
|
||||
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
|
||||
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
|
||||
assert.Equal(t, (*pb.Status)(nil), status)
|
||||
assert.Equal(t, (*pb.StatusV2)(nil), status)
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadSlot: 128,
|
||||
}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.StatusV2{
|
||||
HeadSlot: 128,
|
||||
}, p2ptypes.ErrInvalidEpoch)
|
||||
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
|
||||
|
||||
@@ -211,99 +211,102 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestScorers_Service_loop(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
|
||||
defer cancel()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_loop(t *testing.T) {
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
DecayInterval: 25 * time.Millisecond,
|
||||
Decay: 64,
|
||||
},
|
||||
},
|
||||
})
|
||||
s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 5,
|
||||
// DecayInterval: 50 * time.Millisecond,
|
||||
// },
|
||||
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
// DecayInterval: 25 * time.Millisecond,
|
||||
// Decay: 64,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
|
||||
pid1 := peer.ID("peer1")
|
||||
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
s1.Increment(pid1)
|
||||
}
|
||||
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
// pid1 := peer.ID("peer1")
|
||||
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
// for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
// s1.Increment(pid1)
|
||||
// }
|
||||
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
|
||||
s2.IncrementProcessedBlocks("peer1", 221)
|
||||
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
// s2.IncrementProcessedBlocks("peer1", 221)
|
||||
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Error("Timed out")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// done := make(chan struct{}, 1)
|
||||
// go func() {
|
||||
// defer func() {
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// ticker := time.NewTicker(50 * time.Millisecond)
|
||||
// defer ticker.Stop()
|
||||
// for {
|
||||
// select {
|
||||
// case <-ticker.C:
|
||||
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
// return
|
||||
// }
|
||||
// case <-ctx.Done():
|
||||
// t.Error("Timed out")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
|
||||
<-done
|
||||
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
// <-done
|
||||
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
// }
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// }
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
// for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
// }
|
||||
|
||||
@@ -62,7 +62,9 @@ const (
|
||||
|
||||
const (
|
||||
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
|
||||
CollocationLimit = 5
|
||||
// TODO: Revert this when out of devnet.
|
||||
// CollocationLimit = 5
|
||||
CollocationLimit = 9999
|
||||
|
||||
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
|
||||
maxLimitBuffer = 150
|
||||
@@ -205,14 +207,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
|
||||
}
|
||||
|
||||
// SetChainState sets the chain state of the given remote peer.
|
||||
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
|
||||
func (p *Status) SetChainState(pid peer.ID, chainState *pb.StatusV2) {
|
||||
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
|
||||
}
|
||||
|
||||
// ChainState gets the chain state of the given remote peer.
|
||||
// This will error if the peer does not exist.
|
||||
// This will error if there is no known chain state for the peer.
|
||||
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
|
||||
func (p *Status) ChainState(pid peer.ID) (*pb.StatusV2, error) {
|
||||
return p.scorers.PeerStatusScorer().PeerStatus(pid)
|
||||
}
|
||||
|
||||
@@ -780,6 +782,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
// and is shared by at least minPeers.
|
||||
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
|
||||
@@ -2,7 +2,6 @@ package peers_test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -289,7 +288,7 @@ func TestPeerChainState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedEpoch := primitives.Epoch(123)
|
||||
p.SetChainState(id, &pb.Status{FinalizedEpoch: finalizedEpoch})
|
||||
p.SetChainState(id, &pb.StatusV2{FinalizedEpoch: finalizedEpoch})
|
||||
|
||||
resChainState, err := p.ChainState(id)
|
||||
require.NoError(t, err)
|
||||
@@ -324,59 +323,60 @@ func TestPeerWithNilChainState(t *testing.T) {
|
||||
|
||||
resChainState, err := p.ChainState(id)
|
||||
require.Equal(t, peerdata.ErrNoPeerStatus, err)
|
||||
var nothing *pb.Status
|
||||
var nothing *pb.StatusV2
|
||||
require.Equal(t, resChainState, nothing)
|
||||
}
|
||||
|
||||
func TestPeerBadResponses(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerBadResponses(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
require.NoError(t, err)
|
||||
{
|
||||
_, err := id.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
// require.NoError(t, err)
|
||||
// {
|
||||
// _, err := id.MarshalBinary()
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
require.NoError(t, err, "Failed to create address")
|
||||
direction := network.DirInbound
|
||||
p.Add(new(enr.Record), id, address, direction)
|
||||
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
// require.NoError(t, err, "Failed to create address")
|
||||
// direction := network.DirInbound
|
||||
// p.Add(new(enr.Record), id, address, direction)
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
resBadResponses, err := scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
// resBadResponses, err := scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
}
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// }
|
||||
|
||||
func TestAddMetaData(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
@@ -495,100 +495,102 @@ func TestPeerValidTime(t *testing.T) {
|
||||
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
|
||||
}
|
||||
|
||||
func TestPrune(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPrune(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
if i%7 == 0 {
|
||||
// Peer added as disconnected.
|
||||
_ = addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
// Peer added to peer handler.
|
||||
_ = addPeer(t, p, peers.Connected)
|
||||
}
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// if i%7 == 0 {
|
||||
// // Peer added as disconnected.
|
||||
// _ = addPeer(t, p, peers.PeerDisconnected)
|
||||
// }
|
||||
// // Peer added to peer handler.
|
||||
// _ = addPeer(t, p, peers.PeerConnected)
|
||||
// }
|
||||
|
||||
disPeers := p.Disconnected()
|
||||
firstPID := disPeers[0]
|
||||
secondPID := disPeers[1]
|
||||
thirdPID := disPeers[2]
|
||||
// disPeers := p.Disconnected()
|
||||
// firstPID := disPeers[0]
|
||||
// secondPID := disPeers[1]
|
||||
// thirdPID := disPeers[2]
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
|
||||
// Make first peer a bad peer
|
||||
scorer.Increment(firstPID)
|
||||
scorer.Increment(firstPID)
|
||||
// // Make first peer a bad peer
|
||||
// scorer.Increment(firstPID)
|
||||
// scorer.Increment(firstPID)
|
||||
|
||||
// Add bad response for p2.
|
||||
scorer.Increment(secondPID)
|
||||
// // Add bad response for p2.
|
||||
// scorer.Increment(secondPID)
|
||||
|
||||
// Prune peers
|
||||
p.Prune()
|
||||
// // Prune peers
|
||||
// p.Prune()
|
||||
|
||||
// Bad peer is expected to still be kept in handler.
|
||||
badRes, err := scorer.Count(firstPID)
|
||||
assert.NoError(t, err, "error is supposed to be nil")
|
||||
assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
// // Bad peer is expected to still be kept in handler.
|
||||
// badRes, err := scorer.Count(firstPID)
|
||||
// assert.NoError(t, err, "error is supposed to be nil")
|
||||
// assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
|
||||
// Not so good peer is pruned away so that we can reduce the
|
||||
// total size of the handler.
|
||||
_, err = scorer.Count(secondPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
// // Not so good peer is pruned away so that we can reduce the
|
||||
// // total size of the handler.
|
||||
// _, err = scorer.Count(secondPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
|
||||
// Last peer has been removed.
|
||||
_, err = scorer.Count(thirdPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
}
|
||||
// // Last peer has been removed.
|
||||
// _, err = scorer.Count(thirdPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
// }
|
||||
|
||||
func TestPeerIPTracker(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
defer resetCfg()
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerIPTracker(t *testing.T) {
|
||||
// resetCfg := features.InitWithReset(&features.Flags{
|
||||
// EnablePeerScorer: false,
|
||||
// })
|
||||
// defer resetCfg()
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
var badPeers []peer.ID
|
||||
for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
}
|
||||
for _, pr := range badPeers {
|
||||
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
}
|
||||
// badIP := "211.227.218.116"
|
||||
// var badPeers []peer.ID
|
||||
// for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
// port := strconv.Itoa(3000 + i)
|
||||
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
// }
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
// }
|
||||
|
||||
// Add in bad peers, so that our records are trimmed out
|
||||
// from the peer store.
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// Peer added to peer handler.
|
||||
pid := addPeer(t, p, peers.Disconnected)
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
p.Prune()
|
||||
// // Add in bad peers, so that our records are trimmed out
|
||||
// // from the peer store.
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// // Peer added to peer handler.
|
||||
// pid := addPeer(t, p, peers.PeerDisconnected)
|
||||
// p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
@@ -616,7 +618,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 1
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid1, &pb.Status{
|
||||
p.SetChainState(pid1, &pb.StatusV2{
|
||||
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 3,
|
||||
FinalizedRoot: mockroot3[:],
|
||||
@@ -624,7 +626,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 2
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid2, &pb.Status{
|
||||
p.SetChainState(pid2, &pb.StatusV2{
|
||||
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 4,
|
||||
FinalizedRoot: mockroot4[:],
|
||||
@@ -632,7 +634,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 3
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid3, &pb.Status{
|
||||
p.SetChainState(pid3, &pb.StatusV2{
|
||||
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 5,
|
||||
FinalizedRoot: mockroot5[:],
|
||||
@@ -640,7 +642,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 4
|
||||
pid4 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid4, &pb.Status{
|
||||
p.SetChainState(pid4, &pb.StatusV2{
|
||||
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 2,
|
||||
FinalizedRoot: mockroot2[:],
|
||||
@@ -648,7 +650,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 5
|
||||
pid5 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid5, &pb.Status{
|
||||
p.SetChainState(pid5, &pb.StatusV2{
|
||||
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 2,
|
||||
FinalizedRoot: mockroot2[:],
|
||||
@@ -1012,7 +1014,7 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
},
|
||||
})
|
||||
for _, peerConfig := range tt.peers {
|
||||
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
|
||||
p.SetChainState(addPeer(t, p, peers.Connected), &pb.StatusV2{
|
||||
FinalizedEpoch: peerConfig.finalizedEpoch,
|
||||
HeadSlot: peerConfig.headSlot,
|
||||
})
|
||||
@@ -1039,7 +1041,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
|
||||
for i := 0; i <= maxPeers+100; i++ {
|
||||
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.Status{
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
|
||||
FinalizedEpoch: 10,
|
||||
})
|
||||
}
|
||||
@@ -1062,7 +1064,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
|
||||
for i, headSlot := range peerSlots {
|
||||
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.Status{
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
|
||||
HeadSlot: headSlot,
|
||||
})
|
||||
}
|
||||
@@ -1085,17 +1087,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
|
||||
})
|
||||
// Peer 1
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid1, &pb.Status{
|
||||
p.SetChainState(pid1, &pb.StatusV2{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
|
||||
})
|
||||
// Peer 2
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid2, &pb.Status{
|
||||
p.SetChainState(pid2, &pb.StatusV2{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
// Peer 3
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid3, &pb.Status{
|
||||
p.SetChainState(pid3, &pb.StatusV2{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
|
||||
})
|
||||
|
||||
|
||||
@@ -108,6 +108,8 @@ const (
|
||||
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCStatusTopicV2 defines the v2 topic for the status rpc method.
|
||||
RPCStatusTopicV2 = protocolPrefix + StatusMessageName + SchemaVersionV2
|
||||
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
|
||||
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
|
||||
// RPCBlocksByRootTopicV2 defines the v2 topic for the blocks by root rpc method.
|
||||
@@ -130,6 +132,7 @@ var (
|
||||
RPCTopicMappings = map[string]interface{}{
|
||||
// RPC Status Message
|
||||
RPCStatusTopicV1: new(pb.Status),
|
||||
RPCStatusTopicV2: new(pb.StatusV2),
|
||||
|
||||
// RPC Goodbye Message
|
||||
RPCGoodByeTopicV1: new(primitives.SSZUint64),
|
||||
@@ -166,7 +169,7 @@ var (
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
|
||||
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
@@ -201,6 +204,7 @@ var (
|
||||
|
||||
// Maps all the RPC messages which are to be updated in fulu.
|
||||
fuluMapping = map[string]string{
|
||||
StatusMessageName: SchemaVersionV2,
|
||||
MetadataMessageName: SchemaVersionV3,
|
||||
}
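fuluMapping only records which RPC messages move to a new schema version at the Fulu fork; resolving the versioned topic happens in TopicFromMessage, as exercised in the test below. A simplified, self-contained sketch of that kind of fork-gated lookup, with made-up fork epoch, message names, and version suffixes rather than Prysm's real constants:

package main

import "fmt"

// Hypothetical fork epoch and message names for illustration only.
const (
	fuluForkEpoch   = uint64(100)
	statusMessage   = "/eth2/beacon_chain/req/status"
	metadataMessage = "/eth2/beacon_chain/req/metadata"
)

// Base versions before any fork override, plus overrides mirroring the idea of fuluMapping.
var (
	baseVersions = map[string]string{statusMessage: "/1", metadataMessage: "/2"}
	fuluVersions = map[string]string{statusMessage: "/2", metadataMessage: "/3"}
)

// topicForEpoch picks the schema version for a message at a given epoch,
// preferring the fulu override once the fork epoch is reached.
func topicForEpoch(message string, epoch uint64) string {
	if epoch >= fuluForkEpoch {
		if v, ok := fuluVersions[message]; ok {
			return message + v
		}
	}
	return message + baseVersions[message]
}

func main() {
	fmt.Println(topicForEpoch(statusMessage, 99))  // .../status/1
	fmt.Println(topicForEpoch(statusMessage, 100)) // .../status/2
}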
|
||||
|
||||
|
||||
@@ -141,6 +141,11 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)
|
||||
|
||||
// Modified in fulu fork.
|
||||
topic, err = TopicFromMessage(StatusMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/status/2", topic)
|
||||
|
||||
// Modified both in altair and fulu fork.
|
||||
topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
prysmnetwork "github.com/OffchainLabs/prysm/v6/network"
|
||||
@@ -62,33 +63,42 @@ var (
|
||||
)
|
||||
|
||||
// Service for managing peer to peer (p2p) networking.
|
||||
type Service struct {
|
||||
started bool
|
||||
isPreGenesis bool
|
||||
pingMethod func(ctx context.Context, id peer.ID) error
|
||||
pingMethodLock sync.RWMutex
|
||||
cancel context.CancelFunc
|
||||
cfg *Config
|
||||
peers *peers.Status
|
||||
addrFilter *multiaddr.Filters
|
||||
ipLimiter *leakybucket.Collector
|
||||
privKey *ecdsa.PrivateKey
|
||||
metaData metadata.Metadata
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
joinedTopicsLock sync.RWMutex
|
||||
subnetsLock map[uint64]*sync.RWMutex
|
||||
subnetsLockLock sync.Mutex // Lock access to subnetsLock
|
||||
initializationLock sync.Mutex
|
||||
dv5Listener ListenerRebooter
|
||||
startupErr error
|
||||
ctx context.Context
|
||||
host host.Host
|
||||
genesisTime time.Time
|
||||
genesisValidatorsRoot []byte
|
||||
activeValidatorCount uint64
|
||||
peerDisconnectionTime *cache.Cache
|
||||
}
|
||||
type (
|
||||
Service struct {
|
||||
started bool
|
||||
isPreGenesis bool
|
||||
pingMethod func(ctx context.Context, id peer.ID) error
|
||||
pingMethodLock sync.RWMutex
|
||||
cancel context.CancelFunc
|
||||
cfg *Config
|
||||
peers *peers.Status
|
||||
addrFilter *multiaddr.Filters
|
||||
ipLimiter *leakybucket.Collector
|
||||
privKey *ecdsa.PrivateKey
|
||||
metaData metadata.Metadata
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
joinedTopicsLock sync.RWMutex
|
||||
subnetsLock map[uint64]*sync.RWMutex
|
||||
subnetsLockLock sync.Mutex // Lock access to subnetsLock
|
||||
initializationLock sync.Mutex
|
||||
dv5Listener ListenerRebooter
|
||||
startupErr error
|
||||
ctx context.Context
|
||||
host host.Host
|
||||
genesisTime time.Time
|
||||
genesisValidatorsRoot []byte
|
||||
activeValidatorCount uint64
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfo *custodyInfo
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
}
|
||||
|
||||
custodyInfo struct {
|
||||
earliestAvailableSlot primitives.Slot
|
||||
groupCount uint64
|
||||
}
|
||||
)
|
||||
|
||||
// NewService initializes a new p2p service compatible with shared.Service interface. No
|
||||
// connections are made until the Start function is called during the service registry startup.
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -114,6 +112,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
s, err := NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.custodyInfo = &custodyInfo{}
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
s.Start()
|
||||
@@ -211,6 +210,7 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: gvr[:],
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
@@ -252,6 +252,7 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: gvr[:],
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
@@ -281,6 +282,7 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
|
||||
s, err = NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.custodyInfo = &custodyInfo{}
|
||||
|
||||
go s.Start()
|
||||
|
||||
@@ -383,48 +385,49 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
|
||||
return fd
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
info peer.AddrInfo
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "bad peer",
|
||||
peers: func() *peers.Status {
|
||||
ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
for i := 0; i < 10; i++ {
|
||||
ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
}
|
||||
return ps
|
||||
}(),
|
||||
info: peer.AddrInfo{ID: "bad"},
|
||||
wantErr: "bad peer",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _, _ := createHost(t, 34567)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
ctx := t.Context()
|
||||
s := &Service{
|
||||
host: h,
|
||||
peers: tt.peers,
|
||||
}
|
||||
err := s.connectWithPeer(ctx, tt.info)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// TODO: Uncomment when out of devnet.
|
||||
// func TestService_connectWithPeer(t *testing.T) {
|
||||
// params.SetupTestConfigCleanup(t)
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// peers *peers.Status
|
||||
// info peer.AddrInfo
|
||||
// wantErr string
|
||||
// }{
|
||||
// {
|
||||
// name: "bad peer",
|
||||
// peers: func() *peers.Status {
|
||||
// ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// for i := 0; i < 10; i++ {
|
||||
// ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
// }
|
||||
// return ps
|
||||
// }(),
|
||||
// info: peer.AddrInfo{ID: "bad"},
|
||||
// wantErr: "bad peer",
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// h, _, _ := createHost(t, 34567)
|
||||
// defer func() {
|
||||
// if err := h.Close(); err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// }()
|
||||
// ctx := t.Context()
|
||||
// s := &Service{
|
||||
// host: h,
|
||||
// peers: tt.peers,
|
||||
// }
|
||||
// err := s.connectWithPeer(ctx, tt.info)
|
||||
// if len(tt.wantErr) > 0 {
|
||||
// require.ErrorContains(t, tt.wantErr, err)
|
||||
// } else {
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -390,11 +390,23 @@ func (s *Service) updateSubnetRecordWithMetadata(bitV bitfield.Bitvector64) {
|
||||
// with a new value for a bitfield of subnets tracked. It also records
|
||||
// the sync committee subnet in the enr. It also updates the node's
|
||||
// metadata by increasing the sequence number and the subnets tracked by the node.
|
||||
func (s *Service) updateSubnetRecordWithMetadataV2(bitVAtt bitfield.Bitvector64, bitVSync bitfield.Bitvector4) {
|
||||
func (s *Service) updateSubnetRecordWithMetadataV2(
|
||||
bitVAtt bitfield.Bitvector64,
|
||||
bitVSync bitfield.Bitvector4,
|
||||
custodyGroupCount uint64,
|
||||
) {
|
||||
entry := enr.WithEntry(attSubnetEnrKey, &bitVAtt)
|
||||
subEntry := enr.WithEntry(syncCommsSubnetEnrKey, &bitVSync)
|
||||
s.dv5Listener.LocalNode().Set(entry)
|
||||
s.dv5Listener.LocalNode().Set(subEntry)
|
||||
|
||||
localNode := s.dv5Listener.LocalNode()
|
||||
localNode.Set(entry)
|
||||
localNode.Set(subEntry)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCountEntry := enr.WithEntry(custodyGroupCountEnrKey, custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
|
||||
s.metaData = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
|
||||
SeqNumber: s.metaData.SequenceNumber() + 1,
|
||||
Attnets: bitVAtt,
|
||||
@@ -421,10 +433,8 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
|
||||
localNode.Set(syncSubnetsEntry)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
|
||||
newSeqNumber := s.metaData.SequenceNumber() + 1
|
||||
|
||||
s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
|
||||
SeqNumber: newSeqNumber,
|
||||
SeqNumber: s.metaData.SequenceNumber() + 1,
|
||||
Attnets: bitVAtt,
|
||||
Syncnets: bitVSync,
|
||||
CustodyGroupCount: custodyGroupCount,
|
||||
|
||||
@@ -74,6 +74,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
bootNodeForkDigest, err := bootNodeService.currentForkDigest()
|
||||
@@ -108,6 +109,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
|
||||
service.genesisTime = genesisTime
|
||||
service.genesisValidatorsRoot = genesisValidatorsRoot
|
||||
service.custodyInfo = &custodyInfo{}
|
||||
|
||||
nodeForkDigest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
@@ -157,6 +159,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
|
||||
service.genesisTime = genesisTime
|
||||
service.genesisValidatorsRoot = genesisValidatorsRoot
|
||||
service.custodyInfo = &custodyInfo{}
|
||||
|
||||
service.Start()
|
||||
defer func() {
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//testing:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
@@ -24,6 +25,7 @@ go_library(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
@@ -196,6 +197,22 @@ func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
|
||||
return true, 0
|
||||
}
|
||||
|
||||
// EarliestAvailableSlot -- fake.
|
||||
func (*FakeP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount -- fake.
|
||||
func (*FakeP2P) CustodyGroupCount() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfo -- fake.
|
||||
func (s *FakeP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCountFromPeer -- fake.
|
||||
func (*FakeP2P) CustodyGroupCountFromPeer(peer.ID) uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
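
The FakeP2P methods above stub out the custody-related surface with zero values so sync tests can run without real networking. A sketch of the same pattern against a hypothetical, trimmed-down interface (the interface shape below is an assumption, not Prysm's actual P2P interface):

```go
package main

import "fmt"

// CustodyInfoProvider is a hypothetical interface used only for this sketch.
type CustodyInfoProvider interface {
	EarliestAvailableSlot() (uint64, error)
	CustodyGroupCount() (uint64, error)
}

// fakeProvider mirrors the FakeP2P approach: every method returns a zero
// value, which is enough for callers that only need the interface satisfied.
type fakeProvider struct{}

func (fakeProvider) EarliestAvailableSlot() (uint64, error) { return 0, nil }
func (fakeProvider) CustodyGroupCount() (uint64, error)     { return 0, nil }

func main() {
	var p CustodyInfoProvider = fakeProvider{}
	slot, _ := p.EarliestAvailableSlot()
	count, _ := p.CustodyGroupCount()
	fmt.Println(slot, count) // 0 0
}
```
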
@@ -65,7 +65,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
}
|
||||
m.peers.Add(createENR(), id0, ma0, network.DirInbound)
|
||||
m.peers.SetConnectionState(id0, peers.Connected)
|
||||
m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
|
||||
m.peers.SetChainState(id0, &pb.StatusV2{FinalizedEpoch: 10})
|
||||
id1, err := peer.Decode(MockRawPeerId1)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Cannot decode")
|
||||
@@ -76,7 +76,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
}
|
||||
m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
|
||||
m.peers.SetConnectionState(id1, peers.Connected)
|
||||
m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
|
||||
m.peers.SetChainState(id1, &pb.StatusV2{FinalizedEpoch: 11})
|
||||
}
|
||||
return m.peers
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
@@ -48,16 +50,19 @@ const (
|
||||
|
||||
// TestP2P represents a p2p implementation that can be used for testing.
|
||||
type TestP2P struct {
|
||||
t *testing.T
|
||||
BHost host.Host
|
||||
EnodeID enode.ID
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
BroadcastCalled atomic.Bool
|
||||
DelaySend bool
|
||||
Digest [4]byte
|
||||
peers *peers.Status
|
||||
LocalMetadata metadata.Metadata
|
||||
t *testing.T
|
||||
BHost host.Host
|
||||
EnodeID enode.ID
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
BroadcastCalled atomic.Bool
|
||||
DelaySend bool
|
||||
Digest [4]byte
|
||||
peers *peers.Status
|
||||
LocalMetadata metadata.Metadata
|
||||
custodyInfoMut sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
@@ -461,6 +466,34 @@ func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.Disc
|
||||
return true, 0
|
||||
}
|
||||
|
||||
// EarliestAvailableSlot .
|
||||
func (s *TestP2P) EarliestAvailableSlot() (primitives.Slot, error) {
|
||||
s.custodyInfoMut.RLock()
|
||||
defer s.custodyInfoMut.RUnlock()
|
||||
|
||||
return s.earliestAvailableSlot, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCount .
|
||||
func (s *TestP2P) CustodyGroupCount() (uint64, error) {
|
||||
s.custodyInfoMut.RLock()
|
||||
defer s.custodyInfoMut.RUnlock()
|
||||
|
||||
return s.custodyGroupCount, nil
|
||||
}
|
||||
|
||||
// UpdateCustodyInfo .
|
||||
func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error) {
|
||||
s.custodyInfoMut.Lock()
|
||||
defer s.custodyInfoMut.Unlock()
|
||||
|
||||
s.earliestAvailableSlot = earliestAvailableSlot
|
||||
s.custodyGroupCount = custodyGroupCount
|
||||
|
||||
return s.earliestAvailableSlot, s.custodyGroupCount, nil
|
||||
}
|
||||
|
||||
// CustodyGroupCountFromPeer .
|
||||
func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
|
||||
// By default, we assume the peer custodies the minimum number of groups.
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
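
TestP2P guards its custody fields with a sync.RWMutex: the read accessors take RLock, UpdateCustodyInfo takes the full write lock. A standalone sketch of that holder pattern (field and method names here are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// custodyInfo is an illustrative holder mirroring the guarded TestP2P fields.
type custodyInfo struct {
	mu                    sync.RWMutex
	earliestAvailableSlot uint64
	custodyGroupCount     uint64
}

// Get takes a read lock so concurrent readers do not block each other.
func (c *custodyInfo) Get() (uint64, uint64) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.earliestAvailableSlot, c.custodyGroupCount
}

// Update takes the write lock so both fields change atomically.
func (c *custodyInfo) Update(slot, count uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.earliestAvailableSlot = slot
	c.custodyGroupCount = count
}

func main() {
	c := &custodyInfo{}
	c.Update(1024, 8)
	slot, count := c.Get()
	fmt.Println(slot, count) // 1024 8
}
```
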
@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s BlobSidecarsByRootReq) Len() int {
|
||||
return len(s)
|
||||
func (s *BlobSidecarsByRootReq) Len() int {
|
||||
return len(*s)
|
||||
}
|
||||
|
||||
// ====================================
|
||||
|
||||
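
The hunk above moves BlobSidecarsByRootReq.Len to a pointer receiver so the whole sort.Interface is implemented on the pointer type. A small self-contained sketch of that shape on a stand-in slice type (the element type is simplified to a raw byte-slice root):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// byRootReq is a stand-in for a by-root request sorted by block root.
type byRootReq [][]byte

func (s *byRootReq) Len() int           { return len(*s) }
func (s *byRootReq) Swap(i, j int)      { (*s)[i], (*s)[j] = (*s)[j], (*s)[i] }
func (s *byRootReq) Less(i, j int) bool { return bytes.Compare((*s)[i], (*s)[j]) < 0 }

func main() {
	req := byRootReq{{0x03}, {0x01}, {0x02}}
	// With pointer receivers, the address of the slice is what satisfies
	// sort.Interface.
	sort.Sort(&req)
	fmt.Println(req) // [[1] [2] [3]]
}
```
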
@@ -1103,9 +1103,9 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(msgsInPool))
|
||||
assert.Equal(t, primitives.Slot(1), msgsInPool[0].Slot)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(msgsInPool[0].BlockRoot))
|
||||
assert.Equal(t, "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c", hexutil.Encode(msgsInPool[0].BlockRoot))
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), msgsInPool[0].ValidatorIndex)
|
||||
assert.Equal(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505", hexutil.Encode(msgsInPool[0].Signature))
|
||||
assert.Equal(t, "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c", hexutil.Encode(msgsInPool[0].Signature))
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -2497,23 +2497,23 @@ var (
|
||||
singleSyncCommitteeMsg = `[
|
||||
{
|
||||
"slot": "1",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"beacon_block_root": "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c",
|
||||
"validator_index": "1",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
"signature": "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c"
|
||||
}
|
||||
]`
|
||||
multipleSyncCommitteeMsg = `[
|
||||
{
|
||||
"slot": "1",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"beacon_block_root": "0xbacd20f09da907734434f052bd4c9503aa16bab1960e89ea20610d08d064481c",
|
||||
"validator_index": "1",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
"signature": "0xb591bd4ca7d745b6e027879645d7c014fecb8c58631af070f7607acc0c1c948a5102a33267f0e4ba41a85b254b07df91185274375b2e6436e37e81d2fd46cb3751f5a6c86efb7499c1796c0c17e122a54ac067bb0f5ff41f3241659cceb0c21c"
|
||||
},
|
||||
{
|
||||
"slot": "2",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"beacon_block_root": "0x2757f6fd8590925cd000a86a3e543f98a93eae23781783a33e34504729a8ad0c",
|
||||
"validator_index": "1",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
"signature": "0x99dfe11b6c8b306d2c72eb891926d37922d226ea8e1e7484d6c30fab746494f192b0daa3e40c13f1e335b35238f3362c113455a329b1fab0bc500bc47f643786f49e151d5b5052afb51af57ba5aa34a6051dc90ee4de83a26eb54a895061d89a"
|
||||
}
|
||||
]`
|
||||
// signature is invalid
|
||||
@@ -2523,6 +2523,18 @@ var (
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"validator_index": "1",
|
||||
"signature": "foo"
|
||||
},
|
||||
{
|
||||
"slot": "1121",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"validator_index": "1",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
},
|
||||
{
|
||||
"slot": "1121",
|
||||
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"validator_index": "2",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
]`
|
||||
// signatures are invalid
|
||||
|
||||
@@ -33,9 +33,11 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -11,9 +11,11 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2ptesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -53,7 +55,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(db)
|
||||
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
@@ -97,7 +99,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
lcStore := lightclient.NewLightClientStore(db)
|
||||
lcStore := lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed))
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
@@ -141,7 +143,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
|
||||
t.Run("no bootstrap found", func(t *testing.T) {
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t)),
|
||||
LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode([]byte{0x00, 0x01, 0x02}))
|
||||
@@ -184,7 +186,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
|
||||
updatePeriod := startPeriod
|
||||
@@ -325,7 +327,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 2)
|
||||
@@ -445,7 +447,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
@@ -492,7 +494,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
@@ -536,7 +538,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
t.Run("start period before altair", func(t *testing.T) {
|
||||
db := dbtesting.SetupDB(t)
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
startPeriod := 0
|
||||
url := fmt.Sprintf("http://foo.com/?count=128&start_period=%d", startPeriod)
|
||||
@@ -559,7 +561,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
t.Run("missing update in the middle", func(t *testing.T) {
|
||||
db := dbtesting.SetupDB(t)
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
@@ -603,7 +605,7 @@ func TestLightClientHandler_GetLightClientByRange(t *testing.T) {
|
||||
t.Run("missing update at the beginning", func(t *testing.T) {
|
||||
db := dbtesting.SetupDB(t)
|
||||
s := &Server{
|
||||
LCStore: lightclient.NewLightClientStore(db),
|
||||
LCStore: lightclient.NewLightClientStore(db, &p2ptesting.FakeP2P{}, new(event.Feed)),
|
||||
}
|
||||
|
||||
updates := make([]interfaces.LightClientUpdate, 3)
|
||||
@@ -663,8 +665,8 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: &lightclient.Store{}}
|
||||
s.LCStore.SetLastFinalityUpdate(update)
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
s.LCStore.SetLastFinalityUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -688,8 +690,8 @@ func TestLightClientHandler_GetLightClientFinalityUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: &lightclient.Store{}}
|
||||
s.LCStore.SetLastFinalityUpdate(update)
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
s.LCStore.SetLastFinalityUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
@@ -727,7 +729,7 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
t.Run("no update", func(t *testing.T) {
|
||||
s := &Server{LCStore: &lightclient.Store{}}
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -743,8 +745,8 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: &lightclient.Store{}}
|
||||
s.LCStore.SetLastOptimisticUpdate(update)
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
s.LCStore.SetLastOptimisticUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -767,8 +769,8 @@ func TestLightClientHandler_GetLightClientOptimisticUpdate(t *testing.T) {
|
||||
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{LCStore: &lightclient.Store{}}
|
||||
s.LCStore.SetLastOptimisticUpdate(update)
|
||||
s := &Server{LCStore: lightclient.NewLightClientStore(dbtesting.SetupDB(t), &p2ptesting.FakeP2P{}, new(event.Feed))}
|
||||
s.LCStore.SetLastOptimisticUpdate(update, false)
|
||||
|
||||
request := httptest.NewRequest("GET", "http://foo.com", nil)
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
|
||||
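
The light-client store is now constructed with a p2p handle and an event feed, presumably so new finality and optimistic updates can be announced to subscribers. A minimal pub/sub sketch using the go-ethereum event.Feed that Prysm's async/event package is modeled on; the payload type is illustrative:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

// updateEvent is an illustrative payload, e.g. a finality or optimistic update.
type updateEvent struct {
	Kind string
}

func main() {
	feed := new(event.Feed)

	ch := make(chan updateEvent, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// Send delivers the value to every subscribed channel and returns the
	// number of subscribers that received it.
	n := feed.Send(updateEvent{Kind: "finality"})
	fmt.Println("delivered to", n, "subscriber(s):", (<-ch).Kind)
}
```
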
@@ -109,6 +109,8 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
peerInfo.MetadataV0 = metadata.MetadataObjV0()
|
||||
case metadata.MetadataObjV1() != nil:
|
||||
peerInfo.MetadataV1 = metadata.MetadataObjV1()
|
||||
case metadata.MetadataObjV2() != nil:
|
||||
peerInfo.MetadataV2 = metadata.MetadataObjV2()
|
||||
}
|
||||
}
|
||||
addresses := peerStore.Addrs(pid)
|
||||
@@ -127,7 +129,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
if err != nil {
|
||||
// In the event chain state is non existent, we
|
||||
// initialize with the zero value.
|
||||
pStatus = new(ethpb.Status)
|
||||
pStatus = new(ethpb.StatusV2)
|
||||
}
|
||||
lastUpdated, err := peers.ChainStateLastUpdated(pid)
|
||||
if err != nil {
|
||||
@@ -150,6 +152,16 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
BehaviourPenalty: float32(bPenalty),
|
||||
ValidationError: errorToString(peers.Scorers().ValidationError(pid)),
|
||||
}
|
||||
|
||||
// Convert statusV2 into status
|
||||
peerStatus := ðpb.Status{
|
||||
ForkDigest: pStatus.ForkDigest,
|
||||
FinalizedRoot: pStatus.FinalizedRoot,
|
||||
FinalizedEpoch: pStatus.FinalizedEpoch,
|
||||
HeadRoot: pStatus.HeadRoot,
|
||||
HeadSlot: pStatus.HeadSlot,
|
||||
}
|
||||
|
||||
return ðpb.DebugPeerResponse{
|
||||
ListeningAddresses: stringAddrs,
|
||||
Direction: pbDirection,
|
||||
@@ -157,7 +169,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
PeerId: pid.String(),
|
||||
Enr: enr,
|
||||
PeerInfo: peerInfo,
|
||||
PeerStatus: pStatus,
|
||||
PeerStatus: peerStatus,
|
||||
LastUpdated: unixTime,
|
||||
ScoreInfo: scoreInfo,
|
||||
}, nil
|
||||
|
||||
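
The debug endpoint above keeps answering with the V1 Status message by copying the shared fields out of the peer's StatusV2. A simplified sketch of that field-subset conversion using local stand-in structs (these are not the real protobuf types; only the five copied fields are modeled):

```go
package main

import "fmt"

// statusV2 and status are trimmed stand-ins for the protobuf messages in
// proto/prysm/v1alpha1; fields that exist only on V2 are simply dropped.
type statusV2 struct {
	ForkDigest     []byte
	FinalizedRoot  []byte
	FinalizedEpoch uint64
	HeadRoot       []byte
	HeadSlot       uint64
}

type status struct {
	ForkDigest     []byte
	FinalizedRoot  []byte
	FinalizedEpoch uint64
	HeadRoot       []byte
	HeadSlot       uint64
}

// toV1 copies only the fields both versions share.
func toV1(s *statusV2) *status {
	return &status{
		ForkDigest:     s.ForkDigest,
		FinalizedRoot:  s.FinalizedRoot,
		FinalizedEpoch: s.FinalizedEpoch,
		HeadRoot:       s.HeadRoot,
		HeadSlot:       s.HeadSlot,
	}
}

func main() {
	v2 := &statusV2{ForkDigest: []byte{1, 2, 3, 4}, FinalizedEpoch: 10, HeadSlot: 320}
	fmt.Printf("%+v\n", *toV1(v2))
}
```
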
@@ -1,3 +1,5 @@
|
||||
# gazelle:ignore
|
||||
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
@@ -37,6 +39,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -47,6 +50,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -180,7 +184,6 @@ common_deps = [
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
]
|
||||
|
||||
# gazelle:ignore
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
timeout = "moderate",
|
||||
|
||||
@@ -29,12 +29,19 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r1, err := eb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
|
||||
bundle := &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{{1, 2, 3}},
|
||||
Proofs: [][]byte{{4, 5, 6}},
|
||||
Blobs: [][]byte{{7, 8, 9}},
|
||||
}
|
||||
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
|
||||
require.NoError(t, err)
|
||||
r2, err := result.GetFulu().Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r1, r2)
|
||||
require.Equal(t, result.IsBlinded, false)
|
||||
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
|
||||
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
|
||||
})
|
||||
|
||||
// Test for Electra version
|
||||
|
||||
@@ -15,9 +15,12 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -58,28 +61,31 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert slot to time")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
}).Info("Begin building block")
|
||||
|
||||
log := log.WithField("slot", req.Slot)
|
||||
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
|
||||
|
||||
// A syncing validator should not produce a block.
|
||||
if vs.SyncChecker.Syncing() {
|
||||
log.Error("Fail to build block: node is syncing")
|
||||
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
log.WithError(err).Error("Fail to build block: node is optimistic")
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get parent state")
|
||||
return nil, err
|
||||
}
|
||||
sBlk, err := getEmptyBlock(req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get empty block")
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
|
||||
}
|
||||
// Set slot, graffiti, randao reveal, and parent root.
|
||||
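
The GetBeaconBlock hunk replaces repeated WithFields calls with a single slot-scoped logger that is reused and later re-assigned with extra fields. A small sketch of that logrus pattern (the field names are only examples):

```go
package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	slot := uint64(123)
	start := time.Now()

	// Derive one slot-scoped entry and reuse it so every message carries the slot.
	log := logrus.WithField("slot", slot)
	log.WithField("sinceSlotStartTime", time.Since(start)).Info("Begin building block")

	// Later steps can keep enriching the same entry instead of repeating fields.
	log = log.WithFields(logrus.Fields{
		"validator": 42,
	})
	log.Info("Finished building block")
}
```
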
@@ -91,6 +97,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
// Set proposer index.
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not calculate proposer index")
|
||||
return nil, fmt.Errorf("could not calculate proposer index %w", err)
|
||||
}
|
||||
sBlk.SetProposerIndex(idx)
|
||||
@@ -101,7 +108,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
|
||||
log := log.WithFields(logrus.Fields{
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
@@ -274,7 +281,13 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
// TODO: Add tests
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -287,11 +300,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
|
||||
}
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
|
||||
blobSidecars, dataColumnSideCars, err = vs.handleUnblindedBlock(block, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
@@ -302,9 +314,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
slot := block.Block().Slot()
|
||||
epoch := slots.ToEpoch(slot)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -315,8 +329,14 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
if epoch >= params.BeaconConfig().FuluForkEpoch {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, slot); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
@@ -328,46 +348,80 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
}
|
||||
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return nil, nil, errors.New("unconfigured block builder")
|
||||
return nil, nil, nil, errors.New("unconfigured block builder")
|
||||
}
|
||||
|
||||
copiedBlock, err := block.Copy()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, errors.Wrap(err, "block copy")
|
||||
}
|
||||
|
||||
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "submit blinded block failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "submit blinded block")
|
||||
}
|
||||
|
||||
if err := copiedBlock.Unblind(payload); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind")
|
||||
}
|
||||
|
||||
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
if blockEpoch >= params.BeaconConfig().FuluForkEpoch {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, bundle.GetBlobs(), bundle.GetProofs())
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return copiedBlock, nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
}
|
||||
|
||||
return copiedBlock, sidecars, nil
|
||||
return copiedBlock, blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
if blockEpoch >= params.BeaconConfig().FuluForkEpoch {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "protobuf conversion failed")
|
||||
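
ProposeBeaconBlock and handleUnblindedBlock above both branch on the block's epoch: from the Fulu fork onward the proposer constructs and broadcasts data column sidecars, before it blob sidecars. A toy sketch of that gate; the constants are illustrative, not Prysm's config values:

```go
package main

import "fmt"

// Illustrative constants; real values come from params.BeaconConfig().
const (
	slotsPerEpoch uint64 = 32
	fuluForkEpoch uint64 = 512 // assumed fork epoch for the example
)

// sidecarKindForSlot mirrors the epoch gate used when proposing a block.
func sidecarKindForSlot(slot uint64) string {
	epoch := slot / slotsPerEpoch
	if epoch >= fuluForkEpoch {
		return "data column sidecars (PeerDAS)"
	}
	return "blob sidecars"
}

func main() {
	fmt.Println(sidecarKindForSlot(100))                          // blob sidecars
	fmt.Println(sidecarKindForSlot(fuluForkEpoch * slotsPerEpoch)) // data column sidecars (PeerDAS)
}
```
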
@@ -383,7 +437,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
|
||||
}
|
||||
|
||||
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
for i, sc := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
@@ -412,6 +466,69 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
root [fieldparams.RootLength]byte,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, sd := range sidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
sidecar := sd
|
||||
eg.Go(func() error {
|
||||
if sidecar.Index < dataColumnsWithholdCount {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"index": sidecar.Index,
|
||||
}).Warning("Withholding data column")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
if err := vs.P2P.BroadcastDataColumn(root, subnet, sidecar); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return errors.Wrap(err, "wait for data columns to be broadcasted")
|
||||
}
|
||||
|
||||
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
|
||||
return errors.Wrap(err, "receive data column")
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.DataColumnSidecarReceived,
|
||||
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
|
||||
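
broadcastAndReceiveDataColumns maps each column index to a gossip subnet via peerdas.ComputeSubnetForDataColumnSidecar before broadcasting. In the PeerDAS spec this mapping is a modulo over the subnet count; a sketch under that assumption, with an illustrative subnet-count constant:

```go
package main

import "fmt"

// Illustrative value; the real constant comes from the beacon chain config.
const dataColumnSidecarSubnetCount uint64 = 128

// computeSubnetForDataColumnSidecar follows the spec formula: the subnet is
// the column index modulo the subnet count.
func computeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}

func main() {
	for _, idx := range []uint64{0, 1, 127, 128, 200} {
		fmt.Printf("column %d -> subnet %d\n", idx, computeSubnetForDataColumnSidecar(idx))
	}
}
```
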
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/client/builder"
|
||||
@@ -19,7 +18,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -220,16 +218,10 @@ func (vs *Server) getPayloadHeaderFromBuilder(
|
||||
if signedBid == nil || signedBid.IsNil() {
|
||||
return nil, errors.New("builder returned nil bid")
|
||||
}
|
||||
fork, err := forks.Fork(slots.ToEpoch(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get fork information")
|
||||
}
|
||||
forkName, ok := params.BeaconConfig().ForkVersionNames[bytesutil.ToBytes4(fork.CurrentVersion)]
|
||||
if !ok {
|
||||
return nil, errors.New("unable to find current fork in schedule")
|
||||
}
|
||||
if !strings.EqualFold(version.String(signedBid.Version()), forkName) {
|
||||
return nil, fmt.Errorf("builder bid response version: %d is different from head block version: %d for epoch %d", signedBid.Version(), b.Version(), slots.ToEpoch(slot))
|
||||
bidVersion := signedBid.Version()
|
||||
headBlockVersion := b.Version()
|
||||
if !isVersionCompatible(bidVersion, headBlockVersion) {
|
||||
return nil, fmt.Errorf("builder bid response version: %d is not compatible with head block version: %d for epoch %d", bidVersion, headBlockVersion, slots.ToEpoch(slot))
|
||||
}
|
||||
|
||||
bid, err := signedBid.Message()
|
||||
@@ -466,3 +458,19 @@ func expectedGasLimit(parentGasLimit, proposerGasLimit uint64) uint64 {
|
||||
}
|
||||
return proposerGasLimit
|
||||
}
|
||||
|
||||
// isVersionCompatible checks if a builder bid version is compatible with the head block version.
|
||||
func isVersionCompatible(bidVersion, headBlockVersion int) bool {
|
||||
// Exact version match is always compatible
|
||||
if bidVersion == headBlockVersion {
|
||||
return true
|
||||
}
|
||||
|
||||
// Allow Electra bids for Fulu blocks - they have compatible payload formats
|
||||
if bidVersion == version.Electra && headBlockVersion == version.Fulu {
|
||||
return true
|
||||
}
|
||||
|
||||
// For all other cases, require exact version match
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
|
||||
v1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -156,7 +157,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: beaconDB},
|
||||
}
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
|
||||
vs.ForkchoiceFetcher = chain
|
||||
@@ -973,7 +974,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
return wb
|
||||
}(),
|
||||
},
|
||||
err: "is different from head block version",
|
||||
err: "builder bid response version: 3 is not compatible with head block version: 2 for epoch 1",
|
||||
},
|
||||
{
|
||||
name: "different bid version during hard fork",
|
||||
@@ -982,7 +983,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
},
|
||||
fetcher: &blockchainTest.ChainService{
|
||||
Block: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
|
||||
require.NoError(t, err)
|
||||
wb.SetSlot(primitives.Slot(fakeCapellaEpoch) * params.BeaconConfig().SlotsPerEpoch)
|
||||
return wb
|
||||
@@ -1005,6 +1006,86 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
},
|
||||
err: "incorrect header gas limit 30000000 != 31000000",
|
||||
},
|
||||
{
|
||||
name: "electra bid with fulu head block - compatible",
|
||||
mock: func() *builderTest.MockBuilderService {
|
||||
// Create Electra bid
|
||||
requests := &v1.ExecutionRequests{
|
||||
Deposits: []*v1.DepositRequest{
|
||||
{
|
||||
Pubkey: bytesutil.PadTo([]byte{byte('a')}, fieldparams.BLSPubkeyLength),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte{byte('b')}, fieldparams.RootLength),
|
||||
Amount: params.BeaconConfig().MinActivationBalance,
|
||||
Signature: bytesutil.PadTo([]byte{byte('c')}, fieldparams.BLSSignatureLength),
|
||||
Index: 0,
|
||||
},
|
||||
},
|
||||
Withdrawals: []*v1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: bytesutil.PadTo([]byte{byte('d')}, common.AddressLength),
|
||||
ValidatorPubkey: bytesutil.PadTo([]byte{byte('e')}, fieldparams.BLSPubkeyLength),
|
||||
Amount: params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
},
|
||||
Consolidations: []*v1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: bytesutil.PadTo([]byte{byte('f')}, common.AddressLength),
|
||||
SourcePubkey: bytesutil.PadTo([]byte{byte('g')}, fieldparams.BLSPubkeyLength),
|
||||
TargetPubkey: bytesutil.PadTo([]byte{byte('h')}, fieldparams.BLSPubkeyLength),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
electraBid := ðpb.BuilderBidElectra{
|
||||
Header: &v1.ExecutionPayloadHeaderDeneb{
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength),
|
||||
ParentHash: params.BeaconConfig().ZeroHash[:],
|
||||
Timestamp: uint64(ti.Unix()),
|
||||
BlockNumber: 2,
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
BlobGasUsed: 123,
|
||||
ExcessBlobGas: 456,
|
||||
GasLimit: gasLimit,
|
||||
},
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
|
||||
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte{2}, fieldparams.BLSPubkeyLength)},
|
||||
ExecutionRequests: requests,
|
||||
}
|
||||
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(electraBid, domain)
|
||||
require.NoError(t, err)
|
||||
|
||||
sBidElectra := ðpb.SignedBuilderBidElectra{
|
||||
Message: electraBid,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
|
||||
|
||||
return &builderTest.MockBuilderService{
|
||||
BidElectra: sBidElectra,
|
||||
}
|
||||
}(),
|
||||
fetcher: &blockchainTest.ChainService{
|
||||
Block: func() interfaces.ReadOnlySignedBeaconBlock {
|
||||
// Create Fulu head block
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockFulu())
|
||||
require.NoError(t, err)
|
||||
wb.SetSlot(primitives.Slot(params.BeaconConfig().BellatrixForkEpoch) * params.BeaconConfig().SlotsPerEpoch)
|
||||
return wb
|
||||
}(),
|
||||
},
|
||||
// Should succeed because Electra bids are compatible with Fulu head blocks
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
@@ -1222,3 +1303,107 @@ func Test_expectedGasLimit(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsVersionCompatible(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
bidVersion int
|
||||
headBlockVersion int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Exact version match - Bellatrix",
|
||||
bidVersion: version.Bellatrix,
|
||||
headBlockVersion: version.Bellatrix,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Exact version match - Capella",
|
||||
bidVersion: version.Capella,
|
||||
headBlockVersion: version.Capella,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Exact version match - Deneb",
|
||||
bidVersion: version.Deneb,
|
||||
headBlockVersion: version.Deneb,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Exact version match - Electra",
|
||||
bidVersion: version.Electra,
|
||||
headBlockVersion: version.Electra,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Exact version match - Fulu",
|
||||
bidVersion: version.Fulu,
|
||||
headBlockVersion: version.Fulu,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Electra bid with Fulu head block - Compatible",
|
||||
bidVersion: version.Electra,
|
||||
headBlockVersion: version.Fulu,
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "Fulu bid with Electra head block - Not compatible",
|
||||
bidVersion: version.Fulu,
|
||||
headBlockVersion: version.Electra,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Deneb bid with Electra head block - Not compatible",
|
||||
bidVersion: version.Deneb,
|
||||
headBlockVersion: version.Electra,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Electra bid with Deneb head block - Not compatible",
|
||||
bidVersion: version.Electra,
|
||||
headBlockVersion: version.Deneb,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Capella bid with Deneb head block - Not compatible",
|
||||
bidVersion: version.Capella,
|
||||
headBlockVersion: version.Deneb,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Bellatrix bid with Capella head block - Not compatible",
|
||||
bidVersion: version.Bellatrix,
|
||||
headBlockVersion: version.Capella,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Phase0 bid with Altair head block - Not compatible",
|
||||
bidVersion: version.Phase0,
|
||||
headBlockVersion: version.Altair,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Deneb bid with Fulu head block - Not compatible",
|
||||
bidVersion: version.Deneb,
|
||||
headBlockVersion: version.Fulu,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "Capella bid with Fulu head block - Not compatible",
|
||||
bidVersion: version.Capella,
|
||||
headBlockVersion: version.Fulu,
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := isVersionCompatible(tt.bidVersion, tt.headBlockVersion)
|
||||
if got != tt.want {
|
||||
t.Errorf("isVersionCompatible(%d, %d) = %v, want %v", tt.bidVersion, tt.headBlockVersion, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,6 +67,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestUnblinder_UnblindBlobSidecars_InvalidBundle(t *testing.T) {
|
||||
func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
|
||||
// Test that the function accepts BlobsBundler interface
|
||||
// This test focuses on the interface change rather than full integration
|
||||
|
||||
|
||||
t.Run("Interface compatibility with BlobsBundle", func(t *testing.T) {
|
||||
// Create a simple pre-Deneb block that will return nil (no processing needed)
|
||||
wBlock, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{
|
||||
@@ -87,7 +87,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
|
||||
t.Run("Function signature accepts BlobsBundler interface", func(t *testing.T) {
|
||||
// This test verifies that the function signature has been updated to accept BlobsBundler
|
||||
// We test this by verifying the code compiles with both types
|
||||
|
||||
|
||||
// Create a simple pre-Deneb block for the interface test
|
||||
wBlock, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{
|
||||
Block: ðpb.BeaconBlockCapella{
|
||||
@@ -106,7 +106,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
|
||||
_, err = unblindBlobsSidecars(wBlock, regularBundle)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify function accepts BlobsBundleV2 through the interface
|
||||
// Verify function accepts BlobsBundleV2 through the interface
|
||||
var bundleV2 enginev1.BlobsBundler = &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{make([]byte, 48)},
|
||||
Proofs: [][]byte{make([]byte, 48)},
|
||||
|
||||
@@ -89,6 +89,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -120,6 +121,7 @@ type Config struct {
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
@@ -196,6 +198,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
DataColumnStorage: s.cfg.DataColumnStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
@@ -236,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -259,7 +259,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
defer span.End()
|
||||
|
||||
if s.isFinalizedRoot(blockRoot) {
|
||||
finalizedState := s.finalizedState()
|
||||
finalizedState := s.FinalizedState()
|
||||
if finalizedState != nil {
|
||||
return finalizedState, nil
|
||||
}
|
||||
@@ -297,7 +297,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
|
||||
// Does the state exist in finalized info cache.
|
||||
if s.isFinalizedRoot(parentRoot) {
|
||||
return s.finalizedState(), nil
|
||||
return s.FinalizedState(), nil
|
||||
}
|
||||
|
||||
// Does the state exist in epoch boundary cache.
|
||||
|
||||
@@ -196,7 +196,7 @@ func (s *State) isFinalizedRoot(r [32]byte) bool {
|
||||
}
|
||||
|
||||
// Returns the cached and copied finalized state.
|
||||
func (s *State) finalizedState() state.BeaconState {
|
||||
func (s *State) FinalizedState() state.BeaconState {
|
||||
s.finalizedInfo.lock.RLock()
|
||||
defer s.finalizedInfo.lock.RUnlock()
|
||||
return s.finalizedInfo.state.Copy()
|
||||
|
||||
@@ -32,5 +32,5 @@ func TestResume(t *testing.T) {
|
||||
require.DeepSSZEqual(t, beaconState.ToProtoUnsafe(), resumeState.ToProtoUnsafe())
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, service.finalizedInfo.slot, "Did not get wanted slot")
|
||||
assert.Equal(t, service.finalizedInfo.root, root, "Did not get wanted root")
|
||||
assert.NotNil(t, service.finalizedState(), "Wanted a non nil finalized state")
|
||||
assert.NotNil(t, service.FinalizedState(), "Wanted a non nil finalized state")
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@ go_library(
|
||||
"block_batcher.go",
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"custody.go",
|
||||
"data_columns.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
@@ -136,6 +138,7 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -159,14 +162,16 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"batch_verifier_test.go",
|
||||
"blobs_test.go",
|
||||
"block_batcher_test.go",
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"custody_test.go",
|
||||
"data_columns_reconstruct_test.go",
|
||||
"data_columns_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
@@ -191,6 +196,7 @@ go_test(
|
||||
"slot_aware_cache_test.go",
|
||||
"subscriber_beacon_aggregate_proof_test.go",
|
||||
"subscriber_beacon_blocks_test.go",
|
||||
"subscriber_data_column_sidecar_trigger_test.go",
|
||||
"subscriber_test.go",
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_fuzz_test.go",
|
||||
@@ -212,6 +218,7 @@ go_test(
|
||||
shard_count = 4,
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
@@ -259,6 +266,7 @@ go_test(
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/ecdsa:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/equality:go_default_library",
|
||||
@@ -273,13 +281,17 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
|
||||
@@ -32,7 +32,7 @@ func (w *p2pWorker) run(ctx context.Context) {
|
||||
case b := <-w.todo:
|
||||
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
|
||||
if b.state == batchBlobSync {
|
||||
w.done <- w.handleBlobs(ctx, b)
|
||||
w.done <- w.handleSidecars(ctx, b)
|
||||
} else {
|
||||
w.done <- w.handleBlocks(ctx, b)
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
|
||||
return b.withResults(vb, bs)
|
||||
}
|
||||
|
||||
func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
|
||||
func (w *p2pWorker) handleSidecars(ctx context.Context, b batch) batch {
|
||||
b.blobPid = b.busy
|
||||
start := time.Now()
|
||||
// we don't need to use the response for anything other than metrics, because blobResponseValidation
|
||||
|
||||
@@ -180,7 +180,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
chain, clock := defaultMockChain(t)
|
||||
chain, clock := defaultMockChain(t, 0)
|
||||
if c.chain == nil {
|
||||
c.chain = chain
|
||||
}
|
||||
@@ -278,7 +278,7 @@ func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
func defaultMockChain(t *testing.T, currentSlot uint64) (*mock.ChainService, *startup.Clock) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
df, err := forks.Fork(de)
|
||||
require.NoError(t, err)
|
||||
@@ -289,8 +289,14 @@ func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
require.NoError(t, err)
|
||||
now := time.Now()
|
||||
genOffset := types.Slot(params.BeaconConfig().SecondsPerSlot) * cs
|
||||
genesis := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
clock := startup.NewClock(genesis, [32]byte{})
|
||||
genesisTime := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
|
||||
clock := startup.NewClock(genesisTime, [32]byte{}, startup.WithNower(
|
||||
func() time.Time {
|
||||
return genesisTime.Add(time.Duration(currentSlot*params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
},
|
||||
))
|
||||
|
||||
chain := &mock.ChainService{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Epoch: fe},
|
||||
Fork: df,
|
||||
|
||||
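The new `defaultMockChain` signature above pins the test clock to a fixed slot via `startup.WithNower`. A minimal, self-contained sketch of that pattern follows (hypothetical values; `secondsPerSlot` is hard-coded to the mainnet 12 rather than read from the config):

package main

import (
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
)

func main() {
	const secondsPerSlot = 12 // assumed mainnet value, hard-coded for this sketch
	genesis := time.Now().Add(-32 * secondsPerSlot * time.Second)

	// Pin "now" to exactly 32 slots after genesis, mirroring the test helper above.
	clock := startup.NewClock(genesis, [32]byte{}, startup.WithNower(func() time.Time {
		return genesis.Add(32 * secondsPerSlot * time.Second)
	}))

	fmt.Println(clock.CurrentSlot()) // expected to print 32
}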
@@ -78,9 +78,10 @@ func (bb *blockRangeBatcher) next(ctx context.Context, stream libp2pcore.Stream)
|
||||
if !more {
|
||||
return blockBatch{}, false
|
||||
}
|
||||
if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
}
|
||||
// TODO: Uncomment once out of devnet.
|
||||
// if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
// return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
// }
|
||||
|
||||
// Wait for the ticker before doing anything expensive, unless this is the first batch.
|
||||
if bb.ticker != nil && bb.current != nil {
|
||||
|
||||
beacon-chain/sync/custody.go (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var nilFinalizedStateError = errors.New("finalized state is nil")
|
||||
|
||||
func (s *Service) maintainCustodyInfo() {
|
||||
const interval = 1 * time.Minute
|
||||
|
||||
async.RunEvery(s.ctx, interval, func() {
|
||||
if err := s.updateCustodyInfoIfNeeded(); err != nil {
|
||||
log.WithError(err).Error("Failed to update custody info")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) updateCustodyInfoIfNeeded() error {
|
||||
const minimumPeerCount = 1
|
||||
|
||||
// Get our actual custody group count.
|
||||
actualCustodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "p2p custody group count")
|
||||
}
|
||||
|
||||
// Get our target custody group count.
|
||||
targetCustodyGroupCount, err := s.custodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
// If the actual custody group count already meets or exceeds the target, skip the update.
|
||||
if actualCustodyGroupCount >= targetCustodyGroupCount {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check that all subscribed data column sidecar topics have at least `minimumPeerCount` peers.
|
||||
topics := s.cfg.p2p.PubSub().GetTopics()
|
||||
enoughPeers := true
|
||||
for _, topic := range topics {
|
||||
if !strings.Contains(topic, p2p.GossipDataColumnSidecarMessage) {
|
||||
continue
|
||||
}
|
||||
|
||||
if peers := s.cfg.p2p.PubSub().ListPeers(topic); len(peers) < minimumPeerCount {
|
||||
// If a topic has fewer than the minimum required peers, log a warning.
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"peerCount": len(peers),
|
||||
"minimumPeerCount": minimumPeerCount,
|
||||
}).Debug("Insufficient peers for data column sidecar topic to maintain custody count")
|
||||
enoughPeers = false
|
||||
}
|
||||
}
|
||||
|
||||
if !enoughPeers {
|
||||
return nil
|
||||
}
|
||||
|
||||
headROBlock, err := s.cfg.chain.HeadBlock(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "head block")
|
||||
}
|
||||
headSlot := headROBlock.Block().Slot()
|
||||
|
||||
storedEarliestSlot, storedGroupCount, err := s.cfg.p2p.UpdateCustodyInfo(headSlot, targetCustodyGroupCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "p2p update custody info")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.beaconDB.UpdateCustodyInfo(s.ctx, storedEarliestSlot, storedGroupCount); err != nil {
|
||||
return errors.Wrap(err, "beacon db update custody info")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// custodyGroupCount computes the custody group count based on the custody requirement,
|
||||
// the validators custody requirement, and whether the node is subscribed to all data subnets.
|
||||
func (s *Service) custodyGroupCount() (uint64, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
|
||||
if flags.Get().SubscribeAllDataSubnets {
|
||||
return beaconConfig.NumberOfCustodyGroups, nil
|
||||
}
|
||||
|
||||
validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "validators custody requirement")
|
||||
}
|
||||
|
||||
return max(beaconConfig.CustodyRequirement, validatorsCustodyRequirement), nil
|
||||
}
|
||||
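A minimal sketch of the selection rule above, with hypothetical inputs (the real function reads these values from the beacon config, the --subscribe-all-data-subnets flag, and the tracked-validators cache):

package main

import "fmt"

// custodyGroupCountSketch mirrors custodyGroupCount above with explicit inputs instead of config lookups.
func custodyGroupCountSketch(subscribeAll bool, numberOfCustodyGroups, custodyRequirement, validatorsRequirement uint64) uint64 {
	if subscribeAll {
		// Subscribing to all data subnets implies custodying every group.
		return numberOfCustodyGroups
	}
	return max(custodyRequirement, validatorsRequirement)
}

func main() {
	fmt.Println(custodyGroupCountSketch(false, 128, 4, 9)) // 9: the validators requirement dominates
	fmt.Println(custodyGroupCountSketch(true, 128, 4, 9))  // 128: all custody groups
}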
|
||||
// validatorsCustodyRequirement computes the custody requirement based on the
|
||||
// finalized state and the tracked validators.
|
||||
func (s *Service) validatorsCustodyRequirement() (uint64, error) {
|
||||
// Get the indices of the tracked validators.
|
||||
indices := s.trackedValidatorsCache.Indices()
|
||||
|
||||
// Return early if no validators are tracked.
|
||||
if len(indices) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Retrieve the finalized state.
|
||||
finalizedState := s.cfg.stateGen.FinalizedState()
|
||||
if finalizedState == nil || finalizedState.IsNil() {
|
||||
return 0, nilFinalizedStateError
|
||||
}
|
||||
|
||||
// Compute the validators custody requirements.
|
||||
result, err := peerdas.ValidatorsCustodyRequirement(finalizedState, indices)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "validators custody requirements")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
beacon-chain/sync/custody_test.go (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
type testSetup struct {
|
||||
service *Service
|
||||
p2pService *p2ptest.TestP2P
|
||||
beaconDB db.Database
|
||||
ctx context.Context
|
||||
initialSlot primitives.Slot
|
||||
initialCount uint64
|
||||
}
|
||||
|
||||
func setupCustodyTest(t *testing.T, withChain bool) *testSetup {
|
||||
ctx := t.Context()
|
||||
p2pService := p2ptest.NewTestP2P(t)
|
||||
beaconDB := dbtesting.SetupDB(t)
|
||||
|
||||
const (
|
||||
initialEarliestSlot = primitives.Slot(50)
|
||||
initialCustodyCount = uint64(5)
|
||||
)
|
||||
|
||||
_, _, err := p2pService.UpdateCustodyInfo(initialEarliestSlot, initialCustodyCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
dbEarliestAvailableSlot, dbCustodyCount, err := beaconDB.UpdateCustodyInfo(ctx, initialEarliestSlot, initialCustodyCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, initialEarliestSlot, dbEarliestAvailableSlot)
|
||||
require.Equal(t, initialCustodyCount, dbCustodyCount)
|
||||
|
||||
cfg := &config{
|
||||
p2p: p2pService,
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
|
||||
if withChain {
|
||||
const headSlot = primitives.Slot(100)
|
||||
block, err := blocks.NewSignedBeaconBlock(ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Body: ð.BeaconBlockBody{},
|
||||
Slot: headSlot,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg.chain = &mock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
ValidAttestation: true,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
},
|
||||
Block: block,
|
||||
}
|
||||
}
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
}
|
||||
|
||||
return &testSetup{
|
||||
service: service,
|
||||
p2pService: p2pService,
|
||||
beaconDB: beaconDB,
|
||||
ctx: ctx,
|
||||
initialSlot: initialEarliestSlot,
|
||||
initialCount: initialCustodyCount,
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *testSetup) assertCustodyInfo(t *testing.T, expectedSlot primitives.Slot, expectedCount uint64) {
|
||||
p2pEarliestSlot, err := ts.p2pService.EarliestAvailableSlot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedSlot, p2pEarliestSlot)
|
||||
|
||||
p2pCustodyCount, err := ts.p2pService.CustodyGroupCount()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCount, p2pCustodyCount)
|
||||
|
||||
dbEarliestSlot, dbCustodyCount, err := ts.beaconDB.UpdateCustodyInfo(ts.ctx, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedSlot, dbEarliestSlot)
|
||||
require.Equal(t, expectedCount, dbCustodyCount)
|
||||
}
|
||||
|
||||
func withSubscribeAllDataSubnets(t *testing.T, fn func()) {
|
||||
originalFlag := flags.Get().SubscribeAllDataSubnets
|
||||
defer func() {
|
||||
flags.Get().SubscribeAllDataSubnets = originalFlag
|
||||
}()
|
||||
flags.Get().SubscribeAllDataSubnets = true
|
||||
fn()
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfoIfNeeded(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
beaconConfig := params.BeaconConfig()
|
||||
beaconConfig.NumberOfCustodyGroups = 128
|
||||
beaconConfig.CustodyRequirement = 4
|
||||
beaconConfig.SamplesPerSlot = 8
|
||||
params.OverrideBeaconConfig(beaconConfig)
|
||||
|
||||
t.Run("Skip update when actual custody count >= target", func(t *testing.T) {
|
||||
setup := setupCustodyTest(t, false)
|
||||
|
||||
err := setup.service.updateCustodyInfoIfNeeded()
|
||||
require.NoError(t, err)
|
||||
|
||||
setup.assertCustodyInfo(t, setup.initialSlot, setup.initialCount)
|
||||
})
|
||||
|
||||
t.Run("not enough peers in some subnets", func(t *testing.T) {
|
||||
const randomTopic = "aTotalRandomTopicName"
|
||||
require.Equal(t, false, strings.Contains(randomTopic, p2p.GossipDataColumnSidecarMessage))
|
||||
|
||||
withSubscribeAllDataSubnets(t, func() {
|
||||
setup := setupCustodyTest(t, false)
|
||||
|
||||
_, err := setup.service.cfg.p2p.SubscribeToTopic(p2p.GossipDataColumnSidecarMessage)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = setup.service.cfg.p2p.SubscribeToTopic(randomTopic)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = setup.service.updateCustodyInfoIfNeeded()
|
||||
require.NoError(t, err)
|
||||
|
||||
setup.assertCustodyInfo(t, setup.initialSlot, setup.initialCount)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("should update", func(t *testing.T) {
|
||||
withSubscribeAllDataSubnets(t, func() {
|
||||
setup := setupCustodyTest(t, true)
|
||||
|
||||
err := setup.service.updateCustodyInfoIfNeeded()
|
||||
require.NoError(t, err)
|
||||
|
||||
const expectedSlot = primitives.Slot(100)
|
||||
setup.assertCustodyInfo(t, expectedSlot, beaconConfig.NumberOfCustodyGroups)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestCustodyGroupCount(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.NumberOfCustodyGroups = 10
|
||||
config.CustodyRequirement = 3
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
t.Run("SubscribeAllDataSubnets enabled returns NumberOfCustodyGroups", func(t *testing.T) {
|
||||
withSubscribeAllDataSubnets(t, func() {
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
}
|
||||
|
||||
result, err := service.custodyGroupCount()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, config.NumberOfCustodyGroups, result)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("No tracked validators returns CustodyRequirement", func(t *testing.T) {
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
}
|
||||
|
||||
result, err := service.custodyGroupCount()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, config.CustodyRequirement, result)
|
||||
})
|
||||
}
|
||||
beacon-chain/sync/data_columns.go (new file, 924 lines)
@@ -0,0 +1,924 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RequestDataColumnSidecarsByRoot is an opinionated, high level function which, for each data column in `dataColumnsToFetch`:
|
||||
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
|
||||
// - Request the data column sidecars from the selected peers.
|
||||
// - In case some peers are unable to provide all the requested data columns, retry with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars in `dataColumnsToFetch` are retrieved, or
|
||||
// - returns an error if all peers in `peers` are exhausted and at least one data column sidecar is still missing.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after peer exhaustion,
|
||||
//
|
||||
// but `peers` custody more than 64 columns, then try to fetch enough columns to reconstruct needed ones.
|
||||
func RequestDataColumnSidecarsByRoot(
|
||||
ctx context.Context,
|
||||
dataColumnsToFetch []uint64,
|
||||
block blocks.ROBlock,
|
||||
peers []core.PeerID,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
ctxMap ContextByteVersions,
|
||||
newColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(dataColumnsToFetch) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Assemble the peers who can provide the needed data columns.
|
||||
dataColumnsByAdmissiblePeer, _, _, err := AdmissiblePeersForDataColumns(peers, dataColumnsToFetch, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get admissible peers for data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnsToFetch))
|
||||
remainingMissingColumns := make(map[uint64]bool, len(dataColumnsToFetch))
|
||||
for _, column := range dataColumnsToFetch {
|
||||
remainingMissingColumns[column] = true
|
||||
}
|
||||
|
||||
blockRoot := block.Root()
|
||||
|
||||
for len(dataColumnsByAdmissiblePeer) > 0 {
|
||||
peersToFetchFrom, err := SelectPeersToFetchDataColumnsFrom(sliceFromMap(remainingMissingColumns, true /*sorted*/), dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
// Request the data columns from each peer.
|
||||
successfulColumns := make(map[uint64]bool, len(remainingMissingColumns))
|
||||
for peer, peerRequestedColumns := range peersToFetchFrom {
|
||||
log := log.WithFields(logrus.Fields{"peer": peer.String(), "blockRoot": fmt.Sprintf("%#x", blockRoot)})
|
||||
|
||||
// Build the requests for the data columns.
|
||||
byRootRequest := ð.DataColumnsByRootIdentifier{BlockRoot: blockRoot[:], Columns: peerRequestedColumns}
|
||||
|
||||
// Send the requests to the peer.
|
||||
peerSidecars, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p2p, peer, ctxMap, types.DataColumnsByRootIdentifiers{byRootRequest})
|
||||
if err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": peer.String(),
|
||||
"blockRoot": fmt.Sprintf("%#x", block.Root()),
|
||||
}).WithError(err).Debug("Failed to request data columns from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if returned data columns align with the block.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, peerSidecars); err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Align with block failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the received sidecars.
|
||||
verifier := newColumnsVerifier(peerSidecars, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Valid verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar inclusion proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar KZG proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
verifiedPeerSidecars, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "verified data columns")
|
||||
}
|
||||
|
||||
// Mark columns as successful
|
||||
for _, sidecar := range verifiedPeerSidecars {
|
||||
successfulColumns[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Check if all requested columns were successfully returned.
|
||||
peerMissingColumns := make(map[uint64]bool)
|
||||
for _, index := range peerRequestedColumns {
|
||||
if !successfulColumns[index] {
|
||||
peerMissingColumns[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(peerMissingColumns) > 0 {
|
||||
// Remove this peer if some requested columns were not correctly returned.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithField("missingColumns", sliceFromMap(peerMissingColumns, true /*sorted*/)).Debug("Peer did not provide all requested data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars = append(verifiedSidecars, verifiedPeerSidecars...)
|
||||
}
|
||||
|
||||
// Update remaining columns for the next retry.
|
||||
for col := range successfulColumns {
|
||||
delete(remainingMissingColumns, col)
|
||||
}
|
||||
|
||||
if len(remainingMissingColumns) > 0 {
|
||||
// Some columns are still missing, retry with the remaining peers.
|
||||
continue
|
||||
}
|
||||
|
||||
return verifiedSidecars, nil
|
||||
}
|
||||
|
||||
// If we still have remaining columns after all retries, return error
|
||||
return nil, errors.Errorf("failed to retrieve all requested data columns after retries for block root=%#x, missing columns=%v", blockRoot, sliceFromMap(remainingMissingColumns, true /*sorted*/))
|
||||
}
|
||||
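A hypothetical caller-side sketch (not part of this PR) of how the function above might be driven; all arguments are assumed to be prepared by higher-level sync code:

package sync

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/libp2p/go-libp2p/core"
)

// fetchColumnsForBlockSketch delegates to RequestDataColumnSidecarsByRoot and logs the outcome.
func fetchColumnsForBlockSketch(
	ctx context.Context,
	block blocks.ROBlock,
	missingColumns []uint64,
	connectedPeers []core.PeerID,
	clock *startup.Clock,
	p2pService p2p.P2P,
	ctxMap ContextByteVersions,
	newVerifier verification.NewDataColumnsVerifier,
) []blocks.VerifiedRODataColumn {
	// The helper retries with other admissible peers until every requested column is retrieved
	// or the peer set is exhausted.
	verified, err := RequestDataColumnSidecarsByRoot(ctx, missingColumns, block, connectedPeers, clock, p2pService, ctxMap, newVerifier)
	if err != nil {
		log.WithError(err).Debug("Could not retrieve all requested data column sidecars")
		return nil
	}
	return verified
}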
|
||||
// RequestMissingDataColumnsByRange is an opinionated, high level function which, for each block in `blks`:
|
||||
// - Computes all data column sidecars we should store and which are missing (according to our node ID and `groupCount`),
|
||||
// - Builds an optimized set of data column sidecars by range requests in order to never request a data column that is already stored in the DB,
|
||||
// and in order to minimize the number of total requests, while not exceeding `batchSize` sidecars per request.
|
||||
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
|
||||
// - Request the data column sidecars from the selected peers.
|
||||
// - In case some peers are unable to provide all the requested data columns, retry with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars for `blks` are retrieved, or
|
||||
// - returns an error if no progress at all is made after 5 consecutive trials.
|
||||
// (If at least one additional data column sidecar is retrieved between two trials, the counter is reset.)
|
||||
//
|
||||
// In case of success, initially missing data columns grouped by block root are returned.
|
||||
// This function expects blocks to be sorted by slot.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after all allowed retries,
|
||||
//
|
||||
// but `peers` custody more than 64 columns, then try to fetch enough columns to reconstruct needed ones.
|
||||
func RequestMissingDataColumnsByRange(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
groupCount uint64,
|
||||
dataColumnsStorage filesystem.DataColumnStorageSummarizer,
|
||||
blks []blocks.ROBlock,
|
||||
batchSize int,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.RODataColumn, error) {
|
||||
const maxAllowedStall = 5 // Number of trials before giving up.
|
||||
|
||||
if len(blks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the current slot.
|
||||
currentSlot := clock.CurrentSlot()
|
||||
|
||||
// Compute the minimum slot for which we should serve data columns.
|
||||
minimumSlot, err := dataColumnsRPCMinValidSlot(currentSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns RPC min valid slot")
|
||||
}
|
||||
|
||||
// Get blocks by root and compute all missing columns by root.
|
||||
blockByRoot := make(map[[fieldparams.RootLength]byte]blocks.ROBlock, len(blks))
|
||||
missingColumnsByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(blks))
|
||||
for _, blk := range blks {
|
||||
// Extract the block root and the block slot
|
||||
blockRoot, blockSlot := blk.Root(), blk.Block().Slot()
|
||||
|
||||
// Populate the block by root.
|
||||
blockByRoot[blockRoot] = blk
|
||||
|
||||
// Skip blocks that are not in the retention period.
|
||||
if blockSlot < minimumSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
missingColumns, err := MissingDataColumns(blk, p2p.NodeID(), groupCount, dataColumnsStorage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
for _, column := range missingColumns {
|
||||
if _, ok := missingColumnsByRoot[blockRoot]; !ok {
|
||||
missingColumnsByRoot[blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
missingColumnsByRoot[blockRoot][column] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if there are no missing data columns.
|
||||
if len(missingColumnsByRoot) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the number of missing data columns.
|
||||
previousMissingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
|
||||
// Count the number of retries for the same amount of missing data columns.
|
||||
stallCount := 0
|
||||
|
||||
// Add log fields.
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"initialMissingColumnsCount": previousMissingDataColumnsCount,
|
||||
"blockCount": len(blks),
|
||||
"firstSlot": blks[0].Block().Slot(),
|
||||
"lastSlot": blks[len(blks)-1].Block().Slot(),
|
||||
})
|
||||
|
||||
// Log the start of the process.
|
||||
start := time.Now()
|
||||
log.Debug("Requesting data column sidecars - start")
|
||||
|
||||
alignedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn, len(blks))
|
||||
for len(missingColumnsByRoot) > 0 {
|
||||
// Build requests.
|
||||
requests, err := buildDataColumnByRangeRequests(blks, missingColumnsByRoot, batchSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "build data column by range requests")
|
||||
}
|
||||
|
||||
// Requests data column sidecars from peers.
|
||||
retrievedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn)
|
||||
for _, request := range requests {
|
||||
roDataColumns, err := fetchDataColumnsFromPeers(ctx, clock, p2p, rateLimiter, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetch data columns from peers")
|
||||
}
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
root := roDataColumn.BlockRoot()
|
||||
if _, ok := blockByRoot[root]; !ok {
|
||||
// It may happen if the peer which sent the data columns is on a different fork.
|
||||
continue
|
||||
}
|
||||
|
||||
retrievedDataColumnsByRoot[root] = append(retrievedDataColumnsByRoot[root], roDataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
for root, dataColumns := range retrievedDataColumnsByRoot {
|
||||
// Retrieve the block from the root.
|
||||
block, ok := blockByRoot[root]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found - this should never happen")
|
||||
}
|
||||
|
||||
// Check if the data columns align with blocks.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, dataColumns); err != nil {
|
||||
log.WithField("root", root).WithError(err).Debug("Data columns do not align with block")
|
||||
continue
|
||||
}
|
||||
|
||||
alignedDataColumnsByRoot[root] = append(alignedDataColumnsByRoot[root], dataColumns...)
|
||||
|
||||
// Remove aligned data columns from the missing columns.
|
||||
for _, dataColumn := range dataColumns {
|
||||
delete(missingColumnsByRoot[root], dataColumn.Index)
|
||||
if len(missingColumnsByRoot[root]) == 0 {
|
||||
delete(missingColumnsByRoot, root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
missingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
if missingDataColumnsCount == previousMissingDataColumnsCount {
|
||||
stallCount++
|
||||
} else {
|
||||
stallCount = 0
|
||||
}
|
||||
|
||||
previousMissingDataColumnsCount = missingDataColumnsCount
|
||||
|
||||
if missingDataColumnsCount > 0 {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
"maxAllowedStall": maxAllowedStall,
|
||||
})
|
||||
|
||||
if stallCount >= maxAllowedStall {
|
||||
// It is very likely `blks` contains orphaned blocks, for which no peer has the data columns.
|
||||
// We give up and let the state machine handle the situation.
|
||||
const message = "Requesting data column sidecars - no progress, giving up"
|
||||
log.Warning(message)
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
}).Debug("Requesting data column sidecars - continue")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("duration", time.Since(start)).Debug("Requesting data column sidecars - success")
|
||||
return alignedDataColumnsByRoot, nil
|
||||
}
|
||||
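The give-up rule described above (reset the counter on progress, abort after 5 stalled rounds) can be sketched in isolation with a plain counter; the values below are hypothetical round-by-round missing-column counts, not output of the real fetcher:

package main

import (
	"errors"
	"fmt"
)

// stallGuardSketch mirrors the stall/give-up rule with explicit per-round missing counts.
func stallGuardSketch(missingPerRound []int) error {
	const maxAllowedStall = 5
	stallCount, previous := 0, -1
	for _, missing := range missingPerRound {
		if missing == previous {
			stallCount++
		} else {
			stallCount = 0
		}
		previous = missing
		if missing == 0 {
			return nil // all columns retrieved
		}
		if stallCount >= maxAllowedStall {
			return errors.New("no progress, giving up")
		}
	}
	return errors.New("ran out of rounds")
}

func main() {
	fmt.Println(stallGuardSketch([]int{8, 8, 4, 4, 4, 4, 4, 4})) // gives up after repeated stalls
	fmt.Println(stallGuardSketch([]int{8, 4, 0}))                // nil: all columns retrieved
}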
|
||||
// MissingDataColumns looks at the data columns we should store for a given block regarding `custodyGroupCount`,
|
||||
// and returns the indices of the missing ones.
|
||||
func MissingDataColumns(block blocks.ROBlock, nodeID enode.ID, custodyGroupCount uint64, dataColumnStorage filesystem.DataColumnStorageSummarizer) ([]uint64, error) {
|
||||
// Blocks before Fulu have no data columns.
|
||||
if block.Version() < version.Fulu {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the blob commitments from the block.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Nothing to build if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the expected columns.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
expectedColumns := peerInfo.CustodyColumns
|
||||
|
||||
// Get the stored columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
summary := dataColumnStorage.Summary(block.Root())
|
||||
|
||||
storedColumns := make(map[uint64]bool, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
if summary.HasIndex(i) {
|
||||
storedColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the missing columns.
|
||||
missingColumns := make([]uint64, 0, len(expectedColumns))
|
||||
for column := range expectedColumns {
|
||||
if !storedColumns[column] {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
return missingColumns, nil
|
||||
}
|
||||
|
||||
// SelectPeersToFetchDataColumnsFrom implements greedy algorithm in order to select peers to fetch data columns from.
|
||||
// https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm
|
||||
func SelectPeersToFetchDataColumnsFrom(neededDataColumns []uint64, dataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID][]uint64, error) {
|
||||
// Copy the provided needed data columns into a set that we will remove elements from.
|
||||
remainingDataColumns := make(map[uint64]bool, len(neededDataColumns))
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
remainingDataColumns[dataColumn] = true
|
||||
}
|
||||
|
||||
dataColumnsFromSelectedPeers := make(map[peer.ID][]uint64)
|
||||
|
||||
// Filter `dataColumnsByPeer` to only contain needed data columns.
|
||||
neededDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(dataColumnsByPeer))
|
||||
for pid, dataColumns := range dataColumnsByPeer {
|
||||
for dataColumn := range dataColumns {
|
||||
if remainingDataColumns[dataColumn] {
|
||||
if _, ok := neededDataColumnsByPeer[pid]; !ok {
|
||||
neededDataColumnsByPeer[pid] = make(map[uint64]bool, len(neededDataColumns))
|
||||
}
|
||||
|
||||
neededDataColumnsByPeer[pid][dataColumn] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
|
||||
for len(remainingDataColumns) > 0 {
|
||||
// Check if at least one peer remains. If not, it means that we don't have enough peers to fetch all needed data columns.
|
||||
if len(neededDataColumnsByPeer) == 0 {
|
||||
missingDataColumnsSortedSlice := sliceFromMap(remainingDataColumns, true /*sorted*/)
|
||||
return dataColumnsFromSelectedPeers, errors.Errorf("no peer to fetch the following data columns: %v", missingDataColumnsSortedSlice)
|
||||
}
|
||||
|
||||
// Select the peer that custodies the most needed data columns (greedy selection).
|
||||
var bestPeer peer.ID
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
if len(dataColumns) > len(neededDataColumnsByPeer[bestPeer]) {
|
||||
bestPeer = peer
|
||||
}
|
||||
}
|
||||
|
||||
dataColumnsSortedSlice := sliceFromMap(neededDataColumnsByPeer[bestPeer], true /*sorted*/)
|
||||
if uint64(len(dataColumnsSortedSlice)) > maxRequestDataColumnSidecars {
|
||||
dataColumnsSortedSlice = dataColumnsSortedSlice[:maxRequestDataColumnSidecars]
|
||||
}
|
||||
dataColumnsFromSelectedPeers[bestPeer] = dataColumnsSortedSlice
|
||||
|
||||
// Remove the selected peer from the list of peers.
|
||||
delete(neededDataColumnsByPeer, bestPeer)
|
||||
|
||||
// Remove the selected peer's data columns from the list of remaining data columns.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
delete(remainingDataColumns, dataColumn)
|
||||
}
|
||||
|
||||
// Remove the selected peer's data columns from the list of needed data columns by peer.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
delete(dataColumns, dataColumn)
|
||||
|
||||
if len(dataColumns) == 0 {
|
||||
delete(neededDataColumnsByPeer, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsFromSelectedPeers, nil
|
||||
}
|
||||
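A test-style sketch of the greedy selection above (hypothetical peers and columns; in practice the inputs come from AdmissiblePeersForDataColumns):

package sync

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// greedySelectionSketch exercises the greedy set-cover selection: peerA custodies columns {1, 2},
// peerB custodies {3}, and all three columns are needed.
func greedySelectionSketch() {
	needed := []uint64{1, 2, 3}
	byPeer := map[peer.ID]map[uint64]bool{
		"peerA": {1: true, 2: true},
		"peerB": {3: true},
	}

	selected, err := SelectPeersToFetchDataColumnsFrom(needed, byPeer)
	if err != nil {
		fmt.Println("selection failed:", err)
		return
	}

	// Expected: peerA is picked first (it covers the most needed columns), then peerB for column 3,
	// so each needed column is requested exactly once.
	for pid, columns := range selected {
		fmt.Printf("%s -> %v\n", pid, columns)
	}
}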
|
||||
// AdmissiblePeersForDataColumns returns a map of peers that custody at least one data column listed in `neededDataColumns`.
|
||||
//
|
||||
// It returns:
|
||||
// - A map, where the key is the peer and the value is the set of data columns the peer custodies.
|
||||
// - A map, where the key is the data column and the value is the list of peers that custody it.
|
||||
// - A slice of descriptions for non admissible peers.
|
||||
// - An error if any.
|
||||
//
|
||||
// NOTE: distributeSamplesToPeer from the DataColumnSampler implements similar logic,
|
||||
// but with only one column queried in each request.
|
||||
func AdmissiblePeersForDataColumns(
|
||||
peers []peer.ID,
|
||||
neededDataColumns []uint64,
|
||||
p2p p2p.P2P,
|
||||
) (map[peer.ID]map[uint64]bool, map[uint64][]peer.ID, []string, error) {
|
||||
peerCount := len(peers)
|
||||
neededDataColumnsCount := uint64(len(neededDataColumns))
|
||||
|
||||
// Create description slice for non admissible peers.
|
||||
descriptions := make([]string, 0, peerCount)
|
||||
|
||||
// Compute custody columns for each peer.
|
||||
dataColumnsByPeer, err := custodyColumnsFromPeers(p2p, peers)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "custody columns from peers")
|
||||
}
|
||||
|
||||
// Filter peers which custody at least one needed data column.
|
||||
dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns, dataColumnsByPeer)
|
||||
descriptions = append(descriptions, localDescriptions...)
|
||||
|
||||
// Compute a map from needed data columns to their peers.
|
||||
admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededDataColumnsCount)
|
||||
for peerId, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
if peerDataColumns[dataColumn] {
|
||||
admissiblePeersByDataColumn[dataColumn] = append(admissiblePeersByDataColumn[dataColumn], peerId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, nil
|
||||
}
|
||||
|
||||
// custodyColumnsFromPeers computes all the custody columns indexed by peer.
|
||||
func custodyColumnsFromPeers(p2pIface p2p.P2P, peers []peer.ID) (map[peer.ID]map[uint64]bool, error) {
|
||||
peerCount := len(peers)
|
||||
|
||||
custodyColumnsByPeer := make(map[peer.ID]map[uint64]bool, peerCount)
|
||||
for _, peer := range peers {
|
||||
// Get the node ID from the peer ID.
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "convert peer ID to node ID")
|
||||
}
|
||||
|
||||
// Get the custody group count of the peer.
|
||||
custodyGroupCount := p2pIface.CustodyGroupCountFromPeer(peer)
|
||||
|
||||
// Get peerdas info of the peer.
|
||||
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peerdas info")
|
||||
}
|
||||
|
||||
custodyColumnsByPeer[peer] = dasInfo.CustodyColumns
|
||||
}
|
||||
|
||||
return custodyColumnsByPeer, nil
|
||||
}
|
||||
|
||||
// `filterPeerWhichCustodyAtLeastOneDataColumn` filters peers which custody at least one data column
|
||||
// specified in `neededDataColumns`. It returns also a list of descriptions for non admissible peers.
|
||||
func filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns []uint64, inputDataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID]map[uint64]bool, []string) {
|
||||
// numberOfColumns is used below to decide whether to log the full custody column list or just "all".
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
outputDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(inputDataColumnsByPeer))
|
||||
descriptions := make([]string, 0)
|
||||
|
||||
outerLoop:
|
||||
for peer, peerCustodyDataColumns := range inputDataColumnsByPeer {
|
||||
for _, neededDataColumn := range neededDataColumns {
|
||||
if peerCustodyDataColumns[neededDataColumn] {
|
||||
outputDataColumnsByPeer[peer] = peerCustodyDataColumns
|
||||
|
||||
continue outerLoop
|
||||
}
|
||||
}
|
||||
|
||||
peerCustodyColumnsCount := uint64(len(peerCustodyDataColumns))
|
||||
var peerCustodyColumnsLog interface{} = "all"
|
||||
|
||||
if peerCustodyColumnsCount < numberOfColumns {
|
||||
peerCustodyColumnsLog = sliceFromMap(peerCustodyDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
description := fmt.Sprintf("peer %s: does not custody any needed column, custody columns: %v", peer, peerCustodyColumnsLog)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
|
||||
return outputDataColumnsByPeer, descriptions
|
||||
}
|
||||
|
||||
// buildDataColumnByRangeRequests builds an optimized slice of data column by range requests:
|
||||
// 1. It will never request a data column that is already stored in the DB if there is no "hole" in `roBlocks` other than missed slots.
|
||||
// 2. It will minimize the number of requests.
|
||||
// It expects blocks to be sorted by slot.
|
||||
func buildDataColumnByRangeRequests(roBlocks []blocks.ROBlock, missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool, batchSize int) ([]*eth.DataColumnSidecarsByRangeRequest, error) {
|
||||
batchSizeSlot := primitives.Slot(batchSize)
|
||||
|
||||
// Return early if there are no blocks to process.
|
||||
if len(roBlocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// It's safe to get the first item of the slice since we've already checked that it's not empty.
|
||||
firstROBlock, lastROBlock := roBlocks[0], roBlocks[len(roBlocks)-1]
|
||||
firstBlockSlot, lastBlockSlot := firstROBlock.Block().Slot(), lastROBlock.Block().Slot()
|
||||
firstBlockRoot := firstROBlock.Root()
|
||||
|
||||
previousMissingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[firstBlockRoot]))
|
||||
|
||||
if missing, ok := missingColumnsByRoot[firstBlockRoot]; ok {
|
||||
for key, value := range missing {
|
||||
previousMissingDataColumns[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
previousBlockSlot, previousStartBlockSlot := firstBlockSlot, firstBlockSlot
|
||||
|
||||
result := make([]*eth.DataColumnSidecarsByRangeRequest, 0, 1)
|
||||
for index := 1; index < len(roBlocks); index++ {
|
||||
roBlock := roBlocks[index]
|
||||
|
||||
// Extract the block from the RO-block.
|
||||
block := roBlock.Block()
|
||||
|
||||
// Extract the slot from the block.
|
||||
blockRoot, blockSlot := roBlock.Root(), block.Slot()
|
||||
|
||||
if blockSlot <= previousBlockSlot {
|
||||
return nil, errors.Errorf("blocks are not strictly sorted by slot. Previous block slot: %d, current block slot: %d", previousBlockSlot, blockSlot)
|
||||
}
|
||||
|
||||
// Extract KZG commitments count from the current block body
|
||||
blockKzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the count of KZG commitments.
|
||||
blockKzgCommitmentCount := len(blockKzgCommitments)
|
||||
|
||||
// Skip blocks without commitments.
|
||||
if blockKzgCommitmentCount == 0 {
|
||||
previousBlockSlot = blockSlot
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the missing data columns for the current block.
|
||||
missingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[blockRoot]))
|
||||
for key, value := range missingColumnsByRoot[blockRoot] {
|
||||
missingDataColumns[key] = value
|
||||
}
|
||||
|
||||
// Compute if the missing data columns differ.
|
||||
missingDataColumnsDiffer := uint64MapDiffer(previousMissingDataColumns, missingDataColumns)
|
||||
|
||||
// Compute if the batch size is reached.
|
||||
batchSizeReached := blockSlot-previousStartBlockSlot >= batchSizeSlot
|
||||
|
||||
if missingDataColumnsDiffer || batchSizeReached {
|
||||
// Append the slice to the result.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(blockSlot - previousStartBlockSlot),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, request)
|
||||
|
||||
previousStartBlockSlot, previousMissingDataColumns = blockSlot, missingDataColumns
|
||||
}
|
||||
|
||||
previousBlockSlot = blockSlot
|
||||
}
|
||||
|
||||
lastRequest := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(lastBlockSlot - previousStartBlockSlot + 1),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, lastRequest)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
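As a worked illustration of the batching rules above (hypothetical input, derived from the code rather than asserted by this PR): for blocks at slots 10, 11 and 12, where slots 10 and 11 miss columns {4, 7} and slot 12 misses {9}, the change in missing columns at slot 12 closes the first range and the tail is flushed after the loop, giving roughly:

package sync

import eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

// expectedRequestsSketch lists the by-range requests the scenario above should produce.
var expectedRequestsSketch = []*eth.DataColumnSidecarsByRangeRequest{
	{StartSlot: 10, Count: 2, Columns: []uint64{4, 7}}, // covers slots 10 and 11
	{StartSlot: 12, Count: 1, Columns: []uint64{9}},    // covers slot 12
}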
|
||||
// fetchDataColumnsFromPeers requests data columns by range from relevant peers.
|
||||
func fetchDataColumnsFromPeers(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
ctxMap ContextByteVersions,
|
||||
targetRequest *eth.DataColumnSidecarsByRangeRequest,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
// Filter out requests with no data columns.
|
||||
if len(targetRequest.Columns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get all admissible peers with the data columns they custody.
|
||||
dataColumnsByAdmissiblePeer, err := waitForPeersForDataColumns(p2p, rateLimiter, targetRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "wait for peers for data columns")
|
||||
}
|
||||
|
||||
// Select the peers that will be requested.
|
||||
dataColumnsToFetchByPeer, err := SelectPeersToFetchDataColumnsFrom(targetRequest.Columns, dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
var roDataColumns []blocks.RODataColumn
|
||||
for peer, columnsToFetch := range dataColumnsToFetchByPeer {
|
||||
// Build the request.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: targetRequest.StartSlot,
|
||||
Count: targetRequest.Count,
|
||||
Columns: columnsToFetch,
|
||||
}
|
||||
|
||||
peerRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p2p, peer, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "send data column sidecars by range request")
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, peerRoDataColumns...)
|
||||
}
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// waitForPeersForDataColumns returns a map, where the key of the map is the peer, the value is the custody columns of the peer.
|
||||
// It only uses peers that:
|
||||
// - are synced up to `lastSlot`, and
|
||||
// - have enough bandwidth to serve the requested number of blocks.
|
||||
// It waits until at least one peer per data column is available.
|
||||
func waitForPeersForDataColumns(p2p p2p.P2P, rateLimiter *leakybucket.Collector, request *eth.DataColumnSidecarsByRangeRequest) (map[peer.ID]map[uint64]bool, error) {
|
||||
const delay = 5 * time.Second
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Build nice log fields.
|
||||
lastSlot := request.StartSlot.Add(request.Count).Sub(1)
|
||||
|
||||
var neededDataColumnsLog interface{} = "all"
|
||||
neededDataColumnCount := uint64(len(request.Columns))
|
||||
if neededDataColumnCount < numberOfColumns {
|
||||
neededDataColumnsLog = request.Columns
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"start": request.StartSlot,
|
||||
"targetSlot": lastSlot,
|
||||
"neededDataColumns": neededDataColumnsLog,
|
||||
})
|
||||
|
||||
// Keep only peers with head epoch greater than or equal to the epoch corresponding to the target slot, and
|
||||
// keep only peers with enough bandwidth.
|
||||
filteredPeers, descriptions, err := filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter eers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err := AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers := computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
|
||||
// Wait if no suitable peers are available.
|
||||
for len(dataColumnsWithoutPeers) > 0 {
|
||||
// Build a nice log fields.
|
||||
var dataColumnsWithoutPeersLog interface{} = "all"
|
||||
dataColumnsWithoutPeersCount := uint64(len(dataColumnsWithoutPeers))
|
||||
if dataColumnsWithoutPeersCount < numberOfColumns {
|
||||
dataColumnsWithoutPeersLog = sliceFromMap(dataColumnsWithoutPeers, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithField("columnsWithoutPeer", dataColumnsWithoutPeersLog).Warning("Fetch data columns from peers - no available peers, retrying later")
|
||||
for _, description := range descriptions {
|
||||
log.Debug(description)
|
||||
}
|
||||
|
||||
for pid, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
var peerDataColumnsLog interface{} = "all"
|
||||
peerDataColumnsCount := uint64(len(peerDataColumns))
|
||||
if peerDataColumnsCount < numberOfColumns {
|
||||
peerDataColumnsLog = sliceFromMap(peerDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"peerDataColumns": peerDataColumnsLog,
|
||||
}).Debug("Peer data columns")
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
|
||||
// Filter for peers with head epoch greater than or equal to our target epoch for ByRange requests.
|
||||
filteredPeers, descriptions, err = filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter peers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err = AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers = computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, nil
|
||||
}
|
||||
|
||||
// Filter peers to ensure they are synced to the target slot and have sufficient bandwidth to serve the request.
|
||||
func filterPeersByTargetSlotAndBandwidth(p2p p2p.P2P, rateLimiter *leakybucket.Collector, lastSlot primitives.Slot, blockCount uint64) ([]peer.ID, []string, error) {
|
||||
peers := p2p.Peers().Connected()
|
||||
|
||||
slotPeers, descriptions, err := filterPeersByTargetSlot(p2p, peers, lastSlot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "peers with slot and data columns")
|
||||
}
|
||||
|
||||
// Filter for peers with sufficient bandwidth to serve the request.
|
||||
slotAndBandwidthPeers := hasSufficientBandwidth(rateLimiter, slotPeers, blockCount)
|
||||
|
||||
// Add debugging logs for the filtered peers.
|
||||
peerWithSufficientBandwidthMap := make(map[peer.ID]bool, len(peers))
|
||||
for _, peer := range slotAndBandwidthPeers {
|
||||
peerWithSufficientBandwidthMap[peer] = true
|
||||
}
|
||||
|
||||
for _, peer := range slotPeers {
|
||||
if !peerWithSufficientBandwidthMap[peer] {
|
||||
description := fmt.Sprintf("peer %s: does not have sufficient bandwidth", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
}
|
||||
return slotAndBandwidthPeers, descriptions, nil
|
||||
}
|
||||
|
||||
func hasSufficientBandwidth(rateLimiter *leakybucket.Collector, peers []peer.ID, count uint64) []peer.ID {
|
||||
var filteredPeers []peer.ID
|
||||
|
||||
for _, p := range peers {
|
||||
if uint64(rateLimiter.Remaining(p.String())) < count {
|
||||
continue
|
||||
}
|
||||
copiedP := p
|
||||
filteredPeers = append(filteredPeers, copiedP)
|
||||
}
|
||||
return filteredPeers
|
||||
}
|
||||
|
||||
func computeDataColumnsWithoutPeers(neededColumns []uint64, peersByColumn map[uint64][]peer.ID) map[uint64]bool {
|
||||
result := make(map[uint64]bool)
|
||||
for _, column := range neededColumns {
|
||||
if _, ok := peersByColumn[column]; !ok {
|
||||
result[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// filterPeersByTargetSlot filters out peers whose head epoch is lower than our target epoch for ByRange requests.
|
||||
func filterPeersByTargetSlot(p2p p2p.P2P, peers []peer.ID, targetSlot primitives.Slot) ([]peer.ID, []string, error) {
|
||||
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||
descriptions := make([]string, 0, len(peers))
|
||||
// Compute the target epoch from the target slot.
|
||||
targetEpoch := slots.ToEpoch(targetSlot)
|
||||
|
||||
for _, peer := range peers {
|
||||
peerChainState, err := p2p.Peers().ChainState(peer)
|
||||
if err != nil {
|
||||
description := fmt.Sprintf("peer %s: error: %s", peer, err)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
if peerChainState == nil {
|
||||
description := fmt.Sprintf("peer %s: chain state is nil", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
peerHeadEpoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
if peerHeadEpoch < targetEpoch {
|
||||
description := fmt.Sprintf("peer %s: peer head epoch %d < our target epoch %d", peer, peerHeadEpoch, targetEpoch)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
filteredPeers = append(filteredPeers, peer)
|
||||
}
|
||||
|
||||
return filteredPeers, descriptions, nil
|
||||
}
|
||||
|
||||
// itemsCount returns the total number of missing data columns across all block roots.
|
||||
func itemsCount(missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
count := 0
|
||||
for _, columns := range missingColumnsByRoot {
|
||||
count += len(columns)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// uint64MapDiffer returns true if the two maps differ.
|
||||
func uint64MapDiffer(left, right map[uint64]bool) bool {
|
||||
if len(left) != len(right) {
|
||||
return true
|
||||
}
|
||||
|
||||
for k := range left {
|
||||
if !right[k] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -32,6 +32,7 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
|
||||
root [fieldparams.RootLength]byte,
|
||||
) error {
|
||||
startTime := time.Now()
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Lock to prevent concurrent reconstructions.
|
||||
s.reconstructionLock.Lock()
|
||||
@@ -49,8 +50,13 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
|
||||
|
||||
// Retrieve our local node info.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
@@ -155,10 +161,12 @@ func (s *Service) broadcastMissingDataColumnSidecars(
|
||||
// Get the node ID.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
// Get the custody group count.
|
||||
custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve the local node info.
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peerdas info")
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.