Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: `remove_pro...` → `peerdas-ge...` (291 commits)
Commits (SHA1):

4339822dbe, b40719a19c, 22a6820ede, 00ab5a1051, 41cf0ec59e, 39ca2eaae1, d5c56355f8, 5a3e450067,
e50472ab25, 53d69d407b, 58687620f6, f8716d8f77, 58795d5ce3, d36875332d, 0a9ff2dc8b, cc8a5fc422,
9dcb8be2df, 51e465d690, ddbf9cb404, 9ed1496c8f, 5b04cab118, 0b681f6861, 88b363e3cf, f521948b4d,
a3f919205a, 474f458834, bd17dfefb9, 3f361a79a0, 09b7fa6bc9, 76d974694c, dca7f282e6, 327309b5f7,
8c44999d21, bb2fd617a5, c31c1e2674, 978ffa4780, 407bf6785f, c45230b455, 0af6591001, 1af249da31,
901f6b6e6c, a3cdda56d9, cf3200fa06, 04cafa1959, 498b945a61, 90317ba5b5, b35358f440, b926066495,
263ddf9a7b, c558798fe8, ba1699fdee, adf62a6b45, 9e5b3fb599, eaf4b4f9bf, 0b0b7ff0a9, f1be39f7f1,
3815ff4c28, 76a0759e13, 5cd2d99606, 1a2a0688e1, 6d0524dcf5, 8ec9da81c0, facb70e12c, 3d91b35f4e,
dc70dae9d0, 9e2c04400c, 60058266e8, 291c4ac9b5, 045776ff75, 0a386cbdfd, 4f02e44446, 41600b67e3,
cec236ff7d, 62dac40734, d3763d56cf, 461fa50c34, 7b059560f6, 111e5c462f, 6d4e1d5f7a, 415622ec49,
df65458834, 2005d5c6f2, 7d72fbebe7, 43c111bca2, 685761666d, 41c2f1d802, a75974b5f5, 0725dff5e8,
0d95d3d022, 384270f9a7, 8e9d3f5f4f, d6d542889c, f8e6b9d1a8, 8f25d1e986, 81e9fda34b, ede560bee1,
34a1bf835a, b0bceac9c0, 0ff2d2fa21, 8477a84454, e95d1c54cf, 4af3763013, a520db7276, f8abf0565f,
11a6af9bf9, 6f8a654874, f0c01fdb4b, a015ae6a29, 457aa117f3, d302b494df, b3db1b6b74, 66e4d5e816,
41f109aa5b, cfd4ceb4dd, df211c3384, 89e78d7da3, e76ea84596, f10d6e8e16, 91eb43b595, 90710ec57d,
3dc65f991e, 4d9789401b, f72d59b004, e25497be3e, 8897a26f84, b2a26f2b62, 09659010f8, 589042df20,
312b93e9b1, f86f76e447, c311e652eb, 6a5d78a331, a2fd30497e, a94561f8dc, af875b78c9, 61207bd3ac,
0b6fcd7d17, fe2766e716, 9135d765e1, eca87f29d1, 00821c8f55, 4b9e92bcd7, b01d9005b8, 8d812d5f0e,
24a3cb2a8b, 66d1d3e248, 99933678ea, 34f8e1e92b, a6a41a8755, f110b94fac, 33023aa282, eeb3cdc99e,
1e7147f060, 8936beaff3, c00283f247, a4269cf308, 91f3c8a4d0, 30c7ee9c7b, 456d8b9eb9, 4fe3e6d31a,
01ee1c80b4, c14fe47a81, b9deabbf0a, 5d66a98e78, 2d46d6ffae, 57107e50a7, 47271254f6, f304028874,
8abc5e159a, b1ac53c4dd, 27ab68c856, ddf5a3953b, 92d2fc101d, 8996000d2b, a2fcba2349, abe8638991,
0b5064b474, da9d4cf5b9, a62cca15dd, ac04246a2a, 0923145bd7, a216cb4105, 01705d1f3d, 14f93b4e9d,
ad11036c36, 632a06076b, 242c2b0268, 19662da905, 7faee5af35, 805ee1bf31, bea46fdfa1, f6b1fb1c88,
6fb349ea76, e5a425f5c7, f157d37e4c, 5f08559bef, a082d2aecd, bcfaff8504, d8e09c346f, 876519731b,
de05b83aca, 56c73e7193, 859ac008a8, f882bd27c8, 361e5759c1, 34ef0da896, 726e8b962f, 453ea01deb,
6537f8011e, 5f17317c1c, 3432ffa4a3, 9dac67635b, 9be69fbd07, e21261e893, da53a8fc48, a14634e656,
43761a8066, 01dbc337c0, 92f9b55fcb, f65f12f58b, f2b61a3dcf, 77a6d29a2e, 31d16da3a0, 19221b77bd,
83df293647, c20c09ce36, 2191faaa3f, 2de1e6f3e4, db44df3964, f92eb44c89, a26980b64d, f58cf7e626,
68da7dabe2, d1e43a2c02, 3652bec2f8, 81b7a1725f, 0c917079c4, a732fe7021, d75a7aae6a, e788a46e82,
199543125a, ca63efa770, 345e6edd9c, 6403064126, 0517d76631, 000d480f77, b40a8ed37e, d21c2bd63e,
7a256e93f7, 07fe76c2da, 54affa897f, ac4c5fae3c, 2845d87077, dc2c90b8ed, b469157e1f, 2697794e58,
48cf24edb4, 78f90db90b, d0a3b9bc1d, bfdb6dab86, 7dd2fd52af, b6bad9331b, 6e2122085d, 7a847292aa,
81f4db0afa, a7dc2e6c8b, 0a010b5088, 1e335e2cf2, 42f4c0f14e, d3c12abe25, b0ba05b4f4, e206506489,
013cb28663, 496914cb39, c032e78888, 5e4deff6fd, 6daa91c465, 32ce6423eb, b0ea450df5, 8bd10df423,
dcbb543be2, be0580e1a9, 1355178115, b78c3485b9, f503efc6ed, 1bfbd3980e, 3e722ea1bc, d844026433,
9ffc19d5ef, 3e23f6e879, c688c84393
@@ -6,20 +6,20 @@ import (
)

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
-func Verify(sidecars ...blocks.ROBlob) error {
-    if len(sidecars) == 0 {
+func Verify(blobSidecars ...blocks.ROBlob) error {
+    if len(blobSidecars) == 0 {
        return nil
    }
-    if len(sidecars) == 1 {
+    if len(blobSidecars) == 1 {
        return kzgContext.VerifyBlobKZGProof(
-            bytesToBlob(sidecars[0].Blob),
-            bytesToCommitment(sidecars[0].KzgCommitment),
-            bytesToKZGProof(sidecars[0].KzgProof))
+            bytesToBlob(blobSidecars[0].Blob),
+            bytesToCommitment(blobSidecars[0].KzgCommitment),
+            bytesToKZGProof(blobSidecars[0].KzgProof))
    }
-    blobs := make([]GoKZG.Blob, len(sidecars))
-    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
-    proofs := make([]GoKZG.KZGProof, len(sidecars))
-    for i, sidecar := range sidecars {
+    blobs := make([]GoKZG.Blob, len(blobSidecars))
+    cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
+    proofs := make([]GoKZG.KZGProof, len(blobSidecars))
+    for i, sidecar := range blobSidecars {
        blobs[i] = *bytesToBlob(sidecar.Blob)
        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}

func TestVerify(t *testing.T) {
-    sidecars := make([]blocks.ROBlob, 0)
-    require.NoError(t, Verify(sidecars...))
+    blobSidecars := make([]blocks.ROBlob, 0)
+    require.NoError(t, Verify(blobSidecars...))
}

func TestBytesToAny(t *testing.T) {
@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
        }
    }

-    if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
-        return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
+    if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
+        return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
    }

    args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
        JustifiedCheckpoint: jCheckpoints[i],
        FinalizedCheckpoint: fCheckpoints[i]}
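The new error message applies the %#x verb to the block root; on a byte array this prints a 0x-prefixed hex string, so the failing block is easy to grep in logs. A small self-contained illustration with hypothetical values (not Prysm code):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	root := [32]byte{0xde, 0xad, 0xbe, 0xef}
	cause := errors.New("missing sidecars")

	// %#x on a byte array prints a single 0x prefix followed by the hex bytes.
	err := errors.Wrapf(cause, "could not validate sidecar availability for block %#x at slot %d", root, 42)
	fmt.Println(err)
}
```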
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
    return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

+func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
+    blockVersion := roBlock.Version()
+    block := roBlock.Block()
+    slot := block.Slot()
+
+    if blockVersion >= version.Fulu {
+        if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
+            return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
+        }
+
+        return nil
+    }
+
+    if blockVersion >= version.Deneb {
+        if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
+            return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
+        }
+
+        return nil
+    }
+
+    return nil
+}
+
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
    e := coreTime.CurrentEpoch(st)
    if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -901,6 +926,118 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
    }
}

+// areDataColumnsImmediatelyAvailable checks if all required data columns are currently
+// available in the database without waiting for missing ones.
+func (s *Service) areDataColumnsImmediatelyAvailable(
+    ctx context.Context,
+    root [fieldparams.RootLength]byte,
+    block interfaces.ReadOnlyBeaconBlock,
+) error {
+    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+    blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
+    blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
+    if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
+        return nil
+    }
+
+    body := block.Body()
+    if body == nil {
+        return errors.New("invalid nil beacon block body")
+    }
+
+    kzgCommitments, err := body.BlobKzgCommitments()
+    if err != nil {
+        return errors.Wrap(err, "blob KZG commitments")
+    }
+
+    // If block has no commitments there is nothing to check.
+    if len(kzgCommitments) == 0 {
+        return nil
+    }
+
+    // All columns to sample need to be available for the block to be considered available.
+    nodeID := s.cfg.P2P.NodeID()
+
+    // Get the custody group sampling size for the node.
+    custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
+    if err != nil {
+        return errors.Wrap(err, "custody group count error")
+    }
+
+    samplesPerSlot := params.BeaconConfig().SamplesPerSlot
+    // Compute the sampling size.
+    samplingSize := max(samplesPerSlot, custodyGroupCount)
+
+    // Get the peer info for the node.
+    peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
+    if err != nil {
+        return errors.Wrap(err, "peer info")
+    }
+
+    // Get the count of data columns we already have in the store.
+    summary := s.dataColumnStorage.Summary(root)
+    storedDataColumnsCount := summary.Count()
+
+    minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
+
+    // As soon as we have enough data column sidecars, we can reconstruct the missing ones.
+    // We don't need to wait for the rest of the data columns to declare the block as available.
+    if storedDataColumnsCount >= minimumColumnCountToReconstruct {
+        return nil
+    }
+
+    // Get a map of data column indices that are not currently available.
+    missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
+    if err != nil {
+        return errors.Wrap(err, "missing data columns")
+    }
+
+    // If there are no missing indices, all data column sidecars are available.
+    if len(missingMap) == 0 {
+        return nil
+    }
+
+    // If any data is missing, return error immediately (don't wait)
+    missingIndices := uint64MapToSortedSlice(missingMap)
+    return fmt.Errorf("data columns not immediately available, missing %v", missingIndices)
+}
+
+// areBlobsImmediatelyAvailable checks if all required blobs are currently
+// available in the database without waiting for missing ones.
+func (s *Service) areBlobsImmediatelyAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
+    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+    if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
+        return nil
+    }
+
+    body := block.Body()
+    if body == nil {
+        return errors.New("invalid nil beacon block body")
+    }
+    kzgCommitments, err := body.BlobKzgCommitments()
+    if err != nil {
+        return errors.Wrap(err, "could not get KZG commitments")
+    }
+    // expected is the number of kzg commitments observed in the block.
+    expected := len(kzgCommitments)
+    if expected == 0 {
+        return nil
+    }
+    // get a map of BlobSidecar indices that are not currently available.
+    missing, err := missingBlobIndices(s.blobStorage, root, kzgCommitments, block.Slot())
+    if err != nil {
+        return errors.Wrap(err, "missing indices")
+    }
+    // If there are no missing indices, all BlobSidecars are available.
+    if len(missing) == 0 {
+        return nil
+    }
+
+    // If any blobs are missing, return error immediately (don't wait)
+    missingIndices := uint64MapToSortedSlice(missing)
+    return fmt.Errorf("blobs not immediately available, missing %v", missingIndices)
+}
+
// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
    output := make([]uint64, 0, len(input))
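The hunk above ends inside uint64MapToSortedSlice. A helper with the documented contract fits in a few lines; the following is a minimal standalone sketch, not necessarily the exact body used in Prysm:

```go
package main

import (
	"fmt"
	"sort"
)

// uint64MapToSortedSlice returns the keys of the input map as an ascending slice,
// so error messages list missing indices deterministically.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
	output := make([]uint64, 0, len(input))
	for idx := range input {
		output = append(output, idx)
	}
	sort.Slice(output, func(i, j int) bool { return output[i] < output[j] })
	return output
}

func main() {
	missing := map[uint64]bool{42: true, 3: true, 17: true}
	fmt.Println(uint64MapToSortedSlice(missing)) // [3 17 42]
}
```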
@@ -30,6 +30,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
+    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -37,12 +38,22 @@ import (
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+    "github.com/OffchainLabs/prysm/v6/runtime/version"
    prysmTime "github.com/OffchainLabs/prysm/v6/time"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

+// DataAvailabilityChecker defines an interface for checking if data is available
+// for a given block root. This interface is implemented by the blockchain service
+// which has knowledge of the beacon chain's data availability requirements.
+// Returns nil if data is available, ErrDataNotAvailable if data is not available,
+// or another error for other failures.
+type DataAvailabilityChecker interface {
+    IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error
+}
+
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
@@ -106,25 +117,32 @@ type Checker interface {

var ErrMissingClockSetter = errors.New("blockchain Service initialized without a startup.ClockSetter")

+// ErrDataNotAvailable is returned when block data is not immediately available for processing.
+var ErrDataNotAvailable = errors.New("block data is not available")
+
type blobNotifierMap struct {
    sync.RWMutex
    notifiers map[[32]byte]chan uint64
-    seenIndex map[[32]byte][]bool
+    // TODO: Separate blobs from data columns
+    // seenIndex map[[32]byte][]bool
+    seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
-    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
-    if idx >= uint64(maxBlobsPerBlock) {
-        return
-    }
+    // TODO: Separate blobs from data columns
+    // maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
+    // if idx >= uint64(maxBlobsPerBlock) {
+    //     return
+    // }

    bn.Lock()
    seen := bn.seenIndex[root]
-    if seen == nil {
-        seen = make([]bool, maxBlobsPerBlock)
-    }
+    // TODO: Separate blobs from data columns
+    // if seen == nil {
+    //     seen = make([]bool, maxBlobsPerBlock)
+    // }
    if seen[idx] {
        bn.Unlock()
        return
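The seenIndex change swaps a slice value for a fixed-size array value. Array values read out of a Go map are copies, so a mutation must be written back to the map to stick; a minimal standalone sketch of that pattern (the constant is a stand-in for fieldparams.NumberOfColumns):

```go
package main

import "fmt"

const numberOfColumns = 128 // stand-in for fieldparams.NumberOfColumns

func main() {
	seenIndex := make(map[[32]byte][numberOfColumns]bool)
	root := [32]byte{0x01}

	// Reading yields a copy of the array (the zero value if the key is absent).
	seen := seenIndex[root]
	seen[5] = true

	// The copy must be stored back, otherwise the update is lost.
	seenIndex[root] = seen

	fmt.Println(seenIndex[root][5]) // true
}
```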
@@ -135,7 +153,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
    // Retrieve or create the notifier channel for the given root.
    c, ok := bn.notifiers[root]
    if !ok {
-        c = make(chan uint64, maxBlobsPerBlock)
+        // TODO: Separate blobs from data columns
+        // c = make(chan uint64, maxBlobsPerBlock)
+        c = make(chan uint64, fieldparams.NumberOfColumns)
        bn.notifiers[root] = c
    }
@@ -145,12 +165,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
-    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
+    // TODO: Separate blobs from data columns
+    // maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    bn.Lock()
    defer bn.Unlock()
    c, ok := bn.notifiers[root]
    if !ok {
-        c = make(chan uint64, maxBlobsPerBlock)
+        // TODO: Separate blobs from data columns
+        // c = make(chan uint64, maxBlobsPerBlock)
+        c = make(chan uint64, fieldparams.NumberOfColumns)
        bn.notifiers[root] = c
    }
    return c
@@ -176,7 +199,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
    ctx, cancel := context.WithCancel(ctx)
    bn := &blobNotifierMap{
        notifiers: make(map[[32]byte]chan uint64),
-        seenIndex: make(map[[32]byte][]bool),
+        // TODO: Separate blobs from data columns
+        // seenIndex: make(map[[32]byte][]bool),
+        seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
    }
    srv := &Service{
        ctx: ctx,
@@ -518,6 +543,32 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
    return earliestAvailableSlot, custodyGroupCount, nil
}

+// IsDataAvailable implements the DataAvailabilityChecker interface for use by the execution service.
+// It checks if all required blob and data column data is immediately available in the database without waiting.
+func (s *Service) IsDataAvailable(ctx context.Context, blockRoot [fieldparams.RootLength]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
+    block := signedBlock.Block()
+    if block == nil {
+        return errors.New("invalid nil beacon block")
+    }
+
+    blockVersion := block.Version()
+
+    if blockVersion >= version.Fulu {
+        if err := s.areDataColumnsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
+            return errors.Wrap(ErrDataNotAvailable, err.Error())
+        }
+        return nil
+    }
+
+    if blockVersion >= version.Deneb {
+        if err := s.areBlobsImmediatelyAvailable(ctx, blockRoot, block); err != nil {
+            return errors.Wrap(ErrDataNotAvailable, err.Error())
+        }
+    }
+
+    return nil
+}
+
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
    currentTime := prysmTime.Now()
    if currentTime.After(genesisTime) {
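IsDataAvailable wraps the ErrDataNotAvailable sentinel rather than the underlying cause, which keeps errors.Is matching for callers. A hedged sketch of how a caller might branch on it (illustrative names, not the actual execution-service code):

```go
package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

// ErrDataNotAvailable mirrors the sentinel declared in the blockchain service.
var ErrDataNotAvailable = errors.New("block data is not available")

// checkBlock stands in for a caller holding a DataAvailabilityChecker.
func checkBlock() error {
	cause := fmt.Errorf("blobs not immediately available, missing %v", []uint64{2, 5})
	// Wrapping the sentinel (not the cause) is what makes errors.Is succeed below.
	return errors.Wrap(ErrDataNotAvailable, cause.Error())
}

func main() {
	if err := checkBlock(); stderrors.Is(err, ErrDataNotAvailable) {
		fmt.Println("data not yet available, retry later:", err)
		return
	}
	fmt.Println("block data available")
}
```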
@@ -554,7 +554,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
    // Initialize a blobNotifierMap
    bn := &blobNotifierMap{
-        seenIndex: make(map[[32]byte][]bool),
+        // TODO: Separate blobs from data columns
+        // seenIndex: make(map[[32]byte][]bool),
+        seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
        notifiers: make(map[[32]byte]chan uint64),
    }
@@ -732,6 +732,11 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
    return c.TargetRoot, nil
}

+// IsDataAvailable implements the data availability checker interface for testing
+func (c *ChainService) IsDataAvailable(_ context.Context, _ [32]byte, _ interfaces.ReadOnlySignedBeaconBlock) error {
+    return nil
+}
+
// MockSyncChecker is a mock implementation of blockchain.Checker.
// We can't make an assertion here that this is true because that would create a circular dependency.
type MockSyncChecker struct {
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
+    params.SetupTestConfigCleanup(t)

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
+    params.SetupTestConfigCleanup(t)
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability_blobs.go",
-        "availability_columns.go",
        "blob_cache.go",
        "data_column_cache.go",
        "iface.go",
@@ -13,7 +12,6 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
    visibility = ["//visibility:public"],
    deps = [
-        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -23,7 +21,6 @@ go_library(
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
-        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -33,7 +30,6 @@ go_test(
    name = "go_default_test",
    srcs = [
        "availability_blobs_test.go",
-        "availability_columns_test.go",
        "blob_cache_test.go",
        "data_column_cache_test.go",
    ],
@@ -49,7 +45,6 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
-        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
-func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
+func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
    if len(sidecars) == 0 {
        return nil
    }

-    blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
-    if err != nil {
-        return errors.Wrap(err, "blob sidecars from sidecars")
-    }
-
-    if len(blobSidecars) > 1 {
-        firstRoot := blobSidecars[0].BlockRoot()
-        for _, sidecar := range blobSidecars[1:] {
+    if len(sidecars) > 1 {
+        firstRoot := sidecars[0].BlockRoot()
+        for _, sidecar := range sidecars[1:] {
            if sidecar.BlockRoot() != firstRoot {
                return errMixedRoots
            }
        }
    }
-    if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
+    if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
        return nil
    }
-    key := keyFromSidecar(blobSidecars[0])
+    key := keyFromSidecar(sidecars[0])
    entry := s.cache.ensure(key)
-    for _, blobSidecar := range blobSidecars {
+    for _, blobSidecar := range sidecars {
        if err := entry.stash(&blobSidecar); err != nil {
            return err
        }
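Condensing the tests that follow into the intended call pattern during batch sync: stash sidecars with Persist, then gate import on IsDataAvailable. A test-style sketch that assumes the helpers used elsewhere in this diff (GenerateTestDenebBlockWithSidecar, mockBlobBatchVerifier, NewEphemeralBlobStorage):

```go
func TestPersistThenCheckAvailability(t *testing.T) {
	ctx := t.Context()
	store := filesystem.NewEphemeralBlobStorage(t)

	blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
	mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
	as := NewLazilyPersistentStore(store, mbv)

	// Stash every sidecar referenced by the block, then the availability check
	// passes and the sidecars are verified and flushed to the blob store.
	require.NoError(t, as.Persist(1, blobSidecars...))
	require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
```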
@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {

    blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

-    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
-
    mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
    as := NewLazilyPersistentStore(store, mbv)

    // Only one commitment persisted, should return error with other indices
-    require.NoError(t, as.Persist(1, scs[2]))
+    require.NoError(t, as.Persist(1, blobSidecars[2]))
    err := as.IsDataAvailable(ctx, 1, blk)
    require.ErrorIs(t, err, errMissingSidecar)

    // All but one persisted, return missing idx
-    require.NoError(t, as.Persist(1, scs[0]))
+    require.NoError(t, as.Persist(1, blobSidecars[0]))
    err = as.IsDataAvailable(ctx, 1, blk)
    require.ErrorIs(t, err, errMissingSidecar)

    // All persisted, return nil
-    require.NoError(t, as.Persist(1, scs...))
+    require.NoError(t, as.Persist(1, blobSidecars...))

    require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
    blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
    as := NewLazilyPersistentStore(store, mbv)

-    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
-
    // Only one commitment persisted, should return error with other indices
-    require.NoError(t, as.Persist(1, scs[0]))
+    require.NoError(t, as.Persist(1, blobSidecars[0]))
    err := as.IsDataAvailable(ctx, 1, blk)
    require.NotNil(t, err)
    require.ErrorIs(t, err, errCommitmentMismatch)
@@ -161,29 +157,25 @@
func TestLazyPersistOnceCommitted(t *testing.T) {
    _, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)

-    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
-
    as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
    // stashes as expected
-    require.NoError(t, as.Persist(1, scs...))
+    require.NoError(t, as.Persist(1, blobSidecars...))
    // ignores duplicates
-    require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
+    require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)

    // ignores index out of bound
    blobSidecars[0].Index = 6
-    require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
+    require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)

    _, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)

-    more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
-
    // ignores sidecars before the retention period
    slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
    require.NoError(t, err)
-    require.NoError(t, as.Persist(32+slotOOB, more[0]))
+    require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))

    // doesn't ignore new sidecars with a different block root
-    require.NoError(t, as.Persist(1, more...))
+    require.NoError(t, as.Persist(1, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {
@@ -1,213 +0,0 @@
package das

import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/p2p/enode"
    errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
    store *filesystem.DataColumnStorage
    nodeID enode.ID
    cache *dataColumnCache
    newDataColumnsVerifier verification.NewDataColumnsVerifier
    custodyGroupCount uint64
}

var _ AvailabilityStore = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
    VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
    store *filesystem.DataColumnStorage,
    nodeID enode.ID,
    newDataColumnsVerifier verification.NewDataColumnsVerifier,
    custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
    return &LazilyPersistentStoreColumn{
        store: store,
        nodeID: nodeID,
        cache: newDataColumnCache(),
        newDataColumnsVerifier: newDataColumnsVerifier,
        custodyGroupCount: custodyGroupCount,
    }
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
    if len(sidecars) == 0 {
        return nil
    }

    dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
    if err != nil {
        return errors.Wrap(err, "blob sidecars from sidecars")
    }

    // It is safe to retrieve the first sidecar.
    firstSidecar := dataColumnSidecars[0]

    if len(sidecars) > 1 {
        firstRoot := firstSidecar.BlockRoot()
        for _, sidecar := range dataColumnSidecars[1:] {
            if sidecar.BlockRoot() != firstRoot {
                return errMixedRoots
            }
        }
    }

    firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
    if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
        return nil
    }

    key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
    entry := s.cache.ensure(key)

    for _, sidecar := range dataColumnSidecars {
        if err := entry.stash(&sidecar); err != nil {
            return errors.Wrap(err, "stash DataColumnSidecar")
        }
    }

    return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
    blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
    if err != nil {
        return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
    }

    // Return early for blocks that do not have any commitments.
    if blockCommitments.count() == 0 {
        return nil
    }

    // Get the root of the block.
    blockRoot := block.Root()

    // Build the cache key for the block.
    key := cacheKey{slot: block.Block().Slot(), root: blockRoot}

    // Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
    entry := s.cache.ensure(key)

    // Delete the cache entry for the block at the end.
    defer s.cache.delete(key)

    // Set the disk summary for the block in the cache entry.
    entry.setDiskSummary(s.store.Summary(blockRoot))

    // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
    // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
    // ignore their response and decrease their peer score.
    roDataColumns, err := entry.filter(blockRoot, blockCommitments)
    if err != nil {
        return errors.Wrap(err, "entry filter")
    }

    // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
    verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)

    if err := verifier.ValidFields(); err != nil {
        return errors.Wrap(err, "valid fields")
    }

    if err := verifier.SidecarInclusionProven(); err != nil {
        return errors.Wrap(err, "sidecar inclusion proven")
    }

    if err := verifier.SidecarKzgProofVerified(); err != nil {
        return errors.Wrap(err, "sidecar KZG proof verified")
    }

    verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
    if err != nil {
        return errors.Wrap(err, "verified RO data columns - should never happen")
    }

    if err := s.store.Save(verifiedRoDataColumns); err != nil {
        return errors.Wrap(err, "save data column sidecars")
    }

    return nil
}

// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
    samplesPerSlot := params.BeaconConfig().SamplesPerSlot

    // Return early for blocks that are pre-Fulu.
    if block.Version() < version.Fulu {
        return &safeCommitmentsArray{}, nil
    }

    // Compute the block epoch.
    blockSlot := block.Block().Slot()
    blockEpoch := slots.ToEpoch(blockSlot)

    // Compute the current epoch.
    currentEpoch := slots.ToEpoch(currentSlot)

    // Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
    if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
        return &safeCommitmentsArray{}, nil
    }

    // Retrieve the KZG commitments for the block.
    kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
    if err != nil {
        return nil, errors.Wrap(err, "blob KZG commitments")
    }

    // Return early if there are no commitments in the block.
    if len(kzgCommitments) == 0 {
        return &safeCommitmentsArray{}, nil
    }

    // Retrieve peer info.
    samplingSize := max(s.custodyGroupCount, samplesPerSlot)
    peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
    if err != nil {
        return nil, errors.Wrap(err, "peer info")
    }

    // Create a safe commitments array for the custody columns.
    commitmentsArray := &safeCommitmentsArray{}
    commitmentsArraySize := uint64(len(commitmentsArray))

    for column := range peerInfo.CustodyColumns {
        if column >= commitmentsArraySize {
            return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
        }

        commitmentsArray[column] = kzgCommitments
    }

    return commitmentsArray, nil
}
@@ -1,313 +0,0 @@
package das

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/p2p/enode"
)

var commitments = [][]byte{
    bytesutil.PadTo([]byte("a"), 48),
    bytesutil.PadTo([]byte("b"), 48),
    bytesutil.PadTo([]byte("c"), 48),
    bytesutil.PadTo([]byte("d"), 48),
}

func TestPersist(t *testing.T) {
    t.Run("no sidecars", func(t *testing.T) {
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
        err := lazilyPersistentStoreColumns.Persist(0)
        require.NoError(t, err)
        require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
    })

    t.Run("mixed roots", func(t *testing.T) {
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

        dataColumnParamsByBlockRoot := []util.DataColumnParam{
            {Slot: 1, Index: 1},
            {Slot: 2, Index: 2},
        }

        roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)

        err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
        require.ErrorIs(t, err, errMixedRoots)
        require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
    })

    t.Run("outside DA period", func(t *testing.T) {
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

        dataColumnParamsByBlockRoot := []util.DataColumnParam{
            {Slot: 1, Index: 1},
        }

        roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)

        err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
        require.NoError(t, err)
        require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
    })

    t.Run("nominal", func(t *testing.T) {
        const slot = 42
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

        dataColumnParamsByBlockRoot := []util.DataColumnParam{
            {Slot: slot, Index: 1},
            {Slot: slot, Index: 5},
        }

        roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)

        err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
        require.NoError(t, err)
        require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))

        key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
        entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
        require.Equal(t, true, ok)

        // A call to Persist does NOT save the sidecars to disk.
        require.Equal(t, uint64(0), entry.diskSummary.Count())

        require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
        require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])

        for i, roDataColumn := range entry.scs {
            if map[int]bool{1: true, 5: true}[i] {
                continue
            }

            require.IsNil(t, roDataColumn)
        }
    })
}

func TestIsDataAvailable(t *testing.T) {
    newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
        return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
    }

    ctx := t.Context()

    t.Run("without commitments", func(t *testing.T) {
        signedBeaconBlockFulu := util.NewBeaconBlockFulu()
        signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)

        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)

        err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
        require.NoError(t, err)
    })

    t.Run("with commitments", func(t *testing.T) {
        signedBeaconBlockFulu := util.NewBeaconBlockFulu()
        signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
        signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
        block := signedRoBlock.Block()
        slot := block.Slot()
        proposerIndex := block.ProposerIndex()
        parentRoot := block.ParentRoot()
        stateRoot := block.StateRoot()
        bodyRoot, err := block.Body().HashTreeRoot()
        require.NoError(t, err)

        root := signedRoBlock.Root()

        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)

        indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
        dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
        for _, index := range indices {
            dataColumnParams := util.DataColumnParam{
                Index: index,
                KzgCommitments: commitments,

                Slot: slot,
                ProposerIndex: proposerIndex,
                ParentRoot: parentRoot[:],
                StateRoot: stateRoot[:],
                BodyRoot: bodyRoot[:],
            }

            dataColumnsParams = append(dataColumnsParams, dataColumnParams)
        }

        _, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)

        key := cacheKey{root: root}
        entry := lazilyPersistentStoreColumns.cache.ensure(key)
        defer lazilyPersistentStoreColumns.cache.delete(key)

        for _, verifiedRoDataColumn := range verifiedRoDataColumns {
            err := entry.stash(&verifiedRoDataColumn.RODataColumn)
            require.NoError(t, err)
        }

        err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
        require.NoError(t, err)

        actual, err := dataColumnStorage.Get(root, indices[:])
        require.NoError(t, err)

        summary := dataColumnStorage.Summary(root)
        require.Equal(t, uint64(len(indices)), summary.Count())
        require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
    })
}

func TestFullCommitmentsToCheck(t *testing.T) {
    windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
    require.NoError(t, err)

    testCases := []struct {
        name string
        commitments [][]byte
        block func(*testing.T) blocks.ROBlock
        slot primitives.Slot
    }{
        {
            name: "Pre-Fulu block",
            block: func(t *testing.T) blocks.ROBlock {
                return newSignedRoBlock(t, util.NewBeaconBlockElectra())
            },
        },
        {
            name: "Commitments outside data availability window",
            block: func(t *testing.T) blocks.ROBlock {
                beaconBlockElectra := util.NewBeaconBlockElectra()

                // Block is from slot 0, "current slot" is window size +1 (so outside the window)
                beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments

                return newSignedRoBlock(t, beaconBlockElectra)
            },
            slot: windowSlots + 1,
        },
        {
            name: "Commitments within data availability window",
            block: func(t *testing.T) blocks.ROBlock {
                signedBeaconBlockFulu := util.NewBeaconBlockFulu()
                signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
                signedBeaconBlockFulu.Block.Slot = 100

                return newSignedRoBlock(t, signedBeaconBlockFulu)
            },
            commitments: commitments,
            slot: 100,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            numberOfColumns := params.BeaconConfig().NumberOfColumns

            b := tc.block(t)
            s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)

            commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
            require.NoError(t, err)

            for _, commitments := range commitmentsArray {
                require.DeepEqual(t, tc.commitments, commitments)
            }
        })
    }
}

func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
    roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)

    roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
    for _, roDataColumn := range roDataColumns {
        roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
    }

    return roSidecars, roDataColumns
}

func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
    sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
    require.NoError(t, err)

    rb, err := blocks.NewROBlock(sb)
    require.NoError(t, err)

    return rb
}

type mockDataColumnsVerifier struct {
    t *testing.T
    dataColumnSidecars []blocks.RODataColumn
    validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}

var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}

func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
    require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)

    verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
    for _, dataColumnSidecar := range m.dataColumnSidecars {
        verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
        verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
    }

    return verifiedDataColumnSidecars, nil
}

func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}

func (m *mockDataColumnsVerifier) ValidFields() error {
    m.validCalled = true
    return nil
}

func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
    return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }

func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
    return nil
}

func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
    return nil
}

func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }

func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
    m.SidecarInclusionProvenCalled = true
    return nil
}

func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
    m.SidecarKzgProofVerifiedCalled = true
    return nil
}

func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
    IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
-    Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
+    Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
@@ -5,13 +5,12 @@ import (

    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
-    errors "github.com/pkg/errors"
)

// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
type MockAvailabilityStore struct {
    VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
-    PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
+    PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}

var _ AvailabilityStore = &MockAvailabilityStore{}
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}

// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
-func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
-    blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
-    if err != nil {
-        return errors.Wrap(err, "blob sidecars from sidecars")
-    }
+func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
    if m.PersistBlobsCallback != nil {
-        return m.PersistBlobsCallback(current, blobSidecars...)
+        return m.PersistBlobsCallback(current, blobSidecar...)
    }
    return nil
}
@@ -100,6 +100,14 @@ type (
    }
)

+// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
+type DataColumnStorageReader interface {
+    Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
+    Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
+}
+
+var _ DataColumnStorageReader = &DataColumnStorage{}
+
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
func WithDataColumnBasePath(base string) DataColumnStorageOption {
    return func(b *DataColumnStorage) error {
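Consumers can depend on the narrow DataColumnStorageReader rather than the concrete storage type. The following is an illustrative sketch (the helper name and wiring are hypothetical; only Summary, Count, and Get come from the interface above):

```go
package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// countAvailable shows a consumer accepting the read-only interface.
func countAvailable(reader filesystem.DataColumnStorageReader, root [fieldparams.RootLength]byte, indices []uint64) (uint64, []blocks.VerifiedRODataColumn, error) {
	// Summary answers "what is on disk for this root" without loading sidecars.
	summary := reader.Summary(root)
	if summary.Count() == 0 {
		return 0, nil, nil
	}

	// Get loads the verified sidecars for the requested column indices.
	columns, err := reader.Get(root, indices)
	if err != nil {
		return 0, nil, err
	}
	return summary.Count(), columns, nil
}
```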
@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
    return stored
}

-// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
-// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
-type DataColumnStorageSummarizer interface {
-    Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
-}
-
type dataColumnStorageSummaryCache struct {
    mu sync.RWMutex
    dataColumnCount float64
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
    cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
}

-var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
-
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
    return &dataColumnStorageSummaryCache{
        cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
    fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
    return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}
-
-func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
-    c := newDataColumnStorageSummaryCache()
-    for root, indices := range set {
-        if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
-            t.Fatal(err)
-        }
-    }
-
-    return c
-}
@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
    CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
    DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)

+    // Genesis operations.
+    LoadGenesis(ctx context.Context, stateBytes []byte) error
+    SaveGenesisData(ctx context.Context, state state.BeaconState) error
+    EnsureEmbeddedGenesis(ctx context.Context) error
+
+    // Support for checkpoint sync and backfill.
+    SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
+    SaveOrigin(ctx context.Context, serState, serBlock []byte) error
+    SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
+    BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
+
    // Custody operations.
    UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
    UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
    HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
    HeadBlockRoot() ([32]byte, error)
    SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
-
-    // Genesis operations.
-    LoadGenesis(ctx context.Context, stateBytes []byte) error
-    SaveGenesisData(ctx context.Context, state state.BeaconState) error
-    EnsureEmbeddedGenesis(ctx context.Context) error
-
-    // Support for checkpoint sync and backfill.
-    SaveOrigin(ctx context.Context, serState, serBlock []byte) error
-    SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
-    BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
}

// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
@@ -74,6 +74,7 @@ go_library(
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_sync//singleflight:go_default_library",
    ],
)

@@ -84,6 +85,7 @@ go_test(
        "block_cache_test.go",
        "block_reader_test.go",
        "deposit_test.go",
        "engine_client_broadcast_test.go",
        "engine_client_fuzz_test.go",
        "engine_client_test.go",
        "execution_chain_test.go",
@@ -99,6 +99,8 @@ const (
	GetBlobsV2 = "engine_getBlobsV2"
	// Defines the seconds before timing out engine endpoints with non-block execution semantics.
	defaultEngineTimeout = time.Second
	// defaultGetBlobsRetryInterval is the default retry interval for getBlobsV2 calls.
	defaultGetBlobsRetryInterval = 200 * time.Millisecond
)

var (
@@ -652,9 +654,94 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
}

// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
// It uses singleflight to ensure only one reconstruction per blockRoot.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
	// Use singleflight to ensure only one reconstruction per blockRoot
	v, err, _ := s.reconstructSingleflight.Do(fmt.Sprintf("%x", blockRoot), func() (interface{}, error) {
		// Try reconstruction once
		result, err := s.reconstructDataColumnSidecarsOnce(ctx, signedROBlock, blockRoot)
		if err != nil {
			return nil, errors.Wrap(err, "failed to reconstruct data column sidecars")
		}
		if len(result) > 0 {
			return result, nil // Success - return data
		}

		// Empty result - initiate retry mechanism

		// Create a new context with a timeout for the retry goroutine.
		retryCtx, cancel := context.WithTimeout(s.ctx, time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second)

		// LoadOrStore atomically checks for an existing retry and stores
		// a new one if none exists. This prevents a race condition.
		// The stored value is the cancel function for the new context.
		_, loaded := s.activeRetries.LoadOrStore(blockRoot, cancel)

		if loaded {
			// Another goroutine already started the retry process. The current one can exit.
			cancel() // Cancel the context we just created as it won't be used.
			return []blocks.VerifiedRODataColumn{}, nil
		}

		// This goroutine is now responsible for starting the retry.
		// Perform periodic retry attempts for data column reconstruction inline.
		go func() {
			startTime := time.Now()
			// Defer the cancellation of the context and the removal of the active retry tracker.
			defer func() {
				cancel()
				s.activeRetries.Delete(blockRoot)
			}()

			ticker := time.NewTicker(defaultGetBlobsRetryInterval)
			defer ticker.Stop()

			attemptCount := 0
			retryLog := log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot))

			for {
				select {
				case <-ticker.C:
					attemptCount++
					getBlobsRetryAttempts.WithLabelValues("attempt").Inc()

					// Retry reconstruction
					retryLog.WithField("attempt", attemptCount).Debug("Retrying data column reconstruction")
					result, err := s.reconstructDataColumnSidecarsOnce(retryCtx, signedROBlock, blockRoot)
					if err != nil {
						retryLog.WithError(err).Debug("Reconstruction attempt failed, will retry")
						continue
					}
					if len(result) > 0 {
						retryLog.WithField("attempts", attemptCount).Debug("Retry succeeded")
						getBlobsRetryAttempts.WithLabelValues("success_reconstructed").Inc()
						getBlobsRetryDuration.WithLabelValues("success").Observe(time.Since(startTime).Seconds())
						// Clean up active retry tracker immediately on success
						s.activeRetries.Delete(blockRoot)
						return
					}

				case <-retryCtx.Done():
					retryLog.WithField("attempts", attemptCount).Debug("Retry timeout")
					getBlobsRetryAttempts.WithLabelValues("timeout").Inc()
					getBlobsRetryDuration.WithLabelValues("timeout").Observe(time.Since(startTime).Seconds())
					return
				}
			}
		}()

		// Return empty result for now; the background retry will handle it.
		return []blocks.VerifiedRODataColumn{}, nil
	})

	if err != nil {
		return nil, err
	}
	return v.([]blocks.VerifiedRODataColumn), nil
}

// reconstructDataColumnSidecarsOnce performs a single attempt to reconstruct data column sidecars.
func (s *Service) reconstructDataColumnSidecarsOnce(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
	block := signedROBlock.Block()

	log := log.WithFields(logrus.Fields{
@@ -1008,6 +1095,12 @@ func toBlockNumArg(number *big.Int) string {
	return hexutil.EncodeBig(number)
}

// hasActiveRetry checks if there's an active retry for the given block root.
func (s *Service) hasActiveRetry(blockRoot [fieldparams.RootLength]byte) bool {
	_, exists := s.activeRetries.Load(blockRoot)
	return exists
}

// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
	return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
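The function above layers two deduplication mechanisms: singleflight collapses concurrent callers for the same block root into one execution, and sync.Map.LoadOrStore lets exactly one of them claim the background retry. A stripped-down sketch of that pattern, with hypothetical names and no Prysm types, might look like this:

// Minimal illustration of the concurrency pattern; not Prysm code.
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

type deduper struct {
	group   singleflight.Group
	retries sync.Map // key -> cancel func; at most one background retry per key
}

func (d *deduper) fetch(key string, attempt func() ([]byte, error)) ([]byte, error) {
	v, err, _ := d.group.Do(key, func() (interface{}, error) {
		data, err := attempt()
		if err != nil || len(data) > 0 {
			return data, err
		}
		// Empty result: claim the retry slot atomically; only the first caller wins.
		if _, loaded := d.retries.LoadOrStore(key, func() {}); !loaded {
			go func() {
				defer d.retries.Delete(key)
				// ... periodic retry attempts would go here ...
			}()
		}
		return []byte{}, nil
	})
	if err != nil {
		return nil, err
	}
	return v.([]byte), nil
}

func main() {
	d := &deduper{}
	out, err := d.fetch("block-root", func() ([]byte, error) { return []byte("data"), nil })
	fmt.Println(string(out), err)
}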
92	beacon-chain/execution/engine_client_broadcast_test.go	Normal file
@@ -0,0 +1,92 @@
package execution

import (
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/testing/require"
)

// TestStartRetryIfNeeded_AtomicBehavior tests that the atomic retry start behavior
// prevents race conditions by ensuring only one retry can be active per blockRoot.
func TestStartRetryIfNeeded_AtomicBehavior(t *testing.T) {
	t.Run("prevents multiple concurrent retry claims", func(t *testing.T) {
		service := &Service{
			activeRetries: sync.Map{},
		}

		blockRoot := [32]byte{1, 2, 3}
		claimCount := int64(0)

		numConcurrentCalls := 20
		var wg sync.WaitGroup
		startSignal := make(chan struct{})

		// Launch multiple goroutines that try to claim retry slot simultaneously
		for i := 0; i < numConcurrentCalls; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				<-startSignal // Wait for signal to maximize race contention

				// Simulate the atomic claim logic from startRetryIfNeeded
				cancelFunc := func() {}
				if _, loaded := service.activeRetries.LoadOrStore(blockRoot, cancelFunc); !loaded {
					// We won the race - count successful claims
					atomic.AddInt64(&claimCount, 1)

					// Simulate some work before cleaning up
					time.Sleep(1 * time.Millisecond)
					service.activeRetries.Delete(blockRoot)
				}
			}()
		}

		// Start all goroutines simultaneously to maximize race condition
		close(startSignal)
		wg.Wait()

		// Verify only one goroutine successfully claimed the retry slot
		actualClaimCount := atomic.LoadInt64(&claimCount)
		require.Equal(t, int64(1), actualClaimCount, "Only one goroutine should successfully claim retry slot despite %d concurrent attempts", numConcurrentCalls)

		t.Logf("Success: %d concurrent attempts resulted in only 1 successful claim (atomic behavior verified)", numConcurrentCalls)
	})

	t.Run("hasActiveRetry correctly detects active retries", func(t *testing.T) {
		service := &Service{
			activeRetries: sync.Map{},
		}

		blockRoot1 := [32]byte{1, 2, 3}
		blockRoot2 := [32]byte{4, 5, 6}

		// Initially no active retries
		if service.hasActiveRetry(blockRoot1) {
			t.Error("Should not have active retry initially")
		}

		// Add active retry for blockRoot1
		service.activeRetries.Store(blockRoot1, func() {})

		// Verify detection
		if !service.hasActiveRetry(blockRoot1) {
			t.Error("Should detect active retry for blockRoot1")
		}
		if service.hasActiveRetry(blockRoot2) {
			t.Error("Should not detect active retry for blockRoot2")
		}

		// Remove active retry
		service.activeRetries.Delete(blockRoot1)

		// Verify removal
		if service.hasActiveRetry(blockRoot1) {
			t.Error("Should not detect active retry after deletion")
		}

		t.Logf("Success: hasActiveRetry correctly tracks retry state")
	})
}
@@ -11,7 +11,10 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
@@ -2723,3 +2726,412 @@ func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test retry helper methods
|
||||
func TestRetryHelperMethods(t *testing.T) {
|
||||
client := &Service{}
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("hasActiveRetry returns false initially", func(t *testing.T) {
|
||||
hasActive := client.hasActiveRetry(blockRoot)
|
||||
require.Equal(t, false, hasActive)
|
||||
})
|
||||
|
||||
t.Run("hasActiveRetry returns true after storing cancel function", func(t *testing.T) {
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
client.activeRetries.Store(blockRoot, cancel)
|
||||
|
||||
hasActive := client.hasActiveRetry(blockRoot)
|
||||
require.Equal(t, true, hasActive)
|
||||
|
||||
// Clean up
|
||||
client.activeRetries.Delete(blockRoot)
|
||||
})
|
||||
}
|
||||
|
||||
// Test ReconstructDataColumnSidecars with retry logic
|
||||
func TestReconstructDataColumnSidecars_WithRetry(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create test block
|
||||
kzgCommitments := createRandomKzgCommitments(t, 3)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("successful initial call does not trigger retry", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Setup server that returns all blobs
|
||||
blobMasks := []bool{true, true, true}
|
||||
srv := createBlobServerV2(t, 3, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
|
||||
// Should not have any active retries since initial call succeeded
|
||||
require.Equal(t, false, client.hasActiveRetry(r))
|
||||
})
|
||||
|
||||
t.Run("failed initial call triggers retry", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Should have active retry since initial call returned empty
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Clean up
|
||||
if cancel, ok := client.activeRetries.Load(r); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
t.Run("does not start duplicate retry", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// First call should start retry
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Second call should not start another retry
|
||||
dataColumns, err = client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Clean up
|
||||
if cancel, ok := client.activeRetries.Load(r); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test timeout and cleanup behavior
|
||||
func TestRetryTimeout(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create test block
|
||||
kzgCommitments := createRandomKzgCommitments(t, 1)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("retry cleans up after timeout", func(t *testing.T) {
|
||||
// Setup server that always returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Modify config to have very short slot time for testing
|
||||
originalConfig := params.BeaconConfig()
|
||||
cfg := originalConfig.Copy()
|
||||
cfg.SecondsPerSlot = 1 // 1 second timeout for retry
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
defer params.OverrideBeaconConfig(originalConfig)
|
||||
|
||||
// Call ReconstructDataColumnSidecars which will start retry internally
|
||||
ctx := context.Background()
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err) // Should not error, just return empty result
|
||||
|
||||
// Wait a bit for the retry goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Should have active retry initially
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Wait for timeout (longer than the 1 second timeout we set)
|
||||
time.Sleep(1200 * time.Millisecond)
|
||||
|
||||
// Should be cleaned up after timeout
|
||||
require.Equal(t, false, client.hasActiveRetry(r))
|
||||
})
|
||||
}
|
||||
|
||||
// Test concurrent retry scenarios
|
||||
func TestConcurrentRetries(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run("multiple blocks can have concurrent retries", func(t *testing.T) {
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Create multiple test blocks
|
||||
testBlocks := make([]interfaces.ReadOnlySignedBeaconBlock, 3)
|
||||
roots := make([][32]byte, 3)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
kzgCommitments := createRandomKzgCommitments(t, 1)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
testBlocks[i] = signedB
|
||||
roots[i] = [32]byte{byte(i), byte(i), byte(i)}
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Start retries for all blocks
|
||||
for i := 0; i < 3; i++ {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, testBlocks[i], roots[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Wait a bit for the goroutines to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// All should have active retries
|
||||
for i := 0; i < 3; i++ {
|
||||
require.Equal(t, true, client.hasActiveRetry(roots[i]))
|
||||
}
|
||||
|
||||
// Clean up
|
||||
for i := 0; i < 3; i++ {
|
||||
if cancel, ok := client.activeRetries.Load(roots[i]); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test end-to-end retry behavior with data availability changes
|
||||
func TestRetryBehaviorWithDataAvailability(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create test block
|
||||
kzgCommitments := createRandomKzgCommitments(t, 1)
|
||||
sb := util.NewBeaconBlockFulu()
|
||||
sb.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
signedB, err := blocks.NewSignedBeaconBlock(sb)
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{1, 2, 3}
|
||||
|
||||
t.Run("retry stops when data becomes available", func(t *testing.T) {
|
||||
// Setup server that returns no blobs initially
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Start the initial reconstruction which should trigger retry
|
||||
ctx := context.Background()
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify retry started
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Wait for retry timeout (the retry will continue since there's no way to stop it now)
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
// Retry should still be active since there's no availability check to stop it
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
})
|
||||
|
||||
t.Run("retry continues when data is not available", func(t *testing.T) {
|
||||
// Setup server that returns no blobs
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Start the initial reconstruction which should trigger retry
|
||||
ctx := context.Background()
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, signedB, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
|
||||
// Wait a bit for the goroutine to start
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify retry started
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Wait a bit - retry should still be active
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.Equal(t, true, client.hasActiveRetry(r))
|
||||
|
||||
// Clean up
|
||||
if cancel, ok := client.activeRetries.Load(r); ok {
|
||||
cancel.(context.CancelFunc)()
|
||||
}
|
||||
|
||||
// Wait for cleanup
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
require.Equal(t, false, client.hasActiveRetry(r))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
// TestConcurrentReconstructDataColumnSidecars tests that concurrent calls to ReconstructDataColumnSidecars
|
||||
// don't result in multiple getBlobsV2 calls for the same block root
|
||||
func TestConcurrentReconstructDataColumnSidecars(t *testing.T) {
|
||||
t.Run("concurrent calls share result", func(t *testing.T) {
|
||||
// Setup server that tracks call count
|
||||
callCount := int32(0)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
atomic.AddInt32(&callCount, 1)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
// Simulate some processing time
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
if strings.Contains(r.URL.RequestURI(), GetBlobsV2) {
|
||||
// Return empty result - simulating EL doesn't have the data yet
|
||||
resp := []interface{}{nil}
|
||||
respJSON, _ := json.Marshal(map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
})
|
||||
_, _ = w.Write(respJSON)
|
||||
return
|
||||
}
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
// Setup client
|
||||
client := &Service{}
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
// Create test block with KZG commitments
|
||||
slot := primitives.Slot(100)
|
||||
block := util.NewBeaconBlockDeneb()
|
||||
block.Block.Slot = slot
|
||||
commitment := [48]byte{1, 2, 3}
|
||||
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot, err := signedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Start multiple concurrent calls
|
||||
numCalls := 5
|
||||
var wg sync.WaitGroup
|
||||
results := make([][]blocks.VerifiedRODataColumn, numCalls)
|
||||
errors := make([]error, numCalls)
|
||||
|
||||
for i := 0; i < numCalls; i++ {
|
||||
wg.Add(1)
|
||||
go func(index int) {
|
||||
defer wg.Done()
|
||||
result, err := client.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
|
||||
results[index] = result
|
||||
errors[index] = err
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all calls to complete
|
||||
wg.Wait()
|
||||
|
||||
// Verify that GetBlobsV2 was called only once, not numCalls times
|
||||
finalCallCount := atomic.LoadInt32(&callCount)
|
||||
require.Equal(t, int32(1), finalCallCount, "Expected GetBlobsV2 to be called only once, but was called %d times", finalCallCount)
|
||||
|
||||
// Verify all calls got the same result length
|
||||
for i := 1; i < numCalls; i++ {
|
||||
require.Equal(t, len(results[0]), len(results[i]), "All concurrent calls should return same result length")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -71,4 +71,19 @@ var (
		Name: "execution_payload_bodies_count",
		Help: "The number of requested payload bodies is too large",
	})
	getBlobsRetryAttempts = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "getblobs_retry_attempts_total",
			Help: "Total number of getBlobsV2 retry attempts",
		},
		[]string{"result"},
	)
	getBlobsRetryDuration = promauto.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "getblobs_retry_duration_seconds",
			Help:    "Duration of getBlobsV2 retry cycles",
			Buckets: []float64{0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0},
		},
		[]string{"result"},
	)
)
@@ -13,6 +13,8 @@ import (
	"sync"
	"time"

	"golang.org/x/sync/singleflight"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
@@ -162,6 +164,8 @@ type Service struct {
	verifierWaiter          *verification.InitializerWaiter
	blobVerifier            verification.NewBlobVerifier
	capabilityCache         *capabilityCache
	activeRetries           sync.Map // map[blockRoot]context.CancelFunc for tracking active retries
	reconstructSingleflight singleflight.Group
}

// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -845,6 +845,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
		ClockWaiter:         b.clockWaiter,
		InitialSyncComplete: complete,
		BlobStorage:         b.BlobStorage,
		DataColumnStorage:   b.DataColumnStorage,
	}, opts...)
	return b.services.RegisterService(is)
}
@@ -1124,4 +1125,4 @@ func hasNetworkFlag(cliCtx *cli.Context) bool {
		}
	}
	return false
}
}
@@ -5,7 +5,6 @@ import (

	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

var _ Scorer = (*BadResponsesScorer)(nil)
@@ -132,13 +131,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {

// isBadPeerNoLock is the lock-free version of IsBadPeer.
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
	if peerData, ok := s.store.PeerData(pid); ok {
		if peerData.BadResponses >= s.config.Threshold {
			return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
		}
	// if peerData, ok := s.store.PeerData(pid); ok {
	// TODO: Remove this when out of devnet
	// if peerData.BadResponses >= s.config.Threshold {
	// 	return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
	// }

		return nil
	}
	// return nil
	// }

	return nil
}
@@ -1,7 +1,6 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
@@ -13,39 +12,41 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
const pid = "peer1"
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
// const pid = "peer1"
|
||||
|
||||
ctx := t.Context()
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 4,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
|
||||
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
}
|
||||
// scorer.Increment(pid)
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
@@ -137,56 +138,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
|
||||
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
|
||||
}
|
||||
|
||||
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pid := peer.ID("peer1")
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pid := peer.ID("peer1")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pid)
|
||||
if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
} else {
|
||||
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
}
|
||||
}
|
||||
}
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pid)
|
||||
// if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// } else {
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
for i := 0; i < len(pids); i++ {
|
||||
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
}
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pids[1])
|
||||
scorer.Increment(pids[2])
|
||||
scorer.Increment(pids[4])
|
||||
}
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
badPeers := scorer.BadPeers()
|
||||
sort.Slice(badPeers, func(i, j int) bool {
|
||||
return badPeers[i] < badPeers[j]
|
||||
})
|
||||
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
}
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
// for i := 0; i < len(pids); i++ {
|
||||
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
// }
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pids[1])
|
||||
// scorer.Increment(pids[2])
|
||||
// scorer.Increment(pids[4])
|
||||
// }
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
// want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
// badPeers := scorer.BadPeers()
|
||||
// sort.Slice(badPeers, func(i, j int) bool {
|
||||
// return badPeers[i] < badPeers[j]
|
||||
// })
|
||||
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
// }
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -211,99 +211,102 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestScorers_Service_loop(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
|
||||
defer cancel()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_loop(t *testing.T) {
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
DecayInterval: 25 * time.Millisecond,
|
||||
Decay: 64,
|
||||
},
|
||||
},
|
||||
})
|
||||
s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 5,
|
||||
// DecayInterval: 50 * time.Millisecond,
|
||||
// },
|
||||
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
// DecayInterval: 25 * time.Millisecond,
|
||||
// Decay: 64,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
|
||||
pid1 := peer.ID("peer1")
|
||||
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
s1.Increment(pid1)
|
||||
}
|
||||
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
// pid1 := peer.ID("peer1")
|
||||
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
// for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
// s1.Increment(pid1)
|
||||
// }
|
||||
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
|
||||
s2.IncrementProcessedBlocks("peer1", 221)
|
||||
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
// s2.IncrementProcessedBlocks("peer1", 221)
|
||||
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Error("Timed out")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// done := make(chan struct{}, 1)
|
||||
// go func() {
|
||||
// defer func() {
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// ticker := time.NewTicker(50 * time.Millisecond)
|
||||
// defer ticker.Stop()
|
||||
// for {
|
||||
// select {
|
||||
// case <-ticker.C:
|
||||
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
// return
|
||||
// }
|
||||
// case <-ctx.Done():
|
||||
// t.Error("Timed out")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
|
||||
<-done
|
||||
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
// <-done
|
||||
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
// }
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// }
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
// for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
// }
|
||||
|
||||
@@ -62,7 +62,9 @@ const (

const (
	// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
	CollocationLimit = 5
	// TODO: Revert this when out of devnet.
	// CollocationLimit = 5
	CollocationLimit = 9999

	// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
	maxLimitBuffer = 150
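For illustration only (this is not Prysm's implementation), the kind of per-IP bookkeeping the constant above gates can be sketched as follows; the map, function names, and limit value are assumptions.

// Simplified collocation check: refuse a new peer once an IP already hosts the limit.
package main

import "fmt"

const collocationLimit = 5 // mirrors CollocationLimit before the devnet override

func allowPeer(peersPerIP map[string]int, ip string) bool {
	if peersPerIP[ip] >= collocationLimit {
		return false
	}
	peersPerIP[ip]++
	return true
}

func main() {
	seen := map[string]int{}
	for i := 0; i < 7; i++ {
		fmt.Println(allowPeer(seen, "203.0.113.7"))
	}
}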
@@ -780,6 +782,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
// BestNonFinalized returns the highest known epoch, higher than ours,
// and is shared by at least minPeers.
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
	// Retrieve all connected peers.
	connected := p.Connected()
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
@@ -2,7 +2,6 @@ package peers_test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -328,55 +327,56 @@ func TestPeerWithNilChainState(t *testing.T) {
|
||||
require.Equal(t, resChainState, nothing)
|
||||
}
|
||||
|
||||
func TestPeerBadResponses(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerBadResponses(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
require.NoError(t, err)
|
||||
{
|
||||
_, err := id.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
// require.NoError(t, err)
|
||||
// {
|
||||
// _, err := id.MarshalBinary()
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
require.NoError(t, err, "Failed to create address")
|
||||
direction := network.DirInbound
|
||||
p.Add(new(enr.Record), id, address, direction)
|
||||
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
// require.NoError(t, err, "Failed to create address")
|
||||
// direction := network.DirInbound
|
||||
// p.Add(new(enr.Record), id, address, direction)
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
resBadResponses, err := scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
// resBadResponses, err := scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
}
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// }
|
||||
|
||||
func TestAddMetaData(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
@@ -495,100 +495,102 @@ func TestPeerValidTime(t *testing.T) {
|
||||
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
|
||||
}
|
||||
|
||||
func TestPrune(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPrune(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
if i%7 == 0 {
|
||||
// Peer added as disconnected.
|
||||
_ = addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
// Peer added to peer handler.
|
||||
_ = addPeer(t, p, peers.Connected)
|
||||
}
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// if i%7 == 0 {
|
||||
// // Peer added as disconnected.
|
||||
// _ = addPeer(t, p, peers.PeerDisconnected)
|
||||
// }
|
||||
// // Peer added to peer handler.
|
||||
// _ = addPeer(t, p, peers.PeerConnected)
|
||||
// }
|
||||
|
||||
disPeers := p.Disconnected()
|
||||
firstPID := disPeers[0]
|
||||
secondPID := disPeers[1]
|
||||
thirdPID := disPeers[2]
|
||||
// disPeers := p.Disconnected()
|
||||
// firstPID := disPeers[0]
|
||||
// secondPID := disPeers[1]
|
||||
// thirdPID := disPeers[2]
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
|
||||
// Make first peer a bad peer
|
||||
scorer.Increment(firstPID)
|
||||
scorer.Increment(firstPID)
|
||||
// // Make first peer a bad peer
|
||||
// scorer.Increment(firstPID)
|
||||
// scorer.Increment(firstPID)
|
||||
|
||||
// Add bad response for p2.
|
||||
scorer.Increment(secondPID)
|
||||
// // Add bad response for p2.
|
||||
// scorer.Increment(secondPID)
|
||||
|
||||
// Prune peers
|
||||
p.Prune()
|
||||
// // Prune peers
|
||||
// p.Prune()
|
||||
|
||||
// Bad peer is expected to still be kept in handler.
|
||||
badRes, err := scorer.Count(firstPID)
|
||||
assert.NoError(t, err, "error is supposed to be nil")
|
||||
assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
// // Bad peer is expected to still be kept in handler.
|
||||
// badRes, err := scorer.Count(firstPID)
|
||||
// assert.NoError(t, err, "error is supposed to be nil")
|
||||
// assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
|
||||
// Not so good peer is pruned away so that we can reduce the
|
||||
// total size of the handler.
|
||||
_, err = scorer.Count(secondPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
// // Not so good peer is pruned away so that we can reduce the
|
||||
// // total size of the handler.
|
||||
// _, err = scorer.Count(secondPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
|
||||
// Last peer has been removed.
|
||||
_, err = scorer.Count(thirdPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
}
|
||||
// // Last peer has been removed.
|
||||
// _, err = scorer.Count(thirdPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
// }
|
||||
|
||||
func TestPeerIPTracker(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
defer resetCfg()
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerIPTracker(t *testing.T) {
|
||||
// resetCfg := features.InitWithReset(&features.Flags{
|
||||
// EnablePeerScorer: false,
|
||||
// })
|
||||
// defer resetCfg()
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
var badPeers []peer.ID
|
||||
for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
}
|
||||
for _, pr := range badPeers {
|
||||
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
}
|
||||
// badIP := "211.227.218.116"
|
||||
// var badPeers []peer.ID
|
||||
// for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
// port := strconv.Itoa(3000 + i)
|
||||
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
// }
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
// }
|
||||
|
||||
// Add in bad peers, so that our records are trimmed out
|
||||
// from the peer store.
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// Peer added to peer handler.
|
||||
pid := addPeer(t, p, peers.Disconnected)
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
p.Prune()
|
||||
// // Add in bad peers, so that our records are trimmed out
|
||||
// // from the peer store.
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// // Peer added to peer handler.
|
||||
// pid := addPeer(t, p, peers.PeerDisconnected)
|
||||
// p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
p := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
|
||||
@@ -169,7 +169,7 @@ var (
	RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),

	// DataColumnSidecarsByRoot v1 Message
	RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
	RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
}

// Maps all registered protocol prefixes.
@@ -11,8 +11,6 @@ import (
	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -391,48 +389,49 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
|
||||
return params.ForkDigest(clock.CurrentEpoch())
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
info peer.AddrInfo
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "bad peer",
|
||||
peers: func() *peers.Status {
|
||||
ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
for i := 0; i < 10; i++ {
|
||||
ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
}
|
||||
return ps
|
||||
}(),
|
||||
info: peer.AddrInfo{ID: "bad"},
|
||||
wantErr: "bad peer",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _, _ := createHost(t, 34567)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
ctx := t.Context()
|
||||
s := &Service{
|
||||
host: h,
|
||||
peers: tt.peers,
|
||||
}
|
||||
err := s.connectWithPeer(ctx, tt.info)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// TODO: Uncomment out of devnet.
|
||||
// func TestService_connectWithPeer(t *testing.T) {
|
||||
// params.SetupTestConfigCleanup(t)
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// peers *peers.Status
|
||||
// info peer.AddrInfo
|
||||
// wantErr string
|
||||
// }{
|
||||
// {
|
||||
// name: "bad peer",
|
||||
// peers: func() *peers.Status {
|
||||
// ps := peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// for i := 0; i < 10; i++ {
|
||||
// ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
// }
|
||||
// return ps
|
||||
// }(),
|
||||
// info: peer.AddrInfo{ID: "bad"},
|
||||
// wantErr: "bad peer",
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// h, _, _ := createHost(t, 34567)
|
||||
// defer func() {
|
||||
// if err := h.Close(); err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// }()
|
||||
// ctx := t.Context()
|
||||
// s := &Service{
|
||||
// host: h,
|
||||
// peers: tt.peers,
|
||||
// }
|
||||
// err := s.connectWithPeer(ctx, tt.info)
|
||||
// if len(tt.wantErr) > 0 {
|
||||
// require.ErrorContains(t, tt.wantErr, err)
|
||||
// } else {
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s BlobSidecarsByRootReq) Len() int {
|
||||
return len(s)
|
||||
func (s *BlobSidecarsByRootReq) Len() int {
|
||||
return len(*s)
|
||||
}
|
||||
|
||||
// ====================================
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
# gazelle:ignore
|
||||
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
@@ -37,6 +39,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -47,6 +50,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -181,7 +185,6 @@ common_deps = [
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
]
|
||||
|
||||
# gazelle:ignore
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
timeout = "moderate",
|
||||
|
||||
@@ -29,12 +29,19 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r1, err := eb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
|
||||
bundle := &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{{1, 2, 3}},
|
||||
Proofs: [][]byte{{4, 5, 6}},
|
||||
Blobs: [][]byte{{7, 8, 9}},
|
||||
}
|
||||
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
|
||||
require.NoError(t, err)
|
||||
r2, err := result.GetFulu().Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r1, r2)
|
||||
require.Equal(t, result.IsBlinded, false)
|
||||
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
|
||||
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
|
||||
})
|
||||
|
||||
// Test for Electra version
|
||||
|
||||
@@ -544,4 +544,4 @@ func blobsAndProofs(req *ethpb.GenericSignedBeaconBlock) ([][]byte, [][]byte, er
|
||||
default:
|
||||
return nil, nil, errors.Errorf("unknown request type provided: %T", req)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -69,6 +69,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -89,6 +89,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -120,6 +121,7 @@ type Config struct {
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
@@ -196,6 +198,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
DataColumnStorage: s.cfg.DataColumnStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
@@ -236,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"custody.go",
|
||||
"data_column_sidecars.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
@@ -136,6 +137,7 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -159,7 +161,7 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"batch_verifier_test.go",
|
||||
"blobs_test.go",
|
||||
@@ -167,7 +169,9 @@ go_test(
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"custody_test.go",
|
||||
"data_column_sidecars_test.go",
|
||||
"data_columns_reconstruct_test.go",
|
||||
"data_columns_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
@@ -192,6 +196,7 @@ go_test(
|
||||
"slot_aware_cache_test.go",
|
||||
"subscriber_beacon_aggregate_proof_test.go",
|
||||
"subscriber_beacon_blocks_test.go",
|
||||
"subscriber_data_column_sidecar_trigger_test.go",
|
||||
"subscriber_test.go",
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_fuzz_test.go",
|
||||
@@ -261,6 +266,7 @@ go_test(
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/ecdsa:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/equality:go_default_library",
|
||||
@@ -274,13 +280,17 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
|
||||
@@ -91,9 +91,7 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
|
||||
return err
|
||||
}
|
||||
|
||||
sc := blocks.NewSidecarFromBlobSidecar(rb)
|
||||
|
||||
if err := bs.store.Persist(bs.current, sc); err != nil {
|
||||
if err := bs.store.Persist(bs.current, rb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ func (w *p2pWorker) run(ctx context.Context) {
|
||||
case b := <-w.todo:
|
||||
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
|
||||
if b.state == batchBlobSync {
|
||||
w.done <- w.handleBlobs(ctx, b)
|
||||
w.done <- w.handleSidecars(ctx, b)
|
||||
} else {
|
||||
w.done <- w.handleBlocks(ctx, b)
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
|
||||
return b.withResults(vb, bs)
|
||||
}
|
||||
|
||||
func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
|
||||
func (w *p2pWorker) handleSidecars(ctx context.Context, b batch) batch {
|
||||
b.blobPid = b.busy
|
||||
start := time.Now()
|
||||
// we don't need to use the response for anything other than metrics, because blobResponseValidation
|
||||
|
||||
@@ -175,7 +175,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
chain, clock := defaultMockChain(t)
|
||||
chain, clock := defaultMockChain(t, 0)
|
||||
if c.chain == nil {
|
||||
c.chain = chain
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
func defaultMockChain(t *testing.T, currentSlot uint64) (*mock.ChainService, *startup.Clock) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
df, err := params.Fork(de)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -78,9 +78,10 @@ func (bb *blockRangeBatcher) next(ctx context.Context, stream libp2pcore.Stream)
|
||||
if !more {
|
||||
return blockBatch{}, false
|
||||
}
|
||||
if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
}
|
||||
// TODO: Uncomment out of devnet.
|
||||
// if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
// return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
// }
|
||||
|
||||
// Wait for the ticker before doing anything expensive, unless this is the first batch.
|
||||
if bb.ticker != nil && bb.current != nil {
|
||||
|
||||
beacon-chain/sync/data_column_sidecars.go (new file, 869 lines)
@@ -0,0 +1,869 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
prysmP2P "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
goPeer "github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DataColumnSidecarsParams stores the common parameters needed to
|
||||
// fetch data column sidecars from peers.
|
||||
type DataColumnSidecarsParams struct {
|
||||
Ctx context.Context // Context
|
||||
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
|
||||
P2P prysmP2P.P2P // P2P network interface
|
||||
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
|
||||
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
|
||||
Storage filesystem.DataColumnStorageReader // Data columns storage
|
||||
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
|
||||
}
|
||||
|
||||
// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
|
||||
// blocks and requested data column indices. It employs a multi-step strategy:
|
||||
//
|
||||
// 1. Direct retrieval: If all requested columns are available in storage, they are
|
||||
// retrieved directly without reconstruction.
|
||||
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
|
||||
// stored columns exist (at least the minimum required for reconstruction), the function
|
||||
// reconstructs all columns and extracts the requested indices.
|
||||
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
|
||||
// from connected peers that are expected to custody the required data.
|
||||
//
|
||||
// The function returns a map of block roots to their corresponding verified read-only data
|
||||
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
|
||||
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
|
||||
func FetchDataColumnSidecars(
|
||||
params DataColumnSidecarsParams,
|
||||
roBlocks []blocks.ROBlock,
|
||||
indicesMap map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
if len(roBlocks) == 0 || len(indicesMap) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
indices := sortedSliceFromMap(indicesMap)
|
||||
slotsWithCommitments := make(map[primitives.Slot]bool)
|
||||
indicesByRootToQuery := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
|
||||
for _, roBlock := range roBlocks {
|
||||
// Filter out blocks without commitments.
|
||||
block := roBlock.Block()
|
||||
commitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
|
||||
}
|
||||
if len(commitments) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
slotsWithCommitments[block.Slot()] = true
|
||||
root := roBlock.Root()
|
||||
|
||||
// Step 1: Get the requested sidecars for this root if available in storage
|
||||
requestedColumns, err := tryGetDirectColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage
|
||||
requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get reconstructed columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)
|
||||
|
||||
if len(indicesToQueryMap) > 0 {
|
||||
indicesByRootToQuery[root] = indicesToQueryMap
|
||||
}
|
||||
if len(indicesStoredMap) > 0 {
|
||||
indicesByRootStored[root] = indicesStoredMap
|
||||
}
|
||||
}
|
||||
|
||||
// Early return if no sidecars need to be queried from peers.
|
||||
if len(indicesByRootToQuery) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Step 3b: Request missing sidecars from peers.
|
||||
start, count := time.Now(), computeTotalCount(indicesByRootToQuery)
|
||||
fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, indicesByRootToQuery)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "request from peers")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")
|
||||
|
||||
for root, verifiedSidecars := range fromPeersResult {
|
||||
result[root] = append(result[root], verifiedSidecars...)
|
||||
}
|
||||
|
||||
// Step 3c: Load the stored sidecars.
|
||||
for root, indicesStored := range indicesByRootStored {
|
||||
requestedColumns, err := tryGetDirectColumns(params.Storage, root, sortedSliceFromMap(indicesStored))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
}
|
||||
|
||||
result[root] = append(result[root], requestedColumns...)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
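
// Example (illustrative sketch): how a caller could assemble DataColumnSidecarsParams and
// invoke FetchDataColumnSidecars. The dependencies are assumed to be provided by the
// surrounding service, and the requested indices below are arbitrary placeholder values.
func exampleFetchDataColumnSidecars(
	ctx context.Context,
	clock blockchain.TemporalOracle,
	p2p prysmP2P.P2P,
	rateLimiter *leakybucket.Collector,
	ctxMap ContextByteVersions,
	storage filesystem.DataColumnStorageReader,
	newVerifier verification.NewDataColumnsVerifier,
	roBlocks []blocks.ROBlock,
) error {
	fetchParams := DataColumnSidecarsParams{
		Ctx:         ctx,
		Tor:         clock,
		P2P:         p2p,
		RateLimiter: rateLimiter,
		CtxMap:      ctxMap,
		Storage:     storage,
		NewVerifier: newVerifier,
	}

	// Request a handful of (placeholder) column indices for every block carrying commitments.
	indices := map[uint64]bool{0: true, 1: true, 2: true}

	columnsByRoot, err := FetchDataColumnSidecars(fetchParams, roBlocks, indices)
	if err != nil {
		return errors.Wrap(err, "fetch data column sidecars")
	}

	log.WithFields(logrus.Fields{
		"blocksWithColumns": len(columnsByRoot),
	}).Debug("Fetched data column sidecars")

	return nil
}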
|
||||
|
||||
// tryGetDirectColumns attempts to retrieve all requested columns directly from storage
|
||||
// if they are all available. Returns the columns if successful, and nil if at least one
|
||||
// requested sidecar is not available in the storage.
|
||||
func tryGetDirectColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if all requested indices are present in storage
|
||||
storedIndices := storage.Summary(blockRoot).Stored()
|
||||
allRequestedPresent := true
|
||||
for _, requestedIndex := range indices {
|
||||
if !storedIndices[requestedIndex] {
|
||||
allRequestedPresent = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allRequestedPresent {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// All requested data is present, retrieve it directly from storage
|
||||
requestedColumns, err := storage.Get(blockRoot, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// tryGetReconstructedColumns attempts to retrieve columns using reconstruction
|
||||
// if sufficient columns are available. Returns the columns if successful, (nil, nil) if
// there are not enough stored columns for reconstruction, or (nil, err) if an error occurs.
|
||||
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if we have enough columns for reconstruction
|
||||
summary := storage.Summary(blockRoot)
|
||||
if summary.Count() < peerdas.MinimumColumnCountToReconstruct() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Retrieve all stored columns for reconstruction
|
||||
allStoredColumns, err := storage.Get(blockRoot, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Attempt reconstruction
|
||||
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Health check: ensure we have the expected number of columns
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
if uint64(len(reconstructedColumns)) != numberOfColumns {
|
||||
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
|
||||
}
|
||||
|
||||
// Extract only the requested indices from reconstructed data using direct indexing
|
||||
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for _, requestedIndex := range indices {
|
||||
if requestedIndex >= numberOfColumns {
|
||||
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
|
||||
}
|
||||
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// categorizeIndices separates indices into those that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
|
||||
indicesToQuery := make(map[uint64]bool, len(indices))
|
||||
indicesStored := make(map[uint64]bool, len(indices))
|
||||
|
||||
allStoredIndices := storage.Summary(blockRoot).Stored()
|
||||
for _, index := range indices {
|
||||
if allStoredIndices[index] {
|
||||
indicesStored[index] = true
|
||||
continue
|
||||
}
|
||||
indicesToQuery[index] = true
|
||||
}
|
||||
|
||||
return indicesToQuery, indicesStored
|
||||
}
|
||||
|
||||
// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
|
||||
// It explores the connected peers to find those that are expected to custody the requested columns
|
||||
// and returns only when every requested column has either been retrieved or been attempted
// from all possible peers.
// Returns a map of block roots to their verified read-only data column sidecars.
|
||||
// Returns an error if at least one requested column could not be retrieved.
|
||||
// WARNING: This function alters `missingIndicesByRoot`. The caller should NOT use it after running this function.
|
||||
func tryRequestingColumnsFromPeers(
|
||||
p DataColumnSidecarsParams,
|
||||
roBlocks []blocks.ROBlock,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
// Create a new random source for peer selection.
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
// Compute slots by block root.
|
||||
slotByRoot := computeSlotByBlockRoot(roBlocks)
|
||||
|
||||
// Determine all sidecars each peer is expected to custody.
|
||||
connectedPeersSlice := p.P2P.Peers().Connected()
|
||||
connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
|
||||
for _, peer := range connectedPeersSlice {
|
||||
connectedPeers[peer] = true
|
||||
}
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
|
||||
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers")
|
||||
}
|
||||
|
||||
// Remove selected peers from the maps.
|
||||
for peer := range indicesByRootByPeerToQuery {
|
||||
delete(connectedPeers, peer)
|
||||
}
|
||||
|
||||
// Fetch the sidecars from the chosen peers.
|
||||
roDataColumnsByPeer := fetchDataColumnSidecarsFromPeers(p, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)
|
||||
|
||||
// Verify the received data column sidecars.
|
||||
verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(p.P2P, p.NewVerifier, roDataColumnsByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
|
||||
}
|
||||
|
||||
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
|
||||
newMissingIndicesByRoot, localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
|
||||
missingIndicesByRoot = newMissingIndicesByRoot
|
||||
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
|
||||
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
|
||||
}
|
||||
|
||||
// Compute indices by root by peers with the updated missing indices and connected peers.
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingIndicesByRoot) > 0 {
|
||||
return nil, errors.New("not all requested data column sidecars were retrieved from peers")
|
||||
}
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
// selectPeers selects peers to query the sidecars.
|
||||
// It begins by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth,
|
||||
// and assigns to it all its available sidecars. Then, it randomly selects another peer, until
|
||||
// all sidecars in `missingIndicesByRoot` are covered.
|
||||
func selectPeers(
|
||||
p DataColumnSidecarsParams,
|
||||
randomSource *rand.Rand,
|
||||
count int,
|
||||
origIndicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
const randomPeerTimeout = 30 * time.Second
|
||||
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeer := copyIndicesByRootByPeer(origIndicesByRootByPeer)
|
||||
internalIndicesByRootByPeer := copyIndicesByRootByPeer(indicesByRootByPeer)
|
||||
indicesByRootByPeerToQuery := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
for len(internalIndicesByRootByPeer) > 0 {
|
||||
// Randomly select a peer with enough bandwidth.
|
||||
peer, err := func() (goPeer.ID, error) {
|
||||
ctx, cancel := context.WithTimeout(p.Ctx, randomPeerTimeout)
|
||||
defer cancel()
|
||||
|
||||
peer, err := randomPeer(ctx, randomSource, p.RateLimiter, count, internalIndicesByRootByPeer)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "select random peer")
|
||||
}
|
||||
|
||||
return peer, err
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Query all the sidecars that peer can offer us.
|
||||
newIndicesByRoot, ok := internalIndicesByRootByPeer[peer]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("peer %s not found in internal indices by root by peer map", peer)
|
||||
}
|
||||
|
||||
indicesByRootByPeerToQuery[peer] = newIndicesByRoot
|
||||
|
||||
// Remove this peer from the maps to avoid re-selection.
|
||||
delete(indicesByRootByPeer, peer)
|
||||
delete(internalIndicesByRootByPeer, peer)
|
||||
|
||||
// Delete the corresponding sidecars from other peers in the internal map
|
||||
// to avoid re-selection during this iteration.
|
||||
for peer, indicesByRoot := range internalIndicesByRootByPeer {
|
||||
for root, indices := range indicesByRoot {
|
||||
newIndices := newIndicesByRoot[root]
|
||||
for index := range newIndices {
|
||||
delete(indices, index)
|
||||
}
|
||||
if len(indices) == 0 {
|
||||
delete(indicesByRoot, root)
|
||||
}
|
||||
}
|
||||
if len(indicesByRoot) == 0 {
|
||||
delete(internalIndicesByRootByPeer, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return indicesByRootByPeerToQuery, nil
|
||||
}
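
// Worked example (hypothetical values): if peer A and peer B both offer column 3 for
// root R, and peer A is drawn first, A is assigned {R: {3}} and column 3 is removed
// from B's remaining offer, so each missing sidecar ends up queried from exactly one
// peer in the returned assignment.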
|
||||
|
||||
// updateResults updates the missing indices and verified sidecars maps based on the newly verified sidecars.
|
||||
func updateResults(
|
||||
verifiedSidecars []blocks.VerifiedRODataColumn,
|
||||
origMissingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte]map[uint64]bool, map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn) {
|
||||
// Copy the original map to avoid modifying it directly.
|
||||
missingIndicesByRoot := copyIndicesByRoot(origMissingIndicesByRoot)
|
||||
verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for _, verifiedSidecar := range verifiedSidecars {
|
||||
blockRoot := verifiedSidecar.BlockRoot()
|
||||
index := verifiedSidecar.Index
|
||||
|
||||
// Add to the result map grouped by block root
|
||||
verifiedSidecarsByRoot[blockRoot] = append(verifiedSidecarsByRoot[blockRoot], verifiedSidecar)
|
||||
|
||||
if indices, ok := missingIndicesByRoot[blockRoot]; ok {
|
||||
delete(indices, index)
|
||||
if len(indices) == 0 {
|
||||
delete(missingIndicesByRoot, blockRoot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return missingIndicesByRoot, verifiedSidecarsByRoot
|
||||
}
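
// Worked example (hypothetical values): if root R was missing columns {4, 7} and a
// verified sidecar for (R, 4) arrives, the returned missing map keeps only {7} for R
// and the returned verified map gains R -> [sidecar 4]. The missing map passed in by
// the caller is left untouched because it is deep-copied first.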
|
||||
|
||||
// fetchDataColumnSidecarsFromPeers retrieves data column sidecars from peers.
|
||||
func fetchDataColumnSidecarsFromPeers(
|
||||
params DataColumnSidecarsParams,
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) map[goPeer.ID][]blocks.RODataColumn {
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
mut sync.Mutex
|
||||
)
|
||||
|
||||
roDataColumnsByPeer := make(map[goPeer.ID][]blocks.RODataColumn)
|
||||
wg.Add(len(indicesByRootByPeer))
|
||||
for peerID, indicesByRoot := range indicesByRootByPeer {
|
||||
go func(peerID goPeer.ID, indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) {
|
||||
defer wg.Done()
|
||||
|
||||
requestedCount := 0
|
||||
for _, indices := range indicesByRoot {
|
||||
requestedCount += len(indices)
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peerID": peerID,
|
||||
"agent": agentString(peerID, params.P2P.Host()),
|
||||
"blockCount": len(indicesByRoot),
|
||||
"totalRequestedCount": requestedCount,
|
||||
})
|
||||
|
||||
roDataColumns, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, peerID, indicesByRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Warning("Failed to send data column sidecars request")
|
||||
return
|
||||
}
|
||||
|
||||
mut.Lock()
|
||||
defer mut.Unlock()
|
||||
roDataColumnsByPeer[peerID] = roDataColumns
|
||||
}(peerID, indicesByRoot)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return roDataColumnsByPeer
|
||||
}
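
// Note (summary of the behavior above): requests fan out with one goroutine per selected
// peer, results are collected under a mutex, and a peer whose request fails is simply
// omitted from the returned map so its columns can be retried with other peers later.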
|
||||
|
||||
func sendDataColumnSidecarsRequest(
|
||||
params DataColumnSidecarsParams,
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
peerID goPeer.ID,
|
||||
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
const batchSize = 32
|
||||
|
||||
rootCount := int64(len(indicesByRoot))
|
||||
requestedSidecarsCount := 0
|
||||
for _, indices := range indicesByRoot {
|
||||
requestedSidecarsCount += len(indices)
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peerID": peerID,
|
||||
"agent": agentString(peerID, params.P2P.Host()),
|
||||
"requestedSidecars": requestedSidecarsCount,
|
||||
})
|
||||
|
||||
// Try to build a by-range request first.
|
||||
byRangeRequests, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "craft by range request")
|
||||
}
|
||||
|
||||
// If we have a valid by range request, send it.
|
||||
if len(byRangeRequests) > 0 {
|
||||
count := 0
|
||||
for _, indices := range indicesByRoot {
|
||||
count += len(indices)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
roDataColumns := make([]blocks.RODataColumn, 0, count)
|
||||
for _, request := range byRangeRequests {
|
||||
params.RateLimiter.Add(peerID.String(), rootCount)
|
||||
localRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peerID, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "send data column sidecars by range request to peer %s", peerID)
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, localRoDataColumns...)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"respondedSidecars": len(roDataColumns),
|
||||
"requests": len(byRangeRequests),
|
||||
"type": "byRange",
|
||||
"duration": time.Since(start),
|
||||
}).Debug("Received data column sidecars")
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// Build identifiers for the by root request.
|
||||
byRootRequest := buildByRootRequest(indicesByRoot)
|
||||
|
||||
// Send the by root request.
|
||||
start := time.Now()
|
||||
params.RateLimiter.Add(peerID.String(), rootCount)
|
||||
roDataColumns, err := SendDataColumnSidecarsByRootRequest(params, peerID, byRootRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "send data column sidecars by root request to peer %s", peerID)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"respondedSidecars": len(roDataColumns),
|
||||
"requests": 1,
|
||||
"type": "byRoot",
|
||||
"duration": time.Since(start),
|
||||
}).Debug("Received data column sidecars")
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// buildByRangeRequests constructs a by range request from the given indices,
|
||||
// only if the requested indices are the same for all blocks and if the blocks are contiguous.
// (Missing blocks or blocks without commitments still count as contiguous.)
// If one of these conditions is not met, it returns nil.
|
||||
func buildByRangeRequests(
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
batchSize uint64,
|
||||
) ([]*ethpb.DataColumnSidecarsByRangeRequest, error) {
|
||||
if len(indicesByRoot) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var reference map[uint64]bool
|
||||
slots := make([]primitives.Slot, 0, len(slotByRoot))
|
||||
for root, indices := range indicesByRoot {
|
||||
if reference == nil {
|
||||
reference = indices
|
||||
}
|
||||
|
||||
if !compareIndices(reference, indices) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
slot, ok := slotByRoot[root]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("slot not found for block root %#x", root)
|
||||
}
|
||||
|
||||
slots = append(slots, slot)
|
||||
}
|
||||
|
||||
slices.Sort(slots)
|
||||
|
||||
for i := 1; i < len(slots); i++ {
|
||||
previous, current := slots[i-1], slots[i]
|
||||
if current == previous+1 {
|
||||
continue
|
||||
}
|
||||
|
||||
for j := previous + 1; j < current; j++ {
|
||||
if slotsWithCommitments[j] {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
columns := sortedSliceFromMap(reference)
|
||||
startSlot, endSlot := slots[0], slots[len(slots)-1]
|
||||
totalCount := uint64(endSlot - startSlot + 1)
|
||||
|
||||
requests := make([]*ethpb.DataColumnSidecarsByRangeRequest, 0, totalCount/batchSize)
|
||||
for start := startSlot; start <= endSlot; start += primitives.Slot(batchSize) {
|
||||
end := min(start+primitives.Slot(batchSize)-1, endSlot)
|
||||
request := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: start,
|
||||
Count: uint64(end - start + 1),
|
||||
Columns: columns,
|
||||
}
|
||||
|
||||
requests = append(requests, request)
|
||||
}
|
||||
|
||||
return requests, nil
|
||||
}
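
// Worked example (hypothetical values): for blocks at slots 10 through 80 that all miss
// columns {4, 7}, a batchSize of 32 yields three requests covering slots [10..41],
// [42..73] and [74..80], each with Columns = [4, 7].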
|
||||
|
||||
// buildByRootRequest constructs a by root request from the given indices.
|
||||
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
|
||||
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
|
||||
for root, indices := range indicesByRoot {
|
||||
identifier := ð.DataColumnsByRootIdentifier{
|
||||
BlockRoot: root[:],
|
||||
Columns: sortedSliceFromMap(indices),
|
||||
}
|
||||
identifiers = append(identifiers, identifier)
|
||||
}
|
||||
|
||||
// Sort identifiers to have a deterministic output.
|
||||
slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
|
||||
if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
return slices.Compare(left.Columns, right.Columns)
|
||||
})
|
||||
|
||||
return identifiers
|
||||
}
|
||||
|
||||
// verifyDataColumnSidecarsByPeer verifies the received data column sidecars.
|
||||
// If at least one sidecar from a peer is invalid, the peer is downscored and
|
||||
// all its sidecars are rejected. (Sidecars from other peers are still accepted.)
|
||||
func verifyDataColumnSidecarsByPeer(
|
||||
p2p prysmP2P.P2P,
|
||||
newVerifier verification.NewDataColumnsVerifier,
|
||||
roDataColumnsByPeer map[goPeer.ID][]blocks.RODataColumn,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// First optimistically verify all received data columns in a single batch.
|
||||
count := 0
|
||||
for _, columns := range roDataColumnsByPeer {
|
||||
count += len(columns)
|
||||
}
|
||||
|
||||
roDataColumnSidecars := make([]blocks.RODataColumn, 0, count)
|
||||
for _, columns := range roDataColumnsByPeer {
|
||||
roDataColumnSidecars = append(roDataColumnSidecars, columns...)
|
||||
}
|
||||
|
||||
verifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, roDataColumnSidecars)
|
||||
if err == nil {
|
||||
// This is the happy path where all sidecars are verified.
|
||||
return verifiedRoDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// An error occurred during verification, which means that at least one sidecar is invalid.
|
||||
// Reverify peer by peer to identify the faulty peer(s), reject all of their sidecars, and downscore them.
|
||||
verifiedRoDataColumnSidecars = make([]blocks.VerifiedRODataColumn, 0, count)
|
||||
for peer, columns := range roDataColumnsByPeer {
|
||||
peerVerifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, columns)
|
||||
if err != nil {
|
||||
// This peer has invalid sidecars.
|
||||
log := log.WithError(err).WithField("peerID", peer)
|
||||
newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peer)
|
||||
log.Warning("Peer returned invalid data column sidecars")
|
||||
log.WithFields(logrus.Fields{"reason": "invalidDataColumnSidecars", "newScore": newScore}).Debug("Downscore peer")
|
||||
}
|
||||
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, peerVerifiedRoDataColumnSidecars...)
|
||||
}
|
||||
|
||||
return verifiedRoDataColumnSidecars, nil
|
||||
}
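
// Worked example (hypothetical values): if peer A returns only valid sidecars while one
// of peer B's sidecars fails the KZG proof check, the initial batch verification fails;
// the per-peer pass then keeps all of A's sidecars, drops all of B's, and increments B's
// bad-response score.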
|
||||
|
||||
// verifyByRootDataColumnSidecars verifies the provided read-only data columns against the
|
||||
// requirements for data column sidecars received via the by root request.
|
||||
func verifyByRootDataColumnSidecars(newVerifier verification.NewDataColumnsVerifier, roDataColumns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
verifier := newVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return nil, errors.Wrap(err, "valid fields")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verified RO data columns - should never happen")
|
||||
}
|
||||
|
||||
return verifiedRoDataColumns, nil
|
||||
}
|
||||
|
||||
// computeIndicesByRootByPeer returns a peer->root->indices map restricted to the
// roots and indices given in `indicesByBlockRoot`. A peer is selected for a given
// root only if its head slot is not lower than the block's slot.
|
||||
func computeIndicesByRootByPeer(
|
||||
p2p prysmP2P.P2P,
|
||||
slotByBlockRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
peers map[goPeer.ID]bool,
|
||||
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
// First, compute custody columns for all peers
|
||||
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
|
||||
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
|
||||
for peer := range peers {
|
||||
// Computes the custody columns for each peer
|
||||
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
|
||||
}
|
||||
|
||||
custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)
|
||||
|
||||
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
|
||||
}
|
||||
|
||||
for column := range dasInfo.CustodyColumns {
|
||||
if _, exists := peersByIndex[column]; !exists {
|
||||
peersByIndex[column] = make(map[goPeer.ID]bool)
|
||||
}
|
||||
peersByIndex[column][peer] = true
|
||||
}
|
||||
|
||||
// Compute the head slot for each peer
|
||||
peerChainState, err := p2p.Peers().ChainState(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
|
||||
}
|
||||
|
||||
if peerChainState == nil {
|
||||
return nil, errors.Errorf("chain state is nil for peer %s", peer)
|
||||
}
|
||||
|
||||
headSlotByPeer[peer] = peerChainState.HeadSlot
|
||||
}
|
||||
|
||||
// For each block root and its indices, find suitable peers
|
||||
indicesByRootByPeer := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
for blockRoot, indices := range indicesByBlockRoot {
|
||||
blockSlot, ok := slotByBlockRoot[blockRoot]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("slot not found for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
for index := range indices {
|
||||
peers := peersByIndex[index]
|
||||
for peer := range peers {
|
||||
peerHeadSlot, ok := headSlotByPeer[peer]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("head slot not found for peer %s", peer)
|
||||
}
|
||||
|
||||
if peerHeadSlot < blockSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
// Build peers->root->indices map
|
||||
if _, exists := indicesByRootByPeer[peer]; !exists {
|
||||
indicesByRootByPeer[peer] = make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
}
|
||||
if _, exists := indicesByRootByPeer[peer][blockRoot]; !exists {
|
||||
indicesByRootByPeer[peer][blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
indicesByRootByPeer[peer][blockRoot][index] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return indicesByRootByPeer, nil
|
||||
}
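
// Worked example (hypothetical values): a peer custodying columns {3, 5} with head slot 100
// is assigned index 3 for a root at slot 90 that is missing {3}; a peer whose head slot is
// only 80 is skipped for that root even if it custodies the column.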
|
||||
|
||||
// randomPeer selects a random peer. If no peer has enough bandwidth, it waits and retries.
|
||||
// Returns the selected peer ID and any error.
|
||||
func randomPeer(
|
||||
ctx context.Context,
|
||||
randomSource *rand.Rand,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
count int,
|
||||
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (goPeer.ID, error) {
|
||||
const waitPeriod = 5 * time.Second
|
||||
|
||||
peerCount := len(indicesByRootByPeer)
|
||||
if peerCount == 0 {
|
||||
return "", errors.New("no peers available")
|
||||
}
|
||||
|
||||
for ctx.Err() == nil {
|
||||
nonRateLimitedPeers := make([]goPeer.ID, 0, len(indicesByRootByPeer))
|
||||
for peer := range indicesByRootByPeer {
|
||||
remaining := rateLimiter.Remaining(peer.String())
|
||||
if remaining >= int64(count) {
|
||||
nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
if len(nonRateLimitedPeers) == 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerCount": peerCount,
|
||||
"delay": waitPeriod,
|
||||
}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
|
||||
time.Sleep(waitPeriod)
|
||||
continue
|
||||
}
|
||||
|
||||
randomIndex := randomSource.Intn(len(nonRateLimitedPeers))
|
||||
return nonRateLimitedPeers[randomIndex], nil
|
||||
}
|
||||
|
||||
return "", ctx.Err()
|
||||
}
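
// Worked example (hypothetical values): with count = 3, a peer whose rate limiter reports
// Remaining("peerX") = 2 is skipped this round; if every candidate is below the requested
// count, the function sleeps for waitPeriod and retries until the context expires.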
|
||||
|
||||
// copyIndicesByRootByPeer creates a deep copy of the given nested map.
|
||||
// Returns a new map with the same structure and contents.
|
||||
func copyIndicesByRootByPeer(original map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool) map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool {
|
||||
copied := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
|
||||
for peer, indicesByRoot := range original {
|
||||
copied[peer] = copyIndicesByRoot(indicesByRoot)
|
||||
}
|
||||
|
||||
return copied
|
||||
}
|
||||
|
||||
// copyIndicesByRoot creates a deep copy of the given nested map.
|
||||
// Returns a new map with the same structure and contents.
|
||||
func copyIndicesByRoot(original map[[fieldparams.RootLength]byte]map[uint64]bool) map[[fieldparams.RootLength]byte]map[uint64]bool {
|
||||
copied := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
|
||||
for root, indexMap := range original {
|
||||
copied[root] = make(map[uint64]bool, len(indexMap))
|
||||
for index, value := range indexMap {
|
||||
copied[root][index] = value
|
||||
}
|
||||
}
|
||||
return copied
|
||||
}
|
||||
|
||||
// compareIndices compares two map[uint64]bool and returns true if they are equal.
|
||||
func compareIndices(left, right map[uint64]bool) bool {
|
||||
if len(left) != len(right) {
|
||||
return false
|
||||
}
|
||||
|
||||
for key, leftValue := range left {
|
||||
rightValue, exists := right[key]
|
||||
if !exists || leftValue != rightValue {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// sortedSliceFromMap converts a map[uint64]bool to a sorted slice of keys.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
slices.Sort(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// computeSlotByBlockRoot maps each block root to its corresponding slot.
|
||||
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
|
||||
slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
|
||||
for _, roBlock := range roBlocks {
|
||||
slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
|
||||
}
|
||||
return slotByBlockRoot
|
||||
}
|
||||
|
||||
// computeTotalCount calculates the total count of indices across all roots.
|
||||
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
totalCount := 0
|
||||
for _, indices := range input {
|
||||
totalCount += len(indices)
|
||||
}
|
||||
return totalCount
|
||||
}
|
||||
beacon-chain/sync/data_column_sidecars_test.go (new file, 984 lines)
@@ -0,0 +1,984 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
// Slot 1: All needed sidecars are available in storage
|
||||
// Slot 2: No commitment
|
||||
// Slot 3: All sidecars are saved except the needed ones
// Slot 4: Some sidecars are in storage, others have to be retrieved from peers.
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
const blobCount = 3
|
||||
indices := map[uint64]bool{31: true, 81: true, 106: true}
|
||||
|
||||
// Block 1
|
||||
block1, _, verifiedSidecars1 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(1))
|
||||
root1 := block1.Root()
|
||||
|
||||
toStore1 := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for index := range indices {
|
||||
sidecar := verifiedSidecars1[index]
|
||||
toStore1 = append(toStore1, sidecar)
|
||||
}
|
||||
|
||||
err = storage.Save(toStore1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 2
|
||||
block2, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(2))
|
||||
|
||||
// Block 3
|
||||
block3, _, verifiedSidecars3 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(3))
|
||||
root3 := block3.Root()
|
||||
|
||||
toStore3 := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns-uint64(len(indices)))
|
||||
for i := range numberOfColumns {
|
||||
if !indices[i] {
|
||||
sidecar := verifiedSidecars3[i]
|
||||
toStore3 = append(toStore3, sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
err = storage.Save(toStore3)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 4
|
||||
block4, _, verifiedSidecars4 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(4))
|
||||
root4 := block4.Root()
|
||||
toStore4 := []blocks.VerifiedRODataColumn{verifiedSidecars4[106]}
|
||||
|
||||
err = storage.Save(toStore4)
|
||||
require.NoError(t, err)
|
||||
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
// Peers
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
|
||||
p2p.Connect(other)
|
||||
|
||||
p2p.Peers().SetChainState(other.PeerID(), ðpb.StatusV2{
|
||||
HeadSlot: 4,
|
||||
})
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 4,
|
||||
Count: 1,
|
||||
Columns: []uint64{31, 81},
|
||||
}
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[31].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
|
||||
CtxMap: ctxMap,
|
||||
Storage: storage,
|
||||
NewVerifier: newDataColumnsVerifier,
|
||||
}
|
||||
|
||||
expected := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||
root1: {verifiedSidecars1[31], verifiedSidecars1[81], verifiedSidecars1[106]},
|
||||
// no root2 (no commitments in this block)
|
||||
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
|
||||
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
|
||||
}
|
||||
|
||||
blocks := []blocks.ROBlock{block1, block2, block3, block4}
|
||||
actual, err := FetchDataColumnSidecars(params, blocks, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for root := range expected {
|
||||
require.Equal(t, len(expected[root]), len(actual[root]))
|
||||
for i := range expected[root] {
|
||||
require.DeepSSZEqual(t, expected[root][i], actual[root][i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCategorizeIndices(t *testing.T) {
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||
})
|
||||
|
||||
err := storage.Save(verifiedRoSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedToQuery := map[uint64]bool{13: true}
|
||||
expectedStored := map[uint64]bool{12: true, 14: true}
|
||||
|
||||
actualToQuery, actualStored := categorizeIndices(storage, verifiedRoSidecars[0].BlockRoot(), []uint64{12, 13, 14})
|
||||
|
||||
require.Equal(t, len(expectedToQuery), len(actualToQuery))
|
||||
require.Equal(t, len(expectedStored), len(actualStored))
|
||||
|
||||
for index := range expectedToQuery {
|
||||
require.Equal(t, true, actualToQuery[index])
|
||||
}
|
||||
for index := range expectedStored {
|
||||
require.Equal(t, true, actualStored[index])
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelectPeers(t *testing.T) {
|
||||
const (
|
||||
count = 3
|
||||
seed = 46
|
||||
)
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
"peer1": {
|
||||
{1}: {12: true, 13: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 15: true},
|
||||
},
|
||||
"peer2": {
|
||||
{1}: {13: true, 14: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 16: true},
|
||||
},
|
||||
}
|
||||
|
||||
expected_1 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
"peer1": {
|
||||
{1}: {12: true, 13: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 15: true},
|
||||
},
|
||||
"peer2": {
|
||||
{1}: {14: true},
|
||||
{3}: {16: true},
|
||||
},
|
||||
}
|
||||
|
||||
expected_2 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
"peer1": {
|
||||
{1}: {12: true},
|
||||
{3}: {15: true},
|
||||
},
|
||||
"peer2": {
|
||||
{1}: {13: true, 14: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 16: true},
|
||||
},
|
||||
}
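// Peer selection is randomized (a random source is passed to selectPeers), so the overlapping (root, column) pairs can
// land on either peer; the test computes the actual assignment first, then compares against the matching expectation.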
|
||||
|
||||
actual, err := selectPeers(params, randomSource, count, indicesByRootByPeer)
|
||||
|
||||
expected := expected_1
|
||||
if len(actual["peer1"]) == 2 {
|
||||
expected = expected_2
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for peerID := range expected {
|
||||
require.Equal(t, len(expected[peerID]), len(actual[peerID]))
|
||||
for root := range expected[peerID] {
|
||||
require.Equal(t, len(expected[peerID][root]), len(actual[peerID][root]))
|
||||
for indices := range expected[peerID][root] {
|
||||
require.Equal(t, expected[peerID][root][indices], actual[peerID][root][indices])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateResults(t *testing.T) {
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||
})
|
||||
|
||||
missingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
verifiedSidecars[0].BlockRoot(): {12: true, 13: true},
|
||||
verifiedSidecars[2].BlockRoot(): {13: true, 14: true, 15: true},
|
||||
}
|
||||
|
||||
expectedMissingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
verifiedSidecars[2].BlockRoot(): {15: true},
|
||||
}
|
||||
|
||||
expectedVerifiedSidecarsByRoot := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||
verifiedSidecars[0].BlockRoot(): {verifiedSidecars[0], verifiedSidecars[1]},
|
||||
verifiedSidecars[2].BlockRoot(): {verifiedSidecars[2], verifiedSidecars[3]},
|
||||
}
|
||||
|
||||
actualMissingIndicesByRoot, actualVerifiedSidecarsByRoot := updateResults(verifiedSidecars, missingIndicesByRoot)
|
||||
require.DeepEqual(t, expectedMissingIndicesByRoot, actualMissingIndicesByRoot)
|
||||
require.DeepEqual(t, expectedVerifiedSidecarsByRoot, actualVerifiedSidecarsByRoot)
|
||||
}
|
||||
|
||||
func TestFetchDataColumnSidecarsFromPeers(t *testing.T) {
|
||||
const count = 4
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
expectedResponseSidecarPb := ðpb.DataColumnSidecar{
|
||||
Index: 2,
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
expectedResponseSidecar, err := blocks.NewRODataColumn(expectedResponseSidecarPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{3}: 3,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 1,
|
||||
Count: 7,
|
||||
Columns: []uint64{1, 2},
|
||||
}
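// All four roots miss the same columns {1, 2} across slots 1 through 7, hence the single by-range request expected above.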
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||
p2p.Connect(other)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, receivedRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponseSidecarPb)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
other.PeerID(): {
|
||||
{1}: {1: true, 2: true},
|
||||
{3}: {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
},
|
||||
}
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
expectedResponse := map[peer.ID][]blocks.RODataColumn{
|
||||
other.PeerID(): {expectedResponseSidecar},
|
||||
}
|
||||
|
||||
actualResponse := fetchDataColumnSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, indicesByRootByPeer)
|
||||
require.Equal(t, len(expectedResponse), len(actualResponse))
|
||||
|
||||
for peerID := range expectedResponse {
|
||||
require.DeepSSZEqual(t, expectedResponse[peerID], actualResponse[peerID])
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendDataColumnSidecarsRequest(t *testing.T) {
|
||||
const count = 4
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
expectedResponsePb := ðpb.DataColumnSidecar{
|
||||
Index: 2,
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
expectedResponse, err := blocks.NewRODataColumn(expectedResponsePb)
|
||||
require.NoError(t, err)
|
||||
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("contiguous", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{3}: {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{3}: 3,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 1,
|
||||
Count: 7,
|
||||
Columns: []uint64{1, 2},
|
||||
}
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||
p2p.Connect(other)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, receivedRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedResponse, actualResponse[0])
|
||||
})
|
||||
|
||||
t.Run("non contiguous", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
expectedResponse.BlockRoot(): {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
expectedResponse.BlockRoot(): 1,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
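// Slot 3 carries commitments but none of its columns are requested here, so a by-range request would over-fetch;
// sendDataColumnSidecarsRequest is expected to fall back to a by-root request instead.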
|
||||
|
||||
roots := [...][fieldparams.RootLength]byte{expectedResponse.BlockRoot(), {4}, {7}}
|
||||
|
||||
expectedRequest := &p2ptypes.DataColumnsByRootIdentifiers{
|
||||
{
|
||||
BlockRoot: roots[1][:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
BlockRoot: roots[2][:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
BlockRoot: roots[0][:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
}
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||
p2p.Connect(other)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
receivedRequest := new(p2ptypes.DataColumnsByRootIdentifiers)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, *expectedRequest, *receivedRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedResponse, actualResponse[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildByRangeRequests(t *testing.T) {
|
||||
const nullBatchSize = 0
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
actual, err := buildByRangeRequests(nil, nil, nil, nullBatchSize)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("missing Root", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
_, err := buildByRangeRequests(nil, nil, indicesByRoot, nullBatchSize)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("indices differ", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{2}: {1: true, 2: true},
|
||||
{3}: {2: true, 3: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{2}: 2,
|
||||
{3}: 3,
|
||||
}
|
||||
|
||||
actual, err := buildByRangeRequests(slotByRoot, nil, indicesByRoot, nullBatchSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("slots non contiguous", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{2}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{2}: 3,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
2: true,
|
||||
3: true,
|
||||
}
|
||||
|
||||
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, nullBatchSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const batchSize = 3
|
||||
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{3}: {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{3}: 3,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
expected := []*ethpb.DataColumnSidecarsByRangeRequest{
|
||||
{
|
||||
StartSlot: 1,
|
||||
Count: 3,
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
StartSlot: 4,
|
||||
Count: 3,
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
StartSlot: 7,
|
||||
Count: 1,
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
}
|
||||
|
||||
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildByRootRequest(t *testing.T) {
|
||||
root1 := [fieldparams.RootLength]byte{1}
|
||||
root2 := [fieldparams.RootLength]byte{2}
|
||||
|
||||
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
root1: {1: true, 2: true},
|
||||
root2: {3: true},
|
||||
}
|
||||
|
||||
expected := p2ptypes.DataColumnsByRootIdentifiers{
|
||||
{
|
||||
BlockRoot: root1[:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
BlockRoot: root2[:],
|
||||
Columns: []uint64{3},
|
||||
},
|
||||
}
|
||||
|
||||
actual := buildByRootRequest(input)
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarsByPeer(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const (
|
||||
start, stop = 0, 15
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Setup test data and expectations
|
||||
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
|
||||
"peer1": roDataColumnSidecars[start:5],
|
||||
"peer2": roDataColumnSidecars[5:9],
|
||||
"peer3": roDataColumnSidecars[9:stop],
|
||||
}
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, stop-start, len(actual))
|
||||
|
||||
for i := range actual {
|
||||
actualSidecar := actual[i]
|
||||
index := actualSidecar.Index
|
||||
expectedSidecar := expected[index]
|
||||
require.DeepEqual(t, expectedSidecar, actualSidecar)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("one rogue peer", func(t *testing.T) {
|
||||
const (
|
||||
start, middle, stop = 0, 5, 15
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Setup test data and expectations
|
||||
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
// Modify one sidecar to ensure proof verification fails.
|
||||
if roDataColumnSidecars[middle].KzgProofs[0][0] == 0 {
|
||||
roDataColumnSidecars[middle].KzgProofs[0][0]++
|
||||
} else {
|
||||
roDataColumnSidecars[middle].KzgProofs[0][0]--
|
||||
}
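// Only peer1's batch should survive: peer3's slice contains the corrupted proof, so its whole batch is rejected,
// and peer2's slice is empty ([5:5]).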
|
||||
|
||||
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
|
||||
"peer1": roDataColumnSidecars[start:middle],
|
||||
"peer2": roDataColumnSidecars[5:middle],
|
||||
"peer3": roDataColumnSidecars[middle:stop],
|
||||
}
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, middle-start, len(actual))
|
||||
|
||||
for i := range actual {
|
||||
actualSidecar := actual[i]
|
||||
index := actualSidecar.Index
|
||||
expectedSidecar := expected[index]
|
||||
require.DeepEqual(t, expectedSidecar, actualSidecar)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeIndicesByRootByPeer(t *testing.T) {
|
||||
peerIdStrs := []string{
|
||||
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq", // Custodies 89, 94, 97 & 122
|
||||
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf", // Custodies 1, 11, 37 & 86
|
||||
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8", // Custodies 2, 37, 38 & 68
|
||||
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx", // Custodies 10, 29, 36 & 108
|
||||
}
|
||||
|
||||
headSlotByPeer := map[string]primitives.Slot{
|
||||
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq": 89,
|
||||
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf": 10,
|
||||
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8": 12,
|
||||
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx": 9,
|
||||
}
|
||||
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
peers := p2p.Peers()
|
||||
|
||||
peerIDs := make([]peer.ID, 0, len(peerIdStrs))
|
||||
for _, peerIdStr := range peerIdStrs {
|
||||
peerID, err := peer.Decode(peerIdStr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peers.SetChainState(peerID, ðpb.StatusV2{
|
||||
HeadSlot: headSlotByPeer[peerIdStr],
|
||||
})
|
||||
|
||||
peerIDs = append(peerIDs, peerID)
|
||||
}
|
||||
|
||||
slotByBlockRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
[fieldparams.RootLength]byte{1}: 8,
|
||||
[fieldparams.RootLength]byte{2}: 10,
|
||||
[fieldparams.RootLength]byte{3}: 9,
|
||||
[fieldparams.RootLength]byte{4}: 50,
|
||||
}
|
||||
|
||||
indicesByBlockRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
[fieldparams.RootLength]byte{1}: {3: true, 4: true, 5: true},
|
||||
[fieldparams.RootLength]byte{2}: {1: true, 10: true, 37: true, 80: true},
|
||||
[fieldparams.RootLength]byte{3}: {10: true, 38: true, 39: true, 40: true},
|
||||
[fieldparams.RootLength]byte{4}: {89: true, 108: true, 122: true},
|
||||
}
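// Expected assignment: block {4} (slot 50) is only within range of the first peer (head slot 89), which custodies
// columns 89 and 122 of the needed {89, 108, 122}; block {1} needs columns 3-5, which no peer custodies, so it is
// absent from the expectation below.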
|
||||
|
||||
expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
peerIDs[0]: {
|
||||
[fieldparams.RootLength]byte{4}: {89: true, 122: true},
|
||||
},
|
||||
peerIDs[1]: {
|
||||
[fieldparams.RootLength]byte{2}: {1: true, 37: true},
|
||||
},
|
||||
peerIDs[2]: {
|
||||
[fieldparams.RootLength]byte{2}: {37: true},
|
||||
[fieldparams.RootLength]byte{3}: {38: true},
|
||||
},
|
||||
peerIDs[3]: {
|
||||
[fieldparams.RootLength]byte{3}: {10: true},
|
||||
},
|
||||
}
|
||||
|
||||
peerIDsMap := make(map[peer.ID]bool, len(peerIDs))
|
||||
for _, id := range peerIDs {
|
||||
peerIDsMap[id] = true
|
||||
}
|
||||
|
||||
actual, err := computeIndicesByRootByPeer(p2p, slotByBlockRoot, indicesByBlockRoot, peerIDsMap)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
|
||||
for peer, indicesByRoot := range expected {
|
||||
require.Equal(t, len(indicesByRoot), len(actual[peer]))
|
||||
for root, indices := range indicesByRoot {
|
||||
require.Equal(t, len(indices), len(actual[peer][root]))
|
||||
for index := range indices {
|
||||
require.Equal(t, actual[peer][root][index], true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomPeer(t *testing.T) {
|
||||
// Fixed seed.
|
||||
const seed = 42
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
t.Run("no peers", func(t *testing.T) {
|
||||
pid, err := randomPeer(t.Context(), randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, nil)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, peer.ID(""), pid)
|
||||
})
|
||||
|
||||
t.Run("context cancelled", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{peer.ID("peer1"): {}}
|
||||
pid, err := randomPeer(ctx, randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, indicesByRootByPeer)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, peer.ID(""), pid)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const count = 1
|
||||
collector := leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */)
|
||||
peer1, peer2, peer3 := peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3")
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
peer1: {},
|
||||
peer2: {},
|
||||
peer3: {},
|
||||
}
|
||||
|
||||
pid, err := randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, map[peer.ID]bool{peer1: true, peer2: true, peer3: true}[pid])
|
||||
})
|
||||
}
|
||||
|
||||
func TestCopyIndicesByRootByPeer(t *testing.T) {
|
||||
original := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
peer.ID("peer1"): {
|
||||
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||
[fieldparams.RootLength]byte{2}: {2: true},
|
||||
},
|
||||
peer.ID("peer2"): {
|
||||
[fieldparams.RootLength]byte{1}: {1: true},
|
||||
},
|
||||
}
|
||||
|
||||
copied := copyIndicesByRootByPeer(original)
|
||||
|
||||
require.Equal(t, len(original), len(copied))
|
||||
for peer, indicesByRoot := range original {
|
||||
require.Equal(t, len(indicesByRoot), len(copied[peer]))
|
||||
for root, indices := range indicesByRoot {
|
||||
require.Equal(t, len(indices), len(copied[peer][root]))
|
||||
for index := range indices {
|
||||
require.Equal(t, copied[peer][root][index], true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareIndices(t *testing.T) {
|
||||
left := map[uint64]bool{3: true, 5: true, 7: true}
|
||||
right := map[uint64]bool{5: true}
|
||||
require.Equal(t, false, compareIndices(left, right))
|
||||
|
||||
left = map[uint64]bool{3: true, 5: true, 7: true}
|
||||
right = map[uint64]bool{3: true, 6: true, 7: true}
|
||||
require.Equal(t, false, compareIndices(left, right))
|
||||
|
||||
left = map[uint64]bool{3: true, 5: true, 7: true}
|
||||
right = map[uint64]bool{5: true, 7: true, 3: true}
|
||||
require.Equal(t, true, compareIndices(left, right))
|
||||
}
|
||||
|
||||
func TestSortedSliceFromMap(t *testing.T) {
|
||||
input := map[uint64]bool{54: true, 23: true, 35: true}
|
||||
expected := []uint64{23, 35, 54}
|
||||
actual := sortedSliceFromMap(input)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestComputeSlotByBlockRoot(t *testing.T) {
|
||||
const (
|
||||
count = 3
|
||||
multiplier = 10
|
||||
)
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, count)
|
||||
for i := range count {
|
||||
signedBlock := util.NewBeaconBlock()
|
||||
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
|
||||
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlocks = append(roBlocks, roBlock)
|
||||
}
|
||||
|
||||
expected := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
|
||||
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
|
||||
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
|
||||
}
|
||||
|
||||
actual := computeSlotByBlockRoot(roBlocks)
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for k, v := range expected {
|
||||
require.Equal(t, v, actual[k])
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeTotalCount(t *testing.T) {
|
||||
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||
[fieldparams.RootLength]byte{2}: {2: true},
|
||||
}
|
||||
|
||||
const expected = 3
|
||||
actual := computeTotalCount(input)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
936
beacon-chain/sync/data_columns.go
Normal file
@@ -0,0 +1,936 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RequestDataColumnSidecarsByRoot is an opinionated, high-level function which, for the data columns in `dataColumnsToFetch`:
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
// - Requests the data column sidecars from the selected peers.
// - In case some peers are unable to provide all the requested data columns, retries with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars in `dataColumnsToFetch` are retrieved, or
|
||||
// - returns an error if all peers in `peers` are exhausted and at least one data column sidecar is still missing.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after peer exhaustion, but `peers` custody more than
// 64 columns, try to fetch enough columns to reconstruct the needed ones.
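// For example, if a selected peer is asked for columns {1, 2} but only returns column 1, that peer is dropped from the
// admissible set and column 2 is re-requested from another admissible peer on the next iteration of the loop below.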
|
||||
func RequestDataColumnSidecarsByRoot(
|
||||
ctx context.Context,
|
||||
dataColumnsToFetch []uint64,
|
||||
block blocks.ROBlock,
|
||||
peers []core.PeerID,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
ctxMap ContextByteVersions,
|
||||
newColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(dataColumnsToFetch) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Assemble the peers who can provide the needed data columns.
|
||||
dataColumnsByAdmissiblePeer, _, _, err := AdmissiblePeersForDataColumns(peers, dataColumnsToFetch, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get admissible peers for data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnsToFetch))
|
||||
remainingMissingColumns := make(map[uint64]bool, len(dataColumnsToFetch))
|
||||
for _, column := range dataColumnsToFetch {
|
||||
remainingMissingColumns[column] = true
|
||||
}
|
||||
|
||||
blockRoot := block.Root()
|
||||
|
||||
for len(dataColumnsByAdmissiblePeer) > 0 {
|
||||
peersToFetchFrom, err := SelectPeersToFetchDataColumnsFrom(sliceFromMap(remainingMissingColumns, true /*sorted*/), dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
// Request the data columns from each peer.
|
||||
successfulColumns := make(map[uint64]bool, len(remainingMissingColumns))
|
||||
for peer, peerRequestedColumns := range peersToFetchFrom {
|
||||
log := log.WithFields(logrus.Fields{"peer": peer.String(), "blockRoot": fmt.Sprintf("%#x", blockRoot)})
|
||||
|
||||
// Build the requests for the data columns.
|
||||
byRootRequest := ð.DataColumnsByRootIdentifier{BlockRoot: blockRoot[:], Columns: peerRequestedColumns}
|
||||
|
||||
// Send the requests to the peer.
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: ctx,
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
}
|
||||
peerSidecars, err := SendDataColumnSidecarsByRootRequest(params, peer, types.DataColumnsByRootIdentifiers{byRootRequest})
|
||||
if err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": peer.String(),
|
||||
"blockRoot": fmt.Sprintf("%#x", block.Root()),
|
||||
}).WithError(err).Debug("Failed to request data columns from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if returned data columns align with the block.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, peerSidecars); err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Align with block failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the received sidecars.
|
||||
verifier := newColumnsVerifier(peerSidecars, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Valid verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar inclusion proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar KZG proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
verifiedPeerSidecars, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "verified data columns")
|
||||
}
|
||||
|
||||
// Mark columns as successful
|
||||
for _, sidecar := range verifiedPeerSidecars {
|
||||
successfulColumns[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Check if all requested columns were successfully returned.
|
||||
peerMissingColumns := make(map[uint64]bool)
|
||||
for _, index := range peerRequestedColumns {
|
||||
if !successfulColumns[index] {
|
||||
peerMissingColumns[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(peerMissingColumns) > 0 {
|
||||
// Remove this peer if some requested columns were not correctly returned.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithField("missingColumns", sliceFromMap(peerMissingColumns, true /*sorted*/)).Debug("Peer did not provide all requested data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars = append(verifiedSidecars, verifiedPeerSidecars...)
|
||||
}
|
||||
|
||||
// Update remaining columns for the next retry.
|
||||
for col := range successfulColumns {
|
||||
delete(remainingMissingColumns, col)
|
||||
}
|
||||
|
||||
if len(remainingMissingColumns) > 0 {
|
||||
// Some columns are still missing, retry with the remaining peers.
|
||||
continue
|
||||
}
|
||||
|
||||
return verifiedSidecars, nil
|
||||
}
|
||||
|
||||
// If we still have remaining columns after all retries, return error
|
||||
return nil, errors.Errorf("failed to retrieve all requested data columns after retries for block root=%#x, missing columns=%v", blockRoot, sliceFromMap(remainingMissingColumns, true /*sorted*/))
|
||||
}
|
||||
|
||||
// RequestMissingDataColumnsByRange is an opinionated, high-level function which, for each block in `blks`:
// - Computes all data column sidecars we should store and which are missing (according to our node ID and `groupCount`),
// - Builds an optimized set of data column sidecars by range requests, in order to never request a data column that is already stored in the DB
// and to minimize the total number of requests, while not exceeding `batchSize` sidecars per request.
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
// - Requests the data column sidecars from the selected peers.
// - In case some peers are unable to provide all the requested data columns, retries with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars for `blks` are retrieved, or
|
||||
// - returns an error if no progress at all is made after 5 consecutive trials.
|
||||
// (If at least one additional data column sidecar is retrieved between two trials, the counter is reset.)
|
||||
//
|
||||
// In case of success, initially missing data columns grouped by block root are returned.
|
||||
// This function expects blocks to be sorted by slot.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after all allowed retries, but `peers` custody more than
// 64 columns, try to fetch enough columns to reconstruct the needed ones.
|
||||
func RequestMissingDataColumnsByRange(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
groupCount uint64,
|
||||
dataColumnsStorage filesystem.DataColumnStorageReader,
|
||||
blks []blocks.ROBlock,
|
||||
batchSize int,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.RODataColumn, error) {
|
||||
const maxAllowedStall = 5 // Number of trials before giving up.
|
||||
|
||||
if len(blks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the current slot.
|
||||
currentSlot := clock.CurrentSlot()
|
||||
|
||||
// Compute the minimum slot for which we should serve data columns.
|
||||
minimumSlot, err := dataColumnsRPCMinValidSlot(currentSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns RPC min valid slot")
|
||||
}
|
||||
|
||||
// Get blocks by root and compute all missing columns by root.
|
||||
blockByRoot := make(map[[fieldparams.RootLength]byte]blocks.ROBlock, len(blks))
|
||||
missingColumnsByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(blks))
|
||||
for _, blk := range blks {
|
||||
// Extract the block root and the block slot
|
||||
blockRoot, blockSlot := blk.Root(), blk.Block().Slot()
|
||||
|
||||
// Populate the block by root.
|
||||
blockByRoot[blockRoot] = blk
|
||||
|
||||
// Skip blocks that are not in the retention period.
|
||||
if blockSlot < minimumSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
missingColumns, err := MissingDataColumns(blk, p2p.NodeID(), groupCount, dataColumnsStorage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
for _, column := range missingColumns {
|
||||
if _, ok := missingColumnsByRoot[blockRoot]; !ok {
|
||||
missingColumnsByRoot[blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
missingColumnsByRoot[blockRoot][column] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if there are no missing data columns.
|
||||
if len(missingColumnsByRoot) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the number of missing data columns.
|
||||
previousMissingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
|
||||
// Count the number of retries for the same amount of missing data columns.
|
||||
stallCount := 0
|
||||
|
||||
// Add log fields.
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"initialMissingColumnsCount": previousMissingDataColumnsCount,
|
||||
"blockCount": len(blks),
|
||||
"firstSlot": blks[0].Block().Slot(),
|
||||
"lastSlot": blks[len(blks)-1].Block().Slot(),
|
||||
})
|
||||
|
||||
// Log the start of the process.
|
||||
start := time.Now()
|
||||
log.Debug("Requesting data column sidecars - start")
|
||||
|
||||
alignedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn, len(blks))
|
||||
for len(missingColumnsByRoot) > 0 {
|
||||
// Build requests.
|
||||
requests, err := buildDataColumnByRangeRequests(blks, missingColumnsByRoot, batchSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "build data column by range requests")
|
||||
}
|
||||
|
||||
// Requests data column sidecars from peers.
|
||||
retrievedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn)
|
||||
for _, request := range requests {
|
||||
roDataColumns, err := fetchDataColumnsFromPeers(ctx, clock, p2p, rateLimiter, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetch data columns from peers")
|
||||
}
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
root := roDataColumn.BlockRoot()
|
||||
if _, ok := blockByRoot[root]; !ok {
|
||||
// It may happen if the peer which sent the data columns is on a different fork.
|
||||
continue
|
||||
}
|
||||
|
||||
retrievedDataColumnsByRoot[root] = append(retrievedDataColumnsByRoot[root], roDataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
for root, dataColumns := range retrievedDataColumnsByRoot {
|
||||
// Retrieve the block from the root.
|
||||
block, ok := blockByRoot[root]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found - this should never happen")
|
||||
}
|
||||
|
||||
// Check if the data columns align with blocks.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, dataColumns); err != nil {
|
||||
log.WithField("root", root).WithError(err).Debug("Data columns do not align with block")
|
||||
continue
|
||||
}
|
||||
|
||||
alignedDataColumnsByRoot[root] = append(alignedDataColumnsByRoot[root], dataColumns...)
|
||||
|
||||
// Remove aligned data columns from the missing columns.
|
||||
for _, dataColumn := range dataColumns {
|
||||
delete(missingColumnsByRoot[root], dataColumn.Index)
|
||||
if len(missingColumnsByRoot[root]) == 0 {
|
||||
delete(missingColumnsByRoot, root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
missingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
if missingDataColumnsCount == previousMissingDataColumnsCount {
|
||||
stallCount++
|
||||
} else {
|
||||
stallCount = 0
|
||||
}
|
||||
|
||||
previousMissingDataColumnsCount = missingDataColumnsCount
|
||||
|
||||
if missingDataColumnsCount > 0 {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
"maxAllowedStall": maxAllowedStall,
|
||||
})
|
||||
|
||||
if stallCount >= maxAllowedStall {
|
||||
// It is very likely `blks` contains orphaned blocks, for which no peer has the data columns.
|
||||
// We give up and let the state machine handle the situation.
|
||||
const message = "Requesting data column sidecars - no progress, giving up"
|
||||
log.Warning(message)
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
}).Debug("Requesting data column sidecars - continue")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("duration", time.Since(start)).Debug("Requesting data column sidecars - success")
|
||||
return alignedDataColumnsByRoot, nil
|
||||
}
|
||||
|
||||
// MissingDataColumns computes the data columns we should store for a given block according to `custodyGroupCount`,
|
||||
// and returns the indices of the missing ones.
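// Concretely: the custody columns derived from `nodeID` and `custodyGroupCount` (via peerdas.Info), minus the columns
// already present in `dataColumnStorage`. Pre-Fulu blocks and blocks without blob commitments yield nothing.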
|
||||
func MissingDataColumns(block blocks.ROBlock, nodeID enode.ID, custodyGroupCount uint64, dataColumnStorage filesystem.DataColumnStorageReader) ([]uint64, error) {
|
||||
// Blocks before Fulu have no data columns.
|
||||
if block.Version() < version.Fulu {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the blob commitments from the block.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Nothing to build if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the expected columns.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
expectedColumns := peerInfo.CustodyColumns
|
||||
|
||||
// Get the stored columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
summary := dataColumnStorage.Summary(block.Root())
|
||||
|
||||
storedColumns := make(map[uint64]bool, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
if summary.HasIndex(i) {
|
||||
storedColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the missing columns.
|
||||
missingColumns := make([]uint64, 0, len(expectedColumns))
|
||||
for column := range expectedColumns {
|
||||
if !storedColumns[column] {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
return missingColumns, nil
|
||||
}
|
||||
|
||||
// SelectPeersToFetchDataColumnsFrom implements a greedy algorithm to select the peers to fetch data columns from.
|
||||
// https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm
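// Example: with needed columns {1, 2, 3, 4} and peers A: {1, 2, 3}, B: {3, 4}, C: {4}, the first pass picks A (it
// covers three needed columns), the second pass picks B or C to cover the remaining column 4, and the loop stops.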
|
||||
func SelectPeersToFetchDataColumnsFrom(neededDataColumns []uint64, dataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID][]uint64, error) {
|
||||
// Copy the provided needed data columns into a set that we will remove elements from.
|
||||
remainingDataColumns := make(map[uint64]bool, len(neededDataColumns))
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
remainingDataColumns[dataColumn] = true
|
||||
}
|
||||
|
||||
dataColumnsFromSelectedPeers := make(map[peer.ID][]uint64)
|
||||
|
||||
// Filter `dataColumnsByPeer` to only contain needed data columns.
|
||||
neededDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(dataColumnsByPeer))
|
||||
for pid, dataColumns := range dataColumnsByPeer {
|
||||
for dataColumn := range dataColumns {
|
||||
if remainingDataColumns[dataColumn] {
|
||||
if _, ok := neededDataColumnsByPeer[pid]; !ok {
|
||||
neededDataColumnsByPeer[pid] = make(map[uint64]bool, len(neededDataColumns))
|
||||
}
|
||||
|
||||
neededDataColumnsByPeer[pid][dataColumn] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
|
||||
for len(remainingDataColumns) > 0 {
|
||||
// Check if at least one peer remains. If not, it means that we don't have enough peers to fetch all needed data columns.
|
||||
if len(neededDataColumnsByPeer) == 0 {
|
||||
missingDataColumnsSortedSlice := sliceFromMap(remainingDataColumns, true /*sorted*/)
|
||||
return dataColumnsFromSelectedPeers, errors.Errorf("no peer to fetch the following data columns: %v", missingDataColumnsSortedSlice)
|
||||
}
|
||||
|
||||
// Select the peer that custodies the most needed data columns (greedy selection).
|
||||
var bestPeer peer.ID
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
if len(dataColumns) > len(neededDataColumnsByPeer[bestPeer]) {
|
||||
bestPeer = peer
|
||||
}
|
||||
}
|
||||
|
||||
dataColumnsSortedSlice := sliceFromMap(neededDataColumnsByPeer[bestPeer], true /*sorted*/)
|
||||
if uint64(len(dataColumnsSortedSlice)) > maxRequestDataColumnSidecars {
|
||||
dataColumnsSortedSlice = dataColumnsSortedSlice[:maxRequestDataColumnSidecars]
|
||||
}
|
||||
dataColumnsFromSelectedPeers[bestPeer] = dataColumnsSortedSlice
|
||||
|
||||
// Remove the selected peer from the list of peers.
|
||||
delete(neededDataColumnsByPeer, bestPeer)
|
||||
|
||||
// Remove the selected peer's data columns from the list of remaining data columns.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
delete(remainingDataColumns, dataColumn)
|
||||
}
|
||||
|
||||
// Remove the selected peer's data columns from the list of needed data columns by peer.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
delete(dataColumns, dataColumn)
|
||||
|
||||
if len(dataColumns) == 0 {
|
||||
delete(neededDataColumnsByPeer, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsFromSelectedPeers, nil
|
||||
}
|
||||
|
||||
// AdmissiblePeersForDataColumns returns the peers that custody at least one of the data columns listed in `neededDataColumns`.
//
// It returns:
// - A map, where the key is the peer and the value is the set of data columns custodied by the peer.
// - A map, where the key is the data column and the value is the list of admissible peers that custody it.
// - A slice of descriptions for non-admissible peers.
|
||||
// - An error if any.
|
||||
//
|
||||
// NOTE: distributeSamplesToPeer from the DataColumnSampler implements similar logic,
|
||||
// but with only one column queried in each request.
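// Example: with needed columns {2, 3}, peer A custodying {1, 2} and peer B custodying {2, 3}, the first returned map is
// {A: {1, 2}, B: {2, 3}} and the second is {2: [A, B], 3: [B]} (peer order within a slice is not deterministic).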
|
||||
func AdmissiblePeersForDataColumns(
|
||||
peers []peer.ID,
|
||||
neededDataColumns []uint64,
|
||||
p2p p2p.P2P,
|
||||
) (map[peer.ID]map[uint64]bool, map[uint64][]peer.ID, []string, error) {
|
||||
peerCount := len(peers)
|
||||
neededDataColumnsCount := uint64(len(neededDataColumns))
|
||||
|
||||
// Create description slice for non admissible peers.
|
||||
descriptions := make([]string, 0, peerCount)
|
||||
|
||||
// Compute custody columns for each peer.
|
||||
dataColumnsByPeer, err := custodyColumnsFromPeers(p2p, peers)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "custody columns from peers")
|
||||
}
|
||||
|
||||
// Filter peers which custody at least one needed data column.
|
||||
dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns, dataColumnsByPeer)
|
||||
descriptions = append(descriptions, localDescriptions...)
|
||||
|
||||
// Compute a map from needed data columns to their peers.
|
||||
admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededDataColumnsCount)
|
||||
for peerId, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
if peerDataColumns[dataColumn] {
|
||||
admissiblePeersByDataColumn[dataColumn] = append(admissiblePeersByDataColumn[dataColumn], peerId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, nil
|
||||
}
|
||||
|
||||
// custodyColumnsFromPeers computes all the custody columns indexed by peer.
|
||||
func custodyColumnsFromPeers(p2pIface p2p.P2P, peers []peer.ID) (map[peer.ID]map[uint64]bool, error) {
|
||||
peerCount := len(peers)
|
||||
|
||||
custodyColumnsByPeer := make(map[peer.ID]map[uint64]bool, peerCount)
|
||||
for _, peer := range peers {
|
||||
// Get the node ID from the peer ID.
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "convert peer ID to node ID")
|
||||
}
|
||||
|
||||
// Get the custody group count of the peer.
|
||||
custodyGroupCount := p2pIface.CustodyGroupCountFromPeer(peer)
|
||||
|
||||
// Get peerdas info of the peer.
|
||||
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peerdas info")
|
||||
}
|
||||
|
||||
custodyColumnsByPeer[peer] = dasInfo.CustodyColumns
|
||||
}
|
||||
|
||||
return custodyColumnsByPeer, nil
|
||||
}
|
||||
|
||||
// filterPeerWhichCustodyAtLeastOneDataColumn filters peers which custody at least one of the data columns
// specified in `neededDataColumns`. It also returns a list of descriptions for the non-admissible peers.
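// A peer custodying none of the needed columns is dropped, and a description such as
// "peer <id>: does not custody any needed column, custody columns: [...]" is recorded for the caller to log.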
|
||||
func filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns []uint64, inputDataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID]map[uint64]bool, []string) {
|
||||
// Total number of columns, used below to log a peer's custody columns either as an explicit list or as "all".
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
outputDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(inputDataColumnsByPeer))
|
||||
descriptions := make([]string, 0)
|
||||
|
||||
outerLoop:
|
||||
for peer, peerCustodyDataColumns := range inputDataColumnsByPeer {
|
||||
for _, neededDataColumn := range neededDataColumns {
|
||||
if peerCustodyDataColumns[neededDataColumn] {
|
||||
outputDataColumnsByPeer[peer] = peerCustodyDataColumns
|
||||
|
||||
continue outerLoop
|
||||
}
|
||||
}
|
||||
|
||||
peerCustodyColumnsCount := uint64(len(peerCustodyDataColumns))
|
||||
var peerCustodyColumnsLog interface{} = "all"
|
||||
|
||||
if peerCustodyColumnsCount < numberOfColumns {
|
||||
peerCustodyColumnsLog = sliceFromMap(peerCustodyDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
description := fmt.Sprintf("peer %s: does not custody any needed column, custody columns: %v", peer, peerCustodyColumnsLog)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
|
||||
return outputDataColumnsByPeer, descriptions
|
||||
}
|
||||
|
||||
// buildDataColumnByRangeRequests builds an optimized slice of data column sidecars by range requests:
|
||||
// 1. It will never request a data column that is already stored in the DB if there is no "hole" in `roBlocks` other than missed slots.
|
||||
// 2. It will minimize the number of requests.
|
||||
// It expects blocks to be sorted by slot.
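// Example: blocks at slots 1, 3, 4 and 7 (all carrying commitments), each missing columns {1, 2}, with a batch size of 3,
// produce three requests: {start: 1, count: 3}, {start: 4, count: 3} and {start: 7, count: 1}.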
|
||||
func buildDataColumnByRangeRequests(roBlocks []blocks.ROBlock, missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool, batchSize int) ([]*eth.DataColumnSidecarsByRangeRequest, error) {
|
||||
batchSizeSlot := primitives.Slot(batchSize)
|
||||
|
||||
// Return early if there are no blocks to process.
|
||||
if len(roBlocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// It's safe to get the first item of the slice since we've already checked that it's not empty.
|
||||
firstROBlock, lastROBlock := roBlocks[0], roBlocks[len(roBlocks)-1]
|
||||
firstBlockSlot, lastBlockSlot := firstROBlock.Block().Slot(), lastROBlock.Block().Slot()
|
||||
firstBlockRoot := firstROBlock.Root()
|
||||
|
||||
previousMissingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[firstBlockRoot]))
|
||||
|
||||
if missing, ok := missingColumnsByRoot[firstBlockRoot]; ok {
|
||||
for key, value := range missing {
|
||||
previousMissingDataColumns[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
previousBlockSlot, previousStartBlockSlot := firstBlockSlot, firstBlockSlot
|
||||
|
||||
result := make([]*eth.DataColumnSidecarsByRangeRequest, 0, 1)
|
||||
for index := 1; index < len(roBlocks); index++ {
|
||||
roBlock := roBlocks[index]
|
||||
|
||||
// Extract the block from the RO-block.
|
||||
block := roBlock.Block()
|
||||
|
||||
// Extract the slot from the block.
|
||||
blockRoot, blockSlot := roBlock.Root(), block.Slot()
|
||||
|
||||
if blockSlot <= previousBlockSlot {
|
||||
return nil, errors.Errorf("blocks are not strictly sorted by slot. Previous block slot: %d, current block slot: %d", previousBlockSlot, blockSlot)
|
||||
}
|
||||
|
||||
// Extract KZG commitments count from the current block body
|
||||
blockKzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the count of KZG commitments.
|
||||
blockKzgCommitmentCount := len(blockKzgCommitments)
|
||||
|
||||
// Skip blocks without commitments.
|
||||
if blockKzgCommitmentCount == 0 {
|
||||
previousBlockSlot = blockSlot
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the missing data columns for the current block.
|
||||
missingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[blockRoot]))
|
||||
for key, value := range missingColumnsByRoot[blockRoot] {
|
||||
missingDataColumns[key] = value
|
||||
}
|
||||
|
||||
// Compute if the missing data columns differ.
|
||||
missingDataColumnsDiffer := uint64MapDiffer(previousMissingDataColumns, missingDataColumns)
|
||||
|
||||
// Compute if the batch size is reached.
|
||||
batchSizeReached := blockSlot-previousStartBlockSlot >= batchSizeSlot
|
||||
|
||||
if missingDataColumnsDiffer || batchSizeReached {
|
||||
// Append the slice to the result.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(blockSlot - previousStartBlockSlot),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, request)
|
||||
|
||||
previousStartBlockSlot, previousMissingDataColumns = blockSlot, missingDataColumns
|
||||
}
|
||||
|
||||
previousBlockSlot = blockSlot
|
||||
}
|
||||
|
||||
lastRequest := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(lastBlockSlot - previousStartBlockSlot + 1),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, lastRequest)
|
||||
|
||||
return result, nil
|
||||
}
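
// exampleBuildDataColumnByRangeRequests is an illustrative sketch only: it shows how the
// helper above could be driven, assuming `roBlocks` is already sorted by slot and
// `missingColumnsByRoot` comes from a data column storage summary. The batch size of 32
// is an arbitrary example value.
func exampleBuildDataColumnByRangeRequests(roBlocks []blocks.ROBlock, missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) error {
	requests, err := buildDataColumnByRangeRequests(roBlocks, missingColumnsByRoot, 32)
	if err != nil {
		return errors.Wrap(err, "build data column by range requests")
	}

	for _, request := range requests {
		// Each request covers a contiguous slot range whose blocks share the same set of
		// missing columns, so already-stored columns are never requested again.
		log.WithFields(logrus.Fields{
			"startSlot": request.StartSlot,
			"count":     request.Count,
			"columns":   request.Columns,
		}).Debug("Data column by range request")
	}

	return nil
}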
|
||||
|
||||
// fetchDataColumnsFromPeers requests data columns by range to relevant peers
|
||||
func fetchDataColumnsFromPeers(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
ctxMap ContextByteVersions,
|
||||
targetRequest *eth.DataColumnSidecarsByRangeRequest,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
// Filter out requests with no data columns.
|
||||
if len(targetRequest.Columns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get all admissible peers with the data columns they custody.
|
||||
dataColumnsByAdmissiblePeer, err := waitForPeersForDataColumns(p2p, rateLimiter, targetRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "wait for peers for data columns")
|
||||
}
|
||||
|
||||
// Select the peers that will be requested.
|
||||
dataColumnsToFetchByPeer, err := SelectPeersToFetchDataColumnsFrom(targetRequest.Columns, dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
var roDataColumns []blocks.RODataColumn
|
||||
for peer, columnsToFetch := range dataColumnsToFetchByPeer {
|
||||
// Build the request.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: targetRequest.StartSlot,
|
||||
Count: targetRequest.Count,
|
||||
Columns: columnsToFetch,
|
||||
}
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: ctx,
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
}
|
||||
peerRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peer, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "send data column sidecars by range request")
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, peerRoDataColumns...)
|
||||
}
|
||||
|
||||
return roDataColumns, nil
|
||||
}
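
// exampleDataColumnFanOut is an illustrative sketch only: the peer IDs and column
// assignments are invented to show how a single target request is narrowed per peer,
// while the slot range stays the same for every peer.
func exampleDataColumnFanOut(targetRequest *eth.DataColumnSidecarsByRangeRequest) []*eth.DataColumnSidecarsByRangeRequest {
	// Hypothetical assignment, as could be returned by SelectPeersToFetchDataColumnsFrom.
	dataColumnsToFetchByPeer := map[peer.ID][]uint64{
		"peerA": {0, 1},
		"peerB": {2, 3},
	}

	requests := make([]*eth.DataColumnSidecarsByRangeRequest, 0, len(dataColumnsToFetchByPeer))
	for pid, columns := range dataColumnsToFetchByPeer {
		_ = pid // In the real flow, the request below is sent to this peer.
		requests = append(requests, &eth.DataColumnSidecarsByRangeRequest{
			StartSlot: targetRequest.StartSlot,
			Count:     targetRequest.Count,
			Columns:   columns,
		})
	}

	return requests
}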
|
||||
|
||||
// waitForPeersForDataColumns returns a map keyed by peer ID whose values are the custody columns of that peer.
|
||||
// It uses only peers that:
|
||||
// - are synced up to the last slot of the request, and
|
||||
// - have enough bandwidth to serve the requested number of blocks.
|
||||
// It waits until at least one peer per data column is available.
|
||||
func waitForPeersForDataColumns(p2p p2p.P2P, rateLimiter *leakybucket.Collector, request *eth.DataColumnSidecarsByRangeRequest) (map[peer.ID]map[uint64]bool, error) {
|
||||
const delay = 5 * time.Second
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Build nice log fields.
|
||||
lastSlot := request.StartSlot.Add(request.Count).Sub(1)
|
||||
|
||||
var neededDataColumnsLog interface{} = "all"
|
||||
neededDataColumnCount := uint64(len(request.Columns))
|
||||
if neededDataColumnCount < numberOfColumns {
|
||||
neededDataColumnsLog = request.Columns
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"start": request.StartSlot,
|
||||
"targetSlot": lastSlot,
|
||||
"neededDataColumns": neededDataColumnsLog,
|
||||
})
|
||||
|
||||
// Keep only peers with head epoch greater than or equal to the epoch corresponding to the target slot, and
|
||||
// keep only peers with enough bandwidth.
|
||||
filteredPeers, descriptions, err := filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter peers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err := AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers := computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
|
||||
// Wait if no suitable peers are available.
|
||||
for len(dataColumnsWithoutPeers) > 0 {
|
||||
// Build nice log fields.
|
||||
var dataColumnsWithoutPeersLog interface{} = "all"
|
||||
dataColumnsWithoutPeersCount := uint64(len(dataColumnsWithoutPeers))
|
||||
if dataColumnsWithoutPeersCount < numberOfColumns {
|
||||
dataColumnsWithoutPeersLog = sliceFromMap(dataColumnsWithoutPeers, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithField("columnsWithoutPeer", dataColumnsWithoutPeersLog).Warning("Fetch data columns from peers - no available peers, retrying later")
|
||||
for _, description := range descriptions {
|
||||
log.Debug(description)
|
||||
}
|
||||
|
||||
for pid, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
var peerDataColumnsLog interface{} = "all"
|
||||
peerDataColumnsCount := uint64(len(peerDataColumns))
|
||||
if peerDataColumnsCount < numberOfColumns {
|
||||
peerDataColumnsLog = sliceFromMap(peerDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"peerDataColumns": peerDataColumnsLog,
|
||||
}).Debug("Peer data columns")
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
|
||||
// Filter for peers with head epoch greater than or equal to our target epoch for ByRange requests.
|
||||
filteredPeers, descriptions, err = filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter peers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err = AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers = computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, nil
|
||||
}
|
||||
|
||||
// Filter peers to ensure they are synced to the target slot and have sufficient bandwidth to serve the request.
|
||||
func filterPeersByTargetSlotAndBandwidth(p2p p2p.P2P, rateLimiter *leakybucket.Collector, lastSlot primitives.Slot, blockCount uint64) ([]peer.ID, []string, error) {
|
||||
peers := p2p.Peers().Connected()
|
||||
|
||||
slotPeers, descriptions, err := filterPeersByTargetSlot(p2p, peers, lastSlot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "filter peers by target slot")
|
||||
}
|
||||
|
||||
// Filter for peers with sufficient bandwidth to serve the request.
|
||||
slotAndBandwidthPeers := hasSufficientBandwidth(rateLimiter, slotPeers, blockCount)
|
||||
|
||||
// Add debugging logs for the filtered peers.
|
||||
peerWithSufficientBandwidthMap := make(map[peer.ID]bool, len(peers))
|
||||
for _, peer := range slotAndBandwidthPeers {
|
||||
peerWithSufficientBandwidthMap[peer] = true
|
||||
}
|
||||
|
||||
for _, peer := range slotPeers {
|
||||
if !peerWithSufficientBandwidthMap[peer] {
|
||||
description := fmt.Sprintf("peer %s: does not have sufficient bandwidth", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
}
|
||||
return slotAndBandwidthPeers, descriptions, nil
|
||||
}
|
||||
|
||||
func hasSufficientBandwidth(rateLimiter *leakybucket.Collector, peers []peer.ID, count uint64) []peer.ID {
|
||||
var filteredPeers []peer.ID
|
||||
|
||||
for _, p := range peers {
|
||||
remaining := rateLimiter.Remaining(p.String())
// Clamp before comparing: a negative remaining value would wrap around when cast to uint64.
if remaining <= 0 || uint64(remaining) < count {
|
||||
continue
|
||||
}
|
||||
filteredPeers = append(filteredPeers, p)
|
||||
}
|
||||
return filteredPeers
|
||||
}
|
||||
|
||||
func computeDataColumnsWithoutPeers(neededColumns []uint64, peersByColumn map[uint64][]peer.ID) map[uint64]bool {
|
||||
result := make(map[uint64]bool)
|
||||
for _, column := range neededColumns {
|
||||
if _, ok := peersByColumn[column]; !ok {
|
||||
result[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
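
// exampleColumnsWithoutPeers is an illustrative sketch only (values invented): with needed
// columns 0, 1 and 2 but admissible peers for columns 0 and 2 only, the helper above reports
// column 1 as uncovered, which keeps the caller in its retry loop.
func exampleColumnsWithoutPeers() map[uint64]bool {
	needed := []uint64{0, 1, 2}
	peersByColumn := map[uint64][]peer.ID{
		0: {"peerA"},
		2: {"peerB"},
	}

	return computeDataColumnsWithoutPeers(needed, peersByColumn) // map[1:true]
}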
|
||||
|
||||
// Filter peers with head epoch lower than our target epoch for ByRange requests.
|
||||
func filterPeersByTargetSlot(p2p p2p.P2P, peers []peer.ID, targetSlot primitives.Slot) ([]peer.ID, []string, error) {
|
||||
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||
descriptions := make([]string, 0, len(peers))
|
||||
// Compute the target epoch from the target slot.
|
||||
targetEpoch := slots.ToEpoch(targetSlot)
|
||||
|
||||
for _, peer := range peers {
|
||||
peerChainState, err := p2p.Peers().ChainState(peer)
|
||||
if err != nil {
|
||||
description := fmt.Sprintf("peer %s: error: %s", peer, err)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
if peerChainState == nil {
|
||||
description := fmt.Sprintf("peer %s: chain state is nil", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
peerHeadEpoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
if peerHeadEpoch < targetEpoch {
|
||||
description := fmt.Sprintf("peer %s: peer head epoch %d < our target epoch %d", peer, peerHeadEpoch, targetEpoch)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
filteredPeers = append(filteredPeers, peer)
|
||||
}
|
||||
|
||||
return filteredPeers, descriptions, nil
|
||||
}
|
||||
|
||||
// itemsCount returns the total number of missing data columns across all block roots.
|
||||
func itemsCount(missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
count := 0
|
||||
for _, columns := range missingColumnsByRoot {
|
||||
count += len(columns)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// uint64MapDiffer returns true if the two maps differ.
|
||||
func uint64MapDiffer(left, right map[uint64]bool) bool {
|
||||
if len(left) != len(right) {
|
||||
return true
|
||||
}
|
||||
|
||||
for k := range left {
|
||||
if !right[k] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
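
// exampleUint64MapDiffer is an illustrative sketch only (values invented). Note that a key
// present with a `false` value on the right-hand side counts as a difference, so only
// columns marked `true` on both sides are treated as matching.
func exampleUint64MapDiffer() {
	left := map[uint64]bool{1: true, 2: true}

	_ = uint64MapDiffer(left, map[uint64]bool{1: true, 2: true})  // false: same columns
	_ = uint64MapDiffer(left, map[uint64]bool{1: true, 3: true})  // true: column 2 missing on the right
	_ = uint64MapDiffer(left, map[uint64]bool{1: true, 2: false}) // true: column 2 present but false
}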
|
||||
beacon-chain/sync/data_columns_test.go (new file, 1639 lines): diff suppressed because it is too large
@@ -20,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -72,7 +73,9 @@ go_test(
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
@@ -89,6 +92,7 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -3,11 +3,13 @@ package initialsync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
@@ -15,6 +17,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -34,7 +37,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// maxPendingRequests limits how many concurrent fetch request one can initiate.
|
||||
maxPendingRequests = 64
|
||||
// peersPercentagePerRequest caps percentage of peers to be used in a request.
|
||||
@@ -78,6 +80,8 @@ type blocksFetcherConfig struct {
|
||||
peerFilterCapacityWeight float64
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageReader
|
||||
cv verification.NewDataColumnsVerifier
|
||||
}
|
||||
|
||||
// blocksFetcher is a service to fetch chain data from peers.
|
||||
@@ -94,6 +98,8 @@ type blocksFetcher struct {
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageReader
|
||||
cv verification.NewDataColumnsVerifier
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
@@ -124,7 +130,7 @@ type fetchRequestResponse struct {
|
||||
blobsFrom peer.ID
|
||||
start primitives.Slot
|
||||
count uint64
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -162,6 +168,8 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
cv: cfg.cv,
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
rateLimiter: rateLimiter,
|
||||
peerLocks: make(map[peer.ID]*peerLock),
|
||||
@@ -298,7 +306,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
response := &fetchRequestResponse{
|
||||
start: start,
|
||||
count: count,
|
||||
bwb: []blocks.BlockWithROBlobs{},
|
||||
bwb: []blocks.BlockWithROSidecars{},
|
||||
err: nil,
|
||||
}
|
||||
|
||||
@@ -317,30 +325,114 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
if f.mode == modeStopOnFinalizedEpoch {
|
||||
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
|
||||
if start > highestFinalizedSlot {
|
||||
response.err = fmt.Errorf("%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot)
|
||||
response.err = fmt.Errorf(
|
||||
"%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot,
|
||||
)
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||
if response.err == nil {
|
||||
pid, bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.blocksFrom, peers)
|
||||
pid, err := f.fetchSidecars(ctx, response.blocksFrom, peers, response.bwb)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch sidecars")
|
||||
response.err = err
|
||||
}
|
||||
response.bwb = bwb
|
||||
|
||||
response.blobsFrom = pid
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer.
|
||||
// fetchSidecars fetches sidecars corresponding to the blocks in `bwScs`.
|
||||
// It mutates the `Blobs` and `Columns` fields of `bwScs` with the fetched sidecars.
|
||||
// `pid` is the initial peer to request blobs from (usually the peer from which the blocks originated),
|
||||
// `peers` is a list of fallback peers to request blobs from if `pid` fails.
|
||||
// `bwScs` must be sorted by slot.
|
||||
// It returns the peer ID from which blobs were fetched (if any).
|
||||
func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []peer.ID, bwScs []blocks.BlockWithROSidecars) (peer.ID, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
if len(bwScs) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
firstFuluIndex, err := findFirstFuluIndex(bwScs)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "find first Fulu index")
|
||||
}
|
||||
|
||||
preFulu := bwScs[:firstFuluIndex]
|
||||
postFulu := bwScs[firstFuluIndex:]
|
||||
|
||||
var blobsPid peer.ID
|
||||
|
||||
if len(preFulu) > 0 {
|
||||
// Fetch blob sidecars.
|
||||
blobsPid, err = f.fetchBlobsFromPeer(ctx, preFulu, pid, peers)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch blobs from peer")
|
||||
}
|
||||
}
|
||||
|
||||
if len(postFulu) == 0 {
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// Compute the columns to request.
|
||||
custodyGroupCount, err := f.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
info, _, err := peerdas.Info(f.p2p.NodeID(), samplingSize)
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "custody info")
|
||||
}
|
||||
|
||||
params := prysmsync.DataColumnSidecarsParams{
|
||||
Ctx: ctx,
|
||||
Tor: f.clock,
|
||||
P2P: f.p2p,
|
||||
RateLimiter: f.rateLimiter,
|
||||
CtxMap: f.ctxMap,
|
||||
Storage: f.dcs,
|
||||
NewVerifier: f.cv,
|
||||
}
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
|
||||
for _, block := range postFulu {
|
||||
roBlocks = append(roBlocks, block.Block)
|
||||
}
|
||||
|
||||
verifiedRoDataColumnsByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch data column sidecars")
|
||||
}
|
||||
|
||||
// Populate the response.
|
||||
for i := range bwScs {
|
||||
bwSc := &bwScs[i]
|
||||
root := bwSc.Block.Root()
|
||||
if columns, ok := verifiedRoDataColumnsByRoot[root]; ok {
|
||||
bwSc.Columns = columns
|
||||
}
|
||||
}
|
||||
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer, sorted by slot.
|
||||
func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
ctx context.Context,
|
||||
start primitives.Slot, count uint64,
|
||||
peers []peer.ID,
|
||||
) ([]blocks.BlockWithROBlobs, peer.ID, error) {
|
||||
) ([]blocks.BlockWithROSidecars, peer.ID, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
|
||||
defer span.End()
|
||||
|
||||
@@ -355,8 +447,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
// peers are dialed first.
|
||||
peers = append(bestPeers, peers...)
|
||||
peers = dedupPeers(peers)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
for _, p := range peers {
|
||||
blocks, err := f.requestBlocks(ctx, req, p)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
@@ -380,14 +471,14 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
return nil, "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
func sortedBlockWithVerifiedBlobSlice(bs []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROBlobs, error) {
|
||||
rb := make([]blocks.BlockWithROBlobs, len(bs))
|
||||
for i, b := range bs {
|
||||
func sortedBlockWithVerifiedBlobSlice(blks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROSidecars, error) {
|
||||
rb := make([]blocks.BlockWithROSidecars, len(blks))
|
||||
for i, b := range blks {
|
||||
ro, err := blocks.NewROBlock(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb[i] = blocks.BlockWithROBlobs{Block: ro}
|
||||
rb[i] = blocks.BlockWithROSidecars{Block: ro}
|
||||
}
|
||||
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
|
||||
return rb, nil
|
||||
@@ -403,7 +494,8 @@ type commitmentCountList []commitmentCount
|
||||
|
||||
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
||||
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
||||
func countCommitments(bwb []blocks.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
|
||||
// `bwb` must be sorted by slot.
|
||||
func countCommitments(bwb []blocks.BlockWithROSidecars, retentionStart primitives.Slot) commitmentCountList {
|
||||
if len(bwb) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -485,7 +577,9 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
|
||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks.BlockWithROBlobs, error) {
|
||||
// verifyAndPopulateBlobs mutates the input `bwb` argument by adding verified blobs.
|
||||
// This function mutates the input `bwb` argument.
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
|
||||
for i := range blobs {
|
||||
if blobs[i].Slot() < req.StartSlot {
|
||||
@@ -495,46 +589,53 @@ func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob
|
||||
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
|
||||
}
|
||||
for i := range bwb {
|
||||
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
err := populateBlock(&bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
if err != nil {
|
||||
if errors.Is(err, errDidntPopulate) {
|
||||
continue
|
||||
}
|
||||
return bwb, err
|
||||
return err
|
||||
}
|
||||
bwb[i] = bwi
|
||||
}
|
||||
return bwb, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var errDidntPopulate = errors.New("skipping population of block")
|
||||
|
||||
func populateBlock(bw blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks.BlockWithROBlobs, error) {
|
||||
// populateBlock verifies and populates blobs for a block.
|
||||
// This function mutates the input `bw` argument.
|
||||
func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blk := bw.Block
|
||||
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
commits, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) == 0 {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
// Drop blobs on the floor if we already have them.
|
||||
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) != len(blobs) {
|
||||
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
return missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
}
|
||||
|
||||
for ci := range commits {
|
||||
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
|
||||
return bw, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bw.Blobs = blobs
|
||||
return bw, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
||||
@@ -547,29 +648,38 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
|
||||
}
|
||||
|
||||
// fetchBlobsFromPeer fetches blobs from a single randomly selected peer.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROBlobs, pid peer.ID, peers []peer.ID) (peer.ID, []blocks.BlockWithROBlobs, error) {
|
||||
// This function mutates the input `bwb` argument.
|
||||
// `pid` is the initial peer to request blobs from (usually the peer from which the block originated),
|
||||
// `peers` is a list of peers to use for the request if `pid` fails.
|
||||
// `bwb` must be sorted by slot.
|
||||
// It returns the peer ID from which blobs were fetched.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROSidecars, pid peer.ID, peers []peer.ID) (peer.ID, error) {
|
||||
if len(bwb) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||
defer span.End()
|
||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", err
|
||||
}
|
||||
// Construct request message based on observed interval of blocks in need of blobs.
|
||||
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
|
||||
if req == nil {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
||||
peers = append([]peer.ID{pid}, peers...)
|
||||
peers = f.hasSufficientBandwidth(peers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first. If all of them fail, we fallback to the
|
||||
// initial peer we wanted to request blobs from.
|
||||
peers = append(bestPeers, pid)
|
||||
peers = append(peers, pid)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blobs, err := f.requestBlobs(ctx, req, p)
|
||||
@@ -578,14 +688,24 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
|
||||
if err != nil {
|
||||
if err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs); err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
||||
continue
|
||||
}
|
||||
return p, robs, err
|
||||
return p, err
|
||||
}
|
||||
return "", nil, errNoPeersAvailable
|
||||
return "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
// sortedSliceFromMap returns a sorted slice of keys from a map.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
slices.Sort(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
@@ -642,6 +762,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
|
||||
}
|
||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||
l.Unlock()
|
||||
|
||||
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||
}
|
||||
|
||||
@@ -699,13 +820,17 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
||||
filteredPeers := []peer.ID{}
|
||||
for _, p := range peers {
|
||||
if uint64(f.rateLimiter.Remaining(p.String())) < count {
|
||||
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||
|
||||
for _, peer := range peers {
|
||||
remaining := uint64(0)
|
||||
if remainingInt := f.rateLimiter.Remaining(peer.String()); remainingInt > 0 {
|
||||
remaining = uint64(remainingInt)
|
||||
}
|
||||
if remaining < count {
|
||||
continue
|
||||
}
|
||||
copiedP := p
|
||||
filteredPeers = append(filteredPeers, copiedP)
|
||||
filteredPeers = append(filteredPeers, peer)
|
||||
}
|
||||
return filteredPeers
|
||||
}
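
// exampleRemainingClamp is an illustrative sketch only (values invented): it shows why the
// explicit clamp above matters. A non-positive remaining value cast straight to uint64
// wraps around to a huge number and would incorrectly pass the `remaining < count` check.
func exampleRemainingClamp() {
	remainingInt := int64(-5)

	_ = uint64(remainingInt)      // 18446744073709551611 after wrap-around
	_ = uint64(remainingInt) < 64 // false: the peer would wrongly be kept
}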
|
||||
@@ -745,3 +870,23 @@ func dedupPeers(peers []peer.ID) []peer.ID {
|
||||
}
|
||||
return newPeerList
|
||||
}
|
||||
|
||||
// findFirstFuluIndex returns the index of the first block with a version >= Fulu.
|
||||
// It returns an error if the blocks are not sorted by version with respect to Fulu.
|
||||
func findFirstFuluIndex(bwScs []blocks.BlockWithROSidecars) (int, error) {
|
||||
firstFuluIndex := len(bwScs)
|
||||
|
||||
for i, bwSc := range bwScs {
|
||||
blockVersion := bwSc.Block.Version()
|
||||
if blockVersion >= version.Fulu && firstFuluIndex > i {
|
||||
firstFuluIndex = i
|
||||
continue
|
||||
}
|
||||
|
||||
if blockVersion < version.Fulu && firstFuluIndex <= i {
|
||||
return 0, errors.New("blocks are not sorted by version")
|
||||
}
|
||||
}
|
||||
|
||||
return firstFuluIndex, nil
|
||||
}
|
||||
@@ -12,11 +12,12 @@ import (
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pm "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -266,7 +267,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
|
||||
p := p2pt.NewTestP2P(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
connectPeers(t, p, tt.peers, p.Peers())
|
||||
cache.RLock()
|
||||
genesisRoot := cache.rootCache[0]
|
||||
@@ -307,9 +308,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
fetcher.stop()
|
||||
}()
|
||||
|
||||
processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) {
|
||||
processFetchedBlocks := func() ([]blocks.BlockWithROSidecars, error) {
|
||||
defer cancel()
|
||||
var unionRespBlocks []blocks.BlockWithROBlobs
|
||||
var unionRespBlocks []blocks.BlockWithROSidecars
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -398,6 +399,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) {
|
||||
fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
blockBatchLimit := flags.Get().BlockBatchLimit
|
||||
chainConfig := struct {
|
||||
@@ -455,7 +457,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
var bwb []blocks.BlockWithROBlobs
|
||||
var bwb []blocks.BlockWithROSidecars
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Error(ctx.Err())
|
||||
@@ -531,9 +533,9 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p3 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p3 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
p1.Connect(p3)
|
||||
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
@@ -543,7 +545,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -602,15 +604,15 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
req := ðpb.BeaconBlocksByRangeRequest{
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -638,7 +640,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
req *ethpb.BeaconBlocksByRangeRequest
|
||||
@@ -883,7 +885,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
@@ -893,7 +895,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
|
||||
p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
|
||||
@@ -993,7 +995,7 @@ func TestBlobRangeForBlocks(t *testing.T) {
|
||||
func TestBlobRequest(t *testing.T) {
|
||||
var nilReq *ethpb.BlobSidecarsByRangeRequest
|
||||
// no blocks
|
||||
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
|
||||
req := countCommitments([]blocks.BlockWithROSidecars{}, 0).blobRange(nil).Request()
|
||||
require.Equal(t, nilReq, req)
|
||||
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
@@ -1026,22 +1028,16 @@ func TestBlobRequest(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCountCommitments(t *testing.T) {
|
||||
// no blocks
|
||||
// blocks before retention start filtered
|
||||
// blocks without commitments filtered
|
||||
// pre-deneb filtered
|
||||
// variety of commitment counts are accurate, from 1 to max
|
||||
type testcase struct {
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
|
||||
numBlocks int
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROSidecars
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
}
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "nil blocks is safe",
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROSidecars {
|
||||
return nil
|
||||
},
|
||||
retStart: 0,
|
||||
@@ -1179,7 +1175,7 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROSidecars, []blocks.ROBlob) {
|
||||
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
for i := range blks {
|
||||
@@ -1190,7 +1186,7 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
|
||||
return bwb, blobs
|
||||
}
|
||||
|
||||
func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
|
||||
func testReqFromResp(bwb []blocks.BlockWithROSidecars) *ethpb.BlobSidecarsByRangeRequest {
|
||||
return ðpb.BlobSidecarsByRangeRequest{
|
||||
StartSlot: bwb[0].Block.Block().Slot(),
|
||||
Count: uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
|
||||
@@ -1207,7 +1203,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
}
|
||||
require.Equal(t, len(blobs), len(expectedCommits))
|
||||
|
||||
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
require.NoError(t, err)
|
||||
for _, bw := range bwb {
|
||||
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
|
||||
@@ -1228,7 +1224,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
})
|
||||
t.Run("missing blobs", func(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
|
||||
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
|
||||
})
|
||||
t.Run("no blobs for last block", func(t *testing.T) {
|
||||
@@ -1240,7 +1236,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
blobs = blobs[0 : len(blobs)-len(cmts)]
|
||||
lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
|
||||
bwb[lastIdx].Block = lastBlk
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("blobs not copied if all locally available", func(t *testing.T) {
|
||||
@@ -1254,7 +1250,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
r7: {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(bwb[i1].Blobs))
|
||||
require.Equal(t, 0, len(bwb[i7].Blobs))
|
||||
@@ -1302,3 +1298,203 @@ func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, 2, len(receivedPeers))
|
||||
}
|
||||
|
||||
func TestSortedSliceFromMap(t *testing.T) {
|
||||
m := map[uint64]bool{1: true, 3: true, 2: true, 4: true}
|
||||
expected := []uint64{1, 2, 3, 4}
|
||||
|
||||
actual := sortedSliceFromMap(m)
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestFetchSidecars(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
t.Run("No blocks", func(t *testing.T) {
|
||||
fetcher := new(blocksFetcher)
|
||||
|
||||
pid, err := fetcher.fetchSidecars(ctx, "", nil, []blocks.BlockWithROSidecars{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, peer.ID(""), pid)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfColumns := beaconConfig.NumberOfColumns
|
||||
samplesPerSlot := beaconConfig.SamplesPerSlot
|
||||
|
||||
// Define "now" to be one epoch after genesis time + retention period.
|
||||
genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
|
||||
secondsPerSlot := beaconConfig.SecondsPerSlot
|
||||
slotsPerEpoch := beaconConfig.SlotsPerEpoch
|
||||
secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
|
||||
retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
|
||||
nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
|
||||
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||
|
||||
genesisValidatorRoot := [fieldparams.RootLength]byte{}
|
||||
nower := func() time.Time { return now }
|
||||
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||
|
||||
// Define a Deneb block with blobs out of retention period.
|
||||
denebBlock := util.NewBeaconBlockDeneb()
|
||||
denebBlock.Block.Slot = 0 // Genesis slot, out of retention period.
|
||||
signedDenebBlock, err := blocks.NewSignedBeaconBlock(denebBlock)
|
||||
require.NoError(t, err)
|
||||
roDenebBlock, err := blocks.NewROBlock(signedDenebBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define a Fulu block with blobs in the retention period.
|
||||
fuluBlock := util.NewBeaconBlockFulu()
|
||||
fuluBlock.Block.Slot = slotsPerEpoch // Within retention period.
|
||||
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
bodyRoot, err := fuluBlock.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and save data column sidecars for this fulu block in the database.
|
||||
params := make([]util.DataColumnParam, 0, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRoot[:]}
|
||||
params = append(params, param)
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save the data column sidecars to the storage.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a blocks fetcher.
|
||||
fetcher := &blocksFetcher{
|
||||
clock: clock,
|
||||
p2p: p2ptest.NewTestP2P(t),
|
||||
dcs: dataColumnStorage,
|
||||
}
|
||||
|
||||
// Fetch sidecars.
|
||||
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||
{Block: roDenebBlock},
|
||||
{Block: roFuluBlock},
|
||||
}
|
||||
pid, err := fetcher.fetchSidecars(ctx, "", nil, blocksWithSidecars)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, peer.ID(""), pid)
|
||||
|
||||
// Verify that the blocks with sidecars were modified correctly.
|
||||
require.Equal(t, 0, len(blocksWithSidecars[0].Blobs))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[0].Columns))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[1].Blobs))
|
||||
|
||||
// We don't check the content of the columns here. The extensive test is done
|
||||
// in TestFetchDataColumnsSidecars.
|
||||
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[1].Columns)))
|
||||
})
|
||||
}
|
||||
func TestFirstFuluIndex(t *testing.T) {
|
||||
bellatrix := util.NewBeaconBlockBellatrix()
|
||||
signedBellatrix, err := blocks.NewSignedBeaconBlock(bellatrix)
|
||||
require.NoError(t, err)
|
||||
roBellatrix, err := blocks.NewROBlock(signedBellatrix)
|
||||
require.NoError(t, err)
|
||||
|
||||
capella := util.NewBeaconBlockCapella()
|
||||
signedCapella, err := blocks.NewSignedBeaconBlock(capella)
|
||||
require.NoError(t, err)
|
||||
roCapella, err := blocks.NewROBlock(signedCapella)
|
||||
require.NoError(t, err)
|
||||
|
||||
deneb := util.NewBeaconBlockDeneb()
|
||||
signedDeneb, err := blocks.NewSignedBeaconBlock(deneb)
|
||||
require.NoError(t, err)
|
||||
roDeneb, err := blocks.NewROBlock(signedDeneb)
|
||||
require.NoError(t, err)
|
||||
|
||||
fulu := util.NewBeaconBlockFulu()
|
||||
signedFulu, err := blocks.NewSignedBeaconBlock(fulu)
|
||||
require.NoError(t, err)
|
||||
roFulu, err := blocks.NewROBlock(signedFulu)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setupBlocks func(t *testing.T) []blocks.BlockWithROSidecars
|
||||
expectedIndex int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "all blocks are pre-Fulu",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roBellatrix},
|
||||
{Block: roCapella},
|
||||
{Block: roDeneb},
|
||||
}
|
||||
},
|
||||
expectedIndex: 3, // Should be the length of the slice
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "all blocks are Fulu or later",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roFulu},
|
||||
{Block: roFulu},
|
||||
}
|
||||
},
|
||||
expectedIndex: 0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "mixed blocks correctly sorted",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roBellatrix},
|
||||
{Block: roCapella},
|
||||
{Block: roDeneb},
|
||||
{Block: roFulu},
|
||||
{Block: roFulu},
|
||||
}
|
||||
},
|
||||
expectedIndex: 3, // Index where Fulu blocks start
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "mixed blocks incorrectly sorted",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roBellatrix},
|
||||
{Block: roCapella},
|
||||
{Block: roFulu},
|
||||
{Block: roDeneb},
|
||||
{Block: roFulu},
|
||||
}
|
||||
},
|
||||
expectedIndex: 0,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blocks := tt.setupBlocks(t)
|
||||
index, err := findFirstFuluIndex(blocks)
|
||||
|
||||
if tt.expectError {
|
||||
require.NotNil(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedIndex, index)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
type forkData struct {
|
||||
blocksFrom peer.ID
|
||||
blobsFrom peer.ID
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
}
|
||||
|
||||
// nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.
|
||||
@@ -275,16 +275,18 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
"slot": block.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", parentRoot),
|
||||
}).Debug("Block with unknown parent root has been found")
|
||||
altBlocks, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
|
||||
bwb, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid blocks received in findForkWithPeer")
|
||||
}
|
||||
|
||||
// We need to fetch the sidecars for the given alt-chain if any exist, so that we can try to verify and import
|
||||
// the blocks.
|
||||
bpid, bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
return nil, errors.Wrap(err, "fetch sidecars")
|
||||
}
|
||||
|
||||
// The caller will use the BlockWithROSidecars in bwb as the starting point for
|
||||
// round-robin syncing the alternate chain.
|
||||
return &forkData{blocksFrom: pid, blobsFrom: bpid, bwb: bwb}, nil
|
||||
@@ -303,10 +305,9 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
|
||||
}
|
||||
var bpid peer.ID
|
||||
bpid, bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
|
||||
return nil, errors.Wrap(err, "fetch sidecars")
|
||||
}
|
||||
return &forkData{
|
||||
blocksFrom: pid,
|
||||
@@ -350,9 +351,12 @@ func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch p
|
||||
cp := f.chain.FinalizedCheckpt()
|
||||
headEpoch = cp.Epoch
|
||||
targetEpoch, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
|
||||
} else {
|
||||
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
|
||||
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
|
||||
|
||||
return headEpoch, targetEpoch, peers
|
||||
}
|
||||
|
||||
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
|
||||
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
|
||||
|
||||
return headEpoch, targetEpoch, peers
|
||||
}
|
||||
}
|
||||
@@ -371,13 +371,13 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
|
||||
t.Run("slot is too early", func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 0)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 0)
|
||||
assert.ErrorContains(t, "slot is too low to backtrack", err)
|
||||
})
|
||||
|
||||
t.Run("no peer status", func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 64)
|
||||
assert.ErrorContains(t, "cannot obtain peer's status", err)
|
||||
})
|
||||
|
||||
@@ -391,7 +391,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
HeadRoot: nil,
|
||||
HeadSlot: 0,
|
||||
})
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 64)
|
||||
assert.ErrorContains(t, "cannot locate non-empty slot for a peer", err)
|
||||
})
|
||||
|
||||
@@ -401,7 +401,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
assert.ErrorContains(t, "no alternative blocks exist within scanned range", err)
|
||||
})
|
||||
|
||||
@@ -413,7 +413,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(fork.bwb))
|
||||
assert.Equal(t, forkedSlot, fork.bwb[0].Block.Block().Slot(), "Expected slot %d to be ancestor", forkedSlot)
|
||||
@@ -426,7 +426,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
require.ErrorContains(t, "failed to find common ancestor", err)
|
||||
})
|
||||
|
||||
@@ -438,7 +438,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
require.NoError(t, err)
|
||||
|
||||
reqEnd := testForkStartSlot(t, 64) + primitives.Slot(findForkReqRangeSize())
|
||||
@@ -512,7 +512,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[4])
|
||||
require.NoError(t, err)
|
||||
_, err = fetcher.findAncestor(ctx, p2.PeerID(), wsb)
|
||||
_, err = fetcher.findAncestor(ctx, p2.PeerID(), nil, wsb)
|
||||
assert.ErrorContains(t, "protocols not supported", err)
|
||||
})
|
||||
|
||||
@@ -525,7 +525,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[4])
|
||||
require.NoError(t, err)
|
||||
|
||||
fork, err := fetcher.findAncestor(ctx, p2.PeerID(), wsb)
|
||||
fork, err := fetcher.findAncestor(ctx, p2.PeerID(), nil, wsb)
|
||||
assert.ErrorContains(t, "no common ancestor found", err)
|
||||
assert.Equal(t, (*forkData)(nil), fork)
|
||||
})
|
||||
|
||||
@@ -72,6 +72,8 @@ type blocksQueueConfig struct {
|
||||
db db.ReadOnlyDatabase
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageReader
|
||||
cv verification.NewDataColumnsVerifier
|
||||
}
|
||||
|
||||
// blocksQueue is a priority queue that serves as a intermediary between block fetchers (producers)
|
||||
@@ -96,7 +98,7 @@ type blocksQueue struct {
|
||||
type blocksQueueFetchedData struct {
|
||||
blocksFrom peer.ID
|
||||
blobsFrom peer.ID
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
}
|
||||
|
||||
// newBlocksQueue creates initialized priority queue.
|
||||
@@ -115,6 +117,8 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
|
||||
db: cfg.db,
|
||||
clock: cfg.clock,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
cv: cfg.cv,
|
||||
})
|
||||
}
|
||||
highestExpectedSlot := cfg.highestExpectedSlot
|
||||
@@ -479,4 +483,4 @@ func onCheckStaleEvent(ctx context.Context) eventHandlerFn {
|
||||
|
||||
return stateSkipped, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -263,7 +263,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
|
||||
highestExpectedSlot: tt.highestExpectedSlot,
|
||||
})
|
||||
assert.NoError(t, queue.start())
|
||||
processBlock := func(b blocks.BlockWithROBlobs) error {
|
||||
processBlock := func(b blocks.BlockWithROSidecars) error {
|
||||
block := b.Block
|
||||
if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) {
|
||||
return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
|
||||
@@ -275,7 +275,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
|
||||
return mc.ReceiveBlock(ctx, block, root, nil)
|
||||
}
|
||||
|
||||
var blocks []blocks.BlockWithROBlobs
|
||||
var blocks []blocks.BlockWithROSidecars
|
||||
for data := range queue.fetchedData {
|
||||
for _, b := range data.bwb {
|
||||
if err := processBlock(b); err != nil {
|
||||
@@ -538,7 +538,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
response := &fetchRequestResponse{
|
||||
blocksFrom: "abc",
|
||||
bwb: []blocks.BlockWithROBlobs{
|
||||
bwb: []blocks.BlockWithROSidecars{
|
||||
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}},
|
||||
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}},
|
||||
},
|
||||
@@ -640,7 +640,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
queue.smm.machines[256].fetched.blocksFrom = pidDataParsed
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROBlobs{
|
||||
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROSidecars{
|
||||
{Block: rwsb},
|
||||
}
|
||||
|
||||
@@ -674,7 +674,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
|
||||
{Block: rwsb},
|
||||
}
|
||||
|
||||
@@ -705,7 +705,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
|
||||
{Block: rwsb},
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
"context"
"encoding/hex"
"fmt"
"sort"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -13,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
@@ -78,6 +80,8 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
highestExpectedSlot: highestSlot,
mode: mode,
bs: s.cfg.BlobStorage,
dcs: s.cfg.DataColumnStorage,
cv: s.newDataColumnsVerifier,
}
queue := newBlocksQueue(ctx, cfg)
if err := queue.start(); err != nil {
@@ -157,31 +161,82 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
log.WithError(err).Debug("Batch did not contain a valid sequence of unprocessed blocks")
return 0, err
}

if len(bwb) == 0 {
return 0, nil
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
"firstUnprocessed": bwb[0].Block.Block().Slot(),

// Separate blocks with blobs from blocks with data columns.
fistDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
return bwb[i].Block.Version() >= version.Fulu
})

blocksWithBlobs := bwb[:fistDataColumnIndex]
blocksWithDataColumns := bwb[fistDataColumnIndex:]

blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)

log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
logBlobs, logDataColumns := log, log

if len(blocksWithBlobs) > 0 {
logBlobs = logBlobs.WithField("firstUnprocessed", blocksWithBlobs[0].Block.Block().Slot())
}
for i, b := range bwb {
sidecars := blocks.NewSidecarsFromBlobSidecars(b.Blobs)
if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")

for i, b := range blocksWithBlobs {
if err := lazilyPersistentStoreBlobs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warning("Batch failure due to BlobSidecar issues")
return uint64(i), err
}
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {

if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreBlobs); err != nil {
if errors.Is(err, errParentDoesNotExist) {
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
logBlobs.WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
} else {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warn("Block processing failure")
}

return uint64(i), err
}
}

if len(blocksWithDataColumns) == 0 {
return uint64(len(bwb)), nil
}

// Save data column sidecars.
count := 0
for _, b := range blocksWithDataColumns {
count += len(b.Columns)
}

sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, blockWithDataColumns := range blocksWithDataColumns {
sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
}

if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
return 0, errors.Wrap(err, "save data column sidecars")
}

for i, b := range blocksWithDataColumns {
logDataColumns := logDataColumns.WithFields(syncFields(b.Block))

if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, nil); err != nil {
switch {
case errors.Is(err, errParentDoesNotExist):
logDataColumns.
WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
Debug("Could not process batch blocks due to missing parent")
return uint64(i), err
default:
logDataColumns.WithError(err).Warning("Block processing failure")
return uint64(i), err
}
}
}
return uint64(len(bwb)), nil
}
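A minimal, self-contained sketch of the split performed above with sort.Search: because the batch is ordered by slot, all pre-Fulu blocks (blob sidecars) come before the first Fulu block (data column sidecars), so a binary search over the version predicate yields the cut point. The version numbers below are illustrative placeholders, not Prysm's real constants.

```go
package main

import (
	"fmt"
	"sort"
)

const versionFulu = 6 // placeholder value, for illustration only

func main() {
	// Block versions of a slot-ordered batch: Deneb/Electra first, then Fulu.
	versions := []int{4, 4, 5, 6, 6}

	// sort.Search returns the smallest index for which the predicate is true,
	// i.e. the index of the first Fulu block (or len(versions) if there is none).
	firstFulu := sort.Search(len(versions), func(i int) bool {
		return versions[i] >= versionFulu
	})

	withBlobs := versions[:firstFulu]   // processed through the blob DAS store
	withColumns := versions[firstFulu:] // columns saved to DataColumnStorage

	fmt.Println(len(withBlobs), "blocks with blobs,", len(withColumns), "blocks with data columns")
}
```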
@@ -193,12 +248,18 @@ func syncFields(b blocks.ROBlock) logrus.Fields {
}

// highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers.
// Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us.
// It returns `0` if no peers are connected.
// Note this can be lower than our finalized epoch if our connected peers are all behind us.
func (s *Service) highestFinalizedEpoch() primitives.Epoch {
highest := primitives.Epoch(0)
for _, pid := range s.cfg.P2P.Peers().Connected() {
peerChainState, err := s.cfg.P2P.Peers().ChainState(pid)
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest {

if err != nil || peerChainState == nil {
continue
}

if peerChainState.FinalizedEpoch > highest {
highest = peerChainState.FinalizedEpoch
}
}
@@ -250,7 +311,7 @@ func (s *Service) logBatchSyncStatus(firstBlk blocks.ROBlock, nBlocks int) {
func (s *Service) processBlock(
ctx context.Context,
genesis time.Time,
bwb blocks.BlockWithROBlobs,
bwb blocks.BlockWithROSidecars,
blockReceiver blockReceiverFn,
avs das.AvailabilityStore,
) error {
@@ -269,7 +330,7 @@ func (s *Service) processBlock(

type processedChecker func(context.Context, blocks.ROBlock) bool

func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) {
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROSidecars, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROSidecars, error) {
// use a pointer to avoid confusing the zero-value with the case where the first element is processed.
var processed *int
for i := range bwb {
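The comment about using a pointer rather than a plain int is the standard Go trick for distinguishing "nothing processed yet" from "element 0 was processed". A tiny, self-contained illustration (unrelated to Prysm's types):

```go
package main

import "fmt"

// lastProcessed returns a pointer to the index of the last processed element,
// or nil when nothing was processed. A plain int could not distinguish
// "index 0 was processed" from "nothing was processed".
func lastProcessed(processed []bool) *int {
	var last *int
	for i, p := range processed {
		if p {
			idx := i // copy, so the pointer does not alias the loop variable
			last = &idx
		}
	}
	return last
}

func main() {
	fmt.Println(lastProcessed([]bool{false, false}) == nil) // true: nothing processed
	fmt.Println(*lastProcessed([]bool{true, false}))        // 0: first element processed
}
```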
@@ -299,43 +360,100 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSl
return bwb[nonProcessedIdx:], nil
}

func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) (uint64, error) {
if len(bwb) == 0 {
func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn) (uint64, error) {
bwbCount := uint64(len(bwb))
if bwbCount == 0 {
return 0, errors.New("0 blocks provided into method")
}

headSlot := s.cfg.Chain.HeadSlot()
var err error
bwb, err = validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
bwb, err := validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
if err != nil {
return 0, err
}
if len(bwb) == 0 {
return 0, nil
}

first := bwb[0].Block
if !s.cfg.Chain.HasBlock(ctx, first.Block().ParentRoot()) {
firstBlock := bwb[0].Block
if !s.cfg.Chain.HasBlock(ctx, firstBlock.Block().ParentRoot()) {
return 0, fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
errParentDoesNotExist, firstBlock.Block().ParentRoot(), firstBlock.Block().Slot())
}

bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(first, len(bwb))
for _, bb := range bwb {
if len(bb.Blobs) == 0 {
firstFuluIndex, err := findFirstFuluIndex(bwb)
if err != nil {
return 0, errors.Wrap(err, "finding first Fulu index")
}

blocksWithBlobs := bwb[:firstFuluIndex]
blocksWithDataColumns := bwb[firstFuluIndex:]

if err := s.processBlocksWithBlobs(ctx, blocksWithBlobs, bFunc, firstBlock); err != nil {
return 0, errors.Wrap(err, "processing blocks with blobs")
}

if err := s.processBlocksWithDataColumns(ctx, blocksWithDataColumns, bFunc, firstBlock); err != nil {
return 0, errors.Wrap(err, "processing blocks with data columns")
}

return bwbCount, nil
}
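findFirstFuluIndex is called above but its body is not part of this hunk. The sketch below is only a guess at what such a helper could look like: a sort.Search over the block version plus a sanity check that versions are non-decreasing, which would explain why the helper can return an error. All names and the version constant are placeholders, not the real implementation.

```go
package main

import (
	"errors"
	"fmt"
	"sort"
)

const versionFulu = 6 // placeholder

// findFirstFuluIndex returns the index of the first block whose version is >= Fulu.
// It errors out if the batch is not sorted by version, since the caller slices the
// batch into a pre-Fulu prefix and a post-Fulu suffix.
func findFirstFuluIndex(versions []int) (int, error) {
	for i := 1; i < len(versions); i++ {
		if versions[i] < versions[i-1] {
			return 0, errors.New("batch is not ordered by block version")
		}
	}
	return sort.Search(len(versions), func(i int) bool {
		return versions[i] >= versionFulu
	}), nil
}

func main() {
	idx, err := findFirstFuluIndex([]int{4, 5, 5, 6, 6})
	fmt.Println(idx, err) // 3 <nil>
}
```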
func (s *Service) processBlocksWithBlobs(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
bwbCount := len(bwbs)
if bwbCount == 0 {
return nil
}

batchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
persistentStore := das.NewLazilyPersistentStore(s.cfg.BlobStorage, batchVerifier)
s.logBatchSyncStatus(firstBlock, bwbCount)

for _, bwb := range bwbs {
if len(bwb.Blobs) == 0 {
continue
}

sidecars := blocks.NewSidecarsFromBlobSidecars(bb.Blobs)

if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
return 0, err
if err := persistentStore.Persist(s.clock.CurrentSlot(), bwb.Blobs...); err != nil {
return errors.Wrap(err, "persisting blobs")
}
}

robs := blocks.BlockWithROBlobsSlice(bwb).ROBlocks()
return uint64(len(bwb)), bFunc(ctx, robs, avs)
robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
if err := bFunc(ctx, robs, persistentStore); err != nil {
return errors.Wrap(err, "processing blocks with blobs")
}

return nil
}
func (s *Service) processBlocksWithDataColumns(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
bwbCount := len(bwbs)
if bwbCount == 0 {
return nil
}

s.logBatchSyncStatus(firstBlock, bwbCount)

// Save data column sidecars.
count := 0
for _, bwb := range bwbs {
count += len(bwb.Columns)
}

sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, blockWithDataColumns := range bwbs {
sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
}

if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
if err := bFunc(ctx, robs, nil); err != nil {
return errors.Wrap(err, "process post-Fulu blocks")
}

return nil
}
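Both helpers above hand the batch to a batchBlockReceiverFn; judging from the tests later in this diff, its signature is func(context.Context, []blocks.ROBlock, das.AvailabilityStore) error. The stand-in below only illustrates that contract (the blob path passes a DAS store, the data-column path passes nil because the columns were already saved); the types are local placeholders, not Prysm's.

```go
package main

import (
	"context"
	"fmt"
)

// Local stand-ins for blocks.ROBlock and das.AvailabilityStore.
type roBlock struct{ Slot uint64 }
type availabilityStore interface {
	IsDataAvailable(ctx context.Context, slot uint64, b roBlock) error
}

// batchBlockReceiverFn mirrors the assumed receiver signature.
type batchBlockReceiverFn func(ctx context.Context, blks []roBlock, avs availabilityStore) error

func receive(ctx context.Context, blks []roBlock, avs availabilityStore) error {
	for _, b := range blks {
		// avs is nil on the post-Fulu path: data columns were persisted up front,
		// so availability is not re-checked through the blob DAS store here.
		if avs != nil {
			if err := avs.IsDataAvailable(ctx, b.Slot, b); err != nil {
				return err
			}
		}
		fmt.Println("importing block at slot", b.Slot)
	}
	return nil
}

func main() {
	var fn batchBlockReceiverFn = receive
	_ = fn(context.Background(), []roBlock{{Slot: 1}, {Slot: 2}}, nil)
}
```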
func isPunishableError(err error) bool {
@@ -380,4 +498,4 @@ func (s *Service) isProcessedBlock(ctx context.Context, blk blocks.ROBlock) bool
func (s *Service) downscorePeer(peerID peer.ID, reason string) {
newScore := s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}
}
@@ -8,9 +8,11 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -308,7 +310,7 @@ func TestService_roundRobinSync(t *testing.T) {
|
||||
} // no-op mock
|
||||
clock := startup.NewClock(gt, vr)
|
||||
s := &Service{
|
||||
ctx: t.Context(),
|
||||
ctx: context.Background(),
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.NewBool(true),
|
||||
@@ -373,7 +375,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
||||
return nil
|
||||
@@ -385,7 +387,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err = blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||
return nil
|
||||
}, nil)
|
||||
@@ -396,7 +398,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err = blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
||||
return nil
|
||||
@@ -432,7 +434,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
s.genesisTime = genesis
|
||||
|
||||
t.Run("process non-linear batch", func(t *testing.T) {
|
||||
var batch []blocks.BlockWithROBlobs
|
||||
var batch []blocks.BlockWithROSidecars
|
||||
currBlockRoot := genesisBlkRoot
|
||||
for i := primitives.Slot(1); i < 10; i++ {
|
||||
parentRoot := currBlockRoot
|
||||
@@ -446,11 +448,11 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
|
||||
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
|
||||
var batch2 []blocks.BlockWithROBlobs
|
||||
var batch2 []blocks.BlockWithROSidecars
|
||||
for i := primitives.Slot(10); i < 20; i++ {
|
||||
parentRoot := currBlockRoot
|
||||
blk1 := util.NewBeaconBlock()
|
||||
@@ -463,7 +465,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb})
|
||||
batch2 = append(batch2, blocks.BlockWithROSidecars{Block: rowsb})
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
|
||||
@@ -485,7 +487,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
assert.ErrorContains(t, "block is already processed", err)
|
||||
require.Equal(t, uint64(0), count)
|
||||
|
||||
var badBatch2 []blocks.BlockWithROBlobs
|
||||
var badBatch2 []blocks.BlockWithROSidecars
|
||||
for i, b := range batch2 {
|
||||
// create a non-linear batch
|
||||
if i%3 == 0 && i != 0 {
|
||||
@@ -568,7 +570,7 @@ func TestService_blockProviderScoring(t *testing.T) {
|
||||
} // no-op mock
|
||||
clock := startup.NewClock(gt, vr)
|
||||
s := &Service{
|
||||
ctx: t.Context(),
|
||||
ctx: context.Background(),
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.NewBool(true),
|
||||
@@ -637,7 +639,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) {
|
||||
ValidatorsRoot: vr,
|
||||
}
|
||||
s := &Service{
|
||||
ctx: t.Context(),
|
||||
ctx: context.Background(),
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.NewBool(true),
|
||||
@@ -685,7 +687,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, t.Context(), beaconDB, genesisBlk)
|
||||
|
||||
var batch []blocks.BlockWithROBlobs
|
||||
var batch []blocks.BlockWithROSidecars
|
||||
currBlockRoot := genesisBlkRoot
|
||||
for i := primitives.Slot(1); i < 10; i++ {
|
||||
parentRoot := currBlockRoot
|
||||
@@ -699,7 +701,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
|
||||
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
|
||||
@@ -712,3 +714,155 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
||||
// Ensure that the unprocessed batch is returned correctly.
|
||||
assert.Equal(t, len(retBlocks), len(batch)-2)
|
||||
}
|
||||
|
||||
func TestService_PropcessFetchedDataRegSync(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create Fulu blocks.
|
||||
fuluBlock1 := util.NewBeaconBlockFulu()
|
||||
signedFuluBlock1, err := blocks.NewSignedBeaconBlock(fuluBlock1)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock1, err := blocks.NewROBlock(signedFuluBlock1)
|
||||
require.NoError(t, err)
|
||||
block1Root := roFuluBlock1.Root()
|
||||
|
||||
fuluBlock2 := util.NewBeaconBlockFulu()
|
||||
fuluBlock2.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
fuluBlock2.Block.Slot = 1
|
||||
fuluBlock2.Block.ParentRoot = block1Root[:]
|
||||
signedFuluBlock2, err := blocks.NewSignedBeaconBlock(fuluBlock2)
|
||||
require.NoError(t, err)
|
||||
|
||||
roFuluBlock2, err := blocks.NewROBlock(signedFuluBlock2)
|
||||
require.NoError(t, err)
|
||||
block2Root := roFuluBlock2.Root()
|
||||
parentRoot2 := roFuluBlock2.Block().ParentRoot()
|
||||
bodyRoot2, err := roFuluBlock2.Block().Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a mock chain service.
|
||||
const validatorCount = uint64(64)
|
||||
state, _ := util.DeterministicGenesisState(t, validatorCount)
|
||||
chain := &mock.ChainService{
|
||||
FinalizedCheckPoint: ð.Checkpoint{},
|
||||
DB: dbtest.SetupDB(t),
|
||||
State: state,
|
||||
Root: block1Root[:],
|
||||
}
|
||||
|
||||
// Create a new service instance.
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
Chain: chain,
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
},
|
||||
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
|
||||
}
|
||||
|
||||
// Save the parent block in the database.
|
||||
err = chain.DB.SaveBlock(ctx, roFuluBlock1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create data column sidecars.
|
||||
const count = uint64(3)
|
||||
params := make([]util.DataColumnParam, 0, count)
|
||||
for i := range count {
|
||||
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot2[:], ParentRoot: parentRoot2[:], Slot: roFuluBlock2.Block().Slot()}
|
||||
params = append(params, param)
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||
|
||||
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||
{Block: roFuluBlock2, Columns: verifiedRoDataColumnSidecars},
|
||||
}
|
||||
|
||||
data := &blocksQueueFetchedData{
|
||||
bwb: blocksWithSidecars,
|
||||
}
|
||||
|
||||
actual, err := service.processFetchedDataRegSync(ctx, data)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), actual)
|
||||
|
||||
// Check block and data column sidecars were saved correctly.
|
||||
require.Equal(t, true, chain.DB.HasBlock(ctx, block2Root))
|
||||
|
||||
summary := dataColumnStorage.Summary(block2Root)
|
||||
for i := range count {
|
||||
require.Equal(t, true, summary.HasIndex(i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_processBlocksWithDataColumns(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("no blocks", func(t *testing.T) {
|
||||
fuluBlock := util.NewBeaconBlockFulu()
|
||||
|
||||
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := new(Service)
|
||||
err = service.processBlocksWithDataColumns(ctx, nil, nil, roFuluBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
fuluBlock := util.NewBeaconBlockFulu()
|
||||
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||
require.NoError(t, err)
|
||||
bodyRoot, err := roFuluBlock.Block().Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create data column sidecars.
|
||||
const count = uint64(3)
|
||||
params := make([]util.DataColumnParam, 0, count)
|
||||
for i := range count {
|
||||
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot[:]}
|
||||
params = append(params, param)
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||
|
||||
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||
{Block: roFuluBlock, Columns: verifiedRoDataColumnSidecars},
|
||||
}
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a service.
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
P2P: p2pt.NewTestP2P(t),
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
},
|
||||
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
|
||||
}
|
||||
|
||||
receiverFunc := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
require.Equal(t, 1, len(blks))
|
||||
return nil
|
||||
}
|
||||
|
||||
err = service.processBlocksWithDataColumns(ctx, blocksWithSidecars, receiverFunc, roFuluBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that the data columns were saved correctly.
|
||||
summary := dataColumnStorage.Summary(roFuluBlock.Root())
|
||||
for i := range count {
|
||||
require.Equal(t, true, summary.HasIndex(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
@@ -53,22 +54,24 @@ type Config struct {
|
||||
ClockWaiter startup.ClockWaiter
|
||||
InitialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
}
|
||||
|
||||
// Service service.
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
synced *abool.AtomicBool
|
||||
chainStarted *abool.AtomicBool
|
||||
counter *ratecounter.RateCounter
|
||||
genesisChan chan time.Time
|
||||
clock *startup.Clock
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
ctxMap sync.ContextByteVersions
|
||||
genesisTime time.Time
|
||||
cfg *Config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
synced *abool.AtomicBool
|
||||
chainStarted *abool.AtomicBool
|
||||
counter *ratecounter.RateCounter
|
||||
genesisChan chan time.Time
|
||||
clock *startup.Clock
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
ctxMap sync.ContextByteVersions
|
||||
genesisTime time.Time
|
||||
}
|
||||
|
||||
// Option is a functional option for the initial-sync Service.
|
||||
@@ -149,6 +152,7 @@ func (s *Service) Start() {
|
||||
return
|
||||
}
|
||||
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
||||
s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
|
||||
|
||||
gt := clock.GenesisTime()
|
||||
if gt.IsZero() {
|
||||
@@ -175,19 +179,22 @@ func (s *Service) Start() {
|
||||
}
|
||||
s.chainStarted.Set()
|
||||
log.Info("Starting initial chain sync...")
|
||||
|
||||
// Are we already in sync, or close to it?
|
||||
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
|
||||
log.Info("Already synced to the current chain head")
|
||||
s.markSynced()
|
||||
return
|
||||
}
|
||||
|
||||
peers, err := s.waitForMinimumPeers()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error waiting for minimum number of peers")
|
||||
return
|
||||
}
|
||||
if err := s.fetchOriginBlobs(peers); err != nil {
|
||||
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
|
||||
|
||||
if err := s.fetchOriginSidecars(peers); err != nil {
|
||||
log.WithError(err).Error("Error fetching origin sidecars")
|
||||
return
|
||||
}
|
||||
if err := s.roundRobinSync(); err != nil {
|
||||
@@ -200,6 +207,48 @@ func (s *Service) Start() {
|
||||
s.markSynced()
|
||||
}
|
||||
|
||||
// fetchOriginSidecars fetches origin sidecars
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
}

block, err := s.cfg.DB.Block(s.ctx, blockRoot)
if err != nil {
return errors.Wrap(err, "block")
}

currentSlot, blockSlot := s.clock.CurrentSlot(), block.Block().Slot()
currentEpoch, blockEpoch := slots.ToEpoch(currentSlot), slots.ToEpoch(blockSlot)

if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}

roBlock, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return errors.Wrap(err, "new ro block with root")
}

blockVersion := roBlock.Version()

if blockVersion >= version.Fulu {
if err := s.fetchOriginColumns(peers, roBlock); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
}

if blockVersion >= version.Deneb {
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
return errors.Wrap(err, "fetch origin blobs")
}
}

return nil
}
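fetchOriginSidecars skips fetching when the origin block falls outside the data-availability retention window. A self-contained sketch of that check, roughly mirroring the params.WithinDAPeriod guard above; 32 slots per epoch and a 4096-epoch window are illustrative stand-ins for the real config values:

```go
package main

import "fmt"

const (
	slotsPerEpoch   = 32   // illustrative; the real value comes from params.BeaconConfig()
	retentionEpochs = 4096 // illustrative MIN_EPOCHS_FOR_* retention window
)

// withinDAPeriod reports whether a block epoch is still inside the retention
// window relative to the current epoch.
func withinDAPeriod(blockEpoch, currentEpoch uint64) bool {
	if currentEpoch < retentionEpochs {
		return true // chain is younger than the window; everything is retained
	}
	return blockEpoch >= currentEpoch-retentionEpochs
}

func main() {
	blockSlot, currentSlot := uint64(0), uint64(5000*slotsPerEpoch)
	blockEpoch, currentEpoch := blockSlot/slotsPerEpoch, currentSlot/slotsPerEpoch
	fmt.Println(withinDAPeriod(blockEpoch, currentEpoch)) // false: origin is outside retention
}
```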
// Stop initial sync.
|
||||
func (s *Service) Stop() error {
|
||||
s.cancel()
|
||||
@@ -304,23 +353,9 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
return nil
|
||||
}
|
||||
blk, err := s.cfg.DB.Block(s.ctx, r)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
|
||||
return err
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
rob, err := blocks.NewROBlockWithRoot(blk, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
|
||||
r := rob.Root()
|
||||
|
||||
req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -335,16 +370,17 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(blobSidecars) != len(req) {
|
||||
continue
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
current := s.clock.CurrentSlot()
|
||||
sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
if err := avs.Persist(current, sidecars...); err != nil {
|
||||
if err := avs.Persist(current, blobSidecars...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
|
||||
continue
|
||||
@@ -355,6 +391,67 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Return early if the origin block has no blob commitments.
|
||||
commitments, err := roBlock.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch blob commitments")
|
||||
}
|
||||
|
||||
if len(commitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the columns to request.
|
||||
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
info, _, err := peerdas.Info(s.cfg.P2P.NodeID(), samplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch peer info")
|
||||
}
|
||||
|
||||
// Fetch origin data column sidecars.
|
||||
root := roBlock.Root()
|
||||
|
||||
params := sync.DataColumnSidecarsParams{
|
||||
Ctx: s.ctx,
|
||||
Tor: s.clock,
|
||||
P2P: s.cfg.P2P,
|
||||
CtxMap: s.ctxMap,
|
||||
Storage: s.cfg.DataColumnStorage,
|
||||
NewVerifier: s.newDataColumnsVerifier,
|
||||
}
|
||||
|
||||
verfifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch data column sidecars")
|
||||
}
|
||||
|
||||
// Save origin data columns to disk.
|
||||
verifiedRoDataColumnsSidecars, ok := verfifiedRoDataColumnsByRoot[root]
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
|
||||
}
|
||||
|
||||
if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
|
||||
"blobCount": len(commitments),
|
||||
"columnCount": len(verifiedRoDataColumnsSidecars),
|
||||
}).Info("Successfully downloaded data columns for checkpoint sync block")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
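fetchOriginColumns asks peerdas.Info for the node's custody columns and requests only those. The test further down filters verified sidecars with info.CustodyColumns[sidecar.Index], which suggests a set keyed by column index. The sketch below only illustrates that filtering and the sampling-size rule (the maximum of the custody group count and SAMPLES_PER_SLOT); all numbers are placeholders.

```go
package main

import "fmt"

func main() {
	const samplesPerSlot = 8 // illustrative SAMPLES_PER_SLOT
	custodyGroupCount := uint64(4)

	// The node must sample at least samplesPerSlot columns, even if it custodies fewer groups.
	// max is the Go 1.21+ builtin.
	samplingSize := max(custodyGroupCount, samplesPerSlot)

	// Custody columns as a set keyed by column index (placeholder indices).
	custodyColumns := map[uint64]bool{0: true, 17: true, 42: true}

	// Keep only the sidecars whose column index the node custodies.
	sidecarIndices := []uint64{0, 1, 17, 100}
	var needed []uint64
	for _, idx := range sidecarIndices {
		if custodyColumns[idx] {
			needed = append(needed, idx)
		}
	}
	fmt.Println("sampling size:", samplingSize, "custodied sidecars:", needed)
}
```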
func shufflePeers(pids []peer.ID) {
|
||||
rg := rand.NewGenerator()
|
||||
rg.Shuffle(len(pids), func(i, j int) {
|
||||
@@ -367,3 +464,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
|
||||
return ini.NewBlobVerifier(b, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
|
||||
return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return ini.NewDataColumnsVerifier(roDataColumns, reqs)
|
||||
}
|
||||
}
|
||||
@@ -7,14 +7,17 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -138,7 +141,7 @@ func TestService_InitStartStop(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
p := p2pt.NewTestP2P(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
connectPeers(t, p, []*peerData{}, p.Peers())
|
||||
for i, tt := range tests {
|
||||
if i == 0 {
|
||||
@@ -328,7 +331,7 @@ func TestService_markSynced(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_Resync(t *testing.T) {
|
||||
p := p2pt.NewTestP2P(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
connectPeers(t, p, []*peerData{
|
||||
{blocks: makeSequence(1, 160), finalizedEpoch: 5, headSlot: 160},
|
||||
}, p.Peers())
|
||||
@@ -511,5 +514,152 @@ func TestOriginOutsideRetention(t *testing.T) {
|
||||
require.NoError(t, concreteDB.SaveOriginCheckpointBlockRoot(ctx, blk.Root()))
|
||||
// This would break due to missing service dependencies, but will return nil fast due to being outside retention.
|
||||
require.Equal(t, false, params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(clock.CurrentSlot())))
|
||||
require.NoError(t, s.fetchOriginBlobs([]peer.ID{}))
|
||||
require.NoError(t, s.fetchOriginSidecars([]peer.ID{}))
|
||||
}
|
||||
|
||||
func TestFetchOriginSidecars(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
|
||||
secondsPerSlot := beaconConfig.SecondsPerSlot
|
||||
slotsPerEpoch := beaconConfig.SlotsPerEpoch
|
||||
secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
|
||||
retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
|
||||
|
||||
genesisValidatorRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
t.Run("out of retention period", func(t *testing.T) {
|
||||
// Create an origin block.
|
||||
block := util.NewBeaconBlockFulu()
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
roBlock, err := blocks.NewROBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save the block.
|
||||
db := dbtest.SetupDB(t)
|
||||
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
|
||||
require.NoError(t, err)
|
||||
err = db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define "now" to be one epoch after genesis time + retention period.
|
||||
nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
|
||||
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||
nower := func() time.Time { return now }
|
||||
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
},
|
||||
clock: clock,
|
||||
}
|
||||
|
||||
err = service.fetchOriginSidecars(nil)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("no commitments", func(t *testing.T) {
|
||||
// Create an origin block.
|
||||
block := util.NewBeaconBlockFulu()
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
roBlock, err := blocks.NewROBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save the block.
|
||||
db := dbtest.SetupDB(t)
|
||||
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
|
||||
require.NoError(t, err)
|
||||
err = db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define "now" to be after genesis time + retention period.
|
||||
nowWrtGenesisSecs := retentionEpochs.Mul(secondsPerEpoch)
|
||||
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||
nower := func() time.Time { return now }
|
||||
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
P2P: p2ptest.NewTestP2P(t),
|
||||
},
|
||||
clock: clock,
|
||||
}
|
||||
|
||||
err = service.fetchOriginSidecars(nil)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create block and sidecars.
|
||||
const blobCount = 1
|
||||
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
// Save the block.
|
||||
db := dbtest.SetupDB(t)
|
||||
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
|
||||
require.NoError(t, err)
|
||||
|
||||
err = db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute the columns to request.
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
custodyGroupCount, err := p2p.CustodyGroupCount()
|
||||
require.NoError(t, err)
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
info, _, err := peerdas.Info(p2p.NodeID(), samplingSize)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save all sidecars except what we need.
|
||||
toSave := make([]blocks.VerifiedRODataColumn, 0, uint64(len(verifiedRoSidecars))-samplingSize)
|
||||
for _, sidecar := range verifiedRoSidecars {
|
||||
if !info.CustodyColumns[sidecar.Index] {
|
||||
toSave = append(toSave, sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Save(toSave)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define "now" to be after genesis time + retention period.
|
||||
nowWrtGenesisSecs := retentionEpochs.Mul(secondsPerEpoch)
|
||||
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||
nower := func() time.Time { return now }
|
||||
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
DB: db,
|
||||
P2P: p2p,
|
||||
DataColumnStorage: dataColumnStorage,
|
||||
},
|
||||
clock: clock,
|
||||
}
|
||||
|
||||
err = service.fetchOriginSidecars(nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that needed sidecars are saved.
|
||||
summary := dataColumnStorage.Summary(roBlock.Root())
|
||||
for index := range info.CustodyColumns {
|
||||
require.Equal(t, true, summary.HasIndex(index))
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -175,8 +176,9 @@ func (s *Service) getBlocksInQueue(slot primitives.Slot) []interfaces.ReadOnlySi
|
||||
func (s *Service) removeBlockFromQueue(b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
||||
s.pendingQueueLock.Lock()
|
||||
defer s.pendingQueueLock.Unlock()
|
||||
|
||||
if err := s.deleteBlockFromPendingQueue(b.Block().Slot(), b, blkRoot); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "delete block from pending queue")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -196,41 +198,82 @@ func (s *Service) hasPeer() bool {
|
||||
var errNoPeersForPending = errors.New("no suitable peers to process pending block queue, delaying")
|
||||
|
||||
// processAndBroadcastBlock validates, processes, and broadcasts a block.
|
||||
// part of the function is to request missing blobs from peers if the block contains kzg commitments.
|
||||
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
||||
// Part of the function is to request missing sidecars from peers if the block contains kzg commitments.
|
||||
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte) error {
|
||||
if err := s.processBlock(ctx, b, blkRoot); err != nil {
|
||||
return errors.Wrap(err, "process block")
|
||||
}
|
||||
|
||||
if err := s.receiveAndBroadCastBlock(ctx, b, blkRoot, b.Block().Slot()); err != nil {
|
||||
return errors.Wrap(err, "receive and broadcast block")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte) error {
|
||||
blockSlot := b.Block().Slot()
|
||||
|
||||
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
|
||||
if !errors.Is(ErrOptimisticParent, err) {
|
||||
log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not validate block")
|
||||
log.WithError(err).WithField("slot", blockSlot).Debug("Could not validate block")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
|
||||
blockEpoch, denebForkEpoch, fuluForkEpoch := slots.ToEpoch(blockSlot), params.BeaconConfig().DenebForkEpoch, params.BeaconConfig().FuluForkEpoch
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(b, blkRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(request) > 0 {
|
||||
peers := s.getBestPeers()
|
||||
peerCount := len(peers)
|
||||
if peerCount == 0 {
|
||||
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
||||
}
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
||||
return err
|
||||
}
|
||||
return errors.Wrap(err, "new ro block with root")
|
||||
}
|
||||
|
||||
if blockEpoch >= fuluForkEpoch {
|
||||
if err := s.requestAndSaveMissingDataColumnSidecars([]blocks.ROBlock{roBlock}); err != nil {
|
||||
return errors.Wrap(err, "request and save missing data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if blockEpoch >= denebForkEpoch {
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "pending blobs request for block")
|
||||
}
|
||||
|
||||
if len(request) > 0 {
|
||||
peers := s.getBestPeers()
|
||||
peerCount := len(peers)
|
||||
|
||||
if peerCount == 0 {
|
||||
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
||||
}
|
||||
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
||||
return errors.Wrap(err, "send and save blob sidecars")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
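The rewritten processBlock above dispatches on the block's epoch: Fulu and later blocks get missing data column sidecars requested, Deneb-and-later (pre-Fulu) blocks get missing blob sidecars, and earlier blocks need neither. A compact, self-contained sketch of that dispatch; the fork epochs are placeholders, not real network values.

```go
package main

import "fmt"

type sidecarKind int

const (
	sidecarNone sidecarKind = iota
	sidecarBlobs
	sidecarColumns
)

// neededSidecars mirrors the dispatch in processBlock: columns from Fulu on,
// blobs from Deneb on, nothing before Deneb.
func neededSidecars(blockEpoch, denebEpoch, fuluEpoch uint64) sidecarKind {
	switch {
	case blockEpoch >= fuluEpoch:
		return sidecarColumns
	case blockEpoch >= denebEpoch:
		return sidecarBlobs
	default:
		return sidecarNone
	}
}

func main() {
	const denebEpoch, fuluEpoch = 100, 200 // placeholder fork epochs
	fmt.Println(neededSidecars(50, denebEpoch, fuluEpoch))  // 0: no sidecars
	fmt.Println(neededSidecars(150, denebEpoch, fuluEpoch)) // 1: blobs
	fmt.Println(neededSidecars(250, denebEpoch, fuluEpoch)) // 2: data columns
}
```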
func (s *Service) receiveAndBroadCastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte, blockSlot primitives.Slot) error {
|
||||
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot, nil); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "receive block")
|
||||
}
|
||||
|
||||
s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex())
|
||||
s.setSeenBlockIndexSlot(blockSlot, b.Block().ProposerIndex())
|
||||
|
||||
pb, err := b.Proto()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not get protobuf block")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.Broadcast(ctx, pb); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast block")
|
||||
return err
|
||||
@@ -286,55 +329,105 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
||||
ctx, span := prysmTrace.StartSpan(ctx, "sendBatchRootRequest")
|
||||
defer span.End()
|
||||
|
||||
roots = dedupRoots(roots)
|
||||
s.pendingQueueLock.RLock()
|
||||
for i := len(roots) - 1; i >= 0; i-- {
|
||||
r := roots[i]
|
||||
if s.seenPendingBlocks[r] || s.cfg.chain.BlockBeingSynced(r) {
|
||||
roots = append(roots[:i], roots[i+1:]...)
|
||||
} else {
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
||||
}
|
||||
}
|
||||
s.pendingQueueLock.RUnlock()
|
||||
|
||||
// Exit early if there are no roots to request.
|
||||
if len(roots) == 0 {
|
||||
return nil
|
||||
}
|
||||
bestPeers := s.getBestPeers()
|
||||
if len(bestPeers) == 0 {
|
||||
|
||||
// Remove duplicates (if any) from the list of roots.
|
||||
roots = dedupRoots(roots)
|
||||
|
||||
// Filters out in place roots that are already seen in pending blocks or being synced.
|
||||
func() {
|
||||
s.pendingQueueLock.RLock()
|
||||
defer s.pendingQueueLock.RUnlock()
|
||||
|
||||
for i := len(roots) - 1; i >= 0; i-- {
|
||||
r := roots[i]
|
||||
if s.seenPendingBlocks[r] || s.cfg.chain.BlockBeingSynced(r) {
|
||||
roots = append(roots[:i], roots[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
||||
}
|
||||
}()
|
||||
|
||||
// Nothing to do, exit early.
|
||||
if len(roots) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Randomly choose a peer to query from our best peers. If that peer cannot return
|
||||
// all the requested blocks, we randomly select another peer.
|
||||
pid := bestPeers[randGen.Int()%len(bestPeers)]
|
||||
for i := 0; i < numOfTries; i++ {
|
||||
|
||||
// Fetch best peers to request blocks from.
|
||||
bestPeers := s.getBestPeers()
|
||||
|
||||
// No suitable peer, exit early.
|
||||
if len(bestPeers) == 0 {
|
||||
log.WithField("roots", fmt.Sprintf("%#x", roots)).Debug("Send batch root request: No suitable peers")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Randomly choose a peer to query from our best peers.
|
||||
// If that peer cannot return all the requested blocks,
|
||||
// we randomly select another peer.
|
||||
randomIndex := randGen.Int() % len(bestPeers)
|
||||
pid := bestPeers[randomIndex]
|
||||
|
||||
for range numOfTries {
|
||||
req := p2ptypes.BeaconBlockByRootsReq(roots)
|
||||
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
|
||||
|
||||
// Get the current epoch.
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Trim the request to the maximum number of blocks we can request if needed.
|
||||
maxReqBlock := params.MaxRequestBlock(currentEpoch)
|
||||
if uint64(len(roots)) > maxReqBlock {
|
||||
rootCount := uint64(len(roots))
|
||||
if rootCount > maxReqBlock {
|
||||
req = roots[:maxReqBlock]
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Debug("Could not send recent block request")
|
||||
}
|
||||
newRoots := make([][32]byte, 0, len(roots))
|
||||
s.pendingQueueLock.RLock()
|
||||
for _, rt := range roots {
|
||||
if !s.seenPendingBlocks[rt] {
|
||||
newRoots = append(newRoots, rt)
|
||||
|
||||
// Filter out roots that are already seen in pending blocks.
|
||||
newRoots := make([][32]byte, 0, rootCount)
|
||||
func() {
|
||||
s.pendingQueueLock.RLock()
|
||||
defer s.pendingQueueLock.RUnlock()
|
||||
|
||||
for _, rt := range roots {
|
||||
if !s.seenPendingBlocks[rt] {
|
||||
newRoots = append(newRoots, rt)
|
||||
}
|
||||
}
|
||||
}
|
||||
s.pendingQueueLock.RUnlock()
|
||||
}()
|
||||
|
||||
// Exit early if all roots have been seen.
|
||||
// This is the happy path.
|
||||
if len(newRoots) == 0 {
|
||||
break
|
||||
return nil
|
||||
}
|
||||
// Choosing a new peer with the leftover set of
|
||||
// roots to request.
|
||||
|
||||
// There are still some roots that have not been seen.
// Choosing a new peer with the leftover set of roots to request.
|
||||
roots = newRoots
|
||||
pid = bestPeers[randGen.Int()%len(bestPeers)]
|
||||
|
||||
// Choose a new peer to query.
|
||||
randomIndex = randGen.Int() % len(bestPeers)
|
||||
pid = bestPeers[randomIndex]
|
||||
}
|
||||
|
||||
// Some roots are still missing after all allowed tries.
|
||||
// This is the unhappy path.
|
||||
log.WithFields(logrus.Fields{
|
||||
"roots": fmt.Sprintf("%#x", roots),
|
||||
"tries": numOfTries,
|
||||
}).Debug("Send batch root request: Some roots are still missing after all allowed tries")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -535,4 +628,4 @@ func dedupRoots(roots [][32]byte) [][32]byte {
|
||||
newRoots = append(newRoots, roots[i])
|
||||
}
|
||||
return newRoots
|
||||
}
|
||||
}
|
||||
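Only the tail of dedupRoots is visible in this diff. For context, a plausible shape of such a helper is sketched below: a set keyed by the 32-byte root, preserving first-seen order. This is a guess, not the actual Prysm implementation.

```go
package main

import "fmt"

// dedupRoots removes duplicate 32-byte roots while preserving order.
func dedupRoots(roots [][32]byte) [][32]byte {
	seen := make(map[[32]byte]bool, len(roots))
	newRoots := make([][32]byte, 0, len(roots))
	for i := range roots {
		if seen[roots[i]] {
			continue
		}
		seen[roots[i]] = true
		newRoots = append(newRoots, roots[i])
	}
	return newRoots
}

func main() {
	a, b := [32]byte{1}, [32]byte{2}
	fmt.Println(len(dedupRoots([][32]byte{a, b, a}))) // 2
}
```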
@@ -61,48 +61,49 @@ func TestRateLimiter_ExceedCapacity(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
|
||||
p1 := mockp2p.NewTestP2P(t)
|
||||
p2 := mockp2p.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
p1.Peers().Add(nil, p2.PeerID(), p2.BHost.Addrs()[0], network.DirOutbound)
|
||||
// TODO: Uncomment out of devnet
|
||||
// func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
|
||||
// p1 := mockp2p.NewTestP2P(t)
|
||||
// p2 := mockp2p.NewTestP2P(t)
|
||||
// p1.Connect(p2)
|
||||
// p1.Peers().Add(nil, p2.PeerID(), p2.BHost.Addrs()[0], network.DirOutbound)
|
||||
|
||||
rlimiter := newRateLimiter(p1)
|
||||
// rlimiter := newRateLimiter(p1)
|
||||
|
||||
// BlockByRange
|
||||
topic := p2p.RPCBlocksByRangeTopicV1 + p1.Encoding().ProtocolSuffix()
|
||||
// // BlockByRange
|
||||
// topic := p2p.RPCBlocksByRangeTopicV1 + p1.Encoding().ProtocolSuffix()
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
p2.BHost.SetStreamHandler(protocol.ID(topic), func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
|
||||
require.NoError(t, err, "could not read incoming stream")
|
||||
assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
|
||||
assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
|
||||
})
|
||||
wg.Add(1)
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), protocol.ID(topic))
|
||||
require.NoError(t, err, "could not create stream")
|
||||
// wg := sync.WaitGroup{}
|
||||
// p2.BHost.SetStreamHandler(protocol.ID(topic), func(stream network.Stream) {
|
||||
// defer wg.Done()
|
||||
// code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
|
||||
// require.NoError(t, err, "could not read incoming stream")
|
||||
// assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
|
||||
// assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
|
||||
// })
|
||||
// wg.Add(1)
|
||||
// stream, err := p1.BHost.NewStream(context.Background(), p2.PeerID(), protocol.ID(topic))
|
||||
// require.NoError(t, err, "could not create stream")
|
||||
|
||||
for i := 0; i < 2*defaultBurstLimit; i++ {
|
||||
err = rlimiter.validateRawRpcRequest(stream, 1)
|
||||
rlimiter.addRawStream(stream)
|
||||
require.NoError(t, err, "could not validate incoming request")
|
||||
}
|
||||
// Triggers rate limit error on burst.
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
|
||||
// for i := 0; i < 2*defaultBurstLimit; i++ {
|
||||
// err = rlimiter.validateRawRpcRequest(stream, 1)
|
||||
// rlimiter.addRawStream(stream)
|
||||
// require.NoError(t, err, "could not validate incoming request")
|
||||
// }
|
||||
// // Triggers rate limit error on burst.
|
||||
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
|
||||
|
||||
// Make Peer bad.
|
||||
for i := 0; i < defaultBurstLimit; i++ {
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
|
||||
}
|
||||
assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
require.NoError(t, stream.Close(), "could not close stream")
|
||||
// // Make Peer bad.
|
||||
// for i := 0; i < defaultBurstLimit; i++ {
|
||||
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
|
||||
// }
|
||||
// assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
|
||||
// require.NoError(t, stream.Close(), "could not close stream")
|
||||
|
||||
if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
}
|
||||
// if util.WaitTimeout(&wg, 1*time.Second) {
|
||||
// t.Fatal("Did not receive stream within 1 sec")
|
||||
// }
|
||||
// }
|
||||
|
||||
func Test_limiter_retrieveCollector_requiresLock(t *testing.T) {
|
||||
l := limiter{}
|
||||
|
||||
@@ -411,150 +411,151 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
d := db.SetupDB(t)
|
||||
saveBlocks := func(req *ethpb.BeaconBlocksByRangeRequest) {
|
||||
// Populate the database with blocks that would match the request.
|
||||
var parentRoot [32]byte
|
||||
// Default to 1 to be inline with the spec.
|
||||
req.Step = 1
|
||||
for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += primitives.Slot(req.Step) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = i
|
||||
if req.Step == 1 {
|
||||
block.Block.ParentRoot = parentRoot[:]
|
||||
}
|
||||
util.SaveBlock(t, t.Context(), d, block)
|
||||
rt, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
parentRoot = rt
|
||||
}
|
||||
}
|
||||
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
|
||||
req *ethpb.BeaconBlocksByRangeRequest, validateBlocks bool, success bool) error {
|
||||
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
reqAnswered := false
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer func() {
|
||||
reqAnswered = true
|
||||
}()
|
||||
if !validateBlocks {
|
||||
return
|
||||
}
|
||||
for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += primitives.Slot(req.Step) {
|
||||
if !success {
|
||||
continue
|
||||
}
|
||||
expectSuccess(t, stream)
|
||||
res := util.NewBeaconBlock()
|
||||
assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
|
||||
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
|
||||
}
|
||||
}
|
||||
})
|
||||
stream, err := p1.BHost.NewStream(t.Context(), p2.BHost.ID(), pcl)
|
||||
require.NoError(t, err)
|
||||
if err := r.beaconBlocksByRangeRPCHandler(t.Context(), req, stream); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
assert.Equal(t, reqAnswered, true)
|
||||
return nil
|
||||
}
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
// d := db.SetupDB(t)
|
||||
// saveBlocks := func(req *ethpb.BeaconBlocksByRangeRequest) {
|
||||
// // Populate the database with blocks that would match the request.
|
||||
// var parentRoot [32]byte
|
||||
// // Default to 1 to be inline with the spec.
|
||||
// req.Step = 1
|
||||
// for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += primitives.Slot(req.Step) {
|
||||
// block := util.NewBeaconBlock()
|
||||
// block.Block.Slot = i
|
||||
// if req.Step == 1 {
|
||||
// block.Block.ParentRoot = parentRoot[:]
|
||||
// }
|
||||
// util.SaveBlock(t, context.Background(), d, block)
|
||||
// rt, err := block.Block.HashTreeRoot()
|
||||
// require.NoError(t, err)
|
||||
// parentRoot = rt
|
||||
// }
|
||||
// }
|
||||
// sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
|
||||
// req *ethpb.BeaconBlocksByRangeRequest, validateBlocks bool, success bool) error {
|
||||
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
// reqAnswered := false
|
||||
// p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
// defer func() {
|
||||
// reqAnswered = true
|
||||
// }()
|
||||
// if !validateBlocks {
|
||||
// return
|
||||
// }
|
||||
// for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += primitives.Slot(req.Step) {
|
||||
// if !success {
|
||||
// continue
|
||||
// }
|
||||
// expectSuccess(t, stream)
|
||||
// res := util.NewBeaconBlock()
|
||||
// assert.NoError(t, r.cfg.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
// if res.Block.Slot.SubSlot(req.StartSlot).Mod(req.Step) != 0 {
|
||||
// t.Errorf("Received unexpected block slot %d", res.Block.Slot)
|
||||
// }
|
||||
// }
|
||||
// })
|
||||
// stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
// require.NoError(t, err)
|
||||
// if err := r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// time.Sleep(100 * time.Millisecond)
|
||||
// assert.Equal(t, reqAnswered, true)
|
||||
// return nil
|
||||
// }
|
||||
|
||||
t.Run("high request count param and no overflow", func(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
// t.Run("high request count param and no overflow", func(t *testing.T) {
|
||||
// p1 := p2ptest.NewTestP2P(t)
|
||||
// p2 := p2ptest.NewTestP2P(t)
|
||||
// p1.Connect(p2)
|
||||
// assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
|
||||
reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot()))
|
||||
r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
|
||||
// clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
|
||||
// reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot()))
|
||||
// r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}, clock: clock}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
|
||||
|
||||
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
topic := string(pcl)
|
||||
defaultBlockBurstFactor := 2 // TODO: can we update the default value set in TestMain to match flags?
|
||||
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit*defaultBlockBurstFactor), time.Second, false)
|
||||
req := ðpb.BeaconBlocksByRangeRequest{
|
||||
StartSlot: 100,
|
||||
Count: reqSize,
|
||||
}
|
||||
saveBlocks(req)
|
||||
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
// topic := string(pcl)
|
||||
// defaultBlockBurstFactor := 2 // TODO: can we update the default value set in TestMain to match flags?
|
||||
// r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit*defaultBlockBurstFactor), time.Second, false)
|
||||
// req := ðpb.BeaconBlocksByRangeRequest{
|
||||
// StartSlot: 100,
|
||||
// Count: reqSize,
|
||||
// }
|
||||
// saveBlocks(req)
|
||||
|
||||
// This doesn't error because reqSize by default is 128, which is exactly the burst factor * batch limit
|
||||
assert.NoError(t, sendRequest(p1, p2, r, req, true, true))
|
||||
// // This doesn't error because reqSize by default is 128, which is exactly the burst factor * batch limit
|
||||
// assert.NoError(t, sendRequest(p1, p2, r, req, true, true))
|
||||
|
||||
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
|
||||
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
})
|
||||
// remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
// expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
|
||||
// assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
// })
|
||||
|
||||
t.Run("high request count param and overflow", func(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
// t.Run("high request count param and overflow", func(t *testing.T) {
|
||||
// p1 := p2ptest.NewTestP2P(t)
|
||||
// p2 := p2ptest.NewTestP2P(t)
|
||||
// p1.Connect(p2)
|
||||
// assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
|
||||
reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot())) - 1
|
||||
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
|
||||
// clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
|
||||
// reqSize := params.MaxRequestBlock(slots.ToEpoch(clock.CurrentSlot())) - 1
|
||||
// r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
|
||||
|
||||
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
topic := string(pcl)
|
||||
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit), time.Second, false)
|
||||
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
// topic := string(pcl)
|
||||
// r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, int64(flags.Get().BlockBatchLimit), time.Second, false)
|
||||
|
||||
req := ðpb.BeaconBlocksByRangeRequest{
|
||||
StartSlot: 100,
|
||||
Count: reqSize,
|
||||
}
|
||||
saveBlocks(req)
|
||||
// req := ðpb.BeaconBlocksByRangeRequest{
|
||||
// StartSlot: 100,
|
||||
// Count: reqSize,
|
||||
// }
|
||||
// saveBlocks(req)
|
||||
|
||||
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
||||
err := sendRequest(p1, p2, r, req, false, true)
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
|
||||
}
|
||||
// for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
||||
// err := sendRequest(p1, p2, r, req, false, true)
|
||||
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
|
||||
// }
|
||||
|
||||
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(0) // Whole capacity is used.
|
||||
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
})
|
||||
// remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
// expectedCapacity := int64(0) // Whole capacity is used.
|
||||
// assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
// })
|
||||
|
||||
t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
// t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
|
||||
// p1 := p2ptest.NewTestP2P(t)
|
||||
// p2 := p2ptest.NewTestP2P(t)
|
||||
// p1.Connect(p2)
|
||||
// assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
|
||||
clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
|
||||
r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
|
||||
pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
topic := string(pcl)
|
||||
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false)
|
||||
// capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
|
||||
// clock := startup.NewClock(time.Unix(0, 0), [32]byte{})
|
||||
// r := &Service{cfg: &config{p2p: p1, beaconDB: d, clock: clock, chain: &chainMock.ChainService{}}, availableBlocker: mockBlocker{avail: true}, rateLimiter: newRateLimiter(p1)}
|
||||
// pcl := protocol.ID(p2p.RPCBlocksByRangeTopicV1)
|
||||
// topic := string(pcl)
|
||||
// r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(0.000001, capacity, time.Second, false)
|
||||
|
||||
req := ðpb.BeaconBlocksByRangeRequest{
|
||||
StartSlot: 100,
|
||||
Count: uint64(flags.Get().BlockBatchLimit),
|
||||
}
|
||||
saveBlocks(req)
|
||||
// req := ðpb.BeaconBlocksByRangeRequest{
|
||||
// StartSlot: 100,
|
||||
// Count: uint64(flags.Get().BlockBatchLimit),
|
||||
// }
|
||||
// saveBlocks(req)
|
||||
|
||||
for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
|
||||
assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
|
||||
}
|
||||
// for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
|
||||
// assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
|
||||
// }
|
||||
|
||||
// One more request should result in overflow.
|
||||
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
||||
err := sendRequest(p1, p2, r, req, false, false)
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
|
||||
}
|
||||
// // One more request should result in overflow.
|
||||
// for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
||||
// err := sendRequest(p1, p2, r, req, false, false)
|
||||
// assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
|
||||
// }
|
||||
|
||||
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(0) // Whole capacity is used.
|
||||
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
})
|
||||
}
|
||||
// remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
// expectedCapacity := int64(0) // Whole capacity is used.
|
||||
// assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
// })
|
||||
// }
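For intuition about the overflow sub-tests above, here is a toy, self-contained stand-in for the per-topic leakybucket collector. It ignores the refill rate (the tests set it to a negligible 0.000001) and only tracks remaining capacity per peer; the batch-limit and burst-factor values are illustrative, not the actual flag defaults.

package main

import "fmt"

// bucket is a toy stand-in for the collector used in the tests above:
// each peer starts with the full capacity and requests drain it.
type bucket struct {
	capacity  int64
	remaining map[string]int64
}

func newBucket(capacity int64) *bucket {
	return &bucket{capacity: capacity, remaining: map[string]int64{}}
}

// take returns false when a request of size n would overflow the peer's bucket,
// which is the point where the RPC handler responds with ErrRateLimited.
func (b *bucket) take(peer string, n int64) bool {
	if _, ok := b.remaining[peer]; !ok {
		b.remaining[peer] = b.capacity
	}
	if b.remaining[peer] < n {
		return false
	}
	b.remaining[peer] -= n
	return true
}

func main() {
	blockBatchLimit, burstFactor := int64(64), int64(2) // illustrative values
	b := newBucket(blockBatchLimit * burstFactor)

	// Two batch-sized requests use the whole capacity without overflowing...
	fmt.Println(b.take("peerA", blockBatchLimit), b.take("peerA", blockBatchLimit)) // true true
	// ...and the next request trips the limiter, mirroring the overflow sub-tests.
	fmt.Println(b.take("peerA", 1)) // false
}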
|
||||
|
||||
func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
slotsSinceGenesis := primitives.Slot(1000)
|
||||
|
||||
@@ -4,11 +4,13 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -20,15 +22,19 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// sendBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
|
||||
// those corresponding blocks from that peer.
|
||||
// sendBeaconBlocksRequest sends the `requests` beacon blocks by root requests to
// the peer with the given `id`. For each received block, it inserts the block into the
// pending queue. Then, for each received block, it checks if all corresponding sidecars
// are stored and, if not, sends the corresponding sidecar requests and stores the received sidecars.
// For sidecars, only blob sidecars will be requested from the peer with the given `id`.
// For other types of sidecars, the request will be sent to the best peers.
|
||||
func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||
defer cancel()
|
||||
|
||||
requestedRoots := make(map[[32]byte]struct{})
|
||||
requestedRoots := make(map[[fieldparams.RootLength]byte]bool)
|
||||
for _, root := range *requests {
|
||||
requestedRoots[root] = struct{}{}
|
||||
requestedRoots[root] = true
|
||||
}
|
||||
|
||||
blks, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, id, requests, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
@@ -36,39 +42,124 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, ok := requestedRoots[blkRoot]; !ok {
|
||||
|
||||
if ok := requestedRoots[blkRoot]; !ok {
|
||||
return fmt.Errorf("received unexpected block with root %x", blkRoot)
|
||||
}
|
||||
|
||||
s.pendingQueueLock.Lock()
|
||||
defer s.pendingQueueLock.Unlock()
|
||||
|
||||
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
|
||||
return err
|
||||
return errors.Wrapf(err, "insert block to pending queue for block with root %x", blkRoot)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// The following part deals with sidecars.
|
||||
postFuluBlocks := make([]blocks.ROBlock, 0, len(blks))
|
||||
for _, blk := range blks {
|
||||
// Skip blocks before deneb because they have no blob.
|
||||
if blk.Version() < version.Deneb {
|
||||
blockVersion := blk.Version()
|
||||
|
||||
if blockVersion >= version.Fulu {
|
||||
roBlock, err := blocks.NewROBlock(blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new ro block")
|
||||
}
|
||||
|
||||
postFuluBlocks = append(postFuluBlocks, roBlock)
|
||||
|
||||
continue
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(request) == 0 {
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
if err := s.requestAndSaveMissingBlobSidecars(blk, id); err != nil {
|
||||
return errors.Wrap(err, "request and save missing blob sidecars")
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, id, blk); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.requestAndSaveMissingDataColumnSidecars(postFuluBlocks); err != nil {
|
||||
return errors.Wrap(err, "request and save missing data columns")
|
||||
}
|
||||
|
||||
return err
|
||||
}
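The requestedRoots change in the handler above swaps a set-style map[[32]byte]struct{} for a bool-valued map keyed by fieldparams.RootLength-sized roots. Both forms support the same membership test; this standalone sketch (hypothetical values, not from the diff) shows the difference in lookup style.

package main

import "fmt"

func main() {
	type root = [32]byte
	r := root{0x01}

	// Set-style map: zero-size values, membership via the comma-ok form.
	asSet := map[root]struct{}{r: {}}
	_, ok := asSet[r]
	fmt.Println(ok) // true

	// Bool-valued map (the form the handler switches to): a missing key reads as false,
	// so the comma-ok form becomes optional.
	asBool := map[root]bool{r: true}
	fmt.Println(asBool[r])          // true
	fmt.Println(asBool[root{0x02}]) // false: not requested
}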
|
||||
|
||||
// requestAndSaveMissingDataColumnSidecars checks if data column sidecars are missing for the given blocks.
// If so, it requests them and saves them to the storage.
|
||||
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch custody group count from peer")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
info, _, err := peerdas.Info(s.cfg.p2p.NodeID(), samplingSize)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custody info")
|
||||
}
|
||||
|
||||
// Fetch missing data column sidecars.
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: s.ctx,
|
||||
Tor: s.cfg.clock,
|
||||
P2P: s.cfg.p2p,
|
||||
CtxMap: s.ctxMap,
|
||||
Storage: s.cfg.dataColumnStorage,
|
||||
NewVerifier: s.newColumnsVerifier,
|
||||
}
|
||||
|
||||
sidecarsByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch data column sidecars")
|
||||
}
|
||||
|
||||
// Save the sidecars to the storage.
|
||||
count := 0
|
||||
for _, sidecars := range sidecarsByRoot {
|
||||
count += len(sidecars)
|
||||
}
|
||||
|
||||
sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
|
||||
for _, sidecars := range sidecarsByRoot {
|
||||
sidecarsToSave = append(sidecarsToSave, sidecars...)
|
||||
}
|
||||
|
||||
if err := s.cfg.dataColumnStorage.Save(sidecarsToSave); err != nil {
|
||||
return errors.Wrap(err, "save")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) requestAndSaveMissingBlobSidecars(block interfaces.ReadOnlySignedBeaconBlock, peerID peer.ID) error {
|
||||
blockRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "hash tree root")
|
||||
}
|
||||
|
||||
request, err := s.pendingBlobsRequestForBlock(blockRoot, block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "pending blobs request for block")
|
||||
}
|
||||
|
||||
if len(request) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.sendAndSaveBlobSidecars(s.ctx, request, peerID, block); err != nil {
|
||||
return errors.Wrap(err, "send and save blob sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// beaconBlocksRootRPCHandler looks up the request blocks from the database from the given block roots.
|
||||
func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
@@ -207,4 +298,4 @@ func requestsForMissingIndices(stored filesystem.BlobStorageSummary, commitments
|
||||
}
|
||||
}
|
||||
return ids
|
||||
}
|
||||
}
|
||||
@@ -216,4 +216,4 @@ func validateBlobsByRange(r *pb.BlobSidecarsByRangeRequest, current primitives.S
|
||||
}
|
||||
|
||||
return rp, nil
|
||||
}
|
||||
}
|
||||
@@ -44,7 +44,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
|
||||
return err
|
||||
}
|
||||
// Sort the identifiers so that requests for the same blob root will be adjacent, minimizing db lookups.
|
||||
sort.Sort(blobIdents)
|
||||
sort.Sort(&blobIdents)
|
||||
|
||||
batchSize := flags.Get().BlobBatchLimit
|
||||
var ticker *time.Ticker
|
||||
|
||||
@@ -190,7 +190,7 @@ func TestBlobsByRootValidation(t *testing.T) {
|
||||
}()
|
||||
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
require.NoError(t, err)
|
||||
dmc, clock := defaultMockChain(t)
|
||||
dmc, clock := defaultMockChain(t, 0)
|
||||
dmc.Slot = &capellaSlot
|
||||
dmc.FinalizedCheckPoint = ðpb.Checkpoint{Epoch: params.BeaconConfig().CapellaForkEpoch}
|
||||
cases := []*blobsTestCase{
|
||||
|
||||
@@ -36,12 +36,12 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Check if the message type is the one expected.
|
||||
ref, ok := msg.(*types.DataColumnsByRootIdentifiers)
|
||||
ref, ok := msg.(types.DataColumnsByRootIdentifiers)
|
||||
if !ok {
|
||||
return notDataColumnsByRootIdentifiersError
|
||||
}
|
||||
|
||||
requestedColumnIdents := *ref
|
||||
requestedColumnIdents := ref
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
|
||||
@@ -68,7 +68,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
|
||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := &types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
|
||||
msg := types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
|
||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||
|
||||
err = service.dataColumnSidecarByRootRPCHandler(t.Context(), msg, stream)
|
||||
@@ -169,7 +169,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
|
||||
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := &types.DataColumnsByRootIdentifiers{
|
||||
msg := types.DataColumnsByRootIdentifiers{
|
||||
{
|
||||
BlockRoot: root0[:],
|
||||
Columns: []uint64{1, 2, 3},
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
goPeer "github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -404,11 +405,8 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
|
||||
// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range
|
||||
// and returns the fetched data column sidecars.
|
||||
func SendDataColumnSidecarsByRangeRequest(
|
||||
ctx context.Context,
|
||||
tor blockchain.TemporalOracle,
|
||||
p2pApi p2p.P2P,
|
||||
p DataColumnSidecarsParams,
|
||||
pid peer.ID,
|
||||
ctxMap ContextByteVersions,
|
||||
request *ethpb.DataColumnSidecarsByRangeRequest,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
// Return early if nothing to request.
|
||||
@@ -428,7 +426,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
||||
}
|
||||
|
||||
// Build the topic.
|
||||
currentSlot := tor.CurrentSlot()
|
||||
currentSlot := p.Tor.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, currentEpoch)
|
||||
if err != nil {
|
||||
@@ -453,7 +451,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
||||
})
|
||||
|
||||
// Send the request.
|
||||
stream, err := p2pApi.Send(ctx, request, topic, pid)
|
||||
stream, err := p.P2P.Send(p.Ctx, request, topic, pid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "p2p send")
|
||||
}
|
||||
@@ -463,7 +461,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
||||
roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
|
||||
for range totalCount {
|
||||
// Avoid reading extra chunks if the context is done.
|
||||
if err := ctx.Err(); err != nil {
|
||||
if err := p.Ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -473,7 +471,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
||||
}
|
||||
|
||||
roDataColumn, err := readChunkedDataColumnSidecar(
|
||||
stream, p2pApi, ctxMap,
|
||||
stream, p.P2P, p.CtxMap,
|
||||
validatorSlotWithinBounds,
|
||||
isSidecarIndexRequested(request),
|
||||
)
|
||||
@@ -492,7 +490,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
||||
}
|
||||
|
||||
// All requested sidecars were delivered by the peer. Expecting EOF.
|
||||
if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
|
||||
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
|
||||
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
|
||||
}
|
||||
|
||||
@@ -539,22 +537,10 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
|
||||
|
||||
// SendDataColumnSidecarsByRootRequest sends a request for data column sidecars by root
|
||||
// and returns the fetched data column sidecars.
|
||||
func SendDataColumnSidecarsByRootRequest(
|
||||
ctx context.Context,
|
||||
tor blockchain.TemporalOracle,
|
||||
p2pApi p2p.P2P,
|
||||
pid peer.ID,
|
||||
ctxMap ContextByteVersions,
|
||||
request p2ptypes.DataColumnsByRootIdentifiers,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
// Return early if the request is nil.
|
||||
if request == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer.ID, identifiers p2ptypes.DataColumnsByRootIdentifiers) ([]blocks.RODataColumn, error) {
|
||||
// Compute how many sidecars are requested.
|
||||
count := uint64(0)
|
||||
for _, identifier := range request {
|
||||
for _, identifier := range identifiers {
|
||||
count += uint64(len(identifier.Columns))
|
||||
}
|
||||
|
||||
@@ -570,13 +556,15 @@ func SendDataColumnSidecarsByRootRequest(
|
||||
}
|
||||
|
||||
// Get the topic for the request.
|
||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
|
||||
currentSlot := p.Tor.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "topic from message")
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
stream, err := p2pApi.Send(ctx, request, topic, pid)
|
||||
stream, err := p.P2P.Send(p.Ctx, identifiers, topic, peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "p2p api send")
|
||||
}
|
||||
@@ -587,7 +575,7 @@ func SendDataColumnSidecarsByRootRequest(
|
||||
|
||||
// Read the data column sidecars from the stream.
|
||||
for range count {
|
||||
roDataColumn, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap, isSidecarIndexRootRequested(request))
|
||||
roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, isSidecarIndexRootRequested(identifiers))
|
||||
if errors.Is(err, io.EOF) {
|
||||
return roDataColumns, nil
|
||||
}
|
||||
@@ -603,7 +591,7 @@ func SendDataColumnSidecarsByRootRequest(
|
||||
}
|
||||
|
||||
// All requested sidecars were delivered by the peer. Expecting EOF.
|
||||
if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
|
||||
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
|
||||
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
|
||||
}
|
||||
|
||||
@@ -629,11 +617,11 @@ func isSidecarIndexRootRequested(request p2ptypes.DataColumnsByRootIdentifiers)
|
||||
indices, ok := columnsIndexFromRoot[root]
|
||||
|
||||
if !ok {
|
||||
return errors.Errorf("root #%x returned by peer but not requested", root)
|
||||
return errors.Errorf("root %#x returned by peer but not requested", root)
|
||||
}
|
||||
|
||||
if !indices[index] {
|
||||
return errors.Errorf("index %d for root #%x returned by peer but not requested", index, root)
|
||||
return errors.Errorf("index %d for root %#x returned by peer but not requested", index, root)
|
||||
}
|
||||
|
||||
return nil
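The error-message fix above moves the '#' inside the verb: "#%x" prints a literal hash followed by bare hex digits, while "%#x" is fmt's alternate form with a 0x prefix. A tiny sketch for reference:

package main

import "fmt"

func main() {
	root := []byte{0xde, 0xad, 0xbe, 0xef}

	// "#%x" prints a literal '#' followed by bare hex digits.
	fmt.Printf("#%x\n", root) // #deadbeef
	// "%#x" uses fmt's alternate form, adding the 0x prefix,
	// which is what the corrected error messages rely on.
	fmt.Printf("%#x\n", root) // 0xdeadbeef
}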
|
||||
|
||||
@@ -915,7 +915,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
|
||||
|
||||
for _, tc := range nilTestCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
|
||||
actual, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", tc.request)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
@@ -928,7 +928,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
|
||||
params.OverrideBeaconConfig(beaconConfig)
|
||||
|
||||
request := ðpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
|
||||
_, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
|
||||
_, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
|
||||
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
|
||||
})
|
||||
|
||||
@@ -1040,7 +1040,14 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, requestSent)
|
||||
parameters := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p1,
|
||||
CtxMap: ctxMap,
|
||||
}
|
||||
|
||||
actual, err := SendDataColumnSidecarsByRangeRequest(parameters, p2.PeerID(), requestSent)
|
||||
if tc.expectedError != nil {
|
||||
require.ErrorContains(t, tc.expectedError.Error(), err)
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
@@ -1208,7 +1215,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
|
||||
|
||||
for _, tc := range nilTestCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
|
||||
actual, err := SendDataColumnSidecarsByRootRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", tc.request)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
@@ -1225,7 +1232,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
|
||||
{Columns: []uint64{4, 5, 6}},
|
||||
}
|
||||
|
||||
_, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
|
||||
_, err := SendDataColumnSidecarsByRootRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
|
||||
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
|
||||
})
|
||||
|
||||
@@ -1346,7 +1353,13 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, sentRequest)
|
||||
parameters := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p1,
|
||||
CtxMap: ctxMap,
|
||||
}
|
||||
actual, err := SendDataColumnSidecarsByRootRequest(parameters, p2.PeerID(), sentRequest)
|
||||
if tc.expectedError != nil {
|
||||
require.ErrorContains(t, tc.expectedError.Error(), err)
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
|
||||
@@ -38,7 +38,10 @@ func (s *Service) maintainPeerStatuses() {
|
||||
go func(id peer.ID) {
|
||||
defer wg.Done()
|
||||
|
||||
log := log.WithField("peer", id)
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peer": id,
|
||||
"agent": agentString(id, s.cfg.p2p.Host()),
|
||||
})
|
||||
|
||||
// If our peer status has not been updated correctly we disconnect over here
|
||||
// and set the connection state over here instead.
|
||||
|
||||
@@ -123,6 +123,7 @@ type blockchainService interface {
|
||||
blockchain.OptimisticModeFetcher
|
||||
blockchain.SlashingReceiver
|
||||
blockchain.ForkchoiceFetcher
|
||||
blockchain.DataAvailabilityChecker
|
||||
}
|
||||
|
||||
// Service is responsible for handling all run time p2p related operations as the
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -230,6 +231,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
handle: s.dataColumnSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.dataColumnSubnetIndices,
|
||||
// TODO: Should we find peers always? When validators are managed? When validators are managed AND when we are going to propose a block?
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -798,3 +800,17 @@ func errorIsIgnored(err error) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// sliceFromMap returns the keys of a map, sorted in ascending order when the optional flag is true.
|
||||
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
if len(sorted) > 0 && sorted[0] {
|
||||
slices.Sort(result)
|
||||
}
|
||||
|
||||
return result
|
||||
}
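A minimal usage sketch of the sliceFromMap helper above, with the function copied verbatim so the snippet runs on its own (the custody map contents are made up for illustration):

package main

import (
	"fmt"
	"slices"
)

// sliceFromMap mirrors the helper added in the diff above: it returns the keys
// of the map, optionally sorted when the variadic flag is true.
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
	result := make([]uint64, 0, len(m))
	for k := range m {
		result = append(result, k)
	}

	if len(sorted) > 0 && sorted[0] {
		slices.Sort(result)
	}

	return result
}

func main() {
	custody := map[uint64]bool{7: true, 2: true, 42: true}
	fmt.Println(sliceFromMap(custody, true)) // [2 7 42]
}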
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/io/file"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
@@ -108,6 +109,18 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, ro
|
||||
log.Warning("Data column storage is not enabled, skip saving data column, but continue to reconstruct and broadcast data column")
|
||||
}
|
||||
|
||||
// Check if data is already available to avoid unnecessary execution client calls
|
||||
switch err := s.cfg.chain.IsDataAvailable(ctx, blockRoot, roSignedBlock); {
|
||||
case err == nil:
|
||||
log.Debug("Data already available – skipping execution-client call")
|
||||
return
|
||||
case errors.Is(err, blockchain.ErrDataNotAvailable):
|
||||
// continue
|
||||
default:
|
||||
log.WithError(err).Error("Failed to check data availability")
|
||||
return
|
||||
}
|
||||
|
||||
// This function is called at block-receipt time, so in almost all situations the data columns must come from the EL rather than from blob storage.
|
||||
sidecars, err := s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, roSignedBlock, blockRoot)
|
||||
if err != nil {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
lruwrpr "github.com/OffchainLabs/prysm/v6/cache/lru"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -214,8 +215,11 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
chainService := &chainMock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
// Create a chain service that returns ErrDataNotAvailable to trigger execution service calls
|
||||
chainService := &ChainServiceDataNotAvailable{
|
||||
ChainService: &chainMock.ChainService{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
allColumns := make([]blocks.VerifiedRODataColumn, 128)
|
||||
@@ -295,3 +299,193 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// TestProcessDataColumnSidecarsFromExecution_DataAvailabilityCheck tests the data availability optimization
|
||||
func TestProcessDataColumnSidecarsFromExecution_DataAvailabilityCheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a test block with KZG commitments
|
||||
block := util.NewBeaconBlockDeneb()
|
||||
block.Block.Slot = 100
|
||||
commitment := [48]byte{1, 2, 3}
|
||||
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("skips execution call when data is available", func(t *testing.T) {
|
||||
mockChain := &MockChainServiceTrackingCalls{
|
||||
ChainService: &chainMock.ChainService{},
|
||||
dataAvailable: true, // Data is available
|
||||
availabilityError: nil,
|
||||
isDataAvailableCalled: false,
|
||||
}
|
||||
|
||||
mockExecutionClient := &MockExecutionClientTrackingCalls{
|
||||
EngineClient: &mockExecution.EngineClient{},
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
executionReconstructor: mockExecutionClient,
|
||||
},
|
||||
}
|
||||
|
||||
// This should call IsDataAvailable and return early without calling execution client
|
||||
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
|
||||
|
||||
// Verify the expected call pattern
|
||||
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
|
||||
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when data is available")
|
||||
})
|
||||
|
||||
t.Run("returns early when IsDataAvailable returns error", func(t *testing.T) {
|
||||
mockChain := &MockChainServiceTrackingCalls{
|
||||
ChainService: &chainMock.ChainService{},
|
||||
dataAvailable: false, // This should be ignored due to error
|
||||
availabilityError: errors.New("test error from IsDataAvailable"),
|
||||
isDataAvailableCalled: false,
|
||||
}
|
||||
|
||||
mockExecutionClient := &MockExecutionClientTrackingCalls{
|
||||
EngineClient: &mockExecution.EngineClient{},
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
executionReconstructor: mockExecutionClient,
|
||||
},
|
||||
}
|
||||
|
||||
// This should call IsDataAvailable, get an error, and return early without calling execution client
|
||||
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
|
||||
|
||||
// Verify the expected call pattern
|
||||
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
|
||||
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when IsDataAvailable returns error")
|
||||
})
|
||||
|
||||
t.Run("calls execution client when data not available", func(t *testing.T) {
|
||||
mockChain := &MockChainServiceTrackingCalls{
|
||||
ChainService: &chainMock.ChainService{},
|
||||
dataAvailable: false, // Data not available
|
||||
availabilityError: nil,
|
||||
isDataAvailableCalled: false,
|
||||
}
|
||||
|
||||
mockExecutionClient := &MockExecutionClientTrackingCalls{
|
||||
EngineClient: &mockExecution.EngineClient{
|
||||
DataColumnSidecars: []blocks.VerifiedRODataColumn{}, // Empty response is fine for this test
|
||||
},
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
executionReconstructor: mockExecutionClient,
|
||||
},
|
||||
}
|
||||
|
||||
// This should call IsDataAvailable, get false, and proceed to call execution client
|
||||
s.processDataColumnSidecarsFromExecution(ctx, signedBlock)
|
||||
|
||||
// Verify the expected call pattern
|
||||
assert.Equal(t, true, mockChain.isDataAvailableCalled, "Expected IsDataAvailable to be called")
|
||||
assert.Equal(t, true, mockExecutionClient.reconstructCalled, "Expected execution client to be called when data is not available")
|
||||
})
|
||||
|
||||
t.Run("returns early when block has no KZG commitments", func(t *testing.T) {
|
||||
// Create a block without KZG commitments
|
||||
blockNoCommitments := util.NewBeaconBlockDeneb()
|
||||
blockNoCommitments.Block.Slot = 100
|
||||
blockNoCommitments.Block.Body.BlobKzgCommitments = [][]byte{} // No commitments
|
||||
|
||||
signedBlockNoCommitments, err := blocks.NewSignedBeaconBlock(blockNoCommitments)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChain := &MockChainServiceTrackingCalls{
|
||||
ChainService: &chainMock.ChainService{},
|
||||
dataAvailable: false,
|
||||
availabilityError: nil,
|
||||
isDataAvailableCalled: false,
|
||||
}
|
||||
|
||||
mockExecutionClient := &MockExecutionClientTrackingCalls{
|
||||
EngineClient: &mockExecution.EngineClient{},
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
executionReconstructor: mockExecutionClient,
|
||||
},
|
||||
}
|
||||
|
||||
// This should return early before checking data availability or calling execution client
|
||||
s.processDataColumnSidecarsFromExecution(ctx, signedBlockNoCommitments)
|
||||
|
||||
// Verify neither method was called since there are no commitments
|
||||
assert.Equal(t, false, mockChain.isDataAvailableCalled, "Expected IsDataAvailable NOT to be called when no KZG commitments")
|
||||
assert.Equal(t, false, mockExecutionClient.reconstructCalled, "Expected execution client NOT to be called when no KZG commitments")
|
||||
})
|
||||
}
|
||||
|
||||
// MockChainServiceTrackingCalls tracks calls to IsDataAvailable for testing
|
||||
type MockChainServiceTrackingCalls struct {
|
||||
isDataAvailableCalled bool
|
||||
dataAvailable bool
|
||||
*chainMock.ChainService
|
||||
availabilityError error
|
||||
}
|
||||
|
||||
func (m *MockChainServiceTrackingCalls) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
m.isDataAvailableCalled = true
|
||||
if m.availabilityError != nil {
|
||||
return m.availabilityError
|
||||
}
|
||||
if !m.dataAvailable {
|
||||
return blockchain.ErrDataNotAvailable
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MockExecutionClientTrackingCalls tracks calls to ReconstructDataColumnSidecars for testing
|
||||
type MockExecutionClientTrackingCalls struct {
|
||||
*mockExecution.EngineClient
|
||||
reconstructCalled bool
|
||||
}
|
||||
|
||||
func (m *MockExecutionClientTrackingCalls) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
m.reconstructCalled = true
|
||||
return m.EngineClient.DataColumnSidecars, m.EngineClient.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
func (m *MockExecutionClientTrackingCalls) ReconstructFullBlock(ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
|
||||
return m.EngineClient.ReconstructFullBlock(ctx, blindedBlock)
|
||||
}
|
||||
|
||||
func (m *MockExecutionClientTrackingCalls) ReconstructFullBellatrixBlockBatch(ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
|
||||
return m.EngineClient.ReconstructFullBellatrixBlockBatch(ctx, blindedBlocks)
|
||||
}
|
||||
|
||||
func (m *MockExecutionClientTrackingCalls) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hasIndex func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
|
||||
return m.EngineClient.ReconstructBlobSidecars(ctx, block, blockRoot, hasIndex)
|
||||
}
|
||||
|
||||
// ChainServiceDataNotAvailable wraps ChainService and overrides IsDataAvailable to return ErrDataNotAvailable
|
||||
type ChainServiceDataNotAvailable struct {
|
||||
*chainMock.ChainService
|
||||
}
|
||||
|
||||
func (c *ChainServiceDataNotAvailable) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
return blockchain.ErrDataNotAvailable
|
||||
}
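ChainServiceDataNotAvailable above uses Go's struct embedding to reuse the mock chain service while overriding a single method. The sketch below shows the same pattern in isolation, with deliberately simplified method signatures (the real IsDataAvailable also takes a context, a block root, and a signed block):

package main

import (
	"errors"
	"fmt"
)

// base stands in for the embedded chainMock.ChainService: it provides default behavior.
type base struct{}

func (base) IsDataAvailable() error { return nil }

// notAvailable mirrors ChainServiceDataNotAvailable above: it embeds the base mock
// and overrides a single method while inheriting everything else.
type notAvailable struct {
	base
}

func (notAvailable) IsDataAvailable() error { return errors.New("data not available") }

func main() {
	var c interface{ IsDataAvailable() error } = notAvailable{}
	fmt.Println(c.IsDataAvailable()) // data not available
}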
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -29,6 +30,11 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
|
||||
return errors.Wrap(err, "reconstruct/save/broadcast data column sidecars")
|
||||
}
|
||||
|
||||
// Trigger getBlobsV2 when receiving data column sidecar
|
||||
if err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "failed to trigger getBlobsV2 for data column sidecar")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -52,3 +58,55 @@ func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.V
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// triggerGetBlobsV2ForDataColumnSidecar triggers getBlobsV2 retry when receiving a data column sidecar.
|
||||
// This function attempts to fetch the block and trigger the execution service's retry mechanism.
|
||||
func (s *Service) triggerGetBlobsV2ForDataColumnSidecar(ctx context.Context, blockRoot [32]byte) error {
|
||||
// Get the specific block by root from database
|
||||
signedBlock, err := s.cfg.beaconDB.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not fetch block from database for getBlobsV2 retry trigger")
|
||||
return nil
|
||||
}
|
||||
if signedBlock == nil || signedBlock.IsNil() {
|
||||
log.Debug("Block not found in database for getBlobsV2 retry trigger")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if this block has blob commitments that would need getBlobsV2
|
||||
blockBody := signedBlock.Block().Body()
|
||||
commitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(commitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if data is already available
|
||||
switch err := s.cfg.chain.IsDataAvailable(ctx, blockRoot, signedBlock); {
|
||||
case err == nil:
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Data already available, skipping getBlobsV2 retry")
|
||||
return nil
|
||||
|
||||
case errors.Is(err, blockchain.ErrDataNotAvailable):
|
||||
// fall through and trigger getBlobsV2.
|
||||
default:
|
||||
return errors.Wrap(err, "Error checking data availability during getBlobsV2 trigger")
|
||||
}
|
||||
|
||||
// Trigger the retry by calling the execution service's reconstruct method
|
||||
// ReconstructDataColumnSidecars handles concurrent calls internally
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Triggering getBlobsV2 retry for data column sidecar")
|
||||
|
||||
if s.cfg.executionReconstructor == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, signedBlock, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getBlobsV2 retry triggered by data column sidecar failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
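triggerGetBlobsV2ForDataColumnSidecar and processDataColumnSidecarsFromExecution both branch on IsDataAvailable with the same switch shape: nil means skip, the ErrDataNotAvailable sentinel means reconstruct, and anything else is a real failure. Below is a standalone sketch of that pattern, using a stand-in sentinel rather than the blockchain package's:

package main

import (
	"errors"
	"fmt"
)

// errDataNotAvailable is a stand-in for blockchain.ErrDataNotAvailable.
var errDataNotAvailable = errors.New("data not available")

func checkAvailability(available bool) error {
	if available {
		return nil
	}
	// Wrapping with %w keeps the sentinel recoverable through errors.Is.
	return fmt.Errorf("availability check: %w", errDataNotAvailable)
}

func main() {
	switch err := checkAvailability(false); {
	case err == nil:
		fmt.Println("data already available, skip the getBlobsV2 retry")
	case errors.Is(err, errDataNotAvailable):
		fmt.Println("data missing, trigger reconstruction")
	default:
		fmt.Println("unexpected error:", err)
	}
}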
|
||||
|
||||
beacon-chain/sync/subscriber_data_column_sidecar_trigger_test.go (new file, 326 lines)
@@ -0,0 +1,326 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
blockchaintesting "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
dbtesting "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// TestDataColumnSubscriber_InvalidMessage tests error handling for invalid messages
|
||||
func TestDataColumnSubscriber_InvalidMessage(t *testing.T) {
|
||||
s := &Service{}
|
||||
|
||||
// Test with invalid message type (use a proto message that's not VerifiedRODataColumn)
|
||||
invalidMsg := ðpb.SignedBeaconBlock{}
|
||||
err := s.dataColumnSubscriber(context.Background(), invalidMsg)
|
||||
require.ErrorContains(t, "message was not type blocks.VerifiedRODataColumn", err)
|
||||
}
|
||||
|
||||
// TestTriggerGetBlobsV2ForDataColumnSidecar_BlockAvailability tests block availability checking
|
||||
func TestTriggerGetBlobsV2ForDataColumnSidecar_BlockAvailability(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
blockRoot := [32]byte{1, 2, 3}
|
||||
|
||||
// Test when block is not available
|
||||
t.Run("block not available", func(t *testing.T) {
|
||||
mockChain := &blockchaintesting.ChainService{}
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
beaconDB: db,
|
||||
},
|
||||
}
|
||||
|
||||
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
// Test when HasBlock returns true but block is not in database
|
||||
t.Run("HasBlock true but not in database", func(t *testing.T) {
|
||||
mockChain := &blockchaintesting.ChainService{}
|
||||
// Mock HasBlock to return true
|
||||
mockChain.CanonicalRoots = map[[32]byte]bool{blockRoot: true}
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
beaconDB: db,
|
||||
},
|
||||
}
|
||||
|
||||
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// TestTriggerGetBlobsV2ForDataColumnSidecar_WithValidBlock tests with a valid block
|
||||
func TestTriggerGetBlobsV2ForDataColumnSidecar_WithValidBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a test block with KZG commitments
|
||||
slot := primitives.Slot(100)
|
||||
block := util.NewBeaconBlockDeneb()
|
||||
block.Block.Slot = slot
|
||||
|
||||
// Add KZG commitments to trigger getBlobsV2 retry logic
|
||||
commitment := [48]byte{1, 2, 3}
|
||||
block.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot, err := signedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("block with KZG commitments triggers retry", func(t *testing.T) {
|
||||
// Mock execution reconstructor to track calls
|
||||
mockReconstructor := &MockExecutionReconstructor{
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
// Save block to database
|
||||
require.NoError(t, db.SaveBlock(ctx, signedBlock))
|
||||
|
||||
// Mock chain service that reports data is NOT available (to trigger execution service)
|
||||
mockChain := &MockChainServiceWithAvailability{
|
||||
ChainService: &blockchaintesting.ChainService{DB: db},
|
||||
dataAvailable: false, // Data not available, should trigger execution service
|
||||
availabilityError: nil,
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: &config{
|
||||
chain: mockChain,
|
||||
beaconDB: db,
|
||||
executionReconstructor: mockReconstructor,
|
||||
},
|
||||
}
|
||||
|
||||
err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait a bit for the goroutine to execute
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Verify that the execution reconstructor was called
|
||||
if !mockReconstructor.reconstructCalled {
|
||||
t.Errorf("Expected ReconstructDataColumnSidecars to be called")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("does not start retry if data already available", func(t *testing.T) {
|
||||
// Mock execution reconstructor to track calls
|
||||
mockReconstructor := &MockExecutionReconstructor{
|
||||
reconstructCalled: false,
|
||||
}
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
// Save block to database
|
||||
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that reports data is already available
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     true,
            availabilityError: nil,
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err)

        // Wait a bit to ensure no goroutine was started
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was NOT called since data is already available
        if mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called when data is already available")
        }
    })

    t.Run("calls execution service when data not available", func(t *testing.T) {
        // Mock execution reconstructor to track calls
        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save block to database
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that returns ErrDataNotAvailable
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     false,                          // Data not available
            availabilityError: blockchain.ErrDataNotAvailable, // Should trigger execution service call
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.NoError(t, err) // Function should succeed and call execution service

        // Wait a bit for the goroutine to execute
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was called
        if !mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars to be called when data is not available")
        }
    })

    t.Run("returns error when availability check returns error", func(t *testing.T) {
        // Mock execution reconstructor to track calls
        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save block to database
        require.NoError(t, db.SaveBlock(ctx, signedBlock))

        // Mock chain service that returns an error for availability check
        mockChain := &MockChainServiceWithAvailability{
            ChainService:      &blockchaintesting.ChainService{DB: db},
            dataAvailable:     false,                                  // This should be ignored due to error
            availabilityError: errors.New("availability check error"), // Error should cause function to return error
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err := s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRoot)
        require.ErrorContains(t, "availability check error", err) // Function should return the availability check error

        // Verify that the execution reconstructor was NOT called since function returned early with error
        if mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called when availability check returns error")
        }
    })

    t.Run("block without KZG commitments does not trigger retry", func(t *testing.T) {
        // Create block without KZG commitments
        blockNoCommitments := util.NewBeaconBlockDeneb()
        blockNoCommitments.Block.Slot = slot
        blockNoCommitments.Block.Body.BlobKzgCommitments = [][]byte{} // No commitments

        signedBlockNoCommitments, err := blocks.NewSignedBeaconBlock(blockNoCommitments)
        require.NoError(t, err)

        blockRootNoCommitments, err := signedBlockNoCommitments.Block().HashTreeRoot()
        require.NoError(t, err)

        mockReconstructor := &MockExecutionReconstructor{
            reconstructCalled: false,
        }

        db := dbtesting.SetupDB(t)

        // Save block to database
        require.NoError(t, db.SaveBlock(ctx, signedBlockNoCommitments))

        mockChain := &blockchaintesting.ChainService{
            DB: db, // Set the DB so HasBlock can find the block
        }

        s := &Service{
            cfg: &config{
                chain:                  mockChain,
                beaconDB:               db,
                executionReconstructor: mockReconstructor,
            },
        }

        err = s.triggerGetBlobsV2ForDataColumnSidecar(ctx, blockRootNoCommitments)
        require.NoError(t, err)

        // Wait a bit to ensure no goroutine was started
        time.Sleep(10 * time.Millisecond)

        // Verify that the execution reconstructor was NOT called
        if mockReconstructor.reconstructCalled {
            t.Errorf("Expected ReconstructDataColumnSidecars NOT to be called for block without commitments")
        }
    })
}

// MockExecutionReconstructor is a mock implementation for testing
type MockExecutionReconstructor struct {
    reconstructCalled bool
    reconstructError  error
    reconstructResult []blocks.VerifiedRODataColumn
}

func (m *MockExecutionReconstructor) ReconstructFullBlock(ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock) (interfaces.SignedBeaconBlock, error) {
    return nil, nil
}

func (m *MockExecutionReconstructor) ReconstructFullBellatrixBlockBatch(ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
    return nil, nil
}

func (m *MockExecutionReconstructor) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
    return nil, nil
}

func (m *MockExecutionReconstructor) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
    m.reconstructCalled = true
    return m.reconstructResult, m.reconstructError
}

// MockChainServiceWithAvailability wraps the testing ChainService to allow configuring IsDataAvailable
type MockChainServiceWithAvailability struct {
    *blockchaintesting.ChainService
    dataAvailable     bool
    availabilityError error
}

// IsDataAvailable overrides the default implementation to return configurable values for testing
func (m *MockChainServiceWithAvailability) IsDataAvailable(ctx context.Context, blockRoot [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
    if m.availabilityError != nil {
        return m.availabilityError
    }
    if !m.dataAvailable {
        return blockchain.ErrDataNotAvailable
    }
    return nil
}
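Taken together, the availability sub-tests above pin down how the retry trigger is expected to branch on IsDataAvailable. The snippet below is a minimal sketch of that decision flow written against the mock types just defined; it is an illustration inferred from the tests, not Prysm's actual triggerGetBlobsV2ForDataColumnSidecar implementation, and triggerSketch is a hypothetical name.

// triggerSketch mirrors the behaviour the sub-tests assert (sketch only, hypothetical helper).
func triggerSketch(ctx context.Context, chain *MockChainServiceWithAvailability, rec *MockExecutionReconstructor, block interfaces.ReadOnlySignedBeaconBlock, root [fieldparams.RootLength]byte) error {
    switch err := chain.IsDataAvailable(ctx, root, block); {
    case err == nil:
        // Data is already available: the execution reconstructor must not be called.
        return nil
    case errors.Is(err, blockchain.ErrDataNotAvailable):
        // Data is missing: reconstruction runs asynchronously, which is why the tests sleep briefly.
        go func() { _, _ = rec.ReconstructDataColumnSidecars(ctx, block, root) }()
        return nil
    default:
        // Any other availability error is surfaced to the caller unchanged.
        return err
    }
}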
@@ -10,6 +10,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    "github.com/OffchainLabs/prysm/v6/config/features"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -48,7 +49,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
        return pubsub.ValidationReject, p2p.ErrInvalidTopic
    }

    // Decode the message, reject if it fails.
    // Decode the message.
    m, err := s.decodePubsubMessage(msg)
    if err != nil {
        log.WithError(err).Error("Failed to decode message")
@@ -68,6 +69,20 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
        return pubsub.ValidationReject, errors.Wrap(err, "roDataColumn conversion failure")
    }

    // Voluntary ignore messages (for debugging purposes).
    dataColumnsIgnoreSlotMultiple := features.Get().DataColumnsIgnoreSlotMultiple
    blockSlot := uint64(roDataColumn.SignedBlockHeader.Header.Slot)

    if dataColumnsIgnoreSlotMultiple != 0 && blockSlot%dataColumnsIgnoreSlotMultiple == 0 {
        log.WithFields(logrus.Fields{
            "slot":        blockSlot,
            "columnIndex": roDataColumn.Index,
            "blockRoot":   fmt.Sprintf("%#x", roDataColumn.BlockRoot()),
        }).Warning("Voluntary ignore data column sidecar gossip")

        return pubsub.ValidationIgnore, err
    }

    // Compute a batch of only one data column sidecar.
    roDataColumns := []blocks.RODataColumn{roDataColumn}
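The voluntary-ignore block added above is gated by the hidden data-columns-ignore-slot-multiple debug feature (its flag and feature field appear later in this diff). A tiny, hypothetical helper showing the same predicate in isolation:

// ignoreDataColumnForSlot reports whether a data column sidecar at the given slot would be
// voluntarily ignored. multiple corresponds to features.Get().DataColumnsIgnoreSlotMultiple;
// zero disables the behaviour. Illustrative only, not part of the diff.
func ignoreDataColumnForSlot(slot, multiple uint64) bool {
    return multiple != 0 && slot%multiple == 0
}

// Example: with --data-columns-ignore-slot-multiple=3, columns at slots 3, 6, 9, ... are ignored,
// so ignoreDataColumnForSlot(6, 3) is true and ignoreDataColumnForSlot(7, 3) is false.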
@@ -17,9 +17,12 @@

// BlobAlignsWithBlock verifies if the blob aligns with the block.
func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
    if block.Version() < version.Deneb {
    blockVersion := block.Version()

    if blockVersion < version.Deneb || blockVersion >= version.Fulu {
        return nil
    }

    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blob.Slot())
    if blob.Index >= uint64(maxBlobsPerBlock) {
        return errors.Wrapf(ErrIncorrectBlobIndex, "index %d exceeds MAX_BLOBS_PER_BLOCK %d", blob.Index, maxBlobsPerBlock)
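The new guard skips the alignment check both for pre-Deneb blocks (which carry no blobs) and for Fulu-and-later blocks (where blob sidecars give way to data column sidecars), so only the Deneb-through-Electra range is validated. A one-line restatement of that gate, assuming the usual ordering of the version constants (illustrative, not code from the diff):

// blobAlignmentApplies reports whether BlobAlignsWithBlock should enforce its checks
// for a block of version v (hypothetical helper for illustration).
func blobAlignmentApplies(v int) bool {
    return v >= version.Deneb && v < version.Fulu
}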
@@ -38,6 +38,7 @@ go_library(
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/logging:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -47,6 +47,15 @@
        RequireSidecarKzgProofVerified,
    }

    // ByRootRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
    // via the by root request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
    // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
    ByRootRequestDataColumnSidecarRequirements = []Requirement{
        RequireValidFields,
        RequireSidecarInclusionProven,
        RequireSidecarKzgProofVerified,
    }

    errColumnsInvalid = errors.New("data columns failed verification")
    errBadTopicLength = errors.New("topic length is invalid")
    errBadTopic       = errors.New("topic is not of the one expected")
@@ -531,4 +540,4 @@ func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {

    copy(key[128:], root[:])
    return key, nil
}
}
@@ -3,7 +3,24 @@ package verification

import (
    "testing"

    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"

    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

type (
    DataColumnParams struct {
        Slot           primitives.Slot
        ColumnIndex    uint64
        KzgCommitments [][]byte
        DataColumn     []byte // A whole data cell will be filled with the content of one item of this slice.
    }

    DataColumnsParamsByRoot map[[fieldparams.RootLength]byte][]DataColumnParams
)

// FakeVerifyForTest can be used by tests that need a VerifiedROBlob but don't want to do all the
@@ -25,3 +42,85 @@ func FakeVerifySliceForTest(t *testing.T, b []blocks.ROBlob) []blocks.VerifiedRO
    }
    return vbs
}

// FakeVerifyDataColumnForTest can be used by tests that need a VerifiedRODataColumn but don't want to do all the
// expensive set up to perform full validation.
func FakeVerifyDataColumnForTest(t *testing.T, b blocks.RODataColumn) blocks.VerifiedRODataColumn {
    // Log so that t is truly required.
    t.Log("producing fake VerifiedRODataColumn for a test")
    return blocks.NewVerifiedRODataColumn(b)
}

// FakeVerifyDataColumnSliceForTest can be used by tests that need a []VerifiedRODataColumn but don't want to do all the
// expensive set up to perform full validation.
func FakeVerifyDataColumnSliceForTest(t *testing.T, dcs []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
    // Log so that t is truly required.
    t.Log("producing fake []VerifiedRODataColumn for a test")

    vcs := make([]blocks.VerifiedRODataColumn, 0, len(dcs))
    for _, dc := range dcs {
        vcs = append(vcs, blocks.NewVerifiedRODataColumn(dc))
    }

    return vcs
}

func CreateTestVerifiedRoDataColumnSidecars(t *testing.T, dataColumnParamsByBlockRoot DataColumnsParamsByRoot) ([]blocks.RODataColumn, []blocks.VerifiedRODataColumn) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.FuluForkEpoch = 0
    params.OverrideBeaconConfig(cfg)

    count := 0
    for _, indices := range dataColumnParamsByBlockRoot {
        count += len(indices)
    }

    verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, count)
    rodataColumnSidecars := make([]blocks.RODataColumn, 0, count)
    for blockRoot, params := range dataColumnParamsByBlockRoot {
        for _, param := range params {
            dataColumn := make([][]byte, 0, len(param.DataColumn))
            for _, value := range param.DataColumn {
                cell := make([]byte, ckzg4844.BytesPerCell)
                for i := range ckzg4844.BytesPerCell {
                    cell[i] = value
                }
                dataColumn = append(dataColumn, cell)
            }

            kzgCommitmentsInclusionProof := make([][]byte, 4)
            for i := range kzgCommitmentsInclusionProof {
                kzgCommitmentsInclusionProof[i] = make([]byte, 32)
            }

            dataColumnSidecar := &ethpb.DataColumnSidecar{
                Index:                        param.ColumnIndex,
                KzgCommitments:               param.KzgCommitments,
                Column:                       dataColumn,
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        Slot:       param.Slot,
                        ParentRoot: make([]byte, fieldparams.RootLength),
                        StateRoot:  make([]byte, fieldparams.RootLength),
                        BodyRoot:   make([]byte, fieldparams.RootLength),
                    },
                    Signature: make([]byte, fieldparams.BLSSignatureLength),
                },
            }

            roDataColumnSidecar, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
            if err != nil {
                t.Fatal(err)
            }

            rodataColumnSidecars = append(rodataColumnSidecars, roDataColumnSidecar)

            verifiedRoDataColumnSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
            verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumnSidecar)
        }
    }

    return rodataColumnSidecars, verifiedRoDataColumnSidecars
}
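A rough usage sketch for the new helper (hypothetical test code with arbitrary values; it assumes the usual prysm test imports such as fieldparams, the require package, and this verification package):

func TestCreateTestVerifiedRoDataColumnSidecars_Example(t *testing.T) { // hypothetical example, not part of the diff
    blockRoot := [fieldparams.RootLength]byte{1}
    byRoot := verification.DataColumnsParamsByRoot{
        blockRoot: {
            {Slot: 10, ColumnIndex: 0, KzgCommitments: [][]byte{make([]byte, 48)}, DataColumn: []byte{0xaa}},
            {Slot: 10, ColumnIndex: 1, KzgCommitments: [][]byte{make([]byte, 48)}, DataColumn: []byte{0xbb}},
        },
    }

    // Each DataColumn byte is expanded into a full cell, so each sidecar here carries one cell.
    roSidecars, verifiedSidecars := verification.CreateTestVerifiedRoDataColumnSidecars(t, byRoot)
    require.Equal(t, 2, len(roSidecars))
    require.Equal(t, 2, len(verifiedSidecars))
}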
changelog/manu-peerdas-sync.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Data columns syncing for Fusaka.
@@ -2,4 +2,4 @@

- **Gzip Compression for Beacon API:**
  Fixed an issue where the beacon chain server ignored the `Accept-Encoding: gzip` header and returned uncompressed JSON responses. With this change, endpoints that use the `AcceptHeaderHandler` now also compress responses when a client requests gzip encoding.
  Fixes [#14593](https://github.com/prysmaticlabs/prysm/issues/14593).
  Fixes [#14593](https://github.com/prysmaticlabs/prysm/issues/14593).
@@ -216,6 +216,7 @@ var (
    DataColumnBatchLimit = &cli.IntFlag{
        Name:  "data-column-batch-limit",
        Usage: "The amount of data columns the local peer is bounded to request and respond to in a batch.",
        // TODO: determine a good default value for this flag.
        Value: 4096,
    }
    // DataColumnBatchLimitBurstFactor specifies the factor by which data column batch size may increase.
@@ -42,7 +42,7 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
    cfg := &GlobalFlags{}

    if ctx.Bool(SubscribeToAllSubnets.Name) {
        log.Warn("Subscribing to All Attestation Subnets")
        log.Warning("Subscribing to all attestation subnets")
        cfg.SubscribeToAllSubnets = true
    }
@@ -59,10 +59,13 @@ var appFlags = []cli.Flag{
    flags.BlockBatchLimitBurstFactor,
    flags.BlobBatchLimit,
    flags.BlobBatchLimitBurstFactor,
    flags.DataColumnBatchLimit,
    flags.DataColumnBatchLimitBurstFactor,
    flags.InteropMockEth1DataVotesFlag,
    flags.SlotsPerArchivedPoint,
    flags.DisableDebugRPCEndpoints,
    flags.SubscribeToAllSubnets,
    flags.SubscribeAllDataSubnets,
    flags.HistoricalSlasherNode,
    flags.ChainID,
    flags.NetworkID,
@@ -84,6 +87,7 @@ var appFlags = []cli.Flag{
    flags.BeaconDBPruning,
    flags.PrunerRetentionEpochs,
    flags.EnableBuilderSSZ,
    flags.SubscribeAllDataSubnets,
    cmd.MinimalConfigFlag,
    cmd.E2EConfigFlag,
    cmd.RPCMaxPageSizeFlag,
@@ -143,6 +147,7 @@ var appFlags = []cli.Flag{
    storage.BlobStoragePathFlag,
    storage.BlobRetentionEpochFlag,
    storage.BlobStorageLayout,
    storage.DataColumnStoragePathFlag,
    bflags.EnableExperimentalBackfill,
    bflags.BackfillBatchSize,
    bflags.BackfillWorkerCount,
@@ -61,3 +61,12 @@ func TestConfigureBlobRetentionEpoch(t *testing.T) {
    _, err = blobRetentionEpoch(cliCtx)
    require.ErrorIs(t, err, errInvalidBlobRetentionEpochs)
}

func TestDataColumnStoragePath_FlagSpecified(t *testing.T) {
    app := cli.App{}
    set := flag.NewFlagSet("test", 0)
    set.String(DataColumnStoragePathFlag.Name, "/blah/blah", DataColumnStoragePathFlag.Usage)
    cliCtx := cli.NewContext(&app, set, nil)
    storagePath := dataColumnStoragePath(cliCtx)

    assert.Equal(t, "/blah/blah", storagePath)
}
@@ -98,12 +98,15 @@ var appHelpFlagGroups = []flagGroup{
            cmd.StaticPeers,
            flags.BlobBatchLimit,
            flags.BlobBatchLimitBurstFactor,
            flags.DataColumnBatchLimit,
            flags.DataColumnBatchLimitBurstFactor,
            flags.BlockBatchLimit,
            flags.BlockBatchLimitBurstFactor,
            flags.MaxConcurrentDials,
            flags.MinPeersPerSubnet,
            flags.MinSyncPeers,
            flags.SubscribeToAllSubnets,
            flags.SubscribeAllDataSubnets,
        },
    },
    { // Flags relevant to storing data on disk and configuring the beacon chain database.
@@ -124,6 +127,7 @@ var appHelpFlagGroups = []flagGroup{
            storage.BlobRetentionEpochFlag,
            storage.BlobStorageLayout,
            storage.BlobStoragePathFlag,
            storage.DataColumnStoragePathFlag,
        },
    },
    { // Flags relevant to configuring local block production or external builders such as mev-boost.
@@ -85,6 +85,12 @@ type Flags struct {
    // changed on disk. This feature is for advanced use cases only.
    KeystoreImportDebounceInterval time.Duration

    // DataColumnsWithholdCount specifies the number of data columns that should be withheld when proposing a block.
    DataColumnsWithholdCount uint64

    // DataColumnsIgnoreSlotMultiple specifies the multiple of slot number where data columns should be ignored.
    DataColumnsIgnoreSlotMultiple uint64

    // AggregateIntervals specifies the time durations at which we aggregate attestations preparing for forkchoice.
    AggregateIntervals [3]time.Duration
@@ -280,6 +286,16 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
        cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
    }

    if ctx.IsSet(DataColumnsWithholdCount.Name) {
        logEnabled(DataColumnsWithholdCount)
        cfg.DataColumnsWithholdCount = ctx.Uint64(DataColumnsWithholdCount.Name)
    }

    if ctx.IsSet(DataColumnsIgnoreSlotMultiple.Name) {
        logEnabled(DataColumnsIgnoreSlotMultiple)
        cfg.DataColumnsIgnoreSlotMultiple = ctx.Uint64(DataColumnsIgnoreSlotMultiple.Name)
    }

    cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
    Init(cfg)
    return nil
@@ -172,6 +172,20 @@ var (
        Name:  "enable-experimental-attestation-pool",
        Usage: "Enables an experimental attestation pool design.",
    }
    // DataColumnsWithholdCount is a flag for withholding data columns when proposing a block.
    DataColumnsWithholdCount = &cli.Uint64Flag{
        Name:   "data-columns-withhold-count",
        Usage:  "Number of columns to withhold when proposing a block. DO NOT USE IN PRODUCTION.",
        Value:  0,
        Hidden: true,
    }
    // DataColumnsIgnoreSlotMultiple is a flag for ignoring data columns at slots that are a multiple of the given value.
    DataColumnsIgnoreSlotMultiple = &cli.Uint64Flag{
        Name:   "data-columns-ignore-slot-multiple",
        Usage:  "Ignore all data columns for slots that are a multiple of this value. DO NOT USE IN PRODUCTION.",
        Value:  0,
        Hidden: true,
    }
    // forceHeadFlag is a flag to force the head of the beacon chain to a specific block.
    forceHeadFlag = &cli.StringFlag{
        Name: "sync-from",
@@ -255,6 +269,8 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
    DisableQUIC,
    EnableDiscoveryReboot,
    enableExperimentalAttestationPool,
    DataColumnsWithholdCount,
    DataColumnsIgnoreSlotMultiple,
    forceHeadFlag,
    blacklistRoots,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
@@ -46,6 +46,9 @@ const (
    MaxRandomValueElectra = uint64(1<<16 - 1) // MaxRandomValueElectra defines max for a random value using for proposer and sync committee sampling.

    // Introduced in Fulu network upgrade.
    NumberOfColumns      = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
    CellsPerBlob         = 64  // CellsPerBlob refers to the number of cells in a (non-extended) blob.
    CellsPerBlob         = 64  // CellsPerBlob refers to the number of cells in a (non-extended) blob.
    FieldElementsPerCell = 64  // FieldElementsPerCell refers to the number of field elements in a cell.
    BytesPerFieldElement = 32  // BytesPerFieldElement refers to the number of bytes in a field element.
    BytesPerCells        = FieldElementsPerCell * BytesPerFieldElement // BytesPerCells refers to the number of bytes in a cell.
    NumberOfColumns      = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
)

@@ -46,6 +46,9 @@ const (
    MaxRandomValueElectra = uint64(1<<16 - 1) // Maximum value for a random value using for proposer and sync committee sampling.

    // Introduced in Fulu network upgrade.
    NumberOfColumns      = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
    CellsPerBlob         = 64  // CellsPerBlob refers to the number of cells in a (non-extended) blob.
    CellsPerBlob         = 64  // CellsPerBlob refers to the number of cells in a (non-extended) blob.
    FieldElementsPerCell = 64  // FieldElementsPerCell refers to the number of field elements in a cell.
    BytesPerFieldElement = 32  // BytesPerFieldElement refers to the number of bytes in a field element.
    BytesPerCells        = FieldElementsPerCell * BytesPerFieldElement // BytesPerCells refers to the number of bytes in a cell.
    NumberOfColumns      = 128 // NumberOfColumns refers to the specified number of data columns that can exist in a network.
)
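For orientation, the added constants pin down the PeerDAS cell and column sizes. A quick arithmetic check (standalone illustration, not code from the diff):

package main

import "fmt"

func main() {
    const (
        fieldElementsPerCell = 64  // FieldElementsPerCell
        bytesPerFieldElement = 32  // BytesPerFieldElement
        numberOfColumns      = 128 // NumberOfColumns
    )
    bytesPerCell := fieldElementsPerCell * bytesPerFieldElement
    fmt.Println(bytesPerCell)                   // 2048 bytes in one cell (BytesPerCells)
    fmt.Println(numberOfColumns * bytesPerCell) // 262144 bytes of cell data per blob across the 128 columns
}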
@@ -13,17 +13,22 @@ const (
func SetupTestConfigCleanup(t testing.TB) {
    prevDefaultBeaconConfig := mainnetBeaconConfig.Copy()
    temp := configs.getActive().Copy()

    undo, err := SetActiveWithUndo(temp)
    if err != nil {
        t.Fatal(err)
    }

    prevNetworkCfg := networkConfig.Copy()

    t.Cleanup(func() {
        mainnetBeaconConfig = prevDefaultBeaconConfig

        err = undo()
        if err != nil {
            t.Fatal(err)
        }

        networkConfig = prevNetworkCfg
    })
}
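With the undo-based restore above, a test can freely override the active config and rely on t.Cleanup to put everything back; the new verification helper earlier in this diff follows exactly this pattern:

// Typical call site (mirrors CreateTestVerifiedRoDataColumnSidecars above).
params.SetupTestConfigCleanup(t)    // snapshot the active beacon config and the network config
cfg := params.BeaconConfig().Copy() // start from the currently active config
cfg.FuluForkEpoch = 0               // test-specific override
params.OverrideBeaconConfig(cfg)    // reverted automatically when the test finishes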
@@ -13,7 +13,6 @@ go_library(
        "roblob.go",
        "roblock.go",
        "rodatacolumn.go",
        "rosidecar.go",
        "setters.go",
        "types.go",
    ],
@@ -54,7 +53,6 @@ go_test(
        "roblob_test.go",
        "roblock_test.go",
        "rodatacolumn_test.go",
        "rosidecar_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -74,6 +72,5 @@ go_test(
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_prysmaticlabs_gohashtree//:go_default_library",
        "@com_github_stretchr_testify//require:go_default_library",
    ],
)
@@ -1,6 +1,8 @@
package blocks

import (
    "fmt"

    consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -398,7 +400,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) { // nolint:gocognit
            Body: body,
        }, nil
    default:
        return nil, errors.New("unsupported beacon block version")
        return nil, fmt.Errorf("unsupported beacon block version: %s", version.String(b.version))
    }
}
@@ -96,16 +96,17 @@ func (s ROBlockSlice) Len() int {
    return len(s)
}

// BlockWithROBlobs is a wrapper that collects the block and blob values together.
// BlockWithROSidecars is a wrapper that collects the block and blob values together.
// This is helpful because these values are collated from separate RPC requests.
type BlockWithROBlobs struct {
    Block ROBlock
    Blobs []ROBlob
type BlockWithROSidecars struct {
    Block   ROBlock
    Blobs   []ROBlob
    Columns []VerifiedRODataColumn
}

// BlockWithROBlobsSlice gives convenient access to getting a slice of just the ROBlocks,
// and defines sorting helpers.
type BlockWithROBlobsSlice []BlockWithROBlobs
type BlockWithROBlobsSlice []BlockWithROSidecars

func (s BlockWithROBlobsSlice) ROBlocks() []ROBlock {
    r := make([]ROBlock, len(s))
@@ -66,16 +66,16 @@ func (dc *RODataColumn) Slot() primitives.Slot {
    return dc.SignedBlockHeader.Header.Slot
}

// ParentRoot returns the parent root of the data column sidecar.
func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
    return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
}

// ProposerIndex returns the proposer index of the data column sidecar.
func (dc *RODataColumn) ProposerIndex() primitives.ValidatorIndex {
    return dc.SignedBlockHeader.Header.ProposerIndex
}

// ParentRoot returns the parent root of the data column sidecar.
func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
    return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
}

// VerifiedRODataColumn represents an RODataColumn that has undergone full verification (eg block sig, inclusion proof, commitment check).
type VerifiedRODataColumn struct {
    RODataColumn
@@ -84,4 +84,4 @@ type VerifiedRODataColumn struct {
// NewVerifiedRODataColumn "upgrades" an RODataColumn to a VerifiedRODataColumn. This method should only be used by the verification package.
func NewVerifiedRODataColumn(roDataColumn RODataColumn) VerifiedRODataColumn {
    return VerifiedRODataColumn{RODataColumn: roDataColumn}
}
}
@@ -1,96 +0,0 @@
package blocks

import (
    "github.com/pkg/errors"
)

// ROSidecar represents a read-only sidecar with its block root.
type ROSidecar struct {
    blob       *ROBlob
    dataColumn *RODataColumn
}

var (
    errBlobNeeded       = errors.New("blob sidecar needed")
    errDataColumnNeeded = errors.New("data column sidecar needed")
)

// NewSidecarFromBlobSidecar creates a new read-only (generic) sidecar from a read-only blob sidecar.
func NewSidecarFromBlobSidecar(blob ROBlob) ROSidecar {
    return ROSidecar{blob: &blob}
}

// NewSidecarFromDataColumnSidecar creates a new read-only (generic) sidecar from a read-only data column sidecar.
func NewSidecarFromDataColumnSidecar(dataColumn RODataColumn) ROSidecar {
    return ROSidecar{dataColumn: &dataColumn}
}

// NewSidecarsFromBlobSidecars creates a new slice of read-only (generic) sidecars from a slice of read-only blobs sidecars.
func NewSidecarsFromBlobSidecars(blobSidecars []ROBlob) []ROSidecar {
    sidecars := make([]ROSidecar, 0, len(blobSidecars))
    for _, blob := range blobSidecars {
        blobSidecar := ROSidecar{blob: &blob} // #nosec G601
        sidecars = append(sidecars, blobSidecar)
    }

    return sidecars
}

// NewSidecarsFromDataColumnSidecars creates a new slice of read-only (generic) sidecars from a slice of read-only data column sidecars.
func NewSidecarsFromDataColumnSidecars(dataColumnSidecars []RODataColumn) []ROSidecar {
    sidecars := make([]ROSidecar, 0, len(dataColumnSidecars))
    for _, dataColumn := range dataColumnSidecars {
        dataColumnSidecar := ROSidecar{dataColumn: &dataColumn} // #nosec G601
        sidecars = append(sidecars, dataColumnSidecar)
    }

    return sidecars
}

// Blob returns the blob sidecar.
func (sc *ROSidecar) Blob() (ROBlob, error) {
    if sc.blob == nil {
        return ROBlob{}, errBlobNeeded
    }

    return *sc.blob, nil
}

// DataColumn returns the data column sidecar.
func (sc *ROSidecar) DataColumn() (RODataColumn, error) {
    if sc.dataColumn == nil {
        return RODataColumn{}, errDataColumnNeeded
    }

    return *sc.dataColumn, nil
}

// BlobSidecarsFromSidecars creates a new slice of read-only blobs sidecars from a slice of read-only (generic) sidecars.
func BlobSidecarsFromSidecars(sidecars []ROSidecar) ([]ROBlob, error) {
    blobSidecars := make([]ROBlob, 0, len(sidecars))
    for _, sidecar := range sidecars {
        blob, err := sidecar.Blob()
        if err != nil {
            return nil, errors.Wrap(err, "blob")
        }

        blobSidecars = append(blobSidecars, blob)
    }

    return blobSidecars, nil
}

// DataColumnSidecarsFromSidecars creates a new slice of read-only data column sidecars from a slice of read-only (generic) sidecars.
func DataColumnSidecarsFromSidecars(sidecars []ROSidecar) ([]RODataColumn, error) {
    dataColumnSidecars := make([]RODataColumn, 0, len(sidecars))
    for _, sidecar := range sidecars {
        dataColumn, err := sidecar.DataColumn()
        if err != nil {
            return nil, errors.Wrap(err, "data column")
        }

        dataColumnSidecars = append(dataColumnSidecars, dataColumn)
    }

    return dataColumnSidecars, nil
}
@@ -1,109 +0,0 @@
package blocks

import (
    "testing"

    "github.com/stretchr/testify/require"
)

func TestNewSidecarFromBlobSidecar(t *testing.T) {
    blob := ROBlob{}
    sidecar := NewSidecarFromBlobSidecar(blob)

    // Check that the blob is set
    retrievedBlob, err := sidecar.Blob()
    require.NoError(t, err)
    require.Equal(t, blob, retrievedBlob)

    // Check that data column is not set
    _, err = sidecar.DataColumn()
    require.ErrorIs(t, err, errDataColumnNeeded)
}

func TestNewSidecarFromDataColumnSidecar(t *testing.T) {
    dataColumn := RODataColumn{}
    sidecar := NewSidecarFromDataColumnSidecar(dataColumn)

    // Check that the data column is set
    retrievedDataColumn, err := sidecar.DataColumn()
    require.NoError(t, err)
    require.Equal(t, dataColumn, retrievedDataColumn)

    // Check that blob is not set
    _, err = sidecar.Blob()
    require.ErrorIs(t, err, errBlobNeeded)
}

func TestNewSidecarsFromBlobSidecars(t *testing.T) {
    blobSidecars := []ROBlob{{}, {}}
    sidecars := NewSidecarsFromBlobSidecars(blobSidecars)

    require.Equal(t, len(blobSidecars), len(sidecars))

    for i, sidecar := range sidecars {
        retrievedBlob, err := sidecar.Blob()
        require.NoError(t, err)
        require.Equal(t, blobSidecars[i], retrievedBlob)
    }
}

func TestNewSidecarsFromDataColumnSidecars(t *testing.T) {
    dataColumnSidecars := []RODataColumn{{}, {}}
    sidecars := NewSidecarsFromDataColumnSidecars(dataColumnSidecars)

    require.Equal(t, len(dataColumnSidecars), len(sidecars))

    for i, sidecar := range sidecars {
        retrievedDataColumn, err := sidecar.DataColumn()
        require.NoError(t, err)
        require.Equal(t, dataColumnSidecars[i], retrievedDataColumn)
    }
}

func TestBlobSidecarsFromSidecars(t *testing.T) {
    // Create sidecars with blobs
    blobSidecars := []ROBlob{{}, {}}
    sidecars := NewSidecarsFromBlobSidecars(blobSidecars)

    // Convert back to blob sidecars
    retrievedBlobSidecars, err := BlobSidecarsFromSidecars(sidecars)
    require.NoError(t, err)
    require.Equal(t, len(blobSidecars), len(retrievedBlobSidecars))

    for i, blob := range retrievedBlobSidecars {
        require.Equal(t, blobSidecars[i], blob)
    }

    // Test with a mix of sidecar types
    mixedSidecars := []ROSidecar{
        NewSidecarFromBlobSidecar(ROBlob{}),
        NewSidecarFromDataColumnSidecar(RODataColumn{}),
    }

    _, err = BlobSidecarsFromSidecars(mixedSidecars)
    require.Error(t, err)
}

func TestDataColumnSidecarsFromSidecars(t *testing.T) {
    // Create sidecars with data columns
    dataColumnSidecars := []RODataColumn{{}, {}}
    sidecars := NewSidecarsFromDataColumnSidecars(dataColumnSidecars)

    // Convert back to data column sidecars
    retrievedDataColumnSidecars, err := DataColumnSidecarsFromSidecars(sidecars)
    require.NoError(t, err)
    require.Equal(t, len(dataColumnSidecars), len(retrievedDataColumnSidecars))

    for i, dataColumn := range retrievedDataColumnSidecars {
        require.Equal(t, dataColumnSidecars[i], dataColumn)
    }

    // Test with a mix of sidecar types
    mixedSidecars := []ROSidecar{
        NewSidecarFromDataColumnSidecar(RODataColumn{}),
        NewSidecarFromBlobSidecar(ROBlob{}),
    }

    _, err = DataColumnSidecarsFromSidecars(mixedSidecars)
    require.Error(t, err)
}