Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: d929e1dcaa...fulu-devne (180 commits)
Commit list (SHA1 only; the author, date, and message columns are empty in this view):

9c9278ea5b, 09a511dbb5, d9d95f357d, 738da6205c, 589042df20, 312b93e9b1, f86f76e447, c311e652eb, 022f913374, 6a5d78a331,
6cc6c7e73d, a910e3cb17, 4d6457775b, 66e2138aaa, 3b98d4ac78, 6fca970bfb, 2c2ac4aa67, 4bee860907, 39bfe243fd, eb4e2d1b89,
9fa502fb0d, a2fd30497e, a94561f8dc, af875b78c9, 61207bd3ac, 0b6fcd7d17, fe2766e716, 9135d765e1, eca87f29d1, 00821c8f55,
4b9e92bcd7, b01d9005b8, 8d812d5f0e, 24a3cb2a8b, 66d1d3e248, 99933678ea, 34f8e1e92b, a6a41a8755, f110b94fac, 33023aa282,
eeb3cdc99e, 1e7147f060, 8936beaff3, c00283f247, a4269cf308, 91f3c8a4d0, 30c7ee9c7b, 456d8b9eb9, 4fe3e6d31a, 01ee1c80b4,
c14fe47a81, b9deabbf0a, 5d66a98e78, 2d46d6ffae, 57107e50a7, 47271254f6, f304028874, 8abc5e159a, b1ac53c4dd, 27ab68c856,
ddf5a3953b, 92d2fc101d, 8996000d2b, a2fcba2349, abe8638991, 0b5064b474, da9d4cf5b9, a62cca15dd, ac04246a2a, 0923145bd7,
a216cb4105, 01705d1f3d, 14f93b4e9d, ad11036c36, 632a06076b, 242c2b0268, 19662da905, 7faee5af35, 805ee1bf31, bea46fdfa1,
f6b1fb1c88, 6fb349ea76, e5a425f5c7, f157d37e4c, 5f08559bef, a082d2aecd, bcfaff8504, d8e09c346f, 876519731b, de05b83aca,
56c73e7193, 859ac008a8, f882bd27c8, 361e5759c1, 34ef0da896, 726e8b962f, 453ea01deb, 6537f8011e, 5f17317c1c, 3432ffa4a3,
9dac67635b, 9be69fbd07, e21261e893, da53a8fc48, a14634e656, 43761a8066, 01dbc337c0, 92f9b55fcb, f65f12f58b, f2b61a3dcf,
77a6d29a2e, 31d16da3a0, 19221b77bd, 83df293647, c20c09ce36, 2191faaa3f, 2de1e6f3e4, db44df3964, f92eb44c89, a26980b64d,
f58cf7e626, 68da7dabe2, d1e43a2c02, 3652bec2f8, 81b7a1725f, 0c917079c4, a732fe7021, d75a7aae6a, e788a46e82, 199543125a,
ca63efa770, 345e6edd9c, 6403064126, 0517d76631, 000d480f77, b40a8ed37e, d21c2bd63e, 7a256e93f7, 07fe76c2da, 54affa897f,
ac4c5fae3c, 2845d87077, dc2c90b8ed, b469157e1f, 2697794e58, 48cf24edb4, 78f90db90b, d0a3b9bc1d, bfdb6dab86, 7dd2fd52af,
b6bad9331b, 6e2122085d, 7a847292aa, 81f4db0afa, a7dc2e6c8b, 0a010b5088, 1e335e2cf2, 42f4c0f14e, d3c12abe25, b0ba05b4f4,
e206506489, 013cb28663, 496914cb39, c032e78888, 5e4deff6fd, 6daa91c465, 32ce6423eb, b0ea450df5, 8bd10df423, dcbb543be2,
be0580e1a9, 1355178115, b78c3485b9, f503efc6ed, 1bfbd3980e, 3e722ea1bc, d844026433, 9ffc19d5ef, 3e23f6e879, c688c84393
WORKSPACE (14 changed lines)

@@ -1,7 +1,7 @@
workspace(name = "prysm")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "rules_pkg",

@@ -16,8 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "toolchains_protoc",
    sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",

@@ -255,16 +253,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0-alpha.0"
consensus_spec_version = "v1.6.0-alpha.1"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-W7oKvoM0nAkyitykRxAw6kmCvjYC01IqiNJy0AmCnMM=",
        "minimal": "sha256-ig7/zxomjv6buBWMom4IxAJh3lFJ9+JnY44E7c8ZNP8=",
        "mainnet": "sha256-mjx+MkXtPhCNv4c4knLYLIkvIdpF7WTjx/ElvGPQzSo=",
        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
    },
    version = consensus_spec_version,
)

@@ -280,7 +278,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-u0RkIZIeGttb3sInR31mO64aBSwxALqO5SYIPlqEvPo=",
    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
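The flavors and integrity values in the hunks above are Bazel SRI-style digests: a base64-encoded SHA-256 of the downloaded archive, prefixed with sha256-. As a hedged illustration only (the file name below is assumed, not taken from this change), such a digest can be recomputed in a few lines of Go:

```go
package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "os"
)

func main() {
    // Assumed local copy of the spec archive referenced above.
    data, err := os.ReadFile("consensus-specs-1.6.0-alpha.1.tar.gz")
    if err != nil {
        panic(err)
    }
    sum := sha256.Sum256(data)
    // Compare the output against the integrity / flavors field.
    fmt.Println("sha256-" + base64.StdEncoding.EncodeToString(sum[:]))
}
```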
@@ -923,7 +923,14 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
    if err != nil {
        return nil, err
    }

    srcLookahead, err := st.ProposerLookahead()
    if err != nil {
        return nil, err
    }
    lookahead := make([]string, len(srcLookahead))
    for i, v := range srcLookahead {
        lookahead[i] = fmt.Sprintf("%d", uint64(v))
    }
    return &BeaconStateFulu{
        GenesisTime:           fmt.Sprintf("%d", st.GenesisTime()),
        GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),

@@ -962,5 +969,6 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
        PendingDeposits:           PendingDepositsFromConsensus(pbd),
        PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
        PendingConsolidations:     PendingConsolidationsFromConsensus(pc),
        ProposerLookahead:         lookahead,
    }, nil
}

@@ -219,4 +219,5 @@ type BeaconStateFulu struct {
    PendingDeposits           []*PendingDeposit           `json:"pending_deposits"`
    PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
    PendingConsolidations     []*PendingConsolidation     `json:"pending_consolidations"`
    ProposerLookahead         []string                    `json:"proposer_lookahead"`
}
@@ -639,7 +639,12 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
func (s *Service) isDataAvailable(
    ctx context.Context,
    root [fieldparams.RootLength]byte,
    signedBlock interfaces.ReadOnlySignedBeaconBlock,
    startWaiting ...chan<- bool, // For tests purposes only
) error {
    block := signedBlock.Block()
    if block == nil {
        return errors.New("invalid nil beacon block")

@@ -647,7 +652,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBloc
    blockVersion := block.Version()
    if blockVersion >= version.Fulu {
        return s.areDataColumnsAvailable(ctx, root, block)
        return s.areDataColumnsAvailable(ctx, root, block, startWaiting...)
    }

    if blockVersion >= version.Deneb {

@@ -659,7 +664,12 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBloc
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
func (s *Service) areDataColumnsAvailable(
    ctx context.Context,
    root [fieldparams.RootLength]byte,
    block interfaces.ReadOnlyBeaconBlock,
    startWaiting ...chan<- bool, // For tests purposes only
) error {
    // We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
    blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
    blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

@@ -726,6 +736,11 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
        return nil
    }

    // Notify the caller that we are waiting for data columns.
    if len(startWaiting) > 0 && startWaiting[0] != nil {
        startWaiting[0] <- true
    }

    // Log for DA checks that cross over into the next slot; helpful for debugging.
    nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
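The doc comment above describes a wait loop over a per-root notifier channel. A minimal sketch of that pattern, assuming the notifier delivers column indices and `missing` holds the indices still outstanding (names are illustrative, not the actual Prysm implementation):

```go
package sketch

import (
    "context"
    "errors"
)

// waitForColumns returns nil once every missing column index has been notified,
// and an error if the channel closes early or the context is cancelled.
func waitForColumns(ctx context.Context, notifier <-chan uint64, missing map[uint64]struct{}) error {
    for len(missing) > 0 {
        select {
        case idx, ok := <-notifier:
            if !ok {
                return errors.New("notifier channel closed before all columns were seen")
            }
            delete(missing, idx) // deleting an index we were not waiting for is a no-op
        case <-ctx.Done():
            return ctx.Err()
        }
    }
    return nil
}
```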
@@ -3332,17 +3332,27 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
    signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
    require.NoError(t, err)

    root, err := signedBeaconBlock.Block.HashTreeRoot()
    block := signedBeaconBlock.Block
    bodyRoot, err := block.Body.HashTreeRoot()
    require.NoError(t, err)

    dataColumnsParams := make([]util.DataColumnParams, 0, len(params.columnsToSave))
    root, err := block.HashTreeRoot()
    require.NoError(t, err)

    dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
    for _, i := range params.columnsToSave {
        dataColumnParam := util.DataColumnParams{ColumnIndex: i}
        dataColumnParam := util.DataColumnParam{
            Index:         i,
            Slot:          block.Slot,
            ProposerIndex: block.ProposerIndex,
            ParentRoot:    block.ParentRoot,
            StateRoot:     block.StateRoot,
            BodyRoot:      bodyRoot[:],
        }
        dataColumnsParams = append(dataColumnsParams, dataColumnParam)
    }

    dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{root: dataColumnsParams}
    _, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
    _, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)

    err = dataColumnStorage.Save(verifiedRODataColumns)
    require.NoError(t, err)

@@ -3410,30 +3420,39 @@ func TestIsDataAvailable(t *testing.T) {
    }

    ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
    block := signed.Block()
    slot := block.Slot()
    proposerIndex := block.ProposerIndex()
    parentRoot := block.ParentRoot()
    stateRoot := block.StateRoot()
    bodyRoot, err := block.Body().HashTreeRoot()
    require.NoError(t, err)

    var wrongRoot [fieldparams.RootLength]byte
    copy(wrongRoot[:], root[:])
    wrongRoot[0]++ // change the root to simulate a wrong root
    _, verifiedSidecarsWrongRoot := util.CreateTestVerifiedRoDataColumnSidecars(
        t,
        []util.DataColumnParam{
            {Index: 42, Slot: slot + 1}, // Needed index, but not for this slot.
        })

    _, verifiedSidecarsWrongRoot := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{wrongRoot: {
        {ColumnIndex: 42}, // needed
    }})
    _, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
        {Index: 87, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
        {Index: 1, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Not needed index
        {Index: 42, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
    })

    _, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{root: {
        {ColumnIndex: 87}, // needed
        {ColumnIndex: 1}, // not needed
        {ColumnIndex: 42}, // needed
    }})
    startWaiting := make(chan bool)

    go func() {
        <-startWaiting

        time.AfterFunc(10*time.Millisecond, func() {
            err := service.dataColumnStorage.Save(verifiedSidecarsWrongRoot)
            require.NoError(t, err)

            err = service.dataColumnStorage.Save(verifiedSidecars)
            require.NoError(t, err)
        })
    }()

    err := service.isDataAvailable(ctx, root, signed)
    err = service.isDataAvailable(ctx, root, signed, startWaiting)
    require.NoError(t, err)
})

@@ -3460,21 +3479,40 @@ func TestIsDataAvailable(t *testing.T) {
    }

    ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
    block := signed.Block()
    slot := block.Slot()
    proposerIndex := block.ProposerIndex()
    parentRoot := block.ParentRoot()
    stateRoot := block.StateRoot()
    bodyRoot, err := block.Body().HashTreeRoot()
    require.NoError(t, err)

    dataColumnParams := make([]util.DataColumnParams, 0, missingColumns)
    dataColumnParams := make([]util.DataColumnParam, 0, missingColumns)
    for i := minimumColumnsCountToReconstruct - missingColumns; i < minimumColumnsCountToReconstruct; i++ {
        dataColumnParam := util.DataColumnParams{ColumnIndex: i}
        dataColumnParam := util.DataColumnParam{
            Index:         i,
            Slot:          slot,
            ProposerIndex: proposerIndex,
            ParentRoot:    parentRoot[:],
            StateRoot:     stateRoot[:],
            BodyRoot:      bodyRoot[:],
        }

        dataColumnParams = append(dataColumnParams, dataColumnParam)
    }

    _, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{root: dataColumnParams})
    _, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParams)

    startWaiting := make(chan bool)

    go func() {
        <-startWaiting

        time.AfterFunc(10*time.Millisecond, func() {
            err := service.dataColumnStorage.Save(verifiedSidecars)
            require.NoError(t, err)
        })
    }()

    err := service.isDataAvailable(ctx, root, signed)
    err = service.isDataAvailable(ctx, root, signed, startWaiting)
    require.NoError(t, err)
})

@@ -3486,11 +3524,13 @@ func TestIsDataAvailable(t *testing.T) {
    ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)

    time.AfterFunc(10*time.Millisecond, func() {
    startWaiting := make(chan bool)
    go func() {
        <-startWaiting
        cancel()
    })
    }()

    err := service.isDataAvailable(ctx, root, signed)
    err := service.isDataAvailable(ctx, root, signed, startWaiting)
    require.NotNil(t, err)
})
}
@@ -31,6 +31,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"

@@ -109,22 +110,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
    sync.RWMutex
    notifiers map[[32]byte]chan uint64
    seenIndex map[[32]byte][]bool
    // TODO: Separate blobs from data columns
    // seenIndex map[[32]byte][]bool
    seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    if idx >= uint64(maxBlobsPerBlock) {
        return
    }
    // TODO: Separate blobs from data columns
    // maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    // if idx >= uint64(maxBlobsPerBlock) {
    //     return
    // }

    bn.Lock()
    seen := bn.seenIndex[root]
    if seen == nil {
        seen = make([]bool, maxBlobsPerBlock)
    }
    // TODO: Separate blobs from data columns
    // if seen == nil {
    //     seen = make([]bool, maxBlobsPerBlock)
    // }
    if seen[idx] {
        bn.Unlock()
        return

@@ -135,7 +140,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
    // Retrieve or create the notifier channel for the given root.
    c, ok := bn.notifiers[root]
    if !ok {
        c = make(chan uint64, maxBlobsPerBlock)
        // TODO: Separate blobs from data columns
        // c = make(chan uint64, maxBlobsPerBlock)
        c = make(chan uint64, fieldparams.NumberOfColumns)
        bn.notifiers[root] = c
    }

@@ -145,12 +152,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    // TODO: Separate blobs from data columns
    // maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    bn.Lock()
    defer bn.Unlock()
    c, ok := bn.notifiers[root]
    if !ok {
        c = make(chan uint64, maxBlobsPerBlock)
        // TODO: Separate blobs from data columns
        // c = make(chan uint64, maxBlobsPerBlock)
        c = make(chan uint64, fieldparams.NumberOfColumns)
        bn.notifiers[root] = c
    }
    return c

@@ -176,7 +186,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
    ctx, cancel := context.WithCancel(ctx)
    bn := &blobNotifierMap{
        notifiers: make(map[[32]byte]chan uint64),
        seenIndex: make(map[[32]byte][]bool),
        // TODO: Separate blobs from data columns
        // seenIndex: make(map[[32]byte][]bool),
        seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
    }
    srv := &Service{
        ctx: ctx,
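Note on the capacity change above: buffering the per-root notifier channel to fieldparams.NumberOfColumns lets notifyIndex enqueue one notification per column without blocking, even when no consumer is draining yet. A toy demonstration of that property, assuming a column count of 128:

```go
package main

import "fmt"

func main() {
    const numberOfColumns = 128 // assumed value of fieldparams.NumberOfColumns

    c := make(chan uint64, numberOfColumns)
    for idx := uint64(0); idx < numberOfColumns; idx++ {
        c <- idx // never blocks: one buffered slot per column
    }
    fmt.Println(len(c)) // 128
}
```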
@@ -587,7 +587,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
    // Initialize a blobNotifierMap
    bn := &blobNotifierMap{
        seenIndex: make(map[[32]byte][]bool),
        // TODO: Separate blobs from data columns
        // seenIndex: make(map[[32]byte][]bool),
        seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
        notifiers: make(map[[32]byte]chan uint64),
    }
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
    return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
    currentEpoch := slots.ToEpoch(header.Header.Slot)
    fork, err := forks.Fork(currentEpoch)
    if err != nil {
        return err
    }
    domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
    if err != nil {
        return err
    }
    proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
    if err != nil {
        return err
    }
    proposerPubKey := proposer.PublicKey
    return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
@@ -37,8 +37,11 @@ const (
    // SingleAttReceived is sent after a single attestation object is received from gossip or rpc
    SingleAttReceived = 9

    // DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
    DataColumnSidecarReceived = 10

    // BlockGossipReceived is sent after a block has been received from gossip or API that passes validation rules.
    BlockGossipReceived = 10
    BlockGossipReceived = 11
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.

@@ -90,6 +93,11 @@ type SingleAttReceivedData struct {
    Attestation ethpb.Att
}

// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.
type DataColumnSidecarReceivedData struct {
    DataColumn *blocks.VerifiedRODataColumn
}

// BlockGossipReceivedData is the data sent with BlockGossipReceived events.
type BlockGossipReceivedData struct {
    // SignedBlock is the block that was received.
@@ -2,23 +2,33 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["upgrade.go"],
    srcs = [
        "transition.go",
        "upgrade.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/fulu",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/electra:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["upgrade_test.go"],
    srcs = [
        "transition_test.go",
        "upgrade_test.go",
    ],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/time:go_default_library",
beacon-chain/core/fulu/transition.go (new file, 47 lines)

@@ -0,0 +1,47 @@
package fulu

import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
)

func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
    if err := electra.ProcessEpoch(ctx, state); err != nil {
        return errors.Wrap(err, "could not process epoch in fulu transition")
    }
    return processProposerLookahead(ctx, state)
}

func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
    _, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
    defer span.End()

    if state == nil || state.IsNil() {
        return errors.New("nil state")
    }

    lookAhead, err := state.ProposerLookahead()
    if err != nil {
        return errors.Wrap(err, "could not get proposer lookahead")
    }
    lastEpochStart := len(lookAhead) - int(params.BeaconConfig().SlotsPerEpoch)
    copy(lookAhead[:lastEpochStart], lookAhead[params.BeaconConfig().SlotsPerEpoch:])
    lastEpoch := slots.ToEpoch(state.Slot()) + params.BeaconConfig().MinSeedLookahead + 1
    indices, err := helpers.ActiveValidatorIndices(ctx, state, lastEpoch)
    if err != nil {
        return err
    }
    lastEpochProposers, err := helpers.PrecomputeProposerIndices(state, indices, lastEpoch)
    if err != nil {
        return errors.Wrap(err, "could not precompute proposer indices")
    }
    copy(lookAhead[lastEpochStart:], lastEpochProposers)
    return state.SetProposerLookahead(lookAhead)
}
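processProposerLookahead above shifts the stored lookahead left by one epoch and recomputes only the newly visible tail epoch. A toy illustration of those two copy calls, assuming MIN_SEED_LOOKAHEAD = 1 and SLOTS_PER_EPOCH = 4, so the lookahead holds two epochs (eight entries):

```go
package main

import "fmt"

func main() {
    const slotsPerEpoch = 4
    lookahead := []uint64{10, 11, 12, 13, 20, 21, 22, 23} // proposers for epochs N and N+1
    newTail := []uint64{30, 31, 32, 33}                   // freshly computed proposers for epoch N+2

    lastEpochStart := len(lookahead) - slotsPerEpoch
    copy(lookahead[:lastEpochStart], lookahead[slotsPerEpoch:]) // drop epoch N, move N+1 to the front
    copy(lookahead[lastEpochStart:], newTail)                   // fill the tail with epoch N+2

    fmt.Println(lookahead) // [20 21 22 23 30 31 32 33]
}
```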
beacon-chain/core/fulu/transition_test.go (new file, 28 lines)

@@ -0,0 +1,28 @@
package fulu_test

import (
    "context"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/fulu"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestProcessEpoch_CanProcessFulu(t *testing.T) {
    st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
    st, err := fulu.UpgradeToFulu(context.Background(), st)
    require.NoError(t, err)
    preLookahead, err := st.ProposerLookahead()
    require.NoError(t, err)
    err = fulu.ProcessEpoch(context.Background(), st)
    require.NoError(t, err)
    postLookahead, err := st.ProposerLookahead()
    require.NoError(t, err)
    require.NotEqual(t, preLookahead[0], postLookahead[0])
    for i, v := range preLookahead[params.BeaconConfig().SlotsPerEpoch:] {
        require.Equal(t, v, postLookahead[i])
    }
}
@@ -1,18 +1,22 @@
package fulu

import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v6/config/params"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
)

// UpgradeToFulu updates inputs a generic state to return the version Fulu state.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/fork.md#upgrading-the-state
func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
    currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
    if err != nil {
        return nil, err

@@ -101,8 +105,12 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
    if err != nil {
        return nil, err
    }
    proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
    if err != nil {
        return nil, err
    }

    s := &ethpb.BeaconStateElectra{
    s := &ethpb.BeaconStateFulu{
        GenesisTime:           beaconState.GenesisTime(),
        GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
        Slot:                  beaconState.Slot(),

@@ -163,6 +171,7 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
        PendingDeposits:           pendingDeposits,
        PendingPartialWithdrawals: pendingPartialWithdrawals,
        PendingConsolidations:     pendingConsolidations,
        ProposerLookahead:         proposerLookahead,
    }

    // Need to cast the beaconState to use in helper functions
@@ -25,7 +25,7 @@ func TestUpgradeToFulu(t *testing.T) {
    require.NoError(t, st.SetBalances(bals))

    preForkState := st.Copy()
    mSt, err := fulu.UpgradeToFulu(st)
    mSt, err := fulu.UpgradeToFulu(t.Context(), st)
    require.NoError(t, err)

    require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
@@ -555,21 +555,31 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
    if err != nil {
        return err
    }
    // Skip cache update if the key already exists
    _, ok := proposerIndicesCache.ProposerIndices(epoch, [32]byte(root))
    if ok {
        return nil
    }
    indices, err := ActiveValidatorIndices(ctx, state, epoch)
    if err != nil {
        return err
    }
    proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch)
    if err != nil {
        return err
    }
    if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
        return errors.New("invalid proposer length returned from state")
    var proposerIndices []primitives.ValidatorIndex
    // use the state if post fulu (EIP-7917)
    if state.Version() >= version.Fulu {
        lookAhead, err := state.ProposerLookahead()
        if err != nil {
            return errors.Wrap(err, "could not get proposer lookahead")
        }
        proposerIndices = lookAhead[:params.BeaconConfig().SlotsPerEpoch]
    } else {
        // Skip cache update if the key already exists
        _, ok := proposerIndicesCache.ProposerIndices(epoch, [32]byte(root))
        if ok {
            return nil
        }
        indices, err := ActiveValidatorIndices(ctx, state, epoch)
        if err != nil {
            return err
        }
        proposerIndices, err = PrecomputeProposerIndices(state, indices, epoch)
        if err != nil {
            return err
        }
        if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
            return errors.New("invalid proposer length returned from state")
        }
    }
    // This is here to deal with tests only
    var indicesArray [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex

@@ -656,6 +666,25 @@ func ComputeCommittee(
    return shuffledList[start:end], nil
}

// InitializeProposerLookahead computes the list of the proposer indices for the next MIN_SEED_LOOKAHEAD + 1 epochs.
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
    lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
    indices, err := ActiveValidatorIndices(ctx, state, epoch)
    if err != nil {
        return nil, errors.Wrap(err, "could not get active indices")
    }
    for i := range params.BeaconConfig().MinSeedLookahead + 1 {
        proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch+i)
        if err != nil {
            return nil, errors.Wrap(err, "could not compute proposer indices")
        }
        for _, proposerIndex := range proposerIndices {
            lookAhead = append(lookAhead, uint64(proposerIndex))
        }
    }
    return lookAhead, nil
}

// PrecomputeProposerIndices computes proposer indices of the current epoch and returns a list of proposer indices,
// the index of the list represents the slot number.
func PrecomputeProposerIndices(state state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, e primitives.Epoch) ([]primitives.ValidatorIndex, error) {
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    params.SetupTestConfigCleanup(t)

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{

@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
@@ -299,9 +299,29 @@ func ProposerIndexAtSlotFromCheckpoint(c *forkchoicetypes.Checkpoint, slot primi
    return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
}

func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
    e := slots.ToEpoch(slot)
    stateEpoch := slots.ToEpoch(state.Slot())
    if e < stateEpoch || e > stateEpoch+1 {
        return 0, errors.Errorf("slot %d is not in the current epoch %d or the next epoch", slot, stateEpoch)
    }
    lookAhead, err := state.ProposerLookahead()
    if err != nil {
        return 0, errors.Wrap(err, "could not get proposer lookahead")
    }
    if e == stateEpoch {
        return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
    }
    // The caller is requesting the proposer for the next epoch
    return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
}

// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
    if state.Version() >= version.Fulu {
        return beaconProposerIndexAtSlotFulu(state, slot)
    }
    e := slots.ToEpoch(slot)
    // The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
    // For simplicity, the node will skip caching of genesis epoch.
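For orientation, a worked example of the indexing used by beaconProposerIndexAtSlotFulu, assuming mainnet-style parameters (SLOTS_PER_EPOCH = 32, MIN_SEED_LOOKAHEAD = 1, so the lookahead holds 64 entries) and a state at slot 100 (epoch 3); these are the same values the test further down exercises:

```go
package main

import "fmt"

func main() {
    const slotsPerEpoch = 32
    stateEpoch := uint64(100) / slotsPerEpoch // epoch 3

    for _, slot := range []uint64{96, 97, 130} {
        idx := slot % slotsPerEpoch
        if slot/slotsPerEpoch == stateEpoch+1 {
            idx += slotsPerEpoch // next-epoch proposers occupy the second half of the lookahead
        }
        fmt.Printf("slot %d -> lookahead[%d]\n", slot, idx) // 96 -> 0, 97 -> 1, 130 -> 34
    }
}
```

Slots 95 and 160 fall outside the current-or-next-epoch window and therefore return the error tested below.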
@@ -1171,3 +1171,29 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
    // Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
    assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
}

func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
    lookahead := make([]uint64, 64)
    lookahead[0] = 15
    lookahead[1] = 16
    lookahead[34] = 42
    pbState := ethpb.BeaconStateFulu{
        Slot:              100,
        ProposerLookahead: lookahead,
    }
    st, err := state_native.InitializeFromProtoFulu(&pbState)
    require.NoError(t, err)
    idx, err := helpers.BeaconProposerIndexAtSlot(t.Context(), st, 96)
    require.NoError(t, err)
    require.Equal(t, primitives.ValidatorIndex(15), idx)
    idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 97)
    require.NoError(t, err)
    require.Equal(t, primitives.ValidatorIndex(16), idx)
    idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
    require.NoError(t, err)
    require.Equal(t, primitives.ValidatorIndex(42), idx)
    _, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
    require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
    _, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
    require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}
@@ -7,6 +7,7 @@ go_library(
    "info.go",
    "metrics.go",
    "p2p_interface.go",
    "peer_sampling.go",
    "reconstruction.go",
    "validator.go",
    "verification.go",

@@ -44,6 +45,7 @@ go_test(
    "das_core_test.go",
    "info_test.go",
    "p2p_interface_test.go",
    "peer_sampling_test.go",
    "reconstruction_test.go",
    "utils_test.go",
    "validator_test.go",
beacon-chain/core/peerdas/peer_sampling.go (new file, 56 lines)

@@ -0,0 +1,56 @@
package peerdas

import (
    "math/big"

    "github.com/OffchainLabs/prysm/v6/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
    // Retrieve the columns count
    columnsCount := params.BeaconConfig().NumberOfColumns

    // If half of the columns are missing, we are able to reconstruct the data.
    // If half of the columns + 1 are missing, we are not able to reconstruct the data.
    // This is the smallest worst case.
    worstCaseMissing := columnsCount/2 + 1

    // Compute the false positive threshold.
    falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

    var sampleCount uint64

    // Finally, compute the extended sample count.
    for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
        if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
            break
        }
    }

    return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
    denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
    denominator := new(big.Float).SetInt(denominatorInt)

    rBig := big.NewFloat(0)

    for i := uint64(0); i < k+1; i++ {
        a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
        b := new(big.Int).Binomial(int64(M-n), int64(N-i))
        numeratorInt := new(big.Int).Mul(a, b)
        numerator := new(big.Float).SetInt(numeratorInt)
        item := new(big.Float).Quo(numerator, denominator)
        rBig.Add(rBig, item)
    }

    r, _ := rBig.Float64()

    return r
}
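For reference, HypergeomCDF above evaluates the standard hypergeometric cumulative distribution function with population size M, n marked items (the worst-case number of missing columns), and sample size N:

$$P(X \le k) \;=\; \sum_{i=0}^{k} \frac{\binom{n}{i}\,\binom{M-n}{N-i}}{\binom{M}{N}}$$

ExtendedSampleCount then grows the sample size until the probability of observing at most allowedFailures missing columns under that worst case drops to the false-positive level of the baseline samplesPerSlot-sample query.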
beacon-chain/core/peerdas/peer_sampling_test.go (new file, 60 lines)

@@ -0,0 +1,60 @@
package peerdas_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestExtendedSampleCount(t *testing.T) {
    const samplesPerSlot = 16

    testCases := []struct {
        name                string
        allowedMissings     uint64
        extendedSampleCount uint64
    }{
        {name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
        {name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
        {name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
        {name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
        {name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
        {name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
        {name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
        {name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
        {name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
        {name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
        {name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
        {name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
        {name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
        {name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
        {name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
        {name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
        {name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
        {name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
        {name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
        {name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
        {name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
            require.Equal(t, tc.extendedSampleCount, result)
        })
    }
}

func TestHypergeomCDF(t *testing.T) {
    // Test case from https://en.wikipedia.org/wiki/Hypergeometric_distribution
    // Population size: 1000, number of successes in population: 500, sample size: 10, number of successes in sample: 5
    // Expected result: 0.072
    const (
        expected = 0.0796665913283742
        margin   = 0.000001
    )

    actual := peerdas.HypergeomCDF(5, 128, 65, 16)
    require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}
@@ -44,7 +44,7 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
        validatorsIndex[primitives.ValidatorIndex(i)] = true
    }

    beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateElectra{Validators: validators})
    beaconState, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateFulu{Validators: validators})
    require.NoError(t, err)

    actual, err := peerdas.ValidatorsCustodyRequirement(beaconState, validatorsIndex)
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
    return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
    return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
@@ -303,7 +303,11 @@ func ProcessSlotsCore(ctx context.Context, span trace.Span, state state.BeaconSt
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
    var err error
    if time.CanProcessEpoch(state) {
        if state.Version() >= version.Electra {
        if state.Version() >= version.Fulu {
            if err = fulu.ProcessEpoch(ctx, state); err != nil {
                return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
            }
        } else if state.Version() >= version.Electra {
            if err = electra.ProcessEpoch(ctx, state); err != nil {
                return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
            }

@@ -377,7 +381,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
    }

    if time.CanUpgradeToFulu(slot) {
        state, err = fulu.UpgradeToFulu(state)
        state, err = fulu.UpgradeToFulu(ctx, state)
        if err != nil {
            tracing.AnnotateError(span, err)
            return nil, err
@@ -38,9 +38,9 @@ func TestPersist(t *testing.T) {
    t.Run("mixed roots", func(t *testing.T) {
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

        dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]util.DataColumnParams{
            {1}: {{ColumnIndex: 1}},
            {2}: {{ColumnIndex: 2}},
        dataColumnParamsByBlockRoot := []util.DataColumnParam{
            {Slot: 1, Index: 1},
            {Slot: 2, Index: 2},
        }

        roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)

@@ -54,8 +54,8 @@ func TestPersist(t *testing.T) {
    t.Run("outside DA period", func(t *testing.T) {
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

        dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]util.DataColumnParams{
            {1}: {{ColumnIndex: 1}},
        dataColumnParamsByBlockRoot := []util.DataColumnParam{
            {Slot: 1, Index: 1},
        }

        roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)

@@ -67,21 +67,24 @@ func TestPersist(t *testing.T) {
    })

    t.Run("nominal", func(t *testing.T) {
        const slot = 42
        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

        dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]util.DataColumnParams{
            {}: {{ColumnIndex: 1}, {ColumnIndex: 5}},
        dataColumnParamsByBlockRoot := []util.DataColumnParam{
            {Slot: slot, Index: 1},
            {Slot: slot, Index: 5},
        }

        roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

        err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
        err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
        require.NoError(t, err)
        require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))

        key := cacheKey{slot: 0, root: [fieldparams.RootLength]byte{}}
        entry := lazilyPersistentStoreColumns.cache.entries[key]
        key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
        entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
        require.Equal(t, true, ok)

        // A call to Persist does NOT save the sidecars to disk.
        require.Equal(t, uint64(0), entry.diskSummary.Count())

@@ -121,24 +124,37 @@ func TestIsDataAvailable(t *testing.T) {
    signedBeaconBlockFulu := util.NewBeaconBlockFulu()
    signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
    signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
    block := signedRoBlock.Block()
    slot := block.Slot()
    proposerIndex := block.ProposerIndex()
    parentRoot := block.ParentRoot()
    stateRoot := block.StateRoot()
    bodyRoot, err := block.Body().HashTreeRoot()
    require.NoError(t, err)

    root := signedRoBlock.Root()

    dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
    lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

    indices := [...]uint64{1, 17, 87, 102}
    dataColumnsParams := make([]util.DataColumnParams, 0, len(indices))
    dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
    for _, index := range indices {
        dataColumnParams := util.DataColumnParams{
            ColumnIndex: index,
        dataColumnParams := util.DataColumnParam{
            Index:          index,
            KzgCommitments: commitments,

            Slot:          slot,
            ProposerIndex: proposerIndex,
            ParentRoot:    parentRoot[:],
            StateRoot:     stateRoot[:],
            BodyRoot:      bodyRoot[:],
        }

        dataColumnsParams = append(dataColumnsParams, dataColumnParams)
    }

    dataColumnsParamsByBlockRoot := util.DataColumnsParamsByRoot{root: dataColumnsParams}
    _, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParamsByBlockRoot)
    _, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)

    key := cacheKey{root: root}
    entry := lazilyPersistentStoreColumns.cache.ensure(key)

@@ -149,7 +165,7 @@ func TestIsDataAvailable(t *testing.T) {
        require.NoError(t, err)
    }

    err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
    err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
    require.NoError(t, err)

    actual, err := dataColumnStorage.Get(root, indices[:])

@@ -224,8 +240,8 @@ func TestFullCommitmentsToCheck(t *testing.T) {
    }
}

func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, dataColumnParamsByBlockRoot util.DataColumnsParamsByRoot) ([]blocks.ROSidecar, []blocks.RODataColumn) {
    roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
    roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)

    roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
    for _, roDataColumn := range roDataColumns {
@@ -28,8 +28,7 @@ func TestEnsureDeleteSetDiskSummary(t *testing.T) {

func TestStash(t *testing.T) {
    t.Run("Index too high", func(t *testing.T) {
        dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 10_000}}}
        roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
        roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})

        var entry dataColumnCacheEntry
        err := entry.stash(&roDataColumns[0])

@@ -37,8 +36,7 @@ func TestStash(t *testing.T) {
    })

    t.Run("Nominal and already existing", func(t *testing.T) {
        dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 1}}}
        roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
        roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})

        var entry dataColumnCacheEntry
        err := entry.stash(&roDataColumns[0])

@@ -76,36 +74,30 @@ func TestFilterDataColumns(t *testing.T) {
    })

    t.Run("Commitments not equal", func(t *testing.T) {
        root := [fieldparams.RootLength]byte{}
        commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}

        dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{root: {{ColumnIndex: 1}}}
        roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
        roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})

        var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
        scs[1] = &roDataColumns[0]

        dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}

        _, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
        _, err := dataColumnCacheEntry.filter(roDataColumns[0].BlockRoot(), &commitmentsArray)
        require.NotNil(t, err)
    })

    t.Run("Nominal", func(t *testing.T) {
        root := [fieldparams.RootLength]byte{}
        commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}

        diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})

        dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{root: {{ColumnIndex: 3, KzgCommitments: [][]byte{[]byte{3}}}}}
        expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
        expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 3, KzgCommitments: [][]byte{[]byte{3}}}})

        var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
        scs[3] = &expected[0]

        dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}

        actual, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
        actual, err := dataColumnCacheEntry.filter(expected[0].BlockRoot(), &commitmentsArray)
        require.NoError(t, err)

        require.DeepEqual(t, expected, actual)
@@ -59,6 +59,7 @@ go_test(
    "//beacon-chain/verification:go_default_library",
    "//config/fieldparams:go_default_library",
    "//config/params:go_default_library",
    "//consensus-types/blocks:go_default_library",
    "//consensus-types/primitives:go_default_library",
    "//encoding/bytesutil:go_default_library",
    "//proto/prysm/v1alpha1:go_default_library",

@@ -8,6 +8,7 @@ import (
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
@@ -41,32 +42,18 @@ func TestWarmCache(t *testing.T) {
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 128_034, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4001
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 131_138, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{6}: {
|
||||
{Slot: 131_168, ColumnIndex: 0, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 33, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 1
|
||||
{Slot: 128_002, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 3, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_034, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4001
|
||||
{Slot: 131_138, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
{Slot: 131_138, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
{Slot: 131_168, Index: 0, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
},
)
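The epoch and period annotations in these fixtures follow directly from slot arithmetic. A minimal sketch of that arithmetic, assuming mainnet-style values of 32 slots per epoch and 4096-epoch storage periods (assumptions inferred from the comments above, not read from the Prysm config):

package main

import "fmt"

const (
	slotsPerEpoch   = 32   // assumption: mainnet SLOTS_PER_EPOCH
	epochsPerPeriod = 4096 // assumption: epochs per data column storage period
)

// epochAndPeriod maps a slot to the epoch and storage period used in the fixture comments.
func epochAndPeriod(slot uint64) (epoch, period uint64) {
	epoch = slot / slotsPerEpoch
	period = epoch / epochsPerPeriod
	return epoch, period
}

func main() {
	for _, slot := range []uint64{33, 128_002, 131_138, 262_144} {
		epoch, period := epochAndPeriod(slot)
		fmt.Printf("slot %d -> epoch %d, period %d\n", slot, epoch, period)
	}
	// Prints: slot 33 -> epoch 1, period 0; ...; slot 262144 -> epoch 8192, period 2.
}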
@@ -77,29 +64,25 @@ func TestWarmCache(t *testing.T) {
|
||||
|
||||
storage.WarmCache()
|
||||
require.Equal(t, primitives.Epoch(4_000), storage.cache.lowestCachedEpoch)
|
||||
require.Equal(t, 6, len(storage.cache.cache))
|
||||
require.Equal(t, 5, len(storage.cache.cache))
|
||||
|
||||
summary, ok := storage.cache.get([fieldparams.RootLength]byte{1})
|
||||
summary, ok := storage.cache.get(verifiedRoDataColumnSidecars[2].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{2})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[4].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{3})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[6].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_001, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{4})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[8].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, false, true}}, summary)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{5})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{6})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[10].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_099, mask: [fieldparams.NumberOfColumns]bool{true}}, summary)
|
||||
}
|
||||
@@ -113,9 +96,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}},
|
||||
},
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -126,7 +107,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("one of the column index is too large", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}}},
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -137,23 +118,34 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("different slots", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{Slot: 1, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 2, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Create a sidecar with a different slot but the same root.
|
||||
alteredVerifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, 2)
|
||||
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedRoDataColumnSidecars[0])
|
||||
|
||||
altered, err := blocks.NewRODataColumnWithRoot(
|
||||
verifiedRoDataColumnSidecars[1].RODataColumn.DataColumnSidecar,
|
||||
verifiedRoDataColumnSidecars[0].BlockRoot(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedAltered := blocks.NewVerifiedRODataColumn(altered)
|
||||
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedAltered)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
err = dataColumnStorage.Save(alteredVerifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errDataColumnSidecarsFromDifferentSlots)
|
||||
})
|
||||
|
||||
t.Run("new file - no data columns to save", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{}: {}},
|
||||
[]util.DataColumnParam{},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -164,11 +156,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("new file - different data column size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{1, 2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -180,7 +170,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("existing file - wrong incoming SSZ encoded size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Save data columns into a file.
|
||||
@@ -192,7 +184,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// column index and a different SSZ encoded size.
|
||||
_, verifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 13, DataColumn: []byte{1, 2, 3, 4}}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Try to rewrite the file.
|
||||
@@ -203,17 +197,13 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
{2}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 11, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}}, // OK if duplicate
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -223,16 +213,12 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
_, inputVerifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{3}: {
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}}, // OK if duplicate
|
||||
{Slot: 1, Index: 15, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 1, Index: 1, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 3, Index: 6, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 3, Index: 2, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -241,51 +227,47 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
type fixture struct {
|
||||
fileName string
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
expectedIndices [mandatoryNumberOfColumns]byte
|
||||
dataColumnParams []util.DataColumnParams
|
||||
dataColumnParams []util.DataColumnParam
|
||||
}
|
||||
|
||||
fixtures := []fixture{
|
||||
{
|
||||
fileName: "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{1},
|
||||
fileName: "0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, nonZeroOffset + 4, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, nonZeroOffset + 1, nonZeroOffset, nonZeroOffset + 2, 0, nonZeroOffset + 3,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 11, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
{Slot: 1, Index: 15, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 1, Index: 1, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{2},
|
||||
fileName: "0/0/0x221f88cae2219050d4e9d8c2d0d83cb4c8ce4c84ab1bb3e0b89f3dec36077c4f.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, nonZeroOffset, nonZeroOffset + 1, 0, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0300000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{3},
|
||||
fileName: "0/0/0x7b163bd57e1c4c8b5048c5389698098f4c957d62d7ce86f4ffa9bdc75c16a18b.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, nonZeroOffset + 1, 0, 0, 0, nonZeroOffset, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
dataColumnParams: []util.DataColumnParam{
{Slot: 3, Index: 6, Column: [][]byte{{3}, {4}, {5}}},
{Slot: 3, Index: 2, Column: [][]byte{{6}, {7}, {8}}},
},
},
}
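The expected file names above are no longer the zero-padded roots of the old fixtures but the roots of the generated test blocks, laid out as <period>/<epoch>/0x<block root>.sszs. A hedged sketch of that path derivation; the helper below is illustrative only and is not the storage package's actual API:

package sketch

import "fmt"

// sidecarFilePath reproduces the directory layout visible in these fixtures and in the
// listDir assertions further down: <period>/<epoch>/0x<block root>.sszs.
func sidecarFilePath(epoch uint64, blockRoot [32]byte) string {
	const epochsPerPeriod = 4096 // assumption, consistent with the 0/4000, 1/4098, 2/8192 directories below
	period := epoch / epochsPerPeriod
	return fmt.Sprintf("%d/%d/%#x.sszs", period, epoch, blockRoot)
}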
@@ -294,7 +276,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// Build expected data column sidecars.
|
||||
_, expectedDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{fixture.blockRoot: fixture.dataColumnParams},
|
||||
fixture.dataColumnParams,
|
||||
)
|
||||
|
||||
// Build expected bytes.
|
||||
@@ -321,6 +303,8 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
expectedBytes = append(expectedBytes, fixture.expectedIndices[:]...)
|
||||
expectedBytes = append(expectedBytes, sszEncodedDataColumnSidecars...)
|
||||
|
||||
blockRoot := expectedDataColumnSidecars[0].BlockRoot()
|
||||
|
||||
// Check the actual content of the file.
|
||||
actualBytes, err := afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.NoError(t, err)
|
||||
@@ -329,18 +313,18 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// Check the summary.
|
||||
indices := map[uint64]bool{}
|
||||
for _, dataColumnParam := range fixture.dataColumnParams {
|
||||
indices[dataColumnParam.ColumnIndex] = true
|
||||
indices[dataColumnParam.Index] = true
|
||||
}
|
||||
|
||||
summary := dataColumnStorage.Summary(fixture.blockRoot)
|
||||
summary := dataColumnStorage.Summary(blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, indices[index], summary.HasIndex(index))
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Remove(fixture.blockRoot)
|
||||
err = dataColumnStorage.Remove(blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary = dataColumnStorage.Summary(fixture.blockRoot)
|
||||
summary = dataColumnStorage.Summary(blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, false, summary.HasIndex(index))
|
||||
}
|
||||
@@ -363,11 +347,9 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("indices not found", func(t *testing.T) {
|
||||
_, savedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Index: 14, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -375,7 +357,7 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
err := dataColumnStorage.Save(savedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{3, 1, 2})
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(savedVerifiedRoDataColumnSidecars[0].BlockRoot(), []uint64{3, 1, 2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
|
||||
})
|
||||
@@ -383,11 +365,9 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, expectedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Index: 14, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -395,11 +375,13 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
err := dataColumnStorage.Save(expectedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
root := expectedVerifiedRoDataColumnSidecars[0].BlockRoot()
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(root, nil)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get(root, []uint64{12, 13, 14})
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
})
|
||||
@@ -415,15 +397,11 @@ func TestRemove(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{Slot: 32, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 32, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 33, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 33, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 32, Index: 10, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 32, Index: 11, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 33, Index: 10, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 33, Index: 11, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -431,22 +409,22 @@ func TestRemove(t *testing.T) {
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
|
||||
err = dataColumnStorage.Remove(inputVerifiedRoDataColumnSidecars[0].BlockRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
|
||||
summary := dataColumnStorage.Summary(inputVerifiedRoDataColumnSidecars[0].BlockRoot())
|
||||
require.Equal(t, primitives.Epoch(0), summary.epoch)
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
|
||||
summary = dataColumnStorage.Summary([fieldparams.RootLength]byte{2})
|
||||
summary = dataColumnStorage.Summary(inputVerifiedRoDataColumnSidecars[3].BlockRoot())
|
||||
require.Equal(t, primitives.Epoch(1), summary.epoch)
|
||||
require.Equal(t, uint64(2), summary.Count())
|
||||
|
||||
actual, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
actual, err := dataColumnStorage.Get(inputVerifiedRoDataColumnSidecars[0].BlockRoot(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
|
||||
actual, err = dataColumnStorage.Get([fieldparams.RootLength]byte{2}, nil)
|
||||
actual, err = dataColumnStorage.Get(inputVerifiedRoDataColumnSidecars[3].BlockRoot(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(actual))
|
||||
})
|
||||
@@ -455,9 +433,9 @@ func TestRemove(t *testing.T) {
|
||||
func TestClear(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
{2}: {{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -466,8 +444,8 @@ func TestClear(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
filePaths := []string{
|
||||
"0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs",
|
||||
"0/0/0x221f88cae2219050d4e9d8c2d0d83cb4c8ce4c84ab1bb3e0b89f3dec36077c4f.sszs",
|
||||
}
|
||||
|
||||
for _, filePath := range filePaths {
|
||||
@@ -493,8 +471,8 @@ func TestMetadata(t *testing.T) {
|
||||
t.Run("wrong version", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -504,7 +482,7 @@ func TestMetadata(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Alter the version.
|
||||
const filePath = "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs"
|
||||
const filePath = "0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
|
||||
file, err := dataColumnStorage.fs.OpenFile(filePath, os.O_WRONLY, os.FileMode(0600))
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -644,31 +622,19 @@ func TestPrune(t *testing.T) {
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_169, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 262_144, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 33, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 1
|
||||
{Slot: 128_002, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 3, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 131_138, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_169, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
{Slot: 262_144, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 2 - Epoch 8192
|
||||
},
|
||||
)
|
||||
|
||||
@@ -697,31 +663,31 @@ func TestPrune(t *testing.T) {
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/1")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0000000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x775283f428813c949b7e8af07f01fef9790137f021b3597ad2d0d81e8be8f0f0.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/4000")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{
|
||||
"0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x9977031132157ebb9c81bce952003ce07a4f54e921ca63b7693d1562483fdf9f.sszs",
|
||||
"0xb2b14d9d858fa99b70f0405e4e39f38e51e36dd9a70343c109e24eeb5f77e369.sszs",
|
||||
}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0300000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x5106745cdd6b1aa3602ef4d000ef373af672019264c167fa4bd641a1094aa5c5.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x4e5f2bd5bb84bf0422af8edd1cc5a52cc6cea85baf3d66d172fe41831ac1239c.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0xa8adba7446eb56a01a9dd6d55e9c3990b10c91d43afb77847b4a21ac4ee62527.sszs"}, dirs))
|
||||
|
||||
_, verifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{6}: {{Slot: 451_141, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}}, // Period 3 - Epoch 14_098
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 451_141, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 3 - Epoch 14_098
|
||||
},
|
||||
)
|
||||
|
||||
@@ -749,14 +715,14 @@ func TestPrune(t *testing.T) {
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x4e5f2bd5bb84bf0422af8edd1cc5a52cc6cea85baf3d66d172fe41831ac1239c.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0xa8adba7446eb56a01a9dd6d55e9c3990b10c91d43afb77847b4a21ac4ee62527.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "3/14098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0600000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x0de28a18cae63cbc6f0b20dc1afb0b1df38da40824a5f09f92d485ade04de97f.sszs"}, dirs))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -518,7 +518,7 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
|
||||
switch {
|
||||
case hasFuluKey(enc):
|
||||
protoState := ðpb.BeaconStateElectra{}
|
||||
protoState := ðpb.BeaconStateFulu{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Fulu")
|
||||
}
|
||||
@@ -690,7 +690,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
}
|
||||
return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
|
||||
case version.Fulu:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/execution/types:go_default_library",
|
||||
@@ -97,6 +98,7 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
@@ -44,11 +45,16 @@ var (
|
||||
GetPayloadMethodV3,
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
GetBlobsV1,
|
||||
}
|
||||
electraEngineEndpoints = []string{
|
||||
NewPayloadMethodV4,
|
||||
GetPayloadMethodV4,
|
||||
}
|
||||
fuluEngineEndpoints = []string{
|
||||
GetPayloadMethodV5,
|
||||
GetBlobsV2,
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -73,6 +79,8 @@ const (
|
||||
GetPayloadMethodV3 = "engine_getPayloadV3"
|
||||
// GetPayloadMethodV4 is the get payload method added for electra
|
||||
GetPayloadMethodV4 = "engine_getPayloadV4"
|
||||
// GetPayloadMethodV5 is the get payload method added for fulu
|
||||
GetPayloadMethodV5 = "engine_getPayloadV5"
|
||||
// BlockByHashMethod request string for JSON-RPC.
|
||||
BlockByHashMethod = "eth_getBlockByHash"
|
||||
// BlockByNumberMethod request string for JSON-RPC.
|
||||
@@ -85,11 +93,21 @@ const (
|
||||
ExchangeCapabilities = "engine_exchangeCapabilities"
|
||||
// GetBlobsV1 request string for JSON-RPC.
|
||||
GetBlobsV1 = "engine_getBlobsV1"
|
||||
// GetBlobsV2 request string for JSON-RPC.
|
||||
GetBlobsV2 = "engine_getBlobsV2"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
// TODO: Remove this temporarily needed hack. Geth currently accepts blob transactions with blob proofs and
// does the heavy lifting of building the cell proofs, while normally this is done by the tx sender.
// This is convenient because it lets the CL act as if the tx sender had actually computed the cell proofs.
// The only downside is that `engine_getPayloadV<x>` takes a lot of time.
// defaultEngineTimeout = time.Second
defaultEngineTimeout = 2 * time.Second
)
|
||||
|
||||
var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
var (
|
||||
errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
|
||||
errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
|
||||
)
|
||||
|
||||
// ForkchoiceUpdatedResponse is the response kind received by the
|
||||
// engine_forkchoiceUpdatedV1 endpoint.
|
||||
@@ -108,6 +126,7 @@ type Reconstructor interface {
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -257,6 +276,9 @@ func (s *Service) ForkchoiceUpdated(
|
||||
|
||||
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
|
||||
pe := slots.ToEpoch(slot)
|
||||
if pe >= params.BeaconConfig().FuluForkEpoch {
|
||||
return GetPayloadMethodV5, &pb.ExecutionBundleFulu{}
|
||||
}
|
||||
if pe >= params.BeaconConfig().ElectraForkEpoch {
|
||||
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
|
||||
}
|
||||
@@ -289,7 +311,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
|
||||
}
|
||||
res, err := blocks.NewGetPayloadResponse(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "new get payload response")
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
@@ -298,33 +320,36 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
|
||||
defer span.End()
|
||||
|
||||
// Only check for electra related engine methods if it has been activated.
|
||||
if params.ElectraEnabled() {
|
||||
supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
|
||||
}
|
||||
var result []string
|
||||
err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
|
||||
if err != nil {
|
||||
|
||||
if params.FuluEnabled() {
|
||||
supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
|
||||
}
|
||||
|
||||
elSupportedEndpointsSlice := make([]string, len(supportedEngineEndpoints))
|
||||
if err := s.rpcClient.CallContext(ctx, &elSupportedEndpointsSlice, ExchangeCapabilities, supportedEngineEndpoints); err != nil {
|
||||
return nil, handleRPCError(err)
|
||||
}
|
||||
|
||||
var unsupported []string
|
||||
for _, s1 := range supportedEngineEndpoints {
|
||||
supported := false
|
||||
for _, s2 := range result {
|
||||
if s1 == s2 {
|
||||
supported = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !supported {
|
||||
unsupported = append(unsupported, s1)
|
||||
elSupportedEndpoints := make(map[string]bool, len(elSupportedEndpointsSlice))
|
||||
for _, method := range elSupportedEndpointsSlice {
|
||||
elSupportedEndpoints[method] = true
|
||||
}
|
||||
|
||||
unsupported := make([]string, 0, len(supportedEngineEndpoints))
|
||||
for _, method := range supportedEngineEndpoints {
|
||||
if !elSupportedEndpoints[method] {
|
||||
unsupported = append(unsupported, method)
|
||||
}
|
||||
}
|
||||
|
||||
if len(unsupported) != 0 {
|
||||
log.Warnf("Please update client, detected the following unsupported engine methods: %s", unsupported)
|
||||
log.WithField("methods", unsupported).Warning("Connected execution client does not support some requested engine methods")
|
||||
}
|
||||
return result, handleRPCError(err)
|
||||
|
||||
return elSupportedEndpointsSlice, nil
}
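The rewrite above swaps a quadratic nested loop for a map-backed set lookup when computing which requested engine methods the EL does not advertise. A standalone sketch of the same idea; the helper name is illustrative and does not exist in the codebase:

package sketch

// missingMethods returns the entries of requested that do not appear in supported.
func missingMethods(requested, supported []string) []string {
	supportedSet := make(map[string]bool, len(supported))
	for _, method := range supported {
		supportedSet[method] = true
	}
	missing := make([]string, 0, len(requested))
	for _, method := range requested {
		if !supportedSet[method] {
			missing = append(missing, method)
		}
	}
	return missing
}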
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
|
||||
@@ -495,9 +520,10 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
|
||||
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
|
||||
defer span.End()
|
||||
|
||||
// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
|
||||
if !s.capabilityCache.has(GetBlobsV1) {
|
||||
return nil, nil
|
||||
return nil, fmt.Errorf("%s is not supported", GetBlobsV1)
|
||||
}
|
||||
|
||||
result := make([]*pb.BlobAndProof, len(versionedHashes))
|
||||
@@ -505,6 +531,19 @@ func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) (
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()
if !s.capabilityCache.has(GetBlobsV2) {
return nil, fmt.Errorf("%s is not supported", GetBlobsV2)
}
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
return result, handleRPCError(err)
}
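A short usage sketch for the new engine_getBlobsV2 wrapper; it mirrors what ReconstructDataColumnSidecars below does by converting blob KZG commitments into versioned hashes before the call. The function name and parameters are illustrative placeholders, not part of the actual API:

// fetchBlobsAndCellProofs is a usage sketch only: svc is the engine Service shown above.
func fetchBlobsAndCellProofs(ctx context.Context, svc *Service, kzgCommitments [][]byte) ([]*pb.BlobAndProofV2, error) {
	versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
	for _, commitment := range kzgCommitments {
		versionedHashes = append(versionedHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
	}
	// Fails if the connected EL does not advertise engine_getBlobsV2.
	return svc.GetBlobsV2(ctx, versionedHashes)
}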
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBlock(
|
||||
@@ -615,6 +654,73 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
|
||||
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
|
||||
// and constructs the corresponding verified read-only data column sidecars.
|
||||
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
block := signedROBlock.Block()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"slot": block.Slot(),
|
||||
})
|
||||
|
||||
kzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Collect the versioned hashes for all blob KZG commitments.
|
||||
var kzgHashes []common.Hash
|
||||
for _, commitment := range kzgCommitments {
|
||||
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
|
||||
}
|
||||
|
||||
// Fetch all blobs and cell proofs from the EL.
|
||||
blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(blobAndProofV2s) == 0 {
|
||||
log.Debug("No blobs returned from EL")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Extract the blobs and proofs from the blobAndProofV2s.
|
||||
blobs := make([][]byte, 0, len(blobAndProofV2s))
|
||||
cellProofs := make([][]byte, 0, len(blobAndProofV2s))
|
||||
for _, blobsAndProofs := range blobAndProofV2s {
|
||||
if blobsAndProofs == nil {
|
||||
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
|
||||
}
|
||||
blobs = append(blobs, blobsAndProofs.Blob)
|
||||
cellProofs = append(cellProofs, blobsAndProofs.KzgProofs...)
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
|
||||
}
|
||||
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
log.Debug("Data columns successfully reconstructed from EL")
|
||||
|
||||
return verifiedRODataColumns, nil
}
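A hedged sketch of how a caller might combine this reconstructor with the data column storage introduced elsewhere in this change set; the function, its parameters, and the error handling are illustrative, not the actual wiring:

// reconstructAndStore reconstructs data column sidecars from the EL and persists them.
// Illustrative only: it assumes a Reconstructor (such as the Service above) and the
// filesystem.DataColumnStorage added to the beacon node in this change set.
func reconstructAndStore(
	ctx context.Context,
	engine Reconstructor,
	store *filesystem.DataColumnStorage,
	block interfaces.ReadOnlySignedBeaconBlock,
	blockRoot [32]byte,
) error {
	sidecars, err := engine.ReconstructDataColumnSidecars(ctx, block, blockRoot)
	if err != nil {
		return errors.Wrap(err, "reconstruct data column sidecars")
	}
	if len(sidecars) == 0 {
		// The EL returned nothing for this block; rely on gossip or req/resp recovery instead.
		return nil
	}
	if err := store.Save(sidecars); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}
	return nil
}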
func fullPayloadFromPayloadBody(
|
||||
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
@@ -902,3 +1008,8 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
// wrapWithBlockRoot wraps the given error with a message that includes the block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -167,6 +168,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
@@ -317,11 +319,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
|
||||
})
|
||||
t.Run(GetPayloadMethodV4, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
@@ -372,11 +374,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
|
||||
requests := &pb.ExecutionRequests{
|
||||
Deposits: []*pb.DepositRequest{
|
||||
{
|
||||
@@ -405,7 +407,52 @@ func TestClient_HTTP(t *testing.T) {
|
||||
|
||||
require.DeepEqual(t, requests, resp.ExecutionRequests)
|
||||
})
|
||||
t.Run(GetPayloadMethodV5, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
want, ok := fix["ExecutionBundleFulu"].(*pb.GetPayloadV5ResponseJson)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON-RPC request string to contain the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.GetPayload(ctx, payloadId, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
_, ok = resp.BlobsBundler.(*pb.BlobsBundleV2)
|
||||
if !ok {
|
||||
t.Logf("resp.BlobsBundler has unexpected type: %T", resp.BlobsBundler)
|
||||
}
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
@@ -1539,6 +1586,7 @@ func fixtures() map[string]interface{} {
|
||||
"ExecutionPayloadCapellaWithValue": s.ExecutionPayloadWithValueCapella,
|
||||
"ExecutionPayloadDenebWithValue": s.ExecutionPayloadWithValueDeneb,
|
||||
"ExecutionBundleElectra": s.ExecutionBundleElectra,
|
||||
"ExecutionBundleFulu": s.ExecutionBundleFulu,
|
||||
"ValidPayloadStatus": s.ValidPayloadStatus,
|
||||
"InvalidBlockHashStatus": s.InvalidBlockHashStatus,
|
||||
"AcceptedStatus": s.AcceptedStatus,
|
||||
@@ -1774,6 +1822,36 @@ func fixturesStruct() *payloadFixtures {
|
||||
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
|
||||
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
|
||||
}
|
||||
executionBundleFixtureFulu := &pb.GetPayloadV5ResponseJson{
|
||||
ShouldOverrideBuilder: true,
|
||||
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
|
||||
ParentHash: &common.Hash{'a'},
|
||||
FeeRecipient: &common.Address{'b'},
|
||||
StateRoot: &common.Hash{'c'},
|
||||
ReceiptsRoot: &common.Hash{'d'},
|
||||
LogsBloom: &hexutil.Bytes{'e'},
|
||||
PrevRandao: &common.Hash{'f'},
|
||||
BaseFeePerGas: "0x123",
|
||||
BlockHash: &common.Hash{'g'},
|
||||
Transactions: []hexutil.Bytes{{'h'}},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
BlockNumber: &hexUint,
|
||||
GasLimit: &hexUint,
|
||||
GasUsed: &hexUint,
|
||||
Timestamp: &hexUint,
|
||||
BlobGasUsed: &bgu,
|
||||
ExcessBlobGas: &ebg,
|
||||
},
|
||||
BlockValue: "0x11fffffffff",
|
||||
BlobsBundle: &pb.BlobBundleV2JSON{
|
||||
Commitments: []hexutil.Bytes{[]byte("commitment1"), []byte("commitment2")},
|
||||
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
|
||||
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
|
||||
},
|
||||
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
|
||||
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
|
||||
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
|
||||
}
|
||||
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
|
||||
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
|
||||
@@ -1868,6 +1946,7 @@ func fixturesStruct() *payloadFixtures {
|
||||
ExecutionPayloadWithValueCapella: executionPayloadWithValueFixtureCapella,
|
||||
ExecutionPayloadWithValueDeneb: executionPayloadWithValueFixtureDeneb,
|
||||
ExecutionBundleElectra: executionBundleFixtureElectra,
|
||||
ExecutionBundleFulu: executionBundleFixtureFulu,
|
||||
ValidPayloadStatus: validStatus,
|
||||
InvalidBlockHashStatus: inValidBlockHashStatus,
|
||||
AcceptedStatus: acceptedStatus,
|
||||
@@ -1892,6 +1971,7 @@ type payloadFixtures struct {
|
||||
ExecutionPayloadWithValueCapella *pb.GetPayloadV2ResponseJson
|
||||
ExecutionPayloadWithValueDeneb *pb.GetPayloadV3ResponseJson
|
||||
ExecutionBundleElectra *pb.GetPayloadV4ResponseJson
|
||||
ExecutionBundleFulu *pb.GetPayloadV5ResponseJson
|
||||
ValidPayloadStatus *pb.PayloadStatus
|
||||
InvalidBlockHashStatus *pb.PayloadStatus
|
||||
AcceptedStatus *pb.PayloadStatus
|
||||
@@ -2361,7 +2441,7 @@ func Test_ExchangeCapabilities(t *testing.T) {
|
||||
for _, item := range results {
|
||||
require.NotNil(t, item)
|
||||
}
|
||||
assert.LogsContain(t, logHook, "Please update client, detected the following unsupported engine methods:")
|
||||
assert.LogsContain(t, logHook, "Connected execution client does not support some requested engine methods")
|
||||
})
|
||||
t.Run("list of items", func(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -2424,7 +2504,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
t.Run("get-blobs end point is not supported", func(t *testing.T) {
|
||||
hi := mockSummary(t, []bool{true, true, true, true, true, false})
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "engine_getBlobsV1 is not supported", err)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
@@ -2476,6 +2556,76 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up the right fork epochs.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
client := &Service{capabilityCache: &capabilityCache{}}
|
||||
b := util.NewBeaconBlockFulu()
|
||||
b.Block.Slot = 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
kzgCommitments := createRandomKzgCommitments(t, 6)
|
||||
b.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "get blobs V2 for block", err)
|
||||
})
|
||||
|
||||
t.Run("nothing received", func(t *testing.T) {
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("receiving all blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{true, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("missing some blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{false, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
kzgCommitments := make([][]byte, num)
|
||||
for i := range kzgCommitments {
|
||||
@@ -2511,6 +2661,42 @@ func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *http
|
||||
}))
|
||||
}
|
||||
|
||||
func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
require.Equal(t, len(blobMasks), numBlobs)
|
||||
|
||||
blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
|
||||
for i := range blobAndCellProofs {
|
||||
if !blobMasks[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
|
||||
Blob: []byte("0xblob"),
|
||||
KzgProofs: []hexutil.Bytes{},
|
||||
}
|
||||
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
|
||||
cellProof := make([]byte, 48)
|
||||
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
|
||||
}
|
||||
}
|
||||
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": blobAndCellProofs,
|
||||
}
|
||||
|
||||
err := json.NewEncoder(w).Encode(respJSON)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
}
|
||||
|
||||
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, err := rpc.DialHTTP(url)
|
||||
require.NoError(t, err)
|
||||
@@ -2522,6 +2708,12 @@ func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Se
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func setupRpcClientV2(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, client := setupRpcClient(t, url, client)
|
||||
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV2: nil}}
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return &verification.MockBlobVerifier{
|
||||
|
||||
@@ -38,6 +38,8 @@ type EngineClient struct {
|
||||
ErrGetPayload error
|
||||
BlobSidecars []blocks.VerifiedROBlob
|
||||
ErrorBlobSidecars error
|
||||
DataColumnSidecars []blocks.VerifiedRODataColumn
|
||||
ErrorDataColumnSidecars error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -113,6 +115,10 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
func (e *EngineClient) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
|
||||
@@ -87,45 +87,47 @@ type serviceFlagOpts struct {
|
||||
// full PoS node. It handles the lifecycle of the entire system and registers
|
||||
// services to a service registry.
|
||||
type BeaconNode struct {
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -193,6 +195,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BlobStorage = blobs
|
||||
}
|
||||
|
||||
if beacon.DataColumnStorage == nil {
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new data column storage")
|
||||
}
|
||||
|
||||
beacon.DataColumnStorage = dataColumnStorage
|
||||
}
|
||||
|
||||
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not start modules")
|
||||
@@ -285,6 +296,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
@@ -495,6 +507,10 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
|
||||
return nil, errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
if err := b.DataColumnStorage.Clear(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear data column storage")
|
||||
}
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new database")
|
||||
@@ -780,6 +796,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
blockchain.WithDataColumnStorage(b.DataColumnStorage),
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
@@ -868,8 +885,11 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
regularsync.WithCustodyInfo(b.custodyInfo),
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
regularsync.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -895,6 +915,8 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -989,6 +1011,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -1016,6 +1039,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
@@ -1157,6 +1181,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
// TODO: Add backfill for data column storage
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
|
||||
node.Close()
|
||||
@@ -75,7 +75,7 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
node.services = &runtime.ServiceRegistry{}
|
||||
go func() {
|
||||
@@ -99,7 +99,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
node.Start()
|
||||
@@ -130,7 +130,7 @@ func TestClearDB(t *testing.T) {
|
||||
context, cancel := newCliContextWithCancel(&app, set)
|
||||
options := []Option{
|
||||
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
|
||||
}
|
||||
_, err = New(context, cancel, options...)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -50,3 +50,20 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
|
||||
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.DataColumnStorage = bs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorageOptions appends one or more filesystem.DataColumnStorageOption to the beacon node,
|
||||
// to be used when initializing data column storage.
|
||||
func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.DataColumnStorageOptions = append(bn.DataColumnStorageOptions, opt...)
|
||||
return nil
|
||||
}
|
||||
}
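For illustration only (not part of this diff), the new option can be combined with the existing blob storage option when constructing a node, mirroring the node_test.go changes elsewhere in this change set; ctx, cancel, and t are assumed from that test file:

	// Hedged sketch: wiring both storage backends into node construction, as the updated tests do.
	node, err := New(ctx, cancel,
		WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
		WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
	)
	require.NoError(t, err)
	defer node.Close()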
|
||||
|
||||
@@ -712,7 +712,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet)
|
||||
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{{}: {{ColumnIndex: columnIndex}}})
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
|
||||
sidecar := roSidecars[0].DataColumnSidecar
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
|
||||
@@ -222,7 +222,6 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
// Get the sync subnet bitfield in our metadata.
|
||||
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
|
||||
|
||||
// Is our sync bitvector record up to date?
|
||||
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
|
||||
|
||||
// Compare current epoch with the Fulu fork epoch.
|
||||
|
||||
@@ -54,7 +54,7 @@ type PeerData struct {
|
||||
NextValidTime time.Time
|
||||
// Chain related data.
|
||||
MetaData metadata.Metadata
|
||||
ChainState *ethpb.Status
|
||||
ChainState *ethpb.StatusV2
|
||||
ChainStateLastUpdated time.Time
|
||||
ChainStateValidationError error
|
||||
// Scorers internal data.
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var _ Scorer = (*BadResponsesScorer)(nil)
|
||||
@@ -129,13 +128,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
|
||||
|
||||
// isBadPeerNoLock is lock-free version of IsBadPeer.
|
||||
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
if peerData.BadResponses >= s.config.Threshold {
|
||||
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
}
|
||||
// if peerData, ok := s.store.PeerData(pid); ok {
|
||||
// TODO: Remove this once out of devnet
|
||||
// if peerData.BadResponses >= s.config.Threshold {
|
||||
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
// return nil
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
@@ -13,39 +12,41 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
const pid = "peer1"
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
// const pid = "peer1"
|
||||
|
||||
ctx := t.Context()
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 4,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
|
||||
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
}
|
||||
// scorer.Increment(pid)
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
@@ -137,56 +138,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
|
||||
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
|
||||
}
|
||||
|
||||
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pid := peer.ID("peer1")
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pid := peer.ID("peer1")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pid)
|
||||
if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
} else {
|
||||
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
}
|
||||
}
|
||||
}
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pid)
|
||||
// if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// } else {
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
for i := 0; i < len(pids); i++ {
|
||||
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
}
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pids[1])
|
||||
scorer.Increment(pids[2])
|
||||
scorer.Increment(pids[4])
|
||||
}
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
badPeers := scorer.BadPeers()
|
||||
sort.Slice(badPeers, func(i, j int) bool {
|
||||
return badPeers[i] < badPeers[j]
|
||||
})
|
||||
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
}
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
// for i := 0; i < len(pids); i++ {
|
||||
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
// }
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pids[1])
|
||||
// scorer.Increment(pids[2])
|
||||
// scorer.Increment(pids[4])
|
||||
// }
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
// want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
// badPeers := scorer.BadPeers()
|
||||
// sort.Slice(badPeers, func(i, j int) bool {
|
||||
// return badPeers[i] < badPeers[j]
|
||||
// })
|
||||
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
// }
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -112,7 +112,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {
|
||||
}
|
||||
|
||||
// SetPeerStatus sets chain state data for a given peer.
|
||||
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
|
||||
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.StatusV2, validationError error) {
|
||||
s.store.Lock()
|
||||
defer s.store.Unlock()
|
||||
|
||||
@@ -130,14 +130,14 @@ func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, val
|
||||
// PeerStatus gets the chain state of the given remote peer.
|
||||
// This can return nil if there is no known chain state for the peer.
|
||||
// This will error if the peer does not exist.
|
||||
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
|
||||
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.StatusV2, error) {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
return s.peerStatusNoLock(pid)
|
||||
}
|
||||
|
||||
// peerStatusNoLock lock-free version of PeerStatus.
|
||||
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.Status, error) {
|
||||
func (s *PeerStatusScorer) peerStatusNoLock(pid peer.ID) (*pb.StatusV2, error) {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
if peerData.ChainState == nil {
|
||||
return nil, peerdata.ErrNoPeerStatus
|
||||
|
||||
@@ -36,7 +36,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent bad peer",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
@@ -49,7 +49,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent peer no head slot for the host node is known",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, nil)
|
||||
@@ -62,7 +62,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent peer head is before ours",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(128)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, nil)
|
||||
@@ -76,12 +76,12 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := primitives.Slot(128)
|
||||
scorer.SetHeadSlot(headSlot)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 64,
|
||||
}, nil)
|
||||
// Set another peer to a higher score.
|
||||
scorer.SetPeerStatus("peer2", &pb.Status{
|
||||
scorer.SetPeerStatus("peer2", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 128,
|
||||
}, nil)
|
||||
@@ -96,7 +96,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := primitives.Slot(128)
|
||||
scorer.SetHeadSlot(headSlot)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 64,
|
||||
}, nil)
|
||||
@@ -109,7 +109,7 @@ func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
name: "existent peer no max known slot",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
scorer.SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 0,
|
||||
}, nil)
|
||||
@@ -142,7 +142,7 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
|
||||
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
|
||||
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
|
||||
}
|
||||
@@ -161,9 +161,9 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
|
||||
assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.StatusV2{}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.StatusV2{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
|
||||
assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
|
||||
@@ -180,12 +180,12 @@ func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
|
||||
})
|
||||
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
|
||||
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
|
||||
assert.Equal(t, (*pb.Status)(nil), status)
|
||||
assert.Equal(t, (*pb.StatusV2)(nil), status)
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.StatusV2{
|
||||
HeadSlot: 128,
|
||||
}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.StatusV2{
|
||||
HeadSlot: 128,
|
||||
}, p2ptypes.ErrInvalidEpoch)
|
||||
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
|
||||
|
||||
@@ -211,99 +211,102 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestScorers_Service_loop(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_loop(t *testing.T) {
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
DecayInterval: 25 * time.Millisecond,
|
||||
Decay: 64,
|
||||
},
|
||||
},
|
||||
})
|
||||
s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 5,
|
||||
// DecayInterval: 50 * time.Millisecond,
|
||||
// },
|
||||
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
// DecayInterval: 25 * time.Millisecond,
|
||||
// Decay: 64,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
|
||||
pid1 := peer.ID("peer1")
|
||||
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
s1.Increment(pid1)
|
||||
}
|
||||
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
// pid1 := peer.ID("peer1")
|
||||
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
// for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
// s1.Increment(pid1)
|
||||
// }
|
||||
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
|
||||
s2.IncrementProcessedBlocks("peer1", 221)
|
||||
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
// s2.IncrementProcessedBlocks("peer1", 221)
|
||||
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Error("Timed out")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// done := make(chan struct{}, 1)
|
||||
// go func() {
|
||||
// defer func() {
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// ticker := time.NewTicker(50 * time.Millisecond)
|
||||
// defer ticker.Stop()
|
||||
// for {
|
||||
// select {
|
||||
// case <-ticker.C:
|
||||
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
// return
|
||||
// }
|
||||
// case <-ctx.Done():
|
||||
// t.Error("Timed out")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
|
||||
<-done
|
||||
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
// <-done
|
||||
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
// }
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// }
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
// for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
// }
|
||||
|
||||
@@ -205,14 +205,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
|
||||
}
|
||||
|
||||
// SetChainState sets the chain state of the given remote peer.
|
||||
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
|
||||
func (p *Status) SetChainState(pid peer.ID, chainState *pb.StatusV2) {
|
||||
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
|
||||
}
|
||||
|
||||
// ChainState gets the chain state of the given remote peer.
|
||||
// This will error if the peer does not exist.
|
||||
// This will error if there is no known chain state for the peer.
|
||||
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
|
||||
func (p *Status) ChainState(pid peer.ID) (*pb.StatusV2, error) {
|
||||
return p.scorers.PeerStatusScorer().PeerStatus(pid)
|
||||
}
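Illustrative only, not part of the diff: after this migration, callers record and read a peer's chain state with *pb.StatusV2, exactly as the test updates later in this change set do; pid, p, and params are assumed from the surrounding package:

	// Hedged usage sketch of the StatusV2-based API.
	p.SetChainState(pid, &pb.StatusV2{
		HeadSlot:       params.BeaconConfig().SlotsPerEpoch * 4,
		FinalizedEpoch: 4,
	})
	chainState, err := p.ChainState(pid) // returns *pb.StatusV2, or an error such as peerdata.ErrNoPeerStatus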
|
||||
|
||||
@@ -705,31 +705,47 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
|
||||
// upon by the majority of peers. This method may not return the absolute highest finalized, but
|
||||
// the finalized epoch in which most peers can serve blocks (plurality voting).
|
||||
// Ideally, all peers would be reporting the same finalized epoch but some may be behind due to their
|
||||
// own latency, or because of their finalized epoch at the time we queried them.
|
||||
// Returns epoch number and list of peers that are at or beyond that epoch.
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them. Returns epoch number and list of peers that are at or beyond
|
||||
// that epoch.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch >= ourFinalizedEpoch {
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
var targetEpoch primitives.Epoch
|
||||
var mostVotes uint64
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
@@ -737,11 +753,12 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized epoch, in decreasing order.
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
})
|
||||
|
||||
@@ -764,26 +781,42 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
// and is shared by at least minPeers.
|
||||
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// Calculate our head slot.
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
|
||||
// key: head epoch, value: number of peers that support this epoch.
|
||||
epochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: head epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
|
||||
ourHeadSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.HeadSlot > ourHeadSlot {
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
// Skip if the peer's head epoch is not defined, or if the peer's head slot is
|
||||
// lower than or equal to ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.HeadSlot <= ourHeadSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which has enough peers' votes (>= minPeers).
|
||||
var targetEpoch primitives.Epoch
|
||||
targetEpoch := primitives.Epoch(0)
|
||||
for epoch, votes := range epochVotes {
|
||||
if votes >= uint64(minPeers) && targetEpoch < epoch {
|
||||
targetEpoch = epoch
|
||||
@@ -1012,16 +1045,23 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
ip, err := manet.ToIP(peerData.Address)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "to ip")
|
||||
}
|
||||
// ip, err := manet.ToIP(peerData.Address)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "to ip")
|
||||
// }
|
||||
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > CollocationLimit {
|
||||
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
|
||||
}
|
||||
}
|
||||
// if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
// if val > CollocationLimit {
|
||||
// TODO: Remove this once out of devnet.
|
||||
// return errors.Errorf("colocation limit exceeded: got %d - limit %d", val, CollocationLimit)
|
||||
// log.WithFields(logrus.Fields{
|
||||
// "pid": pid,
|
||||
// "ip": ip.String(),
|
||||
// "colocationCount": val,
|
||||
// "colocationLimit": CollocationLimit,
|
||||
// }).Debug("Colocation limit exceeded. Peer should be banned.")
|
||||
// }
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package peers_test
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -290,7 +289,7 @@ func TestPeerChainState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedEpoch := primitives.Epoch(123)
|
||||
p.SetChainState(id, &pb.Status{FinalizedEpoch: finalizedEpoch})
|
||||
p.SetChainState(id, &pb.StatusV2{FinalizedEpoch: finalizedEpoch})
|
||||
|
||||
resChainState, err := p.ChainState(id)
|
||||
require.NoError(t, err)
|
||||
@@ -325,59 +324,60 @@ func TestPeerWithNilChainState(t *testing.T) {
|
||||
|
||||
resChainState, err := p.ChainState(id)
|
||||
require.Equal(t, peerdata.ErrNoPeerStatus, err)
|
||||
var nothing *pb.Status
|
||||
var nothing *pb.StatusV2
|
||||
require.Equal(t, resChainState, nothing)
|
||||
}
|
||||
|
||||
func TestPeerBadResponses(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerBadResponses(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
require.NoError(t, err)
|
||||
{
|
||||
_, err := id.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
// require.NoError(t, err)
|
||||
// {
|
||||
// _, err := id.MarshalBinary()
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
require.NoError(t, err, "Failed to create address")
|
||||
direction := network.DirInbound
|
||||
p.Add(new(enr.Record), id, address, direction)
|
||||
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
// require.NoError(t, err, "Failed to create address")
|
||||
// direction := network.DirInbound
|
||||
// p.Add(new(enr.Record), id, address, direction)
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
resBadResponses, err := scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
// resBadResponses, err := scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
}
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// }
|
||||
|
||||
func TestAddMetaData(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
@@ -496,100 +496,102 @@ func TestPeerValidTime(t *testing.T) {
|
||||
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
|
||||
}
|
||||
|
||||
func TestPrune(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPrune(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
if i%7 == 0 {
|
||||
// Peer added as disconnected.
|
||||
_ = addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
// Peer added to peer handler.
|
||||
_ = addPeer(t, p, peers.Connected)
|
||||
}
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// if i%7 == 0 {
|
||||
// // Peer added as disconnected.
|
||||
// _ = addPeer(t, p, peers.Disconnected)
|
||||
// }
|
||||
// // Peer added to peer handler.
|
||||
// _ = addPeer(t, p, peers.Connected)
|
||||
// }
|
||||
|
||||
disPeers := p.Disconnected()
|
||||
firstPID := disPeers[0]
|
||||
secondPID := disPeers[1]
|
||||
thirdPID := disPeers[2]
|
||||
// disPeers := p.Disconnected()
|
||||
// firstPID := disPeers[0]
|
||||
// secondPID := disPeers[1]
|
||||
// thirdPID := disPeers[2]
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
|
||||
// Make first peer a bad peer
|
||||
scorer.Increment(firstPID)
|
||||
scorer.Increment(firstPID)
|
||||
// // Make first peer a bad peer
|
||||
// scorer.Increment(firstPID)
|
||||
// scorer.Increment(firstPID)
|
||||
|
||||
// Add bad response for p2.
|
||||
scorer.Increment(secondPID)
|
||||
// // Add bad response for p2.
|
||||
// scorer.Increment(secondPID)
|
||||
|
||||
// Prune peers
|
||||
p.Prune()
|
||||
// // Prune peers
|
||||
// p.Prune()
|
||||
|
||||
// Bad peer is expected to still be kept in handler.
|
||||
badRes, err := scorer.Count(firstPID)
|
||||
assert.NoError(t, err, "error is supposed to be nil")
|
||||
assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
// // Bad peer is expected to still be kept in handler.
|
||||
// badRes, err := scorer.Count(firstPID)
|
||||
// assert.NoError(t, err, "error is supposed to be nil")
|
||||
// assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
|
||||
// Not so good peer is pruned away so that we can reduce the
|
||||
// total size of the handler.
|
||||
_, err = scorer.Count(secondPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
// // Not so good peer is pruned away so that we can reduce the
|
||||
// // total size of the handler.
|
||||
// _, err = scorer.Count(secondPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
|
||||
// Last peer has been removed.
|
||||
_, err = scorer.Count(thirdPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
}
|
||||
// // Last peer has been removed.
|
||||
// _, err = scorer.Count(thirdPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
// }
|
||||
|
||||
func TestPeerIPTracker(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
defer resetCfg()
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerIPTracker(t *testing.T) {
|
||||
// resetCfg := features.InitWithReset(&features.Flags{
|
||||
// EnablePeerScorer: false,
|
||||
// })
|
||||
// defer resetCfg()
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
var badPeers []peer.ID
|
||||
for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
}
|
||||
for _, pr := range badPeers {
|
||||
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
}
|
||||
// badIP := "211.227.218.116"
|
||||
// var badPeers []peer.ID
|
||||
// for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
// port := strconv.Itoa(3000 + i)
|
||||
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
// }
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
// }
|
||||
|
||||
// Add in bad peers, so that our records are trimmed out
|
||||
// from the peer store.
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// Peer added to peer handler.
|
||||
pid := addPeer(t, p, peers.Disconnected)
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
p.Prune()
|
||||
// // Add in bad peers, so that our records are trimmed out
|
||||
// // from the peer store.
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// // Peer added to peer handler.
|
||||
// pid := addPeer(t, p, peers.Disconnected)
|
||||
// p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
@@ -617,7 +619,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 1
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid1, &pb.Status{
|
||||
p.SetChainState(pid1, &pb.StatusV2{
|
||||
HeadSlot: 3 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 3,
|
||||
FinalizedRoot: mockroot3[:],
|
||||
@@ -625,7 +627,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 2
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid2, &pb.Status{
|
||||
p.SetChainState(pid2, &pb.StatusV2{
|
||||
HeadSlot: 4 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 4,
|
||||
FinalizedRoot: mockroot4[:],
|
||||
@@ -633,7 +635,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 3
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid3, &pb.Status{
|
||||
p.SetChainState(pid3, &pb.StatusV2{
|
||||
HeadSlot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 5,
|
||||
FinalizedRoot: mockroot5[:],
|
||||
@@ -641,7 +643,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 4
|
||||
pid4 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid4, &pb.Status{
|
||||
p.SetChainState(pid4, &pb.StatusV2{
|
||||
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 2,
|
||||
FinalizedRoot: mockroot2[:],
|
||||
@@ -649,7 +651,7 @@ func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
|
||||
// Peer 5
|
||||
pid5 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid5, &pb.Status{
|
||||
p.SetChainState(pid5, &pb.StatusV2{
|
||||
HeadSlot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedEpoch: 2,
|
||||
FinalizedRoot: mockroot2[:],
|
||||
@@ -1013,7 +1015,7 @@ func TestStatus_BestPeer(t *testing.T) {
|
||||
},
|
||||
})
|
||||
for _, peerConfig := range tt.peers {
|
||||
p.SetChainState(addPeer(t, p, peers.Connected), &pb.Status{
|
||||
p.SetChainState(addPeer(t, p, peers.Connected), &pb.StatusV2{
|
||||
FinalizedEpoch: peerConfig.finalizedEpoch,
|
||||
HeadSlot: peerConfig.headSlot,
|
||||
})
|
||||
@@ -1040,7 +1042,7 @@ func TestBestFinalized_returnsMaxValue(t *testing.T) {
|
||||
for i := 0; i <= maxPeers+100; i++ {
|
||||
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.Status{
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
|
||||
FinalizedEpoch: 10,
|
||||
})
|
||||
}
|
||||
@@ -1063,7 +1065,7 @@ func TestStatus_BestNonFinalized(t *testing.T) {
|
||||
for i, headSlot := range peerSlots {
|
||||
p.Add(new(enr.Record), peer.ID(rune(i)), nil, network.DirOutbound)
|
||||
p.SetConnectionState(peer.ID(rune(i)), peers.Connected)
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.Status{
|
||||
p.SetChainState(peer.ID(rune(i)), &pb.StatusV2{
|
||||
HeadSlot: headSlot,
|
||||
})
|
||||
}
|
||||
@@ -1086,17 +1088,17 @@ func TestStatus_CurrentEpoch(t *testing.T) {
|
||||
})
|
||||
// Peer 1
|
||||
pid1 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid1, &pb.Status{
|
||||
p.SetChainState(pid1, &pb.StatusV2{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
|
||||
})
|
||||
// Peer 2
|
||||
pid2 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid2, &pb.Status{
|
||||
p.SetChainState(pid2, &pb.StatusV2{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
// Peer 3
|
||||
pid3 := addPeer(t, p, peers.Connected)
|
||||
p.SetChainState(pid3, &pb.Status{
|
||||
p.SetChainState(pid3, &pb.StatusV2{
|
||||
HeadSlot: params.BeaconConfig().SlotsPerEpoch * 4,
|
||||
})
|
||||
|
||||
|
||||
@@ -22,44 +22,52 @@ const (
|
||||
SchemaVersionV3 = "/3"
|
||||
)
|
||||
|
||||
// Specifies the protocol prefix for all our Req/Resp topics.
|
||||
const protocolPrefix = "/eth2/beacon_chain/req"
|
||||
const (
|
||||
// Specifies the protocol prefix for all our Req/Resp topics.
|
||||
protocolPrefix = "/eth2/beacon_chain/req"
|
||||
|
||||
// StatusMessageName specifies the name for the status message topic.
|
||||
const StatusMessageName = "/status"
|
||||
// StatusMessageName specifies the name for the status message topic.
|
||||
StatusMessageName = "/status"
|
||||
|
||||
// GoodbyeMessageName specifies the name for the goodbye message topic.
|
||||
const GoodbyeMessageName = "/goodbye"
|
||||
// GoodbyeMessageName specifies the name for the goodbye message topic.
|
||||
GoodbyeMessageName = "/goodbye"
|
||||
|
||||
// BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
|
||||
const BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"
|
||||
// BeaconBlocksByRangeMessageName specifies the name for the beacon blocks by range message topic.
|
||||
BeaconBlocksByRangeMessageName = "/beacon_blocks_by_range"
|
||||
|
||||
// BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
|
||||
const BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"
|
||||
// BeaconBlocksByRootsMessageName specifies the name for the beacon blocks by root message topic.
|
||||
BeaconBlocksByRootsMessageName = "/beacon_blocks_by_root"
|
||||
|
||||
// PingMessageName specifies the name for the ping message topic.
|
||||
const PingMessageName = "/ping"
|
||||
// PingMessageName specifies the name for the ping message topic.
|
||||
PingMessageName = "/ping"
|
||||
|
||||
// MetadataMessageName specifies the name for the metadata message topic.
|
||||
const MetadataMessageName = "/metadata"
|
||||
// MetadataMessageName specifies the name for the metadata message topic.
|
||||
MetadataMessageName = "/metadata"
|
||||
|
||||
// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
|
||||
const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
|
||||
// BlobSidecarsByRangeName is the name for the BlobSidecarsByRange v1 message topic.
|
||||
BlobSidecarsByRangeName = "/blob_sidecars_by_range"
|
||||
|
||||
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
|
||||
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
|
||||
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
|
||||
BlobSidecarsByRootName = "/blob_sidecars_by_root"
|
||||
|
||||
// LightClientBootstrapName is the name for the LightClientBootstrap message topic.
|
||||
const LightClientBootstrapName = "/light_client_bootstrap"
|
||||
// LightClientBootstrapName is the name for the LightClientBootstrap message topic.
|
||||
LightClientBootstrapName = "/light_client_bootstrap"
|
||||
|
||||
// LightClientUpdatesByRangeName is the name for the LightClientUpdatesByRange topic.
|
||||
const LightClientUpdatesByRangeName = "/light_client_updates_by_range"
|
||||
// LightClientUpdatesByRangeName is the name for the LightClientUpdatesByRange topic.
|
||||
LightClientUpdatesByRangeName = "/light_client_updates_by_range"
|
||||
|
||||
// LightClientFinalityUpdateName is the name for the LightClientFinalityUpdate topic.
|
||||
const LightClientFinalityUpdateName = "/light_client_finality_update"
|
||||
// LightClientFinalityUpdateName is the name for the LightClientFinalityUpdate topic.
|
||||
LightClientFinalityUpdateName = "/light_client_finality_update"
|
||||
|
||||
// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
|
||||
const LightClientOptimisticUpdateName = "/light_client_optimistic_update"
|
||||
// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
|
||||
LightClientOptimisticUpdateName = "/light_client_optimistic_update"
|
||||
|
||||
// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
|
||||
DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
|
||||
|
||||
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
|
||||
DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
|
||||
)
|
||||
|
||||
const (
|
||||
// V1 RPC Topics
|
||||
@@ -92,8 +100,16 @@ const (
|
||||
RPCLightClientFinalityUpdateTopicV1 = protocolPrefix + LightClientFinalityUpdateName + SchemaVersionV1
|
||||
// RPCLightClientOptimisticUpdateTopicV1 is a topic for requesting a light client optimistic update.
|
||||
RPCLightClientOptimisticUpdateTopicV1 = protocolPrefix + LightClientOptimisticUpdateName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1 - New in Fulu.
|
||||
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
|
||||
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCStatusTopicV2 defines the v2 topic for the status rpc method.
|
||||
RPCStatusTopicV2 = protocolPrefix + StatusMessageName + SchemaVersionV2
|
||||
// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.
|
||||
RPCBlocksByRangeTopicV2 = protocolPrefix + BeaconBlocksByRangeMessageName + SchemaVersionV2
|
||||
// RPCBlocksByRootTopicV2 defines the v2 topic for the blocks by root rpc method.
|
||||
@@ -112,87 +128,106 @@ const (
|
||||
)
|
||||
|
||||
// RPCTopicMappings map the base message type to the rpc request.
|
||||
var RPCTopicMappings = map[string]interface{}{
|
||||
// RPC Status Message
|
||||
RPCStatusTopicV1: new(pb.Status),
|
||||
// RPC Goodbye Message
|
||||
RPCGoodByeTopicV1: new(primitives.SSZUint64),
|
||||
// RPC Block By Range Message
|
||||
RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
|
||||
RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),
|
||||
// RPC Block By Root Message
|
||||
RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
|
||||
RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),
|
||||
// RPC Ping Message
|
||||
RPCPingTopicV1: new(primitives.SSZUint64),
|
||||
// RPC Metadata Message
|
||||
RPCMetaDataTopicV1: new(interface{}),
|
||||
RPCMetaDataTopicV2: new(interface{}),
|
||||
RPCMetaDataTopicV3: new(interface{}),
|
||||
// BlobSidecarsByRange v1 Message
|
||||
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
|
||||
// BlobSidecarsByRoot v1 Message
|
||||
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
|
||||
var (
|
||||
RPCTopicMappings = map[string]interface{}{
|
||||
// RPC Status Message
|
||||
RPCStatusTopicV1: new(pb.Status),
|
||||
RPCStatusTopicV2: new(pb.StatusV2),
|
||||
|
||||
// Light client
|
||||
RPCLightClientBootstrapTopicV1: new([fieldparams.RootLength]byte),
|
||||
RPCLightClientUpdatesByRangeTopicV1: new(pb.LightClientUpdatesByRangeRequest),
|
||||
RPCLightClientFinalityUpdateTopicV1: new(interface{}),
|
||||
RPCLightClientOptimisticUpdateTopicV1: new(interface{}),
|
||||
}
|
||||
// RPC Goodbye Message
|
||||
RPCGoodByeTopicV1: new(primitives.SSZUint64),
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
var protocolMapping = map[string]bool{
|
||||
protocolPrefix: true,
|
||||
}
|
||||
// RPC Block By Range Message
|
||||
RPCBlocksByRangeTopicV1: new(pb.BeaconBlocksByRangeRequest),
|
||||
RPCBlocksByRangeTopicV2: new(pb.BeaconBlocksByRangeRequest),
|
||||
|
||||
// Maps all the protocol message names for the different rpc
|
||||
// topics.
|
||||
var messageMapping = map[string]bool{
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
BlobSidecarsByRangeName: true,
|
||||
BlobSidecarsByRootName: true,
|
||||
LightClientBootstrapName: true,
|
||||
LightClientUpdatesByRangeName: true,
|
||||
LightClientFinalityUpdateName: true,
|
||||
LightClientOptimisticUpdateName: true,
|
||||
}
|
||||
// RPC Block By Root Message
|
||||
RPCBlocksByRootTopicV1: new(p2ptypes.BeaconBlockByRootsReq),
|
||||
RPCBlocksByRootTopicV2: new(p2ptypes.BeaconBlockByRootsReq),
|
||||
|
||||
// Maps all the RPC messages which are to be updated in altair.
|
||||
var altairMapping = map[string]bool{
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
}
|
||||
// RPC Ping Message
|
||||
RPCPingTopicV1: new(primitives.SSZUint64),
|
||||
|
||||
// Maps all the RPC messages which are to be updated in fulu.
|
||||
var fuluMapping = map[string]bool{
|
||||
MetadataMessageName: true,
|
||||
}
|
||||
// RPC Metadata Message
|
||||
RPCMetaDataTopicV1: new(interface{}),
|
||||
RPCMetaDataTopicV2: new(interface{}),
|
||||
RPCMetaDataTopicV3: new(interface{}),
|
||||
|
||||
var versionMapping = map[string]bool{
|
||||
SchemaVersionV1: true,
|
||||
SchemaVersionV2: true,
|
||||
SchemaVersionV3: true,
|
||||
}
|
||||
// BlobSidecarsByRange v1 Message
|
||||
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
|
||||
|
||||
// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
|
||||
// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
|
||||
// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
|
||||
// was introduced. For other RPC methods, context bytes are always required.
|
||||
var OmitContextBytesV1 = map[string]bool{
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
}
|
||||
// BlobSidecarsByRoot v1 Message
|
||||
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
|
||||
|
||||
// Light client
|
||||
RPCLightClientBootstrapTopicV1: new([fieldparams.RootLength]byte),
|
||||
RPCLightClientUpdatesByRangeTopicV1: new(pb.LightClientUpdatesByRangeRequest),
|
||||
RPCLightClientFinalityUpdateTopicV1: new(interface{}),
|
||||
RPCLightClientOptimisticUpdateTopicV1: new(interface{}),
|
||||
|
||||
// DataColumnSidecarsByRange v1 Message
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
protocolMapping = map[string]bool{
|
||||
protocolPrefix: true,
|
||||
}
|
||||
|
||||
// Maps all the protocol message names for the different rpc
|
||||
// topics.
|
||||
messageMapping = map[string]bool{
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
BlobSidecarsByRangeName: true,
|
||||
BlobSidecarsByRootName: true,
|
||||
LightClientBootstrapName: true,
|
||||
LightClientUpdatesByRangeName: true,
|
||||
LightClientFinalityUpdateName: true,
|
||||
LightClientOptimisticUpdateName: true,
|
||||
DataColumnSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRangeName: true,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to be updated in altair.
|
||||
altairMapping = map[string]string{
|
||||
BeaconBlocksByRangeMessageName: SchemaVersionV2,
|
||||
BeaconBlocksByRootsMessageName: SchemaVersionV2,
|
||||
MetadataMessageName: SchemaVersionV2,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to be updated in fulu.
|
||||
fuluMapping = map[string]string{
|
||||
StatusMessageName: SchemaVersionV2,
|
||||
MetadataMessageName: SchemaVersionV3,
|
||||
}
|
||||
|
||||
versionMapping = map[string]bool{
|
||||
SchemaVersionV1: true,
|
||||
SchemaVersionV2: true,
|
||||
SchemaVersionV3: true,
|
||||
}
|
||||
|
||||
// OmitContextBytesV1 keeps track of which RPC methods do not write context bytes in their v1 incarnations.
|
||||
// Phase0 did not have the notion of context bytes, which prefix wire-encoded values with a [4]byte identifier
|
||||
// to convey the schema for the receiver to use. These RPCs had a version bump to V2 when the context byte encoding
|
||||
// was introduced. For other RPC methods, context bytes are always required.
|
||||
OmitContextBytesV1 = map[string]bool{
|
||||
StatusMessageName: true,
|
||||
GoodbyeMessageName: true,
|
||||
BeaconBlocksByRangeMessageName: true,
|
||||
BeaconBlocksByRootsMessageName: true,
|
||||
PingMessageName: true,
|
||||
MetadataMessageName: true,
|
||||
}
|
||||
)
|
||||
|
||||
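// Illustrative sketch (not part of the diff): RPCTopicMappings keys are fully-qualified topic
// strings and the values are the request containers expected on those topics, so a handler can
// pick the right type before decoding an inbound stream. requestTypeForTopic is a hypothetical
// helper assumed for this sketch, not a function defined in the package:
//
//	func requestTypeForTopic(topic string) (interface{}, error) {
//		msgType, ok := RPCTopicMappings[topic]
//		if !ok {
//			return nil, fmt.Errorf("no request type registered for topic %s", topic)
//		}
//		return msgType, nil
//	}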
// VerifyTopicMapping verifies that the topic and its accompanying
|
||||
// message type is correct.
|
||||
@@ -314,13 +349,17 @@ func TopicFromMessage(msg string, epoch primitives.Epoch) (string, error) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
|
||||
// Check if the message is to be updated in fulu.
|
||||
if epoch >= beaconConfig.FuluForkEpoch && fuluMapping[msg] {
|
||||
return protocolPrefix + msg + SchemaVersionV3, nil
|
||||
if epoch >= beaconConfig.FuluForkEpoch {
|
||||
if version, ok := fuluMapping[msg]; ok {
|
||||
return protocolPrefix + msg + version, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the message is to be updated in altair.
|
||||
if epoch >= beaconConfig.AltairForkEpoch && altairMapping[msg] {
|
||||
return protocolPrefix + msg + SchemaVersionV2, nil
|
||||
if epoch >= beaconConfig.AltairForkEpoch {
|
||||
if version, ok := altairMapping[msg]; ok {
|
||||
return protocolPrefix + msg + version, nil
|
||||
}
|
||||
}
|
||||
|
||||
return protocolPrefix + msg + SchemaVersionV1, nil
|
||||
|
||||
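// Illustrative sketch (not part of the diff): with the mappings above, TopicFromMessage resolves
// a message name to the highest schema version activated at the given epoch, checking fulu first,
// then altair, and falling back to v1. For epochs at or past the respective fork epochs (the epoch
// variables below are assumptions of the sketch), the resulting topics are:
//
//	topic, _ := TopicFromMessage(MetadataMessageName, altairForkEpoch) // "/eth2/beacon_chain/req/metadata/2"
//	topic, _ = TopicFromMessage(MetadataMessageName, fuluForkEpoch)    // "/eth2/beacon_chain/req/metadata/3"
//	topic, _ = TopicFromMessage(StatusMessageName, fuluForkEpoch)      // "/eth2/beacon_chain/req/status/2"
//	topic, _ = TopicFromMessage(GoodbyeMessageName, fuluForkEpoch)     // "/eth2/beacon_chain/req/goodbye/1"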
@@ -119,50 +119,36 @@ func TestTopicFromMessage_CorrectType(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("after altair fork but before fulu fork", func(t *testing.T) {
|
||||
for m := range messageMapping {
|
||||
topic, err := TopicFromMessage(m, altairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
// Not modified in altair fork.
|
||||
topic, err := TopicFromMessage(GoodbyeMessageName, altairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/goodbye/1", topic)
|
||||
|
||||
if altairMapping[m] {
|
||||
require.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
|
||||
_, _, version, err := TopicDeconstructor(topic)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, SchemaVersionV2, version)
|
||||
continue
|
||||
}
|
||||
|
||||
require.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
|
||||
_, _, version, err := TopicDeconstructor(topic)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, SchemaVersionV1, version)
|
||||
}
|
||||
// Modified in altair fork.
|
||||
topic, err = TopicFromMessage(MetadataMessageName, altairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/metadata/2", topic)
|
||||
})
|
||||
|
||||
t.Run("after fulu fork", func(t *testing.T) {
|
||||
for m := range messageMapping {
|
||||
topic, err := TopicFromMessage(m, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
// Not modified in any fork.
|
||||
topic, err := TopicFromMessage(GoodbyeMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/goodbye/1", topic)
|
||||
|
||||
if fuluMapping[m] {
|
||||
require.Equal(t, true, strings.Contains(topic, SchemaVersionV3))
|
||||
_, _, version, err := TopicDeconstructor(topic)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, SchemaVersionV3, version)
|
||||
continue
|
||||
}
|
||||
// Modified in altair fork.
|
||||
topic, err = TopicFromMessage(BeaconBlocksByRangeMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/beacon_blocks_by_range/2", topic)
|
||||
|
||||
if altairMapping[m] {
|
||||
require.Equal(t, true, strings.Contains(topic, SchemaVersionV2))
|
||||
_, _, version, err := TopicDeconstructor(topic)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, SchemaVersionV2, version)
|
||||
continue
|
||||
}
|
||||
// Modified in fulu fork.
|
||||
topic, err = TopicFromMessage(StatusMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/status/2", topic)
|
||||
|
||||
require.Equal(t, true, strings.Contains(topic, SchemaVersionV1))
|
||||
_, _, version, err := TopicDeconstructor(topic)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, SchemaVersionV1, version)
|
||||
}
|
||||
// Modified both in altair and fulu fork.
|
||||
topic, err = TopicFromMessage(MetadataMessageName, fuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/eth2/beacon_chain/req/metadata/3", topic)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -358,48 +356,49 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
|
||||
return fd
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
info peer.AddrInfo
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "bad peer",
|
||||
peers: func() *peers.Status {
|
||||
ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
for i := 0; i < 10; i++ {
|
||||
ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
}
|
||||
return ps
|
||||
}(),
|
||||
info: peer.AddrInfo{ID: "bad"},
|
||||
wantErr: "refused to connect to bad peer",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _, _ := createHost(t, 34567)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
host: h,
|
||||
peers: tt.peers,
|
||||
}
|
||||
err := s.connectWithPeer(ctx, tt.info)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestService_connectWithPeer(t *testing.T) {
|
||||
// params.SetupTestConfigCleanup(t)
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// peers *peers.Status
|
||||
// info peer.AddrInfo
|
||||
// wantErr string
|
||||
// }{
|
||||
// {
|
||||
// name: "bad peer",
|
||||
// peers: func() *peers.Status {
|
||||
// ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// for i := 0; i < 10; i++ {
|
||||
// ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
// }
|
||||
// return ps
|
||||
// }(),
|
||||
// info: peer.AddrInfo{ID: "bad"},
|
||||
// wantErr: "refused to connect to bad peer",
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// h, _, _ := createHost(t, 34567)
|
||||
// defer func() {
|
||||
// if err := h.Close(); err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// }()
|
||||
// ctx := context.Background()
|
||||
// s := &Service{
|
||||
// host: h,
|
||||
// peers: tt.peers,
|
||||
// }
|
||||
// err := s.connectWithPeer(ctx, tt.info)
|
||||
// if len(tt.wantErr) > 0 {
|
||||
// require.ErrorContains(t, tt.wantErr, err)
|
||||
// } else {
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -74,8 +73,8 @@ func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node)
|
||||
|
||||
// searchForPeers performs a network search for peers subscribed to a particular subnet.
|
||||
// It exits as soon as one of these conditions is met:
|
||||
// - It looped through `batchSize` nodes.
|
||||
// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
|
||||
// - It has looped for the `batchPeriod` duration, or
|
||||
// - It found `peersToFindCount` peers corresponding to the `filter` criteria, or
|
||||
// - Iterator is exhausted.
|
||||
func searchForPeers(
|
||||
iterator enode.Iterator,
|
||||
@@ -147,8 +146,6 @@ func (s *Service) FindPeersWithSubnet(
|
||||
index uint64,
|
||||
threshold int,
|
||||
) (bool, error) {
|
||||
const minLogInterval = 1 * time.Minute
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
@@ -168,41 +165,29 @@ func (s *Service) FindPeersWithSubnet(
|
||||
return false, errors.Wrap(err, "node filter")
|
||||
}
|
||||
|
||||
peersSummary := func(topic string, threshold int) (int, int) {
|
||||
peersSummary := func(topic string, threshold int) int {
|
||||
// Retrieve how many peers we have for this topic.
|
||||
peerCountForTopic := len(s.pubsub.ListPeers(topic))
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
missingPeerCountForTopic := max(0, threshold-peerCountForTopic)
|
||||
|
||||
return peerCountForTopic, missingPeerCountForTopic
|
||||
return missingPeerCountForTopic
|
||||
}
|
||||
|
||||
// Compute how many peers we are missing to reach the threshold.
|
||||
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
|
||||
// Exit early if we have enough peers.
|
||||
if missingPeerCountForTopic == 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"targetPeerCount": threshold,
|
||||
})
|
||||
|
||||
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")
|
||||
|
||||
lastLogTime := time.Now()
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for {
|
||||
// If the context is done, we can exit the loop. This is the unhappy path.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return false, errors.Errorf(
|
||||
"unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
|
||||
topic, peerCountForTopic, threshold,
|
||||
)
|
||||
if ctx.Err() != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Search for new peers in the network.
|
||||
@@ -225,20 +210,14 @@ func (s *Service) FindPeersWithSubnet(
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
missingPeerCountForTopic := peersSummary(topic, threshold)
|
||||
|
||||
// If we have enough peers, we can exit the loop. This is the happy path.
|
||||
if missingPeerCountForTopic == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if time.Since(lastLogTime) > minLogInterval {
|
||||
lastLogTime = time.Now()
|
||||
log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
||||
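// Illustrative sketch (not part of the diff): after the refactor, the FindPeersWithSubnet loop
// reduces to "compute how many peers are missing, return early if none, otherwise keep searching
// until the context is cancelled". A condensed view of that control flow, with searchOnce standing
// in as a hypothetical placeholder for one discovery round:
//
//	missing := peersSummary(topic, threshold)
//	for missing > 0 {
//		if ctx.Err() != nil {
//			return false, nil // context is done, stop searching without an error
//		}
//		searchOnce()
//		missing = peersSummary(topic, threshold)
//	}
//	return true, nil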
@@ -65,7 +65,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
}
|
||||
m.peers.Add(createENR(), id0, ma0, network.DirInbound)
|
||||
m.peers.SetConnectionState(id0, peers.Connected)
|
||||
m.peers.SetChainState(id0, &pb.Status{FinalizedEpoch: 10})
|
||||
m.peers.SetChainState(id0, &pb.StatusV2{FinalizedEpoch: 10})
|
||||
id1, err := peer.Decode(MockRawPeerId1)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Cannot decode")
|
||||
@@ -76,7 +76,7 @@ func (m *MockPeersProvider) Peers() *peers.Status {
|
||||
}
|
||||
m.peers.Add(createENR(), id1, ma1, network.DirOutbound)
|
||||
m.peers.SetConnectionState(id1, peers.Connected)
|
||||
m.peers.SetChainState(id1, &pb.Status{FinalizedEpoch: 11})
|
||||
m.peers.SetChainState(id1, &pb.StatusV2{FinalizedEpoch: 11})
|
||||
}
|
||||
return m.peers
|
||||
}
|
||||
|
||||
@@ -9,10 +9,13 @@ var (
|
||||
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
|
||||
ErrGeneric = errors.New("internal service error")
|
||||
|
||||
ErrRateLimited = errors.New("rate limited")
|
||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
|
||||
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
|
||||
ErrRateLimited = errors.New("rate limited")
|
||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
|
||||
|
||||
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
|
||||
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
|
||||
|
||||
ErrResourceUnavailable = errors.New("resource requested unavailable")
|
||||
)
|
||||
|
||||
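// Illustrative sketch (not part of the diff): these sentinel errors are meant to be matched with
// errors.Is by callers that need to distinguish malformed requests from transient conditions such
// as rate limiting. The classification below is an assumption of the sketch, not an API defined in
// this package:
//
//	func isClientFault(err error) bool {
//		return errors.Is(err, ErrInvalidRequest) ||
//			errors.Is(err, ErrBlobLTMinRequest) ||
//			errors.Is(err, ErrMaxBlobReqExceeded) ||
//			errors.Is(err, ErrMaxDataColumnReqExceeded)
//	}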
@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s BlobSidecarsByRootReq) Len() int {
|
||||
return len(s)
|
||||
func (s *BlobSidecarsByRootReq) Len() int {
|
||||
return len(*s)
|
||||
}
|
||||
|
||||
// ====================================
|
||||
|
||||
@@ -35,6 +35,7 @@ go_test(
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -219,9 +220,10 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
resp := &structs.GetBeaconStateV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, version.String(version.Fulu), resp.Version)
|
||||
st := &structs.BeaconStateElectra{}
|
||||
st := &structs.BeaconStateFulu{}
|
||||
require.NoError(t, json.Unmarshal(resp.Data, st))
|
||||
assert.Equal(t, "123", st.Slot)
|
||||
assert.Equal(t, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch), len(st.ProposerLookahead))
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
parentRoot := [32]byte{'a'}
|
||||
|
||||
@@ -10,11 +10,13 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -36,6 +38,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
@@ -47,12 +50,16 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,12 +3,15 @@ package lookup
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -49,6 +52,7 @@ type BeaconDbBlocker struct {
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
}
|
||||
|
||||
// Block returns the beacon block for a given identifier. The identifier can be one of:
|
||||
@@ -129,6 +133,137 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// blobsFromStoredBlobs retrieves blobs corresponding to `indices` and `root` from the store.
|
||||
// This function expects blobs to be stored directly (i.e., no data columns).
|
||||
func (p *BeaconDbBlocker) blobsFromStoredBlobs(indices []int, root []byte, commitments [][]byte) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
sum := p.BlobStorage.Summary(bytesutil.ToBytes32(root))
|
||||
|
||||
if len(indices) == 0 {
|
||||
for index := range commitments {
|
||||
if sum.HasIndex(uint64(index)) {
|
||||
indices = append(indices, index)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, index := range indices {
|
||||
if uint64(index) >= sum.MaxBlobsForEpoch() {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", index, sum.MaxBlobsForEpoch()),
|
||||
Reason: core.BadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if !sum.HasIndex(uint64(index)) {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d not found", index),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
blobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), uint64(index))
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
blobs = append(blobs, &vblob)
|
||||
}
|
||||
|
||||
return blobs, nil
|
||||
}
|
||||
|
||||
// blobsFromStoredDataColumns retrieves data columns from the store, reconstructs the whole matrix if needed, converts the matrix to blobs,
|
||||
// and then returns the blobs corresponding to `indices` and `root`.
|
||||
// This function expects data columns to be stored (i.e., no blobs).
|
||||
// If not enough data columns are available to extract blobs from them (either directly or after reconstruction), an error is returned.
|
||||
func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int, rootBytes []byte) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
root := bytesutil.ToBytes32(rootBytes)
|
||||
|
||||
// Use all indices if none are provided.
|
||||
if len(indices) == 0 {
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Wrap(err, "could not retrieve blob commitments"),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
|
||||
for index := range commitments {
|
||||
indices = append(indices, index)
|
||||
}
|
||||
}
|
||||
|
||||
// Count how many columns we have in the store.
|
||||
summary := p.DataColumnStorage.Summary(root)
|
||||
stored := summary.Stored()
|
||||
count := uint64(len(stored))
|
||||
|
||||
if count < peerdas.MinimumColumnsCountToReconstruct() {
|
||||
// There is no way to reconstruct the data columns.
|
||||
return nil, &core.RpcError{
|
||||
Err:    errors.Errorf("the node does not custody enough data columns to reconstruct blobs. Please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if the flag is already set.", flags.SubscribeAllDataSubnets.Name),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve from the database needed data columns.
|
||||
verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Wrap(err, "needed data column sidecars"),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
|
||||
verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Wrap(err, "blobs from data columns"),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
|
||||
return verifiedRoBlobSidecars, nil
|
||||
}
|
||||
|
||||
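// Illustrative sketch (not part of the diff): neededDataColumnSidecars below only loads the full
// stored column set when one of the non-extended columns (the first fieldparams.CellsPerBlob
// indices, per the comment in the function) is missing; when all of them are present, no
// reconstruction is needed and those columns alone are fetched. The decision reduces to:
//
//	needAll := false
//	for i := uint64(0); i < uint64(fieldparams.CellsPerBlob); i++ {
//		if !stored[i] {
//			needAll = true // a non-extended column is missing, fall back to every stored column
//			break
//		}
//	}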
func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]byte, stored map[uint64]bool) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if we have all the non-extended data columns.
|
||||
cellsPerBlob := fieldparams.CellsPerBlob
|
||||
blobIndices := make([]uint64, 0, cellsPerBlob)
|
||||
hasAllBlobColumns := true
|
||||
for i := range uint64(cellsPerBlob) {
|
||||
if !stored[i] {
|
||||
hasAllBlobColumns = false
|
||||
break
|
||||
}
|
||||
blobIndices = append(blobIndices, i)
|
||||
}
|
||||
|
||||
if hasAllBlobColumns {
|
||||
// Retrieve only the non-extended data columns.
|
||||
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, blobIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns storage get")
|
||||
}
|
||||
|
||||
return verifiedRoSidecars, nil
|
||||
}
|
||||
|
||||
// Retrieve all the data columns.
|
||||
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns storage get")
|
||||
}
|
||||
|
||||
return verifiedRoSidecars, nil
|
||||
}
|
||||
|
||||
// Blobs returns the blobs for a given block id identifier and blob indices. The identifier can be one of:
|
||||
// - "head" (canonical head in node's view)
|
||||
// - "genesis"
|
||||
@@ -212,64 +347,55 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (
|
||||
|
||||
root := bytesutil.ToBytes32(rootSlice)
|
||||
|
||||
b, err := p.BeaconDB.Block(ctx, root)
|
||||
roSignedBlock, err := p.BeaconDB.Block(ctx, root)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice), Reason: core.Internal}
|
||||
}
|
||||
if b == nil {
|
||||
|
||||
if roSignedBlock == nil {
|
||||
return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
|
||||
}
|
||||
|
||||
// if block is not in the retention window, return 200 w/ empty list
|
||||
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
|
||||
// If block is not in the retention window, return 200 w/ empty list
|
||||
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(roSignedBlock.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
|
||||
return make([]*blocks.VerifiedROBlob, 0), nil
|
||||
}
|
||||
|
||||
commitments, err := b.Block().Body().BlobKzgCommitments()
|
||||
roBlock := roSignedBlock.Block()
|
||||
|
||||
commitments, err := roBlock.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
|
||||
}
|
||||
|
||||
// if there are no commitments return 200 w/ empty list
|
||||
if len(commitments) == 0 {
|
||||
return make([]*blocks.VerifiedROBlob, 0), nil
|
||||
}
|
||||
|
||||
sum := p.BlobStorage.Summary(root)
|
||||
// Get the slot of the block.
|
||||
blockSlot := roBlock.Slot()
|
||||
|
||||
if len(indices) == 0 {
|
||||
for i := range commitments {
|
||||
if sum.HasIndex(uint64(i)) {
|
||||
indices = append(indices, i)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, ix := range indices {
|
||||
if uint64(ix) >= sum.MaxBlobsForEpoch() {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", ix, sum.MaxBlobsForEpoch()),
|
||||
Reason: core.BadRequest,
|
||||
}
|
||||
}
|
||||
if !sum.HasIndex(uint64(ix)) {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d not found", ix),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Get the first peerDAS epoch.
|
||||
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
|
||||
|
||||
blobs := make([]*blocks.VerifiedROBlob, len(indices))
|
||||
for i, index := range indices {
|
||||
vblob, err := p.BlobStorage.Get(root, uint64(index))
|
||||
// Compute the first peerDAS slot.
|
||||
fuluForkSlot := primitives.Slot(math.MaxUint64)
|
||||
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
|
||||
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", rootSlice, index),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate peerDAS start slot"), Reason: core.Internal}
|
||||
}
|
||||
blobs[i] = &vblob
|
||||
}
|
||||
|
||||
return blobs, nil
|
||||
if blockSlot >= fuluForkSlot {
|
||||
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to create roBlock with root %#x", root), Reason: core.Internal}
|
||||
}
|
||||
|
||||
return p.blobsFromStoredDataColumns(roBlock, indices, rootSlice)
|
||||
}
|
||||
|
||||
return p.blobsFromStoredBlobs(indices, rootSlice, commitments)
|
||||
}
|
||||
|
||||
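// Illustrative sketch (not part of the diff): the dispatch above hinges on whether the block's
// slot falls in the peerDAS (Fulu) era. An unscheduled Fulu fork epoch is represented by
// math.MaxUint64, and in that case the guard keeps every block on the pre-Fulu blob path:
//
//	fuluForkSlot := primitives.Slot(math.MaxUint64)
//	if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
//		fuluForkSlot, _ = slots.EpochStart(fuluForkEpoch) // error handling elided in this sketch
//	}
//	useDataColumns := blockSlot >= fuluForkSlot // true: blobsFromStoredDataColumns, false: blobsFromStoredBlobs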
@@ -1,7 +1,10 @@
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
@@ -9,6 +12,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
@@ -17,12 +21,16 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestGetBlock(t *testing.T) {
|
||||
@@ -158,6 +166,118 @@ func TestGetBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
for i := 0; i < len(blob); i += 32 {
|
||||
fieldElementBytes := getRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+32], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
func generateRandomBlocSignedBeaconBlockkAndVerifiedRoBlobs(t *testing.T, blobCount int) (interfaces.SignedBeaconBlock, []*blocks.VerifiedROBlob) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Generate random blobs and their corresponding commitments and proofs.
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
|
||||
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
// Create a random blob.
|
||||
blob := getRandBlob(int64(blobIndex))
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
// Generate a blobKZGCommitment for the blob.
|
||||
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
|
||||
blobKzgProofs = append(blobKzgProofs, proof)
|
||||
}
|
||||
|
||||
// Set the commitments into the block.
|
||||
blobKzgCommitmentsBytes := make([][]byte, 0, blobCount)
|
||||
for _, blobKZGCommitment := range blobKzgCommitments {
|
||||
blobKzgCommitmentsBytes = append(blobKzgCommitmentsBytes, blobKZGCommitment[:])
|
||||
}
|
||||
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobKzgCommitmentsBytes
|
||||
|
||||
// Generate verified RO blobs.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
|
||||
require.NoError(t, err)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
blob := blobs[blobIndex]
|
||||
blobKZGCommitment := blobKzgCommitments[blobIndex]
|
||||
blobKzgProof := blobKzgProofs[blobIndex]
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
blobSidecar := ðpb.BlobSidecar{
|
||||
Index: uint64(blobIndex),
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: signedBeaconBlockHeader,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
return signedBeaconBlock, verifiedROBlobs
|
||||
}
|
||||
|
||||
func TestGetBlob(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
|
||||
@@ -109,6 +109,8 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
peerInfo.MetadataV0 = metadata.MetadataObjV0()
|
||||
case metadata.MetadataObjV1() != nil:
|
||||
peerInfo.MetadataV1 = metadata.MetadataObjV1()
|
||||
case metadata.MetadataObjV2() != nil:
|
||||
peerInfo.MetadataV2 = metadata.MetadataObjV2()
|
||||
}
|
||||
}
|
||||
addresses := peerStore.Addrs(pid)
|
||||
@@ -127,7 +129,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
if err != nil {
|
||||
// In the event chain state is non existent, we
|
||||
// initialize with the zero value.
|
||||
pStatus = new(ethpb.Status)
|
||||
pStatus = new(ethpb.StatusV2)
|
||||
}
|
||||
lastUpdated, err := peers.ChainStateLastUpdated(pid)
|
||||
if err != nil {
|
||||
@@ -150,6 +152,17 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
BehaviourPenalty: float32(bPenalty),
|
||||
ValidationError: errorToString(peers.Scorers().ValidationError(pid)),
|
||||
}
|
||||
|
||||
// Convert statusV2 into status
|
||||
// TODO: Should we do it this way or the other way around?
|
||||
peerStatus := ðpb.Status{
|
||||
ForkDigest: pStatus.ForkDigest,
|
||||
FinalizedRoot: pStatus.FinalizedRoot,
|
||||
FinalizedEpoch: pStatus.FinalizedEpoch,
|
||||
HeadRoot: pStatus.HeadRoot,
|
||||
HeadSlot: pStatus.HeadSlot,
|
||||
}
|
||||
|
||||
return ðpb.DebugPeerResponse{
|
||||
ListeningAddresses: stringAddrs,
|
||||
Direction: pbDirection,
|
||||
@@ -157,7 +170,7 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
PeerId: pid.String(),
|
||||
Enr: enr,
|
||||
PeerInfo: peerInfo,
|
||||
PeerStatus: pStatus,
|
||||
PeerStatus: peerStatus,
|
||||
LastUpdated: unixTime,
|
||||
ScoreInfo: scoreInfo,
|
||||
}, nil
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
# gazelle:ignore
|
||||
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
@@ -37,6 +39,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -47,6 +50,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -180,7 +184,6 @@ common_deps = [
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
]
|
||||
|
||||
# gazelle:ignore
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
timeout = "moderate",
|
||||
|
||||
@@ -8,13 +8,18 @@ import (
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// constructGenericBeaconBlock constructs a `GenericBeaconBlock` based on the block version and other parameters.
|
||||
func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock, blobsBundle *enginev1.BlobsBundle, winningBid primitives.Wei) (*ethpb.GenericBeaconBlock, error) {
|
||||
func (vs *Server) constructGenericBeaconBlock(
|
||||
sBlk interfaces.SignedBeaconBlock,
|
||||
blobsBundler enginev1.BlobsBundler,
|
||||
winningBid primitives.Wei,
|
||||
) (*ethpb.GenericBeaconBlock, error) {
|
||||
if sBlk == nil || sBlk.Block() == nil {
|
||||
return nil, fmt.Errorf("block cannot be nil")
|
||||
return nil, errors.New("block cannot be nil")
|
||||
}
|
||||
|
||||
blockProto, err := sBlk.Block().Proto()
|
||||
@@ -34,12 +39,21 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
|
||||
return vs.constructBellatrixBlock(blockProto, isBlinded, bidStr), nil
|
||||
case version.Capella:
|
||||
return vs.constructCapellaBlock(blockProto, isBlinded, bidStr), nil
|
||||
case version.Deneb:
|
||||
return vs.constructDenebBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
|
||||
case version.Electra:
|
||||
return vs.constructElectraBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
|
||||
case version.Deneb, version.Electra:
|
||||
bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
|
||||
if blobsBundler != nil && !ok {
|
||||
return nil, fmt.Errorf("expected *BlobsBundler, got %T", blobsBundler)
|
||||
}
|
||||
if sBlk.Version() == version.Deneb {
|
||||
return vs.constructDenebBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
}
|
||||
return vs.constructElectraBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
case version.Fulu:
|
||||
return vs.constructFuluBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
|
||||
bundle, ok := blobsBundler.(*enginev1.BlobsBundleV2)
|
||||
if blobsBundler != nil && !ok {
|
||||
return nil, fmt.Errorf("expected *BlobsBundleV2, got %T", blobsBundler)
|
||||
}
|
||||
return vs.constructFuluBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown block version: %d", sBlk.Version())
|
||||
}
|
||||
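// Illustrative sketch (not part of the diff): constructGenericBeaconBlock now takes the
// enginev1.BlobsBundler interface and narrows it per fork - Deneb and Electra blocks expect the
// original *enginev1.BlobsBundle, while Fulu blocks expect *enginev1.BlobsBundleV2. The same
// narrowing can be expressed as a type switch; this variant is an assumption of the sketch,
// not code from the diff:
//
//	switch bundle := blobsBundler.(type) {
//	case *enginev1.BlobsBundle:
//		// pre-Fulu bundle
//	case *enginev1.BlobsBundleV2:
//		// Fulu-era bundle
//	case nil:
//		// no blobs for this block
//	default:
//		return nil, fmt.Errorf("unexpected blobs bundle type %T", bundle)
//	}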
@@ -92,7 +106,7 @@ func (vs *Server) constructElectraBlock(blockProto proto.Message, isBlinded bool
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Electra{Electra: electraContents}, IsBlinded: false, PayloadValue: payloadValue}
|
||||
}
|
||||
|
||||
func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
|
||||
func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundleV2) *ethpb.GenericBeaconBlock {
|
||||
if isBlinded {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blockProto.(*ethpb.BlindedBeaconBlockFulu)}, IsBlinded: true, PayloadValue: payloadValue}
|
||||
}
|
||||
|
||||
@@ -29,12 +29,19 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r1, err := eb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
|
||||
bundle := &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{{1, 2, 3}},
|
||||
Proofs: [][]byte{{4, 5, 6}},
|
||||
Blobs: [][]byte{{7, 8, 9}},
|
||||
}
|
||||
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
|
||||
require.NoError(t, err)
|
||||
r2, err := result.GetFulu().Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r1, r2)
|
||||
require.Equal(t, result.IsBlinded, false)
|
||||
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
|
||||
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
|
||||
})
|
||||
|
||||
// Test for Electra version
|
||||
|
||||
@@ -15,9 +15,13 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -58,28 +62,31 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert slot to time")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
}).Info("Begin building block")
|
||||
|
||||
log := log.WithField("slot", req.Slot)
|
||||
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
|
||||
|
||||
// A syncing validator should not produce a block.
|
||||
if vs.SyncChecker.Syncing() {
|
||||
log.Error("Fail to build block: node is syncing")
|
||||
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
log.WithError(err).Error("Fail to build block: node is optimistic")
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get parent state")
|
||||
return nil, err
|
||||
}
|
||||
sBlk, err := getEmptyBlock(req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get empty block")
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
|
||||
}
|
||||
// Set slot, graffiti, randao reveal, and parent root.
|
||||
@@ -91,6 +98,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
// Set proposer index.
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not calculate proposer index")
|
||||
return nil, fmt.Errorf("could not calculate proposer index: %w", err)
|
||||
}
|
||||
sBlk.SetProposerIndex(idx)
|
||||
@@ -101,7 +109,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
|
||||
log := log.WithFields(logrus.Fields{
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
@@ -232,7 +240,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
}()
|
||||
|
||||
winningBid := primitives.ZeroWei()
|
||||
var bundle *enginev1.BlobsBundle
|
||||
var bundle enginev1.BlobsBundler
|
||||
if sBlk.Version() >= version.Bellatrix {
|
||||
local, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
|
||||
if err != nil {
|
||||
@@ -274,7 +282,13 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
// TODO: Add tests
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -286,12 +300,12 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
|
||||
}
|
||||
isPeerDASEnabled := coreTime.PeerDASIsActive(block.Block().Slot())
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block, isPeerDASEnabled)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
|
||||
blobSidecars, dataColumnSideCars, err = vs.handleUnblindedBlock(block, req, isPeerDASEnabled)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
@@ -302,9 +316,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
slot := block.Block().Slot()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -315,8 +330,14 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
if isPeerDASEnabled {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, slot); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
@@ -328,46 +349,75 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock, isPeerDASEnabled bool) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
}
|
||||
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return nil, nil, errors.New("unconfigured block builder")
|
||||
return nil, nil, nil, errors.New("unconfigured block builder")
|
||||
}
|
||||
|
||||
copiedBlock, err := block.Copy()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, errors.Wrap(err, "block copy")
|
||||
}
|
||||
|
||||
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "submit blinded block failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "submit blinded block")
|
||||
}
|
||||
|
||||
if err := copiedBlock.Unblind(payload); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind")
|
||||
}
|
||||
|
||||
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if isPeerDASEnabled {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, bundle.GetBlobs(), bundle.GetProofs())
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return copiedBlock, nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
}
|
||||
|
||||
return copiedBlock, sidecars, nil
|
||||
return copiedBlock, blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
isPeerDASEnabled bool,
|
||||
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
|
||||
if isPeerDASEnabled {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "protobuf conversion failed")
|
||||
@@ -383,7 +433,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
|
||||
}
|
||||
|
||||
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
for i, sc := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
@@ -412,6 +462,69 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data column sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
root [fieldparams.RootLength]byte,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, sd := range sidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
sidecar := sd
|
||||
eg.Go(func() error {
|
||||
if sidecar.Index < dataColumnsWithholdCount {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"index": sidecar.Index,
|
||||
}).Warning("Withholding data column")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
if err := vs.P2P.BroadcastDataColumn(root, subnet, sidecar); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return errors.Wrap(err, "wait for data columns to be broadcasted")
|
||||
}
|
||||
|
||||
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
|
||||
return errors.Wrap(err, "receive data column")
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.DataColumnSidecarReceived,
|
||||
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
|
||||
@@ -54,7 +54,7 @@ const blockBuilderTimeout = 1 * time.Second
|
||||
const gasLimitAdjustmentFactor = 1024
|
||||
|
||||
// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
|
||||
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse, bid builder.Bid, builderBoostFactor primitives.Gwei) (primitives.Wei, *enginev1.BlobsBundle, error) {
|
||||
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse, bid builder.Bid, builderBoostFactor primitives.Gwei) (primitives.Wei, enginev1.BlobsBundler, error) {
|
||||
_, span := trace.StartSpan(ctx, "ProposerServer.setExecutionData")
|
||||
defer span.End()
|
||||
|
||||
@@ -69,13 +69,13 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
|
||||
// Use local payload if builder payload is nil.
|
||||
if bid == nil {
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
builderPayload, err := bid.Header()
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to retrieve header from BuilderBid")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -84,7 +84,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Warn("Proposer: failed to match withdrawals root")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
// Compare payload values between local and builder. Default to the local value if it is higher.
|
||||
@@ -97,7 +97,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
"minBuilderBid": minBid,
|
||||
"builderGweiValue": builderValueGwei,
|
||||
}).Warn("Proposer: using local execution payload because min bid not attained")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
// Use local block if min difference is not attained
|
||||
@@ -108,7 +108,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
"minBidDiff": minDiff,
|
||||
"builderGweiValue": builderValueGwei,
|
||||
}).Warn("Proposer: using local execution payload because min difference with local value was not attained")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
// Use builder payload if the following is true:
|
||||
@@ -133,7 +133,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
bidDeneb, ok := bid.(builder.BidDeneb)
|
||||
if !ok {
|
||||
log.Warnf("bid type %T does not implement builder.BidDeneb", bid)
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
builderKzgCommitments = bidDeneb.BlobKzgCommitments()
|
||||
}
|
||||
@@ -144,14 +144,14 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
bidElectra, ok := bid.(builder.BidElectra)
|
||||
if !ok {
|
||||
log.Warnf("bid type %T does not implement builder.BidElectra", bid)
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
executionRequests = bidElectra.ExecutionRequests()
|
||||
}
|
||||
}
|
||||
if err := setBuilderExecution(blk, builderPayload, builderKzgCommitments, executionRequests); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
return bid.Value(), nil, nil
|
||||
}
|
||||
@@ -171,11 +171,11 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
|
||||
trace.Int64Attribute("builderBoostFactor", int64(builderBoostFactor)), // lint:ignore uintcast -- This is OK for tracing.
|
||||
)
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
default: // Bellatrix case.
|
||||
if err := setBuilderExecution(blk, builderPayload, nil, nil); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
return bid.Value(), nil, nil
|
||||
}
|
||||
@@ -375,8 +375,8 @@ func matchingWithdrawalsRoot(local, builder interfaces.ExecutionData) (bool, err
|
||||
// It delegates to setExecution for the actual work.
|
||||
func setLocalExecution(blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse) error {
|
||||
var kzgCommitments [][]byte
|
||||
if local.BlobsBundle != nil {
|
||||
kzgCommitments = local.BlobsBundle.KzgCommitments
|
||||
if local.BlobsBundler != nil {
|
||||
kzgCommitments = local.BlobsBundler.GetKzgCommitments()
|
||||
}
|
||||
if local.ExecutionRequests != nil {
|
||||
if err := blk.SetExecutionRequests(local.ExecutionRequests); err != nil {
|
||||
|
||||
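Throughout these hunks the concrete *enginev1.BlobsBundle return type is swapped for the enginev1.BlobsBundler interface, so the same selection logic can carry either a pre-Fulu bundle or a Fulu BlobsBundleV2. The interface definition itself is not part of this excerpt; the following is only a hedged sketch, inferred from the getters actually called in this diff (GetKzgCommitments, GetProofs, GetBlobs), of what such an interface could look like. Both generated protobuf bundle types would satisfy it through their standard getters.

// Hedged sketch only: the real Prysm definition of BlobsBundler may differ.
// Both *enginev1.BlobsBundle and *enginev1.BlobsBundleV2 expose these generated
// getters, so either would satisfy an interface of this shape.
package enginev1

type BlobsBundler interface {
	GetKzgCommitments() [][]byte
	GetProofs() [][]byte
	GetBlobs() [][]byte
}

Writing setExecutionData and setLocalExecution against an interface of this shape keeps the builder-versus-local payload decision identical regardless of which bundle version the engine API returned.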
@@ -520,7 +520,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
PayloadIDBytes: id,
|
||||
GetPayloadResponse: &blocks.GetPayloadResponse{
|
||||
ExecutionData: ed,
|
||||
BlobsBundle: blobsBundle,
|
||||
BlobsBundler: blobsBundle,
|
||||
Bid: primitives.ZeroWei(),
|
||||
},
|
||||
}
|
||||
@@ -528,7 +528,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
res, err := vs.getLocalPayload(ctx, blk.Block(), capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(4), res.ExecutionData.BlockNumber())
|
||||
require.DeepEqual(t, res.BlobsBundle, blobsBundle)
|
||||
require.DeepEqual(t, res.BlobsBundler, blobsBundle)
|
||||
})
|
||||
t.Run("Can get builder payload and blobs in Deneb", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
|
||||
@@ -529,7 +529,7 @@ func TestServer_GetBeaconBlock_Deneb(t *testing.T) {
|
||||
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
|
||||
GetPayloadResponse: &blocks.GetPayloadResponse{
|
||||
ExecutionData: ed,
|
||||
BlobsBundle: bundle,
|
||||
BlobsBundler: bundle,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -89,6 +89,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -120,6 +121,7 @@ type Config struct {
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
@@ -196,6 +198,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
DataColumnStorage: s.cfg.DataColumnStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
@@ -236,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -61,6 +61,7 @@ type ReadOnlyBeaconState interface {
|
||||
ReadOnlySyncCommittee
|
||||
ReadOnlyDeposits
|
||||
ReadOnlyConsolidations
|
||||
ReadOnlyProposerLookahead
|
||||
ToProtoUnsafe() interface{}
|
||||
ToProto() interface{}
|
||||
GenesisTime() uint64
|
||||
@@ -95,6 +96,7 @@ type WriteOnlyBeaconState interface {
|
||||
WriteOnlyConsolidations
|
||||
WriteOnlyWithdrawals
|
||||
WriteOnlyDeposits
|
||||
WriteOnlyProposerLookahead
|
||||
SetGenesisTime(val uint64) error
|
||||
SetGenesisValidatorsRoot(val []byte) error
|
||||
SetSlot(val primitives.Slot) error
|
||||
@@ -239,6 +241,10 @@ type ReadOnlyConsolidations interface {
|
||||
NumPendingConsolidations() (uint64, error)
|
||||
}
|
||||
|
||||
type ReadOnlyProposerLookahead interface {
|
||||
ProposerLookahead() ([]primitives.ValidatorIndex, error)
|
||||
}
|
||||
|
||||
// WriteOnlyBlockRoots defines a struct which only has write access to block roots methods.
|
||||
type WriteOnlyBlockRoots interface {
|
||||
SetBlockRoots(val [][]byte) error
|
||||
@@ -340,3 +346,7 @@ type WriteOnlyDeposits interface {
|
||||
SetPendingDeposits(val []*ethpb.PendingDeposit) error
|
||||
SetDepositBalanceToConsume(primitives.Gwei) error
|
||||
}
|
||||
|
||||
type WriteOnlyProposerLookahead interface {
|
||||
SetProposerLookahead([]primitives.ValidatorIndex) error
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ go_library(
|
||||
"getters_misc.go",
|
||||
"getters_participation.go",
|
||||
"getters_payload_header.go",
|
||||
"getters_proposer_lookahead.go",
|
||||
"getters_randao.go",
|
||||
"getters_state.go",
|
||||
"getters_sync_committee.go",
|
||||
@@ -37,6 +38,7 @@ go_library(
|
||||
"setters_misc.go",
|
||||
"setters_participation.go",
|
||||
"setters_payload_header.go",
|
||||
"setters_proposer_lookahead.go",
|
||||
"setters_randao.go",
|
||||
"setters_state.go",
|
||||
"setters_sync_committee.go",
|
||||
@@ -97,6 +99,7 @@ go_test(
|
||||
"getters_deposits_test.go",
|
||||
"getters_exit_test.go",
|
||||
"getters_participation_test.go",
|
||||
"getters_setters_lookahead_test.go",
|
||||
"getters_test.go",
|
||||
"getters_validator_test.go",
|
||||
"getters_withdrawal_test.go",
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
Note: Whenever only the name of a file is provided, it's assumed to be in the `/beacon-chain/state/state-native` package.
|
||||
|
||||
- Add a `BeaconState[Version]FieldCount` configuration item to `/config/params/config.go` and set it in `/config/params/mainnet_config.go`.
|
||||
- Add the field to the `BeaconState` struct in `beacon_state_mainnet.go` and `beacon_state_minimal.go`. Update the marshaling code too.
|
||||
- Add the field to the `BeaconState` struct in `beacon_state.go`. Update the marshaling structs in the same file too.
|
||||
- Add the field's metadata to `/beacon-chain/state/state-native/types/types.go`.
|
||||
- Add a getter and a setter for the field, either to existing `getter_XXX.go`/`setter_XXX.go` files or create new ones if the field doesn't fit anywhere.
|
||||
Add the new getter and setter to `/beacon-chain/state/interfaces.go`.
|
||||
@@ -19,6 +19,6 @@ between states.
|
||||
- Add the following functions: `InitializeFromProto[Version]()`, `InitializeFromProtoUnsafe[Version]()`.
|
||||
- Update the following functions: `Copy()`, `initializeMerkleLayers()`, `RecordStateMetrics()` (applies only to multi-value slice fields), `rootSelector()`,
|
||||
`finalizerCleanup()` (applies only to multi-value slice fields).
|
||||
- If the field is a slice, add it to the field map in `types.go`.
|
||||
- If the field is a slice, add it to the field map in `types.go`. This only applies to large slices that are rehashed incrementally (only the changed parts). In practice that means arrays of objects rather than arrays of basic SSZ types, since basic types are not hashed by taking the root of each element.
|
||||
- If the field is a slice, update the `fieldConverters()` function in `/beacon-chain/state/fieldtrie/field_trie_helpers.go`. The exact implementation will vary
|
||||
depending on a few factors (is the field similar to an existing one, is it a multi-value slice etc.)
|
||||
depending on a few factors (is the field similar to an existing one, is it a multi-value slice, etc.). This applies only to the slices described in the previous point; a getter/setter sketch follows below.
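As a rough illustration of the getter/setter shape this checklist produces (version gating, lock-protected access, copy-on-read), here is a self-contained toy sketch. The names (`toyState`, `fooBalances`, `fuluVersion`) are made up for the example; the real state-native accessors carry additional bookkeeping such as shared field references and dirty-field tracking.

package main

import (
	"errors"
	"fmt"
	"slices"
	"sync"
)

// toyState mimics the locking and version-gating pattern used by state-native accessors.
// It is a standalone illustration, not the real BeaconState type.
type toyState struct {
	version     int
	lock        sync.RWMutex
	fooBalances []uint64 // hypothetical new field
}

const fuluVersion = 7 // placeholder value for this example only

// FooBalances returns a copy of the field so callers cannot mutate internal state.
func (s *toyState) FooBalances() ([]uint64, error) {
	if s.version < fuluVersion {
		return nil, errors.New("FooBalances is not supported before fulu")
	}
	s.lock.RLock()
	defer s.lock.RUnlock()
	return slices.Clone(s.fooBalances), nil
}

// SetFooBalances overwrites the field under the write lock.
func (s *toyState) SetFooBalances(v []uint64) error {
	if s.version < fuluVersion {
		return errors.New("SetFooBalances is not supported before fulu")
	}
	s.lock.Lock()
	defer s.lock.Unlock()
	s.fooBalances = slices.Clone(v)
	return nil
}

func main() {
	st := &toyState{version: fuluVersion}
	if err := st.SetFooBalances([]uint64{1, 2, 3}); err != nil {
		panic(err)
	}
	got, _ := st.FooBalances()
	fmt.Println(got) // [1 2 3]
}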
|
||||
|
||||
@@ -70,6 +70,7 @@ type BeaconState struct {
|
||||
pendingDeposits []*ethpb.PendingDeposit // pending_deposits: List[PendingDeposit, PENDING_DEPOSITS_LIMIT]
|
||||
pendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal // pending_partial_withdrawals: List[PartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT]
|
||||
pendingConsolidations []*ethpb.PendingConsolidation // pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT]
|
||||
proposerLookahead []primitives.ValidatorIndex // proposer_look_ahead: List[uint64, (MIN_LOOKAHEAD + 1)*SLOTS_PER_EPOCH]
|
||||
|
||||
id uint64
|
||||
lock sync.RWMutex
|
||||
@@ -125,6 +126,7 @@ type beaconStateMarshalable struct {
|
||||
PendingDeposits []*ethpb.PendingDeposit `json:"pending_deposits" yaml:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal `json:"pending_partial_withdrawals" yaml:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*ethpb.PendingConsolidation `json:"pending_consolidations" yaml:"pending_consolidations"`
|
||||
ProposerLookahead []primitives.ValidatorIndex `json:"proposer_look_ahead" yaml:"proposer_look_ahead"`
|
||||
}
|
||||
|
||||
func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
@@ -194,6 +196,7 @@ func (b *BeaconState) MarshalJSON() ([]byte, error) {
|
||||
PendingDeposits: b.pendingDeposits,
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawals,
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: b.proposerLookahead,
|
||||
}
|
||||
return json.Marshal(marshalable)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
)
|
||||
|
||||
// ProposerLookahead is a non-mutating call to the beacon state which returns a slice of
|
||||
// validator indices containing the proposer for each upcoming slot in the lookahead window.
|
||||
func (b *BeaconState) ProposerLookahead() ([]primitives.ValidatorIndex, error) {
|
||||
if b.version < version.Fulu {
|
||||
return nil, errNotSupported("ProposerLookahead", b.version)
|
||||
}
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
return slices.Clone(b.proposerLookahead), nil
|
||||
}
|
||||
@@ -0,0 +1,44 @@
|
||||
package state_native_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestProposerLookahead(t *testing.T) {
|
||||
t.Run("Fulu expected values", func(t *testing.T) {
|
||||
lookahead := make([]uint64, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch))
|
||||
want := make([]primitives.ValidatorIndex, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch))
|
||||
st, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateFulu{
|
||||
ProposerLookahead: lookahead,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
got, err := st.ProposerLookahead()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(want), len(got))
|
||||
for i, w := range want {
|
||||
require.Equal(t, w, got[i], "index %d", i)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Fulu error on invalid size", func(t *testing.T) {
|
||||
lookahead := make([]primitives.ValidatorIndex, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch)+1)
|
||||
st, err := state_native.InitializeFromProtoFulu(&ethpb.BeaconStateFulu{})
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "invalid size for proposer lookahead", st.SetProposerLookahead(lookahead))
|
||||
})
|
||||
|
||||
t.Run("earlier than electra returns error", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{})
|
||||
require.NoError(t, err)
|
||||
_, err = st.ProposerLookahead()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
lookahead := make([]primitives.ValidatorIndex, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch))
|
||||
require.ErrorContains(t, "is not supported", st.SetProposerLookahead(lookahead))
|
||||
})
|
||||
}
|
||||
@@ -182,7 +182,7 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
}
|
||||
case version.Electra, version.Fulu:
|
||||
case version.Electra:
|
||||
return &ethpb.BeaconStateElectra{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
@@ -222,6 +222,51 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawals,
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
}
|
||||
case version.Fulu:
|
||||
lookahead := make([]uint64, len(b.proposerLookahead))
|
||||
for i, v := range b.proposerLookahead {
|
||||
lookahead[i] = uint64(v)
|
||||
}
|
||||
return &ethpb.BeaconStateFulu{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
Slot: b.slot,
|
||||
Fork: b.fork,
|
||||
LatestBlockHeader: b.latestBlockHeader,
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: b.historicalRoots.Slice(),
|
||||
Eth1Data: b.eth1Data,
|
||||
Eth1DataVotes: b.eth1DataVotes,
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: b.slashings,
|
||||
PreviousEpochParticipation: b.previousEpochParticipation,
|
||||
CurrentEpochParticipation: b.currentEpochParticipation,
|
||||
JustificationBits: b.justificationBits,
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
|
||||
FinalizedCheckpoint: b.finalizedCheckpoint,
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: b.currentSyncCommittee,
|
||||
NextSyncCommittee: b.nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDeneb,
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: b.consolidationBalanceToConsume,
|
||||
EarliestConsolidationEpoch: b.earliestConsolidationEpoch,
|
||||
PendingDeposits: b.pendingDeposits,
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawals,
|
||||
PendingConsolidations: b.pendingConsolidations,
|
||||
ProposerLookahead: lookahead,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -388,7 +433,7 @@ func (b *BeaconState) ToProto() interface{} {
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummariesVal(),
|
||||
}
|
||||
case version.Electra, version.Fulu:
|
||||
case version.Electra:
|
||||
return &ethpb.BeaconStateElectra{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
@@ -428,6 +473,51 @@ func (b *BeaconState) ToProto() interface{} {
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawalsVal(),
|
||||
PendingConsolidations: b.pendingConsolidationsVal(),
|
||||
}
|
||||
case version.Fulu:
|
||||
lookahead := make([]uint64, len(b.proposerLookahead))
|
||||
for i, v := range b.proposerLookahead {
|
||||
lookahead[i] = uint64(v)
|
||||
}
|
||||
return &ethpb.BeaconStateFulu{
|
||||
GenesisTime: b.genesisTime,
|
||||
GenesisValidatorsRoot: gvrCopy[:],
|
||||
Slot: b.slot,
|
||||
Fork: b.forkVal(),
|
||||
LatestBlockHeader: b.latestBlockHeaderVal(),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: b.historicalRoots.Slice(),
|
||||
Eth1Data: b.eth1DataVal(),
|
||||
Eth1DataVotes: b.eth1DataVotesVal(),
|
||||
Eth1DepositIndex: b.eth1DepositIndex,
|
||||
Validators: b.validatorsVal(),
|
||||
Balances: b.balancesVal(),
|
||||
RandaoMixes: rm,
|
||||
Slashings: b.slashingsVal(),
|
||||
PreviousEpochParticipation: b.previousEpochParticipationVal(),
|
||||
CurrentEpochParticipation: b.currentEpochParticipationVal(),
|
||||
JustificationBits: b.justificationBitsVal(),
|
||||
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpointVal(),
|
||||
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpointVal(),
|
||||
FinalizedCheckpoint: b.finalizedCheckpointVal(),
|
||||
InactivityScores: b.inactivityScoresVal(),
|
||||
CurrentSyncCommittee: b.currentSyncCommitteeVal(),
|
||||
NextSyncCommittee: b.nextSyncCommitteeVal(),
|
||||
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeaderDeneb.Copy(),
|
||||
NextWithdrawalIndex: b.nextWithdrawalIndex,
|
||||
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
|
||||
HistoricalSummaries: b.historicalSummariesVal(),
|
||||
DepositRequestsStartIndex: b.depositRequestsStartIndex,
|
||||
DepositBalanceToConsume: b.depositBalanceToConsume,
|
||||
ExitBalanceToConsume: b.exitBalanceToConsume,
|
||||
EarliestExitEpoch: b.earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: b.consolidationBalanceToConsume,
|
||||
EarliestConsolidationEpoch: b.earliestConsolidationEpoch,
|
||||
PendingDeposits: b.pendingDepositsVal(),
|
||||
PendingPartialWithdrawals: b.pendingPartialWithdrawalsVal(),
|
||||
PendingConsolidations: b.pendingConsolidationsVal(),
|
||||
ProposerLookahead: lookahead,
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
@@ -554,4 +644,12 @@ func ProtobufBeaconStateElectra(s interface{}) (*ethpb.BeaconStateElectra, error
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
var ProtobufBeaconStateFulu = ProtobufBeaconStateElectra
|
||||
// ProtobufBeaconStateFulu transforms an input into beacon state Fulu in the form of protobuf.
|
||||
// Error is returned if the input is not type protobuf beacon state.
|
||||
func ProtobufBeaconStateFulu(s interface{}) (*ethpb.BeaconStateFulu, error) {
|
||||
pbState, ok := s.(*ethpb.BeaconStateFulu)
|
||||
if !ok {
|
||||
return nil, errors.New("input is not type pb.BeaconStateFulu")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
@@ -320,5 +320,13 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
|
||||
fieldRoots[types.PendingConsolidations.RealPosition()] = pcRoot[:]
|
||||
}
|
||||
|
||||
if state.version >= version.Fulu {
|
||||
// Proposer lookahead root.
|
||||
proposerLookaheadRoot, err := stateutil.ProposerLookaheadRoot(state.proposerLookahead)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute proposer lookahead merkleization")
|
||||
}
|
||||
fieldRoots[types.ProposerLookahead.RealPosition()] = proposerLookaheadRoot[:]
|
||||
}
|
||||
return fieldRoots, nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
)
|
||||
|
||||
// SetProposerLookahead is a mutating call to the beacon state which sets the proposer lookahead
|
||||
func (b *BeaconState) SetProposerLookahead(lookahead []primitives.ValidatorIndex) error {
|
||||
if b.version < version.Fulu {
|
||||
return errNotSupported("SetProposerLookahead", b.version)
|
||||
}
|
||||
if len(lookahead) != int((params.BeaconConfig().MinSeedLookahead+1))*int(params.BeaconConfig().SlotsPerEpoch) {
|
||||
return errors.New("invalid size for proposer lookahead")
|
||||
}
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
b.sharedFieldReferences[types.ProposerLookahead].MinusRef()
|
||||
b.sharedFieldReferences[types.ProposerLookahead] = stateutil.NewRef(1)
|
||||
|
||||
b.proposerLookahead = lookahead
|
||||
|
||||
b.markFieldAsDirty(types.ProposerLookahead)
|
||||
return nil
|
||||
}
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
mvslice "github.com/OffchainLabs/prysm/v6/container/multi-value-slice"
|
||||
"github.com/OffchainLabs/prysm/v6/container/slice"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -108,7 +109,10 @@ var (
|
||||
types.PendingConsolidations,
|
||||
)
|
||||
|
||||
fuluFields = electraFields
|
||||
fuluFields = append(
|
||||
electraFields,
|
||||
types.ProposerLookahead,
|
||||
)
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -118,14 +122,14 @@ const (
|
||||
capellaSharedFieldRefCount = 13
|
||||
denebSharedFieldRefCount = 13
|
||||
electraSharedFieldRefCount = 16
|
||||
fuluSharedFieldRefCount = 16
|
||||
fuluSharedFieldRefCount = 17
|
||||
experimentalStatePhase0SharedFieldRefCount = 5
|
||||
experimentalStateAltairSharedFieldRefCount = 5
|
||||
experimentalStateBellatrixSharedFieldRefCount = 6
|
||||
experimentalStateCapellaSharedFieldRefCount = 7
|
||||
experimentalStateDenebSharedFieldRefCount = 7
|
||||
experimentalStateElectraSharedFieldRefCount = 10
|
||||
experimentalStateFuluSharedFieldRefCount = 10
|
||||
experimentalStateFuluSharedFieldRefCount = 11
|
||||
)
|
||||
|
||||
// InitializeFromProtoPhase0 the beacon state from a protobuf representation.
|
||||
@@ -159,8 +163,8 @@ func InitializeFromProtoElectra(st *ethpb.BeaconStateElectra) (state.BeaconState
|
||||
}
|
||||
|
||||
// InitializeFromProtoFulu the beacon state from a protobuf representation.
|
||||
func InitializeFromProtoFulu(st *ethpb.BeaconStateElectra) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeFulu(proto.Clone(st).(*ethpb.BeaconStateElectra))
|
||||
func InitializeFromProtoFulu(st *ethpb.BeaconStateFulu) (state.BeaconState, error) {
|
||||
return InitializeFromProtoUnsafeFulu(proto.Clone(st).(*ethpb.BeaconStateFulu))
|
||||
}
|
||||
|
||||
// InitializeFromProtoUnsafePhase0 directly uses the beacon state protobuf fields
|
||||
@@ -842,7 +846,7 @@ func InitializeFromProtoUnsafeElectra(st *ethpb.BeaconStateElectra) (state.Beaco
|
||||
|
||||
// InitializeFromProtoUnsafeFulu directly uses the beacon state protobuf fields
|
||||
// and sets them as fields of the BeaconState type.
|
||||
func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateElectra) (state.BeaconState, error) {
|
||||
func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateFulu) (state.BeaconState, error) {
|
||||
if st == nil {
|
||||
return nil, errors.New("received nil state")
|
||||
}
|
||||
@@ -852,6 +856,10 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateElectra) (state.BeaconSt
|
||||
hRoots[i] = bytesutil.ToBytes32(r)
|
||||
}
|
||||
|
||||
proposerLookahead := make([]primitives.ValidatorIndex, len(st.ProposerLookahead))
|
||||
for i, v := range st.ProposerLookahead {
|
||||
proposerLookahead[i] = primitives.ValidatorIndex(v)
|
||||
}
|
||||
fieldCount := params.BeaconConfig().BeaconStateFuluFieldCount
|
||||
b := &BeaconState{
|
||||
version: version.Fulu,
|
||||
@@ -886,6 +894,7 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateElectra) (state.BeaconSt
|
||||
pendingDeposits: st.PendingDeposits,
|
||||
pendingPartialWithdrawals: st.PendingPartialWithdrawals,
|
||||
pendingConsolidations: st.PendingConsolidations,
|
||||
proposerLookahead: proposerLookahead,
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
@@ -950,6 +959,7 @@ func InitializeFromProtoUnsafeFulu(st *ethpb.BeaconStateElectra) (state.BeaconSt
|
||||
b.sharedFieldReferences[types.PendingDeposits] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.ProposerLookahead] = stateutil.NewRef(1) // New in Fulu.
|
||||
if !features.Get().EnableExperimentalState {
|
||||
b.sharedFieldReferences[types.BlockRoots] = stateutil.NewRef(1)
|
||||
b.sharedFieldReferences[types.StateRoots] = stateutil.NewRef(1)
|
||||
@@ -1015,6 +1025,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
currentEpochAttestations: b.currentEpochAttestations,
|
||||
eth1DataVotes: b.eth1DataVotes,
|
||||
slashings: b.slashings,
|
||||
proposerLookahead: b.proposerLookahead,
|
||||
|
||||
// Large arrays, increases over time.
|
||||
balances: b.balances,
|
||||
@@ -1441,6 +1452,8 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
return stateutil.PendingPartialWithdrawalsRoot(b.pendingPartialWithdrawals)
|
||||
case types.PendingConsolidations:
|
||||
return stateutil.PendingConsolidationsRoot(b.pendingConsolidations)
|
||||
case types.ProposerLookahead:
|
||||
return stateutil.ProposerLookaheadRoot(b.proposerLookahead)
|
||||
}
|
||||
return [32]byte{}, errors.New("invalid field index provided")
|
||||
}
|
||||
|
||||
@@ -112,6 +112,8 @@ func (f FieldIndex) String() string {
|
||||
return "pendingPartialWithdrawals"
|
||||
case PendingConsolidations:
|
||||
return "pendingConsolidations"
|
||||
case ProposerLookahead:
|
||||
return "proposerLookahead"
|
||||
default:
|
||||
return fmt.Sprintf("unknown field index number: %d", f)
|
||||
}
|
||||
@@ -195,6 +197,8 @@ func (f FieldIndex) RealPosition() int {
|
||||
return 35
|
||||
case PendingConsolidations:
|
||||
return 36
|
||||
case ProposerLookahead:
|
||||
return 37
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
@@ -259,6 +263,7 @@ const (
|
||||
PendingDeposits // Electra: EIP-7251
|
||||
PendingPartialWithdrawals // Electra: EIP-7251
|
||||
PendingConsolidations // Electra: EIP-7251
|
||||
ProposerLookahead // Fulu: EIP-7917
|
||||
)
|
||||
|
||||
// Enumerator keeps track of the number of states created since the node's start.
|
||||
|
||||
@@ -259,7 +259,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
defer span.End()
|
||||
|
||||
if s.isFinalizedRoot(blockRoot) {
|
||||
finalizedState := s.finalizedState()
|
||||
finalizedState := s.FinalizedState()
|
||||
if finalizedState != nil {
|
||||
return finalizedState, nil
|
||||
}
|
||||
@@ -297,7 +297,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
|
||||
// Does the state exist in finalized info cache.
|
||||
if s.isFinalizedRoot(parentRoot) {
|
||||
return s.finalizedState(), nil
|
||||
return s.FinalizedState(), nil
|
||||
}
|
||||
|
||||
// Does the state exist in epoch boundary cache.
|
||||
|
||||
@@ -196,7 +196,7 @@ func (s *State) isFinalizedRoot(r [32]byte) bool {
|
||||
}
|
||||
|
||||
// Returns the cached and copied finalized state.
|
||||
func (s *State) finalizedState() state.BeaconState {
|
||||
func (s *State) FinalizedState() state.BeaconState {
|
||||
s.finalizedInfo.lock.RLock()
|
||||
defer s.finalizedInfo.lock.RUnlock()
|
||||
return s.finalizedInfo.state.Copy()
|
||||
|
||||
@@ -33,5 +33,5 @@ func TestResume(t *testing.T) {
|
||||
require.DeepSSZEqual(t, beaconState.ToProtoUnsafe(), resumeState.ToProtoUnsafe())
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, service.finalizedInfo.slot, "Did not get wanted slot")
|
||||
assert.Equal(t, service.finalizedInfo.root, root, "Did not get wanted root")
|
||||
assert.NotNil(t, service.finalizedState(), "Wanted a non nil finalized state")
|
||||
assert.NotNil(t, service.FinalizedState(), "Wanted a non nil finalized state")
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"pending_consolidations_root.go",
|
||||
"pending_deposits_root.go",
|
||||
"pending_partial_withdrawals_root.go",
|
||||
"proposer_lookahead_root.go",
|
||||
"reference.go",
|
||||
"sync_committee.root.go",
|
||||
"trie_helpers.go",
|
||||
@@ -49,6 +50,7 @@ go_test(
|
||||
"benchmark_test.go",
|
||||
"field_root_test.go",
|
||||
"field_root_validator_test.go",
|
||||
"proposer_lookahead_root_test.go",
|
||||
"reference_bench_test.go",
|
||||
"state_root_test.go",
|
||||
"trie_helpers_test.go",
|
||||
|
||||
beacon-chain/state/stateutil/proposer_lookahead_root.go (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
package stateutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
|
||||
)
|
||||
|
||||
// ProposerLookaheadRoot computes the hash tree root of the proposer lookahead
|
||||
func ProposerLookaheadRoot(lookahead []primitives.ValidatorIndex) ([32]byte, error) {
|
||||
chunks := make([][32]byte, (len(lookahead)*8+31)/32)
|
||||
for i, idx := range lookahead {
|
||||
j := i / 4
|
||||
binary.LittleEndian.PutUint64(chunks[j][(i%4)*8:], uint64(idx))
|
||||
}
|
||||
return ssz.MerkleizeVector(chunks, uint64(len(chunks))), nil
|
||||
}
|
||||
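The chunking in ProposerLookaheadRoot follows the SSZ packing rule for a vector of uint64s: four little-endian 8-byte values per 32-byte chunk, after which the chunks are merkleized. The following self-contained sketch reproduces only the packing step so the index arithmetic is easier to follow; the merkleization call is omitted (ssz.MerkleizeVector handles it in the real code), and the helper name packUint64s is made up for this example.

package main

import (
	"encoding/binary"
	"fmt"
)

// packUint64s mirrors the chunking loop in ProposerLookaheadRoot: each uint64 is
// written little-endian into a 32-byte chunk, four values per chunk.
func packUint64s(values []uint64) [][32]byte {
	chunks := make([][32]byte, (len(values)*8+31)/32)
	for i, v := range values {
		j := i / 4                                            // which 32-byte chunk
		binary.LittleEndian.PutUint64(chunks[j][(i%4)*8:], v) // offset within the chunk
	}
	return chunks
}

func main() {
	// On mainnet, (MinSeedLookahead+1)*SlotsPerEpoch = 2*32 = 64 entries, which packs into 16 chunks.
	lookahead := make([]uint64, 64)
	for i := range lookahead {
		lookahead[i] = uint64(i)
	}
	chunks := packUint64s(lookahead)
	fmt.Println(len(chunks))          // 16
	fmt.Printf("%x\n", chunks[0][:8]) // 0000000000000000 (index 0, little-endian)
}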
beacon-chain/state/stateutil/proposer_lookahead_root_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
package stateutil_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestProposerLookaheadRoot(t *testing.T) {
|
||||
lookahead := make([]primitives.ValidatorIndex, 64)
|
||||
root, err := stateutil.ProposerLookaheadRoot(lookahead)
|
||||
require.NoError(t, err)
|
||||
expected := [32]byte{83, 109, 152, 131, 127, 45, 209, 101, 165, 93, 94, 234, 233, 20, 133, 149, 68, 114, 213, 111, 36, 109, 242, 86, 191, 60, 174, 25, 53, 42, 18, 60}
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
@@ -7,6 +7,9 @@ go_library(
|
||||
"block_batcher.go",
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"data_columns.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"data_columns_sampling.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
@@ -25,6 +28,8 @@ go_library(
|
||||
"rpc_blob_sidecars_by_range.go",
|
||||
"rpc_blob_sidecars_by_root.go",
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_light_client.go",
|
||||
"rpc_metadata.go",
|
||||
@@ -38,6 +43,7 @@ go_library(
|
||||
"subscriber_beacon_blocks.go",
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_light_client.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
@@ -55,6 +61,7 @@ go_library(
|
||||
"validate_sync_committee_message.go",
|
||||
"validate_sync_contribution_proof.go",
|
||||
"validate_voluntary_exit.go",
|
||||
"validators_custody.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync",
|
||||
visibility = [
|
||||
@@ -76,7 +83,9 @@ go_library(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -130,6 +139,8 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ferranbt_fastssz//:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -153,13 +164,15 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"batch_verifier_test.go",
|
||||
"blobs_test.go",
|
||||
"block_batcher_test.go",
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"data_columns_sampling_test.go",
|
||||
"data_columns_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
@@ -197,19 +210,23 @@ go_test(
|
||||
"validate_sync_committee_message_test.go",
|
||||
"validate_sync_contribution_proof_test.go",
|
||||
"validate_voluntary_exit_test.go",
|
||||
"validators_custody_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 4,
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -247,6 +264,7 @@ go_test(
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/ecdsa:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/equality:go_default_library",
|
||||
@@ -261,13 +279,17 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
|
||||
@@ -38,8 +38,8 @@ func (s batchState) String() string {
|
||||
return "import_complete"
|
||||
case batchEndSequence:
|
||||
return "end_sequence"
|
||||
case batchBlobSync:
|
||||
return "blob_sync"
|
||||
case batchSidecarSync:
|
||||
return "sidecar_sync"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
@@ -50,7 +50,7 @@ const (
|
||||
batchInit
|
||||
batchSequenced
|
||||
batchErrRetryable
|
||||
batchBlobSync
|
||||
batchSidecarSync
|
||||
batchImportable
|
||||
batchImportComplete
|
||||
batchEndSequence
|
||||
@@ -140,7 +140,7 @@ func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
|
||||
b.results = results
|
||||
b.bs = bs
|
||||
if bs.blobsNeeded() > 0 {
|
||||
return b.withState(batchBlobSync)
|
||||
return b.withState(batchSidecarSync)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
|
||||
@@ -117,7 +117,7 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
|
||||
case b := <-p.fromWorkers:
|
||||
pid := b.busy
|
||||
busy[pid] = false
|
||||
if b.state == batchBlobSync {
|
||||
if b.state == batchSidecarSync {
|
||||
todo = append(todo, b)
|
||||
sortBatchDesc(todo)
|
||||
} else {
|
||||
|
||||
@@ -31,8 +31,8 @@ func (w *p2pWorker) run(ctx context.Context) {
|
||||
select {
|
||||
case b := <-w.todo:
|
||||
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
|
||||
if b.state == batchBlobSync {
|
||||
w.done <- w.handleBlobs(ctx, b)
|
||||
if b.state == batchSidecarSync {
|
||||
w.done <- w.handleSidecars(ctx, b)
|
||||
} else {
|
||||
w.done <- w.handleBlocks(ctx, b)
|
||||
}
|
||||
@@ -80,7 +80,7 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
|
||||
return b.withResults(vb, bs)
|
||||
}
|
||||
|
||||
func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
|
||||
func (w *p2pWorker) handleSidecars(ctx context.Context, b batch) batch {
|
||||
b.blobPid = b.busy
|
||||
start := time.Now()
|
||||
// we don't need to use the response for anything other than metrics, because blobResponseValidation
|
||||
|
||||
@@ -181,7 +181,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
chain, clock := defaultMockChain(t)
|
||||
chain, clock := defaultMockChain(t, 0)
|
||||
if c.chain == nil {
|
||||
c.chain = chain
|
||||
}
|
||||
@@ -279,7 +279,7 @@ func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
func defaultMockChain(t *testing.T, currentSlot uint64) (*mock.ChainService, *startup.Clock) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
df, err := forks.Fork(de)
|
||||
require.NoError(t, err)
|
||||
@@ -290,8 +290,14 @@ func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
require.NoError(t, err)
|
||||
now := time.Now()
|
||||
genOffset := types.Slot(params.BeaconConfig().SecondsPerSlot) * cs
|
||||
genesis := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
clock := startup.NewClock(genesis, [32]byte{})
|
||||
genesisTime := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
|
||||
clock := startup.NewClock(genesisTime, [32]byte{}, startup.WithNower(
|
||||
func() time.Time {
|
||||
return genesisTime.Add(time.Duration(currentSlot*params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
},
|
||||
))
|
||||
|
||||
chain := &mock.ChainService{
|
||||
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
|
||||
Fork: df,
|
||||
|
||||
@@ -78,9 +78,10 @@ func (bb *blockRangeBatcher) next(ctx context.Context, stream libp2pcore.Stream)
|
||||
if !more {
|
||||
return blockBatch{}, false
|
||||
}
|
||||
if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
}
|
||||
// TODO: Uncomment once out of devnet.
|
||||
// if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
// return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
// }
|
||||
|
||||
// Wait for the ticker before doing anything expensive, unless this is the first batch.
|
||||
if bb.ticker != nil && bb.current != nil {
|
||||
|
||||
924
beacon-chain/sync/data_columns.go
Normal file
@@ -0,0 +1,924 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RequestDataColumnSidecarsByRoot is an opinionated, high level function which, for each data column in `dataColumnsToFetch`:
|
||||
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
|
||||
// - Request the data column sidecars from the selected peers.
|
||||
// - In case of peers unable to actually provide all the requested data columns, retry with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars in `dataColumnsToFetch` are retrieved, or
|
||||
// - returns an error if all peers in `peers` are exhausted and at least one data column sidecar is still missing.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after peer exhaustion,
|
||||
//
|
||||
// but `peers` custody more than 64 columns, then try to fetch enough columns to reconstruct needed ones.
|
||||
func RequestDataColumnSidecarsByRoot(
|
||||
ctx context.Context,
|
||||
dataColumnsToFetch []uint64,
|
||||
block blocks.ROBlock,
|
||||
peers []core.PeerID,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
ctxMap ContextByteVersions,
|
||||
newColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(dataColumnsToFetch) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Assemble the peers who can provide the needed data columns.
|
||||
dataColumnsByAdmissiblePeer, _, _, err := AdmissiblePeersForDataColumns(peers, dataColumnsToFetch, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get admissible peers for data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnsToFetch))
|
||||
remainingMissingColumns := make(map[uint64]bool, len(dataColumnsToFetch))
|
||||
for _, column := range dataColumnsToFetch {
|
||||
remainingMissingColumns[column] = true
|
||||
}
|
||||
|
||||
blockRoot := block.Root()
|
||||
|
||||
for len(dataColumnsByAdmissiblePeer) > 0 {
|
||||
peersToFetchFrom, err := SelectPeersToFetchDataColumnsFrom(sliceFromMap(remainingMissingColumns, true /*sorted*/), dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
// Request the data columns from each peer.
|
||||
successfulColumns := make(map[uint64]bool, len(remainingMissingColumns))
|
||||
for peer, peerRequestedColumns := range peersToFetchFrom {
|
||||
log := log.WithFields(logrus.Fields{"peer": peer.String(), "blockRoot": fmt.Sprintf("%#x", blockRoot)})
|
||||
|
||||
// Build the requests for the data columns.
|
||||
byRootRequest := ð.DataColumnsByRootIdentifier{BlockRoot: blockRoot[:], Columns: peerRequestedColumns}
|
||||
|
||||
// Send the requests to the peer.
|
||||
peerSidecars, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p2p, peer, ctxMap, &types.DataColumnsByRootIdentifiers{byRootRequest})
|
||||
if err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": peer.String(),
|
||||
"blockRoot": fmt.Sprintf("%#x", block.Root()),
|
||||
}).WithError(err).Debug("Failed to request data columns from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if returned data columns align with the block.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, peerSidecars); err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Align with block failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the received sidecars.
|
||||
verifier := newColumnsVerifier(peerSidecars, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Valid verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar inclusion proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar KZG proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
verifiedPeerSidecars, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "verified data columns")
|
||||
}
|
||||
|
||||
// Mark columns as successful
|
||||
for _, sidecar := range verifiedPeerSidecars {
|
||||
successfulColumns[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Check if all requested columns were successfully returned.
|
||||
peerMissingColumns := make(map[uint64]bool)
|
||||
for _, index := range peerRequestedColumns {
|
||||
if !successfulColumns[index] {
|
||||
peerMissingColumns[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(peerMissingColumns) > 0 {
|
||||
// Remove this peer if some requested columns were not correctly returned.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithField("missingColumns", sliceFromMap(peerMissingColumns, true /*sorted*/)).Debug("Peer did not provide all requested data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars = append(verifiedSidecars, verifiedPeerSidecars...)
|
||||
}
|
||||
|
||||
// Update remaining columns for the next retry.
|
||||
for col := range successfulColumns {
|
||||
delete(remainingMissingColumns, col)
|
||||
}
|
||||
|
||||
if len(remainingMissingColumns) > 0 {
|
||||
// Some columns are still missing, retry with the remaining peers.
|
||||
continue
|
||||
}
|
||||
|
||||
return verifiedSidecars, nil
|
||||
}
|
||||
|
||||
// If we still have remaining columns after all retries, return error
|
||||
return nil, errors.Errorf("failed to retrieve all requested data columns after retries for block root=%#x, missing columns=%v", blockRoot, sliceFromMap(remainingMissingColumns, true /*sorted*/))
|
||||
}
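The per-peer handling above runs a fixed sequence of checks on each response (alignment with the block, field validity, inclusion proof, KZG proof) and disqualifies the peer at the first failure. The following standalone Go sketch only illustrates that ordering with plain closures; the names and checks are illustrative stand-ins, not the Prysm verification API.

package main

import (
	"errors"
	"fmt"
)

// verifySidecarsFromPeer sketches the verification pipeline applied to each peer's
// response: the checks run in order and the first failure rejects the whole response
// (and, in the real flow, removes the peer from the set used for retries).
func verifySidecarsFromPeer(checks []func() error) error {
	for _, check := range checks {
		if err := check(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	alignsWithBlock := func() error { return nil }
	fieldsValid := func() error { return nil }
	inclusionProven := func() error { return errors.New("inclusion proof verification failed") }
	kzgVerified := func() error { return nil }

	err := verifySidecarsFromPeer([]func() error{alignsWithBlock, fieldsValid, inclusionProven, kzgVerified})
	fmt.Println(err) // the response is rejected at the inclusion-proof step
}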
|
||||
|
||||
// RequestMissingDataColumnsByRange is an opinionated, high level function which, for each block in `blks`:
|
||||
// - Computes all data column sidecars we should store and which are missing (according to our node ID and `groupCount`),
|
||||
// - Builds an optimized set of data column sidecars by range requests in order to never request a data column that is already stored in the DB,
|
||||
// and in order to minimize the number of total requests, while not exceeding `batchSize` sidecars per requests.
|
||||
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
|
||||
// - Request the data column sidecars from the selected peers.
|
||||
// - In case of peers unable to actually provide all the requested data columns, retry with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars for `blks` are retrieved, or
|
||||
// - returns an error if no progress at all is made after 5 consecutive trials.
|
||||
// (If at least one additional data column sidecar is retrieved between two trials, the counter is reset.)
|
||||
//
|
||||
// In case of success, initially missing data columns grouped by block root are returned.
|
||||
// This function expects blocks to be sorted by slot.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after all allowed retries,
|
||||
//
|
||||
// but `peers` custody more than 64 columns, then try to fetch enough columns to reconstruct needed ones.
|
||||
func RequestMissingDataColumnsByRange(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
groupCount uint64,
|
||||
dataColumnsStorage filesystem.DataColumnStorageSummarizer,
|
||||
blks []blocks.ROBlock,
|
||||
batchSize int,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.RODataColumn, error) {
|
||||
const maxAllowedStall = 5 // Number of trials before giving up.
|
||||
|
||||
if len(blks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the current slot.
|
||||
currentSlot := clock.CurrentSlot()
|
||||
|
||||
// Compute the minimum slot for which we should serve data columns.
|
||||
minimumSlot, err := DataColumnsRPCMinValidSlot(currentSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns RPC min valid slot")
|
||||
}
|
||||
|
||||
// Get blocks by root and compute all missing columns by root.
|
||||
blockByRoot := make(map[[fieldparams.RootLength]byte]blocks.ROBlock, len(blks))
|
||||
missingColumnsByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(blks))
|
||||
for _, blk := range blks {
|
||||
// Extract the block root and the block slot
|
||||
blockRoot, blockSlot := blk.Root(), blk.Block().Slot()
|
||||
|
||||
// Populate the block by root.
|
||||
blockByRoot[blockRoot] = blk
|
||||
|
||||
// Skip blocks that are not in the retention period.
|
||||
if blockSlot < minimumSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
missingColumns, err := MissingDataColumns(blk, p2p.NodeID(), groupCount, dataColumnsStorage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
for _, column := range missingColumns {
|
||||
if _, ok := missingColumnsByRoot[blockRoot]; !ok {
|
||||
missingColumnsByRoot[blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
missingColumnsByRoot[blockRoot][column] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if there are no missing data columns.
|
||||
if len(missingColumnsByRoot) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the number of missing data columns.
|
||||
previousMissingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
|
||||
// Count the number of retries for the same amount of missing data columns.
|
||||
stallCount := 0
|
||||
|
||||
// Add log fields.
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"initialMissingColumnsCount": previousMissingDataColumnsCount,
|
||||
"blockCount": len(blks),
|
||||
"firstSlot": blks[0].Block().Slot(),
|
||||
"lastSlot": blks[len(blks)-1].Block().Slot(),
|
||||
})
|
||||
|
||||
// Log the start of the process.
|
||||
start := time.Now()
|
||||
log.Debug("Requesting data column sidecars - start")
|
||||
|
||||
alignedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn, len(blks))
|
||||
for len(missingColumnsByRoot) > 0 {
|
||||
// Build requests.
|
||||
requests, err := buildDataColumnByRangeRequests(blks, missingColumnsByRoot, batchSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "build data column by range requests")
|
||||
}
|
||||
|
||||
// Requests data column sidecars from peers.
|
||||
retrievedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn)
|
||||
for _, request := range requests {
|
||||
roDataColumns, err := fetchDataColumnsFromPeers(ctx, clock, p2p, rateLimiter, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetch data columns from peers")
|
||||
}
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
root := roDataColumn.BlockRoot()
|
||||
if _, ok := blockByRoot[root]; !ok {
|
||||
// It may happen if the peer which sent the data columns is on a different fork.
|
||||
continue
|
||||
}
|
||||
|
||||
retrievedDataColumnsByRoot[root] = append(retrievedDataColumnsByRoot[root], roDataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
for root, dataColumns := range retrievedDataColumnsByRoot {
|
||||
// Retrieve the block from the root.
|
||||
block, ok := blockByRoot[root]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found - this should never happen")
|
||||
}
|
||||
|
||||
// Check if the data columns align with blocks.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, dataColumns); err != nil {
|
||||
log.WithField("root", root).WithError(err).Debug("Data columns do not align with block")
|
||||
continue
|
||||
}
|
||||
|
||||
alignedDataColumnsByRoot[root] = append(alignedDataColumnsByRoot[root], dataColumns...)
|
||||
|
||||
// Remove aligned data columns from the missing columns.
|
||||
for _, dataColumn := range dataColumns {
|
||||
delete(missingColumnsByRoot[root], dataColumn.Index)
|
||||
if len(missingColumnsByRoot[root]) == 0 {
|
||||
delete(missingColumnsByRoot, root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
missingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
if missingDataColumnsCount == previousMissingDataColumnsCount {
|
||||
stallCount++
|
||||
} else {
|
||||
stallCount = 0
|
||||
}
|
||||
|
||||
previousMissingDataColumnsCount = missingDataColumnsCount
|
||||
|
||||
if missingDataColumnsCount > 0 {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
"maxAllowedStall": maxAllowedStall,
|
||||
})
|
||||
|
||||
if stallCount >= maxAllowedStall {
|
||||
// It is very likely `blks` contains orphaned blocks, for which no peer has the data columns.
|
||||
// We give up and let the state machine handle the situation.
|
||||
const message = "Requesting data column sidecars - no progress, giving up"
|
||||
log.Warning(message)
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
}).Debug("Requesting data column sidecars - continue")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("duration", time.Since(start)).Debug("Requesting data column sidecars - success")
|
||||
return alignedDataColumnsByRoot, nil
|
||||
}
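RequestMissingDataColumnsByRange keeps retrying as long as progress is made and gives up after a fixed number of consecutive rounds with no newly retrieved sidecars. A minimal sketch of that stall-counter pattern, with a plain map standing in for the missing-columns bookkeeping and a callback standing in for one fetch round; identifiers are illustrative only.

package main

import (
	"errors"
	"fmt"
)

// retryUntilNoProgress keeps calling fetchRound while progress is made and gives up
// after maxStall consecutive rounds in which the number of missing items did not decrease.
func retryUntilNoProgress(missing map[uint64]bool, fetchRound func(map[uint64]bool) []uint64, maxStall int) error {
	previous := len(missing)
	stall := 0
	for len(missing) > 0 {
		for _, got := range fetchRound(missing) {
			delete(missing, got)
		}
		if len(missing) == previous {
			stall++
		} else {
			stall = 0
		}
		previous = len(missing)
		if stall >= maxStall {
			return errors.New("no progress, giving up")
		}
	}
	return nil
}

func main() {
	missing := map[uint64]bool{1: true, 2: true, 3: true}
	round := 0
	err := retryUntilNoProgress(missing, func(m map[uint64]bool) []uint64 {
		round++
		if round == 1 {
			return []uint64{1, 2} // first round retrieves two columns
		}
		return nil // later rounds make no progress
	}, 5)
	fmt.Println(err)
}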
|
||||
|
||||
// MissingDataColumns looks at the data columns we should store for a given block regarding `custodyGroupCount`,
|
||||
// and returns the indices of the missing ones.
|
||||
func MissingDataColumns(block blocks.ROBlock, nodeID enode.ID, custodyGroupCount uint64, dataColumnStorage filesystem.DataColumnStorageSummarizer) ([]uint64, error) {
|
||||
// Blocks before Fulu have no data columns.
|
||||
if block.Version() < version.Fulu {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the blob commitments from the block.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Nothing to build if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the expected columns.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
expectedColumns := peerInfo.CustodyColumns
|
||||
|
||||
// Get the stored columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
summary := dataColumnStorage.Summary(block.Root())
|
||||
|
||||
storedColumns := make(map[uint64]bool, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
if summary.HasIndex(i) {
|
||||
storedColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the missing columns.
|
||||
missingColumns := make([]uint64, 0, len(expectedColumns))
|
||||
for column := range expectedColumns {
|
||||
if !storedColumns[column] {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
return missingColumns, nil
|
||||
}
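MissingDataColumns reduces to a set difference between the columns the node is expected to custody and the columns already present in storage. A self-contained sketch with plain maps; in the real code the expected set comes from peerdas.Info and the stored set from the storage summary.

package main

import (
	"fmt"
	"sort"
)

// missingColumns returns the expected custody columns that are not yet stored.
func missingColumns(expected, stored map[uint64]bool) []uint64 {
	out := make([]uint64, 0, len(expected))
	for c := range expected {
		if !stored[c] {
			out = append(out, c)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	expected := map[uint64]bool{2: true, 17: true, 33: true}
	stored := map[uint64]bool{17: true}
	fmt.Println(missingColumns(expected, stored)) // [2 33]
}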
|
||||
|
||||
// SelectPeersToFetchDataColumnsFrom implements greedy algorithm in order to select peers to fetch data columns from.
|
||||
// https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm
|
||||
func SelectPeersToFetchDataColumnsFrom(neededDataColumns []uint64, dataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID][]uint64, error) {
|
||||
// Copy the provided needed data columns into a set that we will remove elements from.
|
||||
remainingDataColumns := make(map[uint64]bool, len(neededDataColumns))
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
remainingDataColumns[dataColumn] = true
|
||||
}
|
||||
|
||||
dataColumnsFromSelectedPeers := make(map[peer.ID][]uint64)
|
||||
|
||||
// Filter `dataColumnsByPeer` to only contain needed data columns.
|
||||
neededDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(dataColumnsByPeer))
|
||||
for pid, dataColumns := range dataColumnsByPeer {
|
||||
for dataColumn := range dataColumns {
|
||||
if remainingDataColumns[dataColumn] {
|
||||
if _, ok := neededDataColumnsByPeer[pid]; !ok {
|
||||
neededDataColumnsByPeer[pid] = make(map[uint64]bool, len(neededDataColumns))
|
||||
}
|
||||
|
||||
neededDataColumnsByPeer[pid][dataColumn] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
|
||||
for len(remainingDataColumns) > 0 {
|
||||
// Check if at least one peer remains. If not, it means that we don't have enough peers to fetch all needed data columns.
|
||||
if len(neededDataColumnsByPeer) == 0 {
|
||||
missingDataColumnsSortedSlice := sliceFromMap(remainingDataColumns, true /*sorted*/)
|
||||
return dataColumnsFromSelectedPeers, errors.Errorf("no peer to fetch the following data columns: %v", missingDataColumnsSortedSlice)
|
||||
}
|
||||
|
||||
// Select the peer that custodies the most needed data columns (greedy selection).
|
||||
var bestPeer peer.ID
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
if len(dataColumns) > len(neededDataColumnsByPeer[bestPeer]) {
|
||||
bestPeer = peer
|
||||
}
|
||||
}
|
||||
|
||||
dataColumnsSortedSlice := sliceFromMap(neededDataColumnsByPeer[bestPeer], true /*sorted*/)
|
||||
if uint64(len(dataColumnsSortedSlice)) > maxRequestDataColumnSidecars {
|
||||
dataColumnsSortedSlice = dataColumnsSortedSlice[:maxRequestDataColumnSidecars]
|
||||
}
|
||||
dataColumnsFromSelectedPeers[bestPeer] = dataColumnsSortedSlice
|
||||
|
||||
// Remove the selected peer from the list of peers.
|
||||
delete(neededDataColumnsByPeer, bestPeer)
|
||||
|
||||
// Remove the selected peer's data columns from the list of remaining data columns.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
delete(remainingDataColumns, dataColumn)
|
||||
}
|
||||
|
||||
// Remove the selected peer's data columns from the list of needed data columns by peer.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
delete(dataColumns, dataColumn)
|
||||
|
||||
if len(dataColumns) == 0 {
|
||||
delete(neededDataColumnsByPeer, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsFromSelectedPeers, nil
|
||||
}
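The greedy set-cover selection can be illustrated with ordinary maps. The sketch below uses string peer IDs and ignores the MaxRequestDataColumnSidecars cap, so it is a simplification of the function above rather than a drop-in equivalent.

package main

import (
	"fmt"
	"sort"
)

// greedySelect repeatedly picks the peer that custodies the most still-needed columns,
// assigns those columns to it, and stops when every column is assigned or no remaining
// peer can help.
func greedySelect(needed []uint64, columnsByPeer map[string]map[uint64]bool) map[string][]uint64 {
	remaining := make(map[uint64]bool, len(needed))
	for _, c := range needed {
		remaining[c] = true
	}

	selected := make(map[string][]uint64)
	for len(remaining) > 0 {
		best, bestCount := "", 0
		for peerID, cols := range columnsByPeer {
			count := 0
			for c := range remaining {
				if cols[c] {
					count++
				}
			}
			if count > bestCount {
				best, bestCount = peerID, count
			}
		}
		if bestCount == 0 {
			break // no peer custodies any of the remaining columns
		}
		for c := range remaining {
			if columnsByPeer[best][c] {
				selected[best] = append(selected[best], c)
				delete(remaining, c)
			}
		}
		sort.Slice(selected[best], func(i, j int) bool { return selected[best][i] < selected[best][j] })
		delete(columnsByPeer, best)
	}
	return selected
}

func main() {
	columnsByPeer := map[string]map[uint64]bool{
		"peerA": {0: true, 1: true, 2: true},
		"peerB": {2: true},
		"peerC": {3: true},
	}
	fmt.Println(greedySelect([]uint64{0, 1, 2, 3}, columnsByPeer))
	// map[peerA:[0 1 2] peerC:[3]]
}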
|
||||
|
||||
// AdmissiblePeersForDataColumns returns a map of peers that custody at least one data column listed in `neededDataColumns`.
|
||||
//
|
||||
// It returns:
|
||||
// - A map, where the key of the map is the peer, the value is the custody columns of the peer.
|
||||
// - A map, where the key of the map is the data column, the value is a list of peers that custody the column.
|
||||
// - A slice of descriptions for non admissible peers.
|
||||
// - An error if any.
|
||||
//
|
||||
// NOTE: distributeSamplesToPeer from the DataColumnSampler implements similar logic,
|
||||
// but with only one column queried in each request.
|
||||
func AdmissiblePeersForDataColumns(
|
||||
peers []peer.ID,
|
||||
neededDataColumns []uint64,
|
||||
p2p p2p.P2P,
|
||||
) (map[peer.ID]map[uint64]bool, map[uint64][]peer.ID, []string, error) {
|
||||
peerCount := len(peers)
|
||||
neededDataColumnsCount := uint64(len(neededDataColumns))
|
||||
|
||||
// Create description slice for non admissible peers.
|
||||
descriptions := make([]string, 0, peerCount)
|
||||
|
||||
// Compute custody columns for each peer.
|
||||
dataColumnsByPeer, err := custodyColumnsFromPeers(p2p, peers)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "custody columns from peers")
|
||||
}
|
||||
|
||||
// Filter peers which custody at least one needed data column.
|
||||
dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns, dataColumnsByPeer)
|
||||
descriptions = append(descriptions, localDescriptions...)
|
||||
|
||||
// Compute a map from needed data columns to their peers.
|
||||
admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededDataColumnsCount)
|
||||
for peerId, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
if peerDataColumns[dataColumn] {
|
||||
admissiblePeersByDataColumn[dataColumn] = append(admissiblePeersByDataColumn[dataColumn], peerId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, nil
|
||||
}
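The second return value is the reverse index of the first, restricted to the needed columns. A small standalone sketch of that inversion with string peer IDs instead of libp2p peer.ID values.

package main

import (
	"fmt"
	"sort"
)

// invertCustodyMap builds, for each needed column, the list of peers that custody it.
func invertCustodyMap(needed []uint64, columnsByPeer map[string]map[uint64]bool) map[uint64][]string {
	peersByColumn := make(map[uint64][]string, len(needed))
	for peerID, cols := range columnsByPeer {
		for _, c := range needed {
			if cols[c] {
				peersByColumn[c] = append(peersByColumn[c], peerID)
			}
		}
	}
	for c := range peersByColumn {
		sort.Strings(peersByColumn[c])
	}
	return peersByColumn
}

func main() {
	columnsByPeer := map[string]map[uint64]bool{
		"peerA": {0: true, 1: true},
		"peerB": {1: true, 2: true},
	}
	fmt.Println(invertCustodyMap([]uint64{0, 1, 2, 3}, columnsByPeer))
}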
|
||||
|
||||
// custodyColumnsFromPeers computes all the custody columns indexed by peer.
|
||||
func custodyColumnsFromPeers(p2pIface p2p.P2P, peers []peer.ID) (map[peer.ID]map[uint64]bool, error) {
|
||||
peerCount := len(peers)
|
||||
|
||||
custodyColumnsByPeer := make(map[peer.ID]map[uint64]bool, peerCount)
|
||||
for _, peer := range peers {
|
||||
// Get the node ID from the peer ID.
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "convert peer ID to node ID")
|
||||
}
|
||||
|
||||
// Get the custody group count of the peer.
|
||||
custodyGroupCount := p2pIface.CustodyGroupCountFromPeer(peer)
|
||||
|
||||
// Get peerdas info of the peer.
|
||||
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peerdas info")
|
||||
}
|
||||
|
||||
custodyColumnsByPeer[peer] = dasInfo.CustodyColumns
|
||||
}
|
||||
|
||||
return custodyColumnsByPeer, nil
|
||||
}
|
||||
|
||||
// `filterPeerWhichCustodyAtLeastOneDataColumn` filters peers which custody at least one data column
|
||||
// specified in `neededDataColumns`. It returns also a list of descriptions for non admissible peers.
|
||||
func filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns []uint64, inputDataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID]map[uint64]bool, []string) {
|
||||
// Create pretty needed data columns for logs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
outputDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(inputDataColumnsByPeer))
|
||||
descriptions := make([]string, 0)
|
||||
|
||||
outerLoop:
|
||||
for peer, peerCustodyDataColumns := range inputDataColumnsByPeer {
|
||||
for _, neededDataColumn := range neededDataColumns {
|
||||
if peerCustodyDataColumns[neededDataColumn] {
|
||||
outputDataColumnsByPeer[peer] = peerCustodyDataColumns
|
||||
|
||||
continue outerLoop
|
||||
}
|
||||
}
|
||||
|
||||
peerCustodyColumnsCount := uint64(len(peerCustodyDataColumns))
|
||||
var peerCustodyColumnsLog interface{} = "all"
|
||||
|
||||
if peerCustodyColumnsCount < numberOfColumns {
|
||||
peerCustodyColumnsLog = sliceFromMap(peerCustodyDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
description := fmt.Sprintf("peer %s: does not custody any needed column, custody columns: %v", peer, peerCustodyColumnsLog)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
|
||||
return outputDataColumnsByPeer, descriptions
|
||||
}
|
||||
|
||||
// buildDataColumnByRangeRequests builds an optimized slices of data column by range requests:
|
||||
// 1. It will never request a data column that is already stored in the DB if there is no "hole" in `roBlocks` other than missed slots.
|
||||
// 2. It will minimize the number of requests.
|
||||
// It expects blocks to be sorted by slot.
|
||||
func buildDataColumnByRangeRequests(roBlocks []blocks.ROBlock, missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool, batchSize int) ([]*eth.DataColumnSidecarsByRangeRequest, error) {
|
||||
batchSizeSlot := primitives.Slot(batchSize)
|
||||
|
||||
// Return early if there are no blocks to process.
|
||||
if len(roBlocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// It's safe to get the first item of the slice since we've already checked that it's not empty.
|
||||
firstROBlock, lastROBlock := roBlocks[0], roBlocks[len(roBlocks)-1]
|
||||
firstBlockSlot, lastBlockSlot := firstROBlock.Block().Slot(), lastROBlock.Block().Slot()
|
||||
firstBlockRoot := firstROBlock.Root()
|
||||
|
||||
previousMissingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[firstBlockRoot]))
|
||||
|
||||
if missing, ok := missingColumnsByRoot[firstBlockRoot]; ok {
|
||||
for key, value := range missing {
|
||||
previousMissingDataColumns[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
previousBlockSlot, previousStartBlockSlot := firstBlockSlot, firstBlockSlot
|
||||
|
||||
result := make([]*eth.DataColumnSidecarsByRangeRequest, 0, 1)
|
||||
for index := 1; index < len(roBlocks); index++ {
|
||||
roBlock := roBlocks[index]
|
||||
|
||||
// Extract the block from the RO-block.
|
||||
block := roBlock.Block()
|
||||
|
||||
// Extract the slot from the block.
|
||||
blockRoot, blockSlot := roBlock.Root(), block.Slot()
|
||||
|
||||
if blockSlot <= previousBlockSlot {
|
||||
return nil, errors.Errorf("blocks are not strictly sorted by slot. Previous block slot: %d, current block slot: %d", previousBlockSlot, blockSlot)
|
||||
}
|
||||
|
||||
// Extract KZG commitments count from the current block body
|
||||
blockKzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the count of KZG commitments.
|
||||
blockKzgCommitmentCount := len(blockKzgCommitments)
|
||||
|
||||
// Skip blocks without commitments.
|
||||
if blockKzgCommitmentCount == 0 {
|
||||
previousBlockSlot = blockSlot
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the missing data columns for the current block.
|
||||
missingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[blockRoot]))
|
||||
for key, value := range missingColumnsByRoot[blockRoot] {
|
||||
missingDataColumns[key] = value
|
||||
}
|
||||
|
||||
// Compute if the missing data columns differ.
|
||||
missingDataColumnsDiffer := uint64MapDiffer(previousMissingDataColumns, missingDataColumns)
|
||||
|
||||
// Compute if the batch size is reached.
|
||||
batchSizeReached := blockSlot-previousStartBlockSlot >= batchSizeSlot
|
||||
|
||||
if missingDataColumnsDiffer || batchSizeReached {
|
||||
// Append the slice to the result.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(blockSlot - previousStartBlockSlot),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, request)
|
||||
|
||||
previousStartBlockSlot, previousMissingDataColumns = blockSlot, missingDataColumns
|
||||
}
|
||||
|
||||
previousBlockSlot = blockSlot
|
||||
}
|
||||
|
||||
lastRequest := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(lastBlockSlot - previousStartBlockSlot + 1),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, lastRequest)
|
||||
|
||||
return result, nil
|
||||
}
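The request builder walks blocks in slot order and cuts a new by-range request whenever the set of missing columns changes or the batch size is reached. A simplified, runnable sketch of that grouping; it keys missing columns by slot instead of block root and skips the zero-commitment special case, so it is illustrative only.

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// rangeReq is a simplified stand-in for a by-range request: a start slot, a count,
// and the columns requested for every slot in the range.
type rangeReq struct {
	Start, Count uint64
	Columns      []uint64
}

// buildRanges cuts a new request whenever the missing-column set changes or the batch size is hit.
func buildRanges(slots []uint64, missing map[uint64][]uint64, batchSize uint64) []rangeReq {
	if len(slots) == 0 {
		return nil
	}
	var out []rangeReq
	start := slots[0]
	current := append([]uint64(nil), missing[start]...)
	sort.Slice(current, func(i, j int) bool { return current[i] < current[j] })

	for _, s := range slots[1:] {
		cols := append([]uint64(nil), missing[s]...)
		sort.Slice(cols, func(i, j int) bool { return cols[i] < cols[j] })
		if !reflect.DeepEqual(cols, current) || s-start >= batchSize {
			out = append(out, rangeReq{Start: start, Count: s - start, Columns: current})
			start, current = s, cols
		}
	}
	last := slots[len(slots)-1]
	out = append(out, rangeReq{Start: start, Count: last - start + 1, Columns: current})
	return out
}

func main() {
	slots := []uint64{100, 101, 102, 105}
	missing := map[uint64][]uint64{100: {4, 7}, 101: {4, 7}, 102: {9}, 105: {9}}
	fmt.Println(buildRanges(slots, missing, 64))
	// [{100 2 [4 7]} {102 4 [9]}]
}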
|
||||
|
||||
// fetchDataColumnsFromPeers requests data columns by range to relevant peers
|
||||
func fetchDataColumnsFromPeers(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
ctxMap ContextByteVersions,
|
||||
targetRequest *eth.DataColumnSidecarsByRangeRequest,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
// Filter out requests with no data columns.
|
||||
if len(targetRequest.Columns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get all admissible peers with the data columns they custody.
|
||||
dataColumnsByAdmissiblePeer, err := waitForPeersForDataColumns(p2p, rateLimiter, targetRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "wait for peers for data columns")
|
||||
}
|
||||
|
||||
// Select the peers that will be requested.
|
||||
dataColumnsToFetchByPeer, err := SelectPeersToFetchDataColumnsFrom(targetRequest.Columns, dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
var roDataColumns []blocks.RODataColumn
|
||||
for peer, columnsToFetch := range dataColumnsToFetchByPeer {
|
||||
// Build the request.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: targetRequest.StartSlot,
|
||||
Count: targetRequest.Count,
|
||||
Columns: columnsToFetch,
|
||||
}
|
||||
|
||||
peerRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p2p, peer, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "send data column sidecars by range request")
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, peerRoDataColumns...)
|
||||
}
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// waitForPeersForDataColumns returns a map, where the key of the map is the peer, the value is the custody columns of the peer.
|
||||
// It uses only peers
|
||||
// - synced up to `lastSlot`, and
|
||||
// - have bandwidth to serve `blockCount` blocks.
|
||||
// It waits until at least one peer per data column is available.
|
||||
func waitForPeersForDataColumns(p2p p2p.P2P, rateLimiter *leakybucket.Collector, request *eth.DataColumnSidecarsByRangeRequest) (map[peer.ID]map[uint64]bool, error) {
|
||||
const delay = 5 * time.Second
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Build nice log fields.
|
||||
lastSlot := request.StartSlot.Add(request.Count).Sub(1)
|
||||
|
||||
var neededDataColumnsLog interface{} = "all"
|
||||
neededDataColumnCount := uint64(len(request.Columns))
|
||||
if neededDataColumnCount < numberOfColumns {
|
||||
neededDataColumnsLog = request.Columns
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"start": request.StartSlot,
|
||||
"targetSlot": lastSlot,
|
||||
"neededDataColumns": neededDataColumnsLog,
|
||||
})
|
||||
|
||||
// Keep only peers with head epoch greater than or equal to the epoch corresponding to the target slot, and
|
||||
// keep only peers with enough bandwidth.
|
||||
filteredPeers, descriptions, err := filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter eers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err := AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers := computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
|
||||
// Wait if no suitable peers are available.
|
||||
for len(dataColumnsWithoutPeers) > 0 {
|
||||
// Build nice log fields.
|
||||
var dataColumnsWithoutPeersLog interface{} = "all"
|
||||
dataColumnsWithoutPeersCount := uint64(len(dataColumnsWithoutPeers))
|
||||
if dataColumnsWithoutPeersCount < numberOfColumns {
|
||||
dataColumnsWithoutPeersLog = sliceFromMap(dataColumnsWithoutPeers, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithField("columnsWithoutPeer", dataColumnsWithoutPeersLog).Warning("Fetch data columns from peers - no available peers, retrying later")
|
||||
for _, description := range descriptions {
|
||||
log.Debug(description)
|
||||
}
|
||||
|
||||
for pid, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
var peerDataColumnsLog interface{} = "all"
|
||||
peerDataColumnsCount := uint64(len(peerDataColumns))
|
||||
if peerDataColumnsCount < numberOfColumns {
|
||||
peerDataColumnsLog = sliceFromMap(peerDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"peerDataColumns": peerDataColumnsLog,
|
||||
}).Debug("Peer data columns")
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
|
||||
// Filter for peers with head epoch greater than or equal to our target epoch for ByRange requests.
|
||||
filteredPeers, descriptions, err = filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter peers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err = AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers = computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, nil
|
||||
}
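The waiting loop recomputes column-to-peer coverage and sleeps for a fixed delay until every requested column has at least one admissible peer. A stripped-down sketch of that loop, with callbacks standing in for peer discovery and the coverage query; all names are illustrative.

package main

import (
	"fmt"
	"time"
)

// waitUntilCovered loops until every needed column has at least one serving peer,
// sleeping for a fixed delay and refreshing peer information between checks.
func waitUntilCovered(needed []uint64, peersFor func(uint64) int, delay time.Duration, refresh func()) {
	for {
		var uncovered []uint64
		for _, c := range needed {
			if peersFor(c) == 0 {
				uncovered = append(uncovered, c)
			}
		}
		if len(uncovered) == 0 {
			return
		}
		fmt.Printf("columns without peers: %v, retrying in %v\n", uncovered, delay)
		time.Sleep(delay)
		refresh() // refresh peer information before the next check
	}
}

func main() {
	coverage := map[uint64]int{3: 0, 7: 1}
	waitUntilCovered([]uint64{3, 7}, func(c uint64) int { return coverage[c] }, 10*time.Millisecond, func() {
		coverage[3] = 1 // pretend a peer advertising column 3 connected
	})
	fmt.Println("all columns covered")
}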
|
||||
|
||||
// Filter peers to ensure they are synced to the target slot and have sufficient bandwidth to serve the request.
|
||||
func filterPeersByTargetSlotAndBandwidth(p2p p2p.P2P, rateLimiter *leakybucket.Collector, lastSlot primitives.Slot, blockCount uint64) ([]peer.ID, []string, error) {
|
||||
peers := p2p.Peers().Connected()
|
||||
|
||||
slotPeers, descriptions, err := filterPeersByTargetSlot(p2p, peers, lastSlot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "peers with slot and data columns")
|
||||
}
|
||||
|
||||
// Filter for peers with sufficient bandwidth to serve the request.
|
||||
slotAndBandwidthPeers := hasSufficientBandwidth(rateLimiter, slotPeers, blockCount)
|
||||
|
||||
// Add debugging logs for the filtered peers.
|
||||
peerWithSufficientBandwidthMap := make(map[peer.ID]bool, len(peers))
|
||||
for _, peer := range slotAndBandwidthPeers {
|
||||
peerWithSufficientBandwidthMap[peer] = true
|
||||
}
|
||||
|
||||
for _, peer := range slotPeers {
|
||||
if !peerWithSufficientBandwidthMap[peer] {
|
||||
description := fmt.Sprintf("peer %s: does not have sufficient bandwidth", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
}
|
||||
return slotAndBandwidthPeers, descriptions, nil
|
||||
}
|
||||
|
||||
func hasSufficientBandwidth(rateLimiter *leakybucket.Collector, peers []peer.ID, count uint64) []peer.ID {
|
||||
var filteredPeers []peer.ID
|
||||
|
||||
for _, p := range peers {
|
||||
if uint64(rateLimiter.Remaining(p.String())) < count {
|
||||
continue
|
||||
}
|
||||
copiedP := p
|
||||
filteredPeers = append(filteredPeers, copiedP)
|
||||
}
|
||||
return filteredPeers
|
||||
}
|
||||
|
||||
func computeDataColumnsWithoutPeers(neededColumns []uint64, peersByColumn map[uint64][]peer.ID) map[uint64]bool {
|
||||
result := make(map[uint64]bool)
|
||||
for _, column := range neededColumns {
|
||||
if _, ok := peersByColumn[column]; !ok {
|
||||
result[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Filter peers with head epoch lower than our target epoch for ByRange requests.
|
||||
func filterPeersByTargetSlot(p2p p2p.P2P, peers []peer.ID, targetSlot primitives.Slot) ([]peer.ID, []string, error) {
|
||||
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||
descriptions := make([]string, 0, len(peers))
|
||||
// Compute the target epoch from the target slot.
|
||||
targetEpoch := slots.ToEpoch(targetSlot)
|
||||
|
||||
for _, peer := range peers {
|
||||
peerChainState, err := p2p.Peers().ChainState(peer)
|
||||
if err != nil {
|
||||
description := fmt.Sprintf("peer %s: error: %s", peer, err)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
if peerChainState == nil {
|
||||
description := fmt.Sprintf("peer %s: chain state is nil", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
peerHeadEpoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
if peerHeadEpoch < targetEpoch {
|
||||
description := fmt.Sprintf("peer %s: peer head epoch %d < our target epoch %d", peer, peerHeadEpoch, targetEpoch)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
filteredPeers = append(filteredPeers, peer)
|
||||
}
|
||||
|
||||
return filteredPeers, descriptions, nil
|
||||
}
|
||||
|
||||
// itemsCount returns the total count of items
|
||||
func itemsCount(missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
count := 0
|
||||
for _, columns := range missingColumnsByRoot {
|
||||
count += len(columns)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// uint64MapDiffer returns true if the two maps differ.
|
||||
func uint64MapDiffer(left, right map[uint64]bool) bool {
|
||||
if len(left) != len(right) {
|
||||
return true
|
||||
}
|
||||
|
||||
for k := range left {
|
||||
if !right[k] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
211
beacon-chain/sync/data_columns_reconstruct.go
Normal file
@@ -0,0 +1,211 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const broadCastMissingDataColumnsTimeIntoSlot = 3 * time.Second
|
||||
|
||||
func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColumn blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
|
||||
blockRoot := verifiedRODataColumn.BlockRoot()
|
||||
proposerIndex := verifiedRODataColumn.ProposerIndex()
|
||||
slot := verifiedRODataColumn.Slot()
|
||||
|
||||
// Get the columns we store.
|
||||
storedDataColumns := s.cfg.dataColumnStorage.Summary(blockRoot)
|
||||
storedColumnsCount := storedDataColumns.Count()
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If reconstruction is not possible or if all columns are already stored, exit early.
|
||||
if storedColumnsCount < peerdas.MinimumColumnsCountToReconstruct() || storedColumnsCount == numberOfColumns {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock to prevent concurrent reconstruction.
|
||||
if !s.dataColumsnReconstructionLock.TryLock() {
|
||||
// If the mutex is already locked, it means that another goroutine is already reconstructing the data columns.
|
||||
// In this case, no need to reconstruct again.
|
||||
// TODO: Implement the (pathological) case where we want to reconstruct data columns corresponding to different blocks at the same time.
|
||||
// This should be a rare case and we can ignore it for now, but it needs to be addressed in the future.
|
||||
return nil
|
||||
}
|
||||
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Retrieve the node ID.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
// Prevent the custody group count from changing during the rest of the function.
|
||||
s.cfg.custodyInfo.Mut.RLock()
|
||||
defer s.cfg.custodyInfo.Mut.RUnlock()
|
||||
|
||||
// Compute the custody group count.
|
||||
custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve our local node info.
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Load all the possible data columns sidecars, to minimize reconstruction time.
|
||||
verifiedSidecars, err := s.cfg.dataColumnStorage.Get(blockRoot, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get data column sidecars")
|
||||
}
|
||||
|
||||
// Recover cells and proofs.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(verifiedSidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reconstruct data column sidecars")
|
||||
}
|
||||
|
||||
// Filter reconstructed sidecars to save.
|
||||
custodyColumns := localNodeInfo.CustodyColumns
|
||||
toSaveSidecars := make([]blocks.VerifiedRODataColumn, 0, len(custodyColumns))
|
||||
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if custodyColumns[sidecar.Index] {
|
||||
toSaveSidecars = append(toSaveSidecars, sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the data columns sidecars in the database.
|
||||
// Note: We do not call `receiveDataColumn`, because it will ignore
|
||||
// incoming data columns via gossip while we did not broadcast (yet) the reconstructed data columns.
|
||||
if err := s.cfg.dataColumnStorage.Save(toSaveSidecars); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
// Update reconstruction metrics
|
||||
dataColumnReconstructionHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
dataColumnReconstructionCounter.Add(float64(len(reconstructedSidecars) - len(verifiedSidecars)))
|
||||
|
||||
// Schedule the broadcast.
|
||||
if err := s.scheduleReconstructedDataColumnsBroadcast(ctx, blockRoot, proposerIndex, slot); err != nil {
|
||||
return errors.Wrap(err, "schedule reconstructed data columns broadcast")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"slot": slot,
|
||||
"fromColumnsCount": storedColumnsCount,
|
||||
}).Debug("Data columns reconstructed and saved")
|
||||
|
||||
return nil
|
||||
}
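Reconstruction is gated on having at least half of the columns while not yet having all of them, assuming the usual PeerDAS extension where any half of the columns suffices to rebuild the rest. A tiny sketch of that gate; the column count is an illustrative constant, the real value comes from the beacon config.

package main

import "fmt"

const numberOfColumns = 128 // illustrative; the real value comes from the beacon config

// shouldReconstruct reports whether reconstruction is worth attempting: at least half
// of the columns are stored (enough to rebuild the rest) and at least one is missing.
func shouldReconstruct(storedCount uint64) bool {
	minimumToReconstruct := uint64(numberOfColumns / 2)
	return storedCount >= minimumToReconstruct && storedCount < numberOfColumns
}

func main() {
	for _, stored := range []uint64{40, 64, 128} {
		fmt.Printf("stored=%d reconstruct=%v\n", stored, shouldReconstruct(stored))
	}
}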
|
||||
|
||||
func (s *Service) scheduleReconstructedDataColumnsBroadcast(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%x", root),
|
||||
"slot": slot,
|
||||
})
|
||||
|
||||
// Get the time corresponding to the start of the slot.
|
||||
genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
|
||||
slotStartTime, err := slots.ToTime(genesisTime, slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "to time")
|
||||
}
|
||||
|
||||
// Compute when to broadcast the missing data columns.
|
||||
broadcastTime := slotStartTime.Add(broadCastMissingDataColumnsTimeIntoSlot)
|
||||
|
||||
// Compute the waiting time. This could be negative. In such a case, broadcast immediately.
|
||||
waitingTime := time.Until(broadcastTime)
|
||||
|
||||
time.AfterFunc(waitingTime, func() {
|
||||
s.dataColumsnReconstructionLock.Lock()
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Get the node ID.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
// Prevent the custody group count from changing during the rest of the function.
|
||||
s.cfg.custodyInfo.Mut.RLock()
|
||||
defer s.cfg.custodyInfo.Mut.RUnlock()
|
||||
|
||||
// Get the custody group count.
|
||||
custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve the local node info.
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Peer info")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the data columns we actually store.
|
||||
summary := s.cfg.dataColumnStorage.Summary(root)
|
||||
|
||||
// Compute the missing data columns (data columns we should custody but have not yet received via gossip).
|
||||
missingColumns := make([]uint64, 0, len(localNodeInfo.CustodyColumns))
|
||||
for column := range localNodeInfo.CustodyColumns {
|
||||
if !s.hasSeenDataColumnIndex(slot, proposerIndex, column) {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
// Exit early if there are no missing data columns.
|
||||
// This is the happy path.
|
||||
if len(missingColumns) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, column := range missingColumns {
|
||||
if !summary.HasIndex(column) {
|
||||
// This column was neither received nor reconstructed. This should not happen.
|
||||
log.WithField("column", column).Error("Data column not received nor reconstructed")
|
||||
}
|
||||
}
|
||||
|
||||
// Get the data columns that were reconstructed but not received.
|
||||
verifiedRODataColumnSidecars, err := s.cfg.dataColumnStorage.Get(root, missingColumns)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("get data column sidecars")
|
||||
return
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
|
||||
// Compute the subnet for this column.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)
|
||||
|
||||
// Broadcast the missing data column.
|
||||
if err := s.cfg.p2p.BroadcastDataColumn(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
|
||||
log.WithError(err).Error("Broadcast data column")
|
||||
}
|
||||
|
||||
// Now, we can set the data column as seen.
|
||||
s.setSeenDataColumnIndex(slot, proposerIndex, verifiedRODataColumn.Index)
|
||||
}
|
||||
|
||||
// Sort the missing data columns.
|
||||
slices.Sort[[]uint64](missingColumns)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"timeIntoSlot": broadCastMissingDataColumnsTimeIntoSlot,
|
||||
"columns": missingColumns,
|
||||
}).Debug("Start broadcasting not seen via gossip but reconstructed data columns")
|
||||
})
|
||||
|
||||
return nil
|
||||
}
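The broadcast is scheduled for a fixed offset into the sidecar's slot, and a negative wait simply means broadcasting immediately (time.AfterFunc fires right away for negative durations). A small standalone sketch of that timing arithmetic; the clamp to zero is equivalent in effect.

package main

import (
	"fmt"
	"time"
)

// broadcastDelay computes how long to wait before broadcasting: the slot start plus a
// fixed offset, clamped to zero when that point is already in the past.
func broadcastDelay(genesis time.Time, slot, secondsPerSlot uint64, offset time.Duration) time.Duration {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	wait := time.Until(slotStart.Add(offset))
	if wait < 0 {
		return 0
	}
	return wait
}

func main() {
	genesis := time.Now().Add(-10 * time.Second)                // pretend genesis was 10s ago
	fmt.Println(broadcastDelay(genesis, 0, 12, 3*time.Second))  // slot 0 started 10s ago: broadcast now
	fmt.Println(broadcastDelay(genesis, 1, 12, 3*time.Second))  // slot 1 starts in ~2s: roughly 5s wait
}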
|
||||
627
beacon-chain/sync/data_columns_sampling.go
Normal file
@@ -0,0 +1,627 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const PeerRefreshInterval = 1 * time.Minute
|
||||
|
||||
type roundSummary struct {
|
||||
RequestedColumns []uint64
|
||||
MissingColumns map[uint64]bool
|
||||
}
|
||||
|
||||
// DataColumnSampler defines the interface for sampling data columns from peers for requested block root and samples count.
|
||||
type DataColumnSampler interface {
|
||||
// Run starts the data column sampling service.
|
||||
Run(ctx context.Context)
|
||||
}
|
||||
|
||||
var _ DataColumnSampler = (*dataColumnSampler1D)(nil)
|
||||
|
||||
// dataColumnSampler1D implements the DataColumnSampler interface for PeerDAS 1D.
|
||||
type dataColumnSampler1D struct {
|
||||
sync.RWMutex
|
||||
|
||||
p2p p2p.P2P
|
||||
clock *startup.Clock
|
||||
ctxMap ContextByteVersions
|
||||
stateNotifier statefeed.Notifier
|
||||
|
||||
// nonCustodyGroups is a set of groups that are not custodied by the node.
|
||||
nonCustodyGroups map[uint64]bool
|
||||
|
||||
// groupsByPeer maps a peer to the groups it is responsible for custodying.
|
||||
groupsByPeer map[peer.ID]map[uint64]bool
|
||||
|
||||
// peersByCustodyGroup maps a group to the peers responsible for its custody.
|
||||
peersByCustodyGroup map[uint64]map[peer.ID]bool
|
||||
|
||||
// columnVerifier verifies a column according to the specified requirements.
|
||||
columnVerifier verification.NewDataColumnsVerifier
|
||||
|
||||
// custodyInfo contains the custody information of the node.
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// newDataColumnSampler1D creates a new 1D data column sampler.
|
||||
func newDataColumnSampler1D(
|
||||
p2p p2p.P2P,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
stateNotifier statefeed.Notifier,
|
||||
colVerifier verification.NewDataColumnsVerifier,
|
||||
custodyInfo *peerdas.CustodyInfo,
|
||||
) *dataColumnSampler1D {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
peersByCustodyGroup := make(map[uint64]map[peer.ID]bool, numberOfCustodyGroups)
|
||||
|
||||
for i := range numberOfCustodyGroups {
|
||||
peersByCustodyGroup[i] = make(map[peer.ID]bool)
|
||||
}
|
||||
|
||||
return &dataColumnSampler1D{
|
||||
p2p: p2p,
|
||||
clock: clock,
|
||||
ctxMap: ctxMap,
|
||||
stateNotifier: stateNotifier,
|
||||
groupsByPeer: make(map[peer.ID]map[uint64]bool),
|
||||
peersByCustodyGroup: peersByCustodyGroup,
|
||||
columnVerifier: colVerifier,
|
||||
custodyInfo: custodyInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements DataColumnSampler.
|
||||
func (d *dataColumnSampler1D) Run(ctx context.Context) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
// Get the node ID.
|
||||
nodeID := d.p2p.NodeID()
|
||||
|
||||
// Verify if we need to run sampling or not, if not, return directly.
|
||||
// TODO: Rework this part to take into account dynamic custody group count with peer sampling.
|
||||
custodyGroupCount := d.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve our local node info.
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("peer info")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: custody group count != data column group count
|
||||
if custodyGroupCount >= peerdas.MinimumColumnsCountToReconstruct() {
|
||||
log.WithFields(logrus.Fields{
|
||||
"custodyGroupCount": custodyGroupCount,
|
||||
"totalGroups": numberOfCustodyGroups,
|
||||
}).Debug("The node custodies at least the half of the groups, no need to sample")
|
||||
return
|
||||
}
|
||||
|
||||
// Initialize non custody groups.
|
||||
d.nonCustodyGroups = make(map[uint64]bool)
|
||||
for i := range numberOfCustodyGroups {
|
||||
if !localNodeInfo.CustodyGroups[i] {
|
||||
d.nonCustodyGroups[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize peer info first.
|
||||
d.refreshPeerInfo()
|
||||
|
||||
// periodically refresh peer info to keep peer <-> column mapping up to date.
|
||||
async.RunEvery(ctx, PeerRefreshInterval, d.refreshPeerInfo)
|
||||
|
||||
// start the sampling loop.
|
||||
d.samplingRoutine(ctx)
|
||||
}
|
||||
|
||||
func (d *dataColumnSampler1D) samplingRoutine(ctx context.Context) {
|
||||
stateCh := make(chan *feed.Event, 1)
|
||||
stateSub := d.stateNotifier.StateFeed().Subscribe(stateCh)
|
||||
defer stateSub.Unsubscribe()
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt := <-stateCh:
|
||||
d.handleStateNotification(ctx, evt)
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("DataColumnSampler1D subscription to state feed failed")
|
||||
case <-ctx.Done():
|
||||
log.Debug("Context canceled, exiting data column sampling loop.")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Refresh peer information.
|
||||
func (d *dataColumnSampler1D) refreshPeerInfo() {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
|
||||
activePeers := d.p2p.Peers().Active()
|
||||
d.prunePeerInfo(activePeers)
|
||||
|
||||
for _, pid := range activePeers {
|
||||
// Retrieve the custody group count of the peer.
|
||||
retrievedCustodyGroupCount := d.p2p.CustodyGroupCountFromPeer(pid)
|
||||
|
||||
// Look up in our store the custody groups we have recorded for this peer.
|
||||
storedGroups, ok := d.groupsByPeer[pid]
|
||||
storedGroupsCount := uint64(len(storedGroups))
|
||||
|
||||
if ok && storedGroupsCount == retrievedCustodyGroupCount {
|
||||
// No change for this peer.
|
||||
continue
|
||||
}
|
||||
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to convert peer ID to node ID")
|
||||
continue
|
||||
}
|
||||
|
||||
// Retrieve the peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, retrievedCustodyGroupCount)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid.String()).Error("Failed to determine peer info")
|
||||
}
|
||||
|
||||
d.groupsByPeer[pid] = peerInfo.CustodyGroups
|
||||
for group := range peerInfo.CustodyGroups {
|
||||
d.peersByCustodyGroup[group][pid] = true
|
||||
}
|
||||
}
|
||||
|
||||
groupsWithoutPeers := make([]uint64, 0)
|
||||
for group, peers := range d.peersByCustodyGroup {
|
||||
if len(peers) == 0 {
|
||||
groupsWithoutPeers = append(groupsWithoutPeers, group)
|
||||
}
|
||||
}
|
||||
|
||||
if len(groupsWithoutPeers) > 0 {
|
||||
slices.Sort[[]uint64](groupsWithoutPeers)
|
||||
log.WithField("groups", groupsWithoutPeers).Warn("Some groups have no peers responsible for custody")
|
||||
}
|
||||
}
|
||||
|
||||
// prunePeerInfo prunes inactive peers from peerByGroup and groupByPeer.
|
||||
// This must only be called while the lock is held (e.g. from refreshPeerInfo).
|
||||
func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) {
|
||||
active := make(map[peer.ID]bool)
|
||||
for _, pid := range activePeers {
|
||||
active[pid] = true
|
||||
}
|
||||
|
||||
for pid := range d.groupsByPeer {
|
||||
if !active[pid] {
|
||||
d.prunePeer(pid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// prunePeer removes a peer from stored peer info map, it should be called with lock held.
|
||||
func (d *dataColumnSampler1D) prunePeer(pid peer.ID) {
|
||||
delete(d.groupsByPeer, pid)
|
||||
for _, peers := range d.peersByCustodyGroup {
|
||||
delete(peers, pid)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event *feed.Event) {
|
||||
if event.Type != statefeed.BlockProcessed {
|
||||
return
|
||||
}
|
||||
|
||||
data, ok := event.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok {
|
||||
log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
|
||||
return
|
||||
}
|
||||
|
||||
if !data.Verified {
|
||||
// We only process blocks that have been verified
|
||||
log.Error("Data is not verified")
|
||||
return
|
||||
}
|
||||
|
||||
if data.SignedBlock.Version() < version.Fulu {
|
||||
log.Debug("Pre Fulu block, skipping data column sampling")
|
||||
return
|
||||
}
|
||||
|
||||
// Determine if we need to sample data columns for this block.
|
||||
beaconConfig := params.BeaconConfig()
|
||||
samplesPerSlots := beaconConfig.SamplesPerSlot
|
||||
halfOfCustodyGroups := beaconConfig.NumberOfCustodyGroups / 2
|
||||
nonCustodyGroupsCount := uint64(len(d.nonCustodyGroups))
|
||||
|
||||
if nonCustodyGroupsCount <= halfOfCustodyGroups {
|
||||
// Nothing to sample.
|
||||
return
|
||||
}
|
||||
|
||||
// Get the commitments for this block.
|
||||
commitments, err := data.SignedBlock.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
return
|
||||
}
|
||||
|
||||
// Skip if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
log.Debug("No commitments in block, skipping data column sampling")
|
||||
return
|
||||
}
|
||||
|
||||
// Randomize columns for sample selection.
|
||||
randomizedColumns, err := randomizeColumns(d.nonCustodyGroups)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to randomize columns")
|
||||
return
|
||||
}
|
||||
|
||||
samplesCount := min(samplesPerSlots, nonCustodyGroupsCount-halfOfCustodyGroups)
|
||||
|
||||
// TODO: Use the first output of `incrementalDAS` as input of the fork choice rule.
|
||||
_, _, err = d.incrementalDAS(ctx, data, randomizedColumns, samplesCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to run incremental DAS")
|
||||
}
|
||||
}
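// Illustrative sketch (not part of the original change): a worked example of the
// sampling-trigger arithmetic in handleStateNotification, assuming illustrative
// values of 128 custody groups and 8 samples per slot. A node custodying 4 groups
// has 124 non-custody groups, which exceeds half (64), so it samples
// min(8, 124-64) = 8 columns for the slot; a node custodying 64 or more groups
// samples nothing.
func exampleSamplesCount(numberOfCustodyGroups, samplesPerSlot, custodiedGroupCount uint64) uint64 {
	halfOfCustodyGroups := numberOfCustodyGroups / 2
	nonCustodyGroupsCount := numberOfCustodyGroups - custodiedGroupCount
	if nonCustodyGroupsCount <= halfOfCustodyGroups {
		// Custodying at least half of the groups: nothing to sample.
		return 0
	}
	return min(samplesPerSlot, nonCustodyGroupsCount-halfOfCustodyGroups)
}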
|
||||
|
||||
// incrementalDAS samples data columns from active peers using incremental DAS.
|
||||
// https://ethresear.ch/t/lossydas-lossy-incremental-and-diagonal-sampling-for-data-availability/18963#incrementaldas-dynamically-increase-the-sample-size-10
|
||||
// According to https://github.com/ethereum/consensus-specs/issues/3825, samples are selected exclusively from the non-custody columns.
|
||||
func (d *dataColumnSampler1D) incrementalDAS(
|
||||
ctx context.Context,
|
||||
blockProcessedData *statefeed.BlockProcessedData,
|
||||
columns []uint64,
|
||||
sampleCount uint64,
|
||||
) (bool, []roundSummary, error) {
|
||||
allowedFailures := uint64(0)
|
||||
firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
|
||||
roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary.
|
||||
blockRoot := blockProcessedData.BlockRoot
|
||||
columnsCount := uint64(len(columns))
|
||||
|
||||
start := time.Now()
|
||||
|
||||
for round := 1; ; /*No exit condition */ round++ {
|
||||
if extendedSampleCount > columnsCount {
|
||||
// We already tried to sample all possible columns; this is the unhappy path.
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"round": round - 1,
|
||||
}).Warning("Some columns are still missing after trying to sample all possible columns")
|
||||
return false, roundSummaries, nil
|
||||
}
|
||||
|
||||
// Get the columns to sample for this round.
|
||||
columnsToSample := columns[firstColumnToSample:extendedSampleCount]
|
||||
columnsToSampleCount := extendedSampleCount - firstColumnToSample
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"columns": columnsToSample,
|
||||
"round": round,
|
||||
}).Debug("Start data columns sampling")
|
||||
|
||||
// Sample data columns from peers in parallel.
|
||||
retrievedSamples, err := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample)
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "sample data columns")
|
||||
}
|
||||
|
||||
missingSamples := make(map[uint64]bool)
|
||||
for _, column := range columnsToSample {
|
||||
if !retrievedSamples[column] {
|
||||
missingSamples[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
roundSummaries = append(roundSummaries, roundSummary{
|
||||
RequestedColumns: columnsToSample,
|
||||
MissingColumns: missingSamples,
|
||||
})
|
||||
|
||||
retrievedSampleCount := uint64(len(retrievedSamples))
|
||||
if retrievedSampleCount == columnsToSampleCount {
|
||||
// All columns were correctly sampled, this is the happy path.
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"neededRounds": round,
|
||||
"duration": time.Since(start),
|
||||
}).Debug("All columns were successfully sampled")
|
||||
return true, roundSummaries, nil
|
||||
}
|
||||
|
||||
if retrievedSampleCount > columnsToSampleCount {
|
||||
// This should never happen.
|
||||
return false, nil, errors.New("retrieved more columns than requested")
|
||||
}
|
||||
|
||||
// There are still some missing columns; extend the sample set.
|
||||
allowedFailures += columnsToSampleCount - retrievedSampleCount
|
||||
oldExtendedSampleCount := extendedSampleCount
|
||||
firstColumnToSample = extendedSampleCount
|
||||
extendedSampleCount = peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"round": round,
|
||||
"missingColumnsCount": allowedFailures,
|
||||
"currentSampleIndex": oldExtendedSampleCount,
|
||||
"nextSampleIndex": extendedSampleCount,
|
||||
}).Debug("Some columns are still missing after sampling this round.")
|
||||
}
|
||||
}
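// Illustrative sketch (not part of the original change): how a caller might run
// incrementalDAS and inspect the per-round summaries. The `sampler`, `blockData`
// and `columns` names are hypothetical placeholders for values the caller already
// holds (see handleStateNotification above for the real call site).
func exampleIncrementalDAS(ctx context.Context, sampler *dataColumnSampler1D, blockData *statefeed.BlockProcessedData, columns []uint64) {
	ok, summaries, err := sampler.incrementalDAS(ctx, blockData, columns, 5)
	if err != nil {
		log.WithError(err).Error("Incremental DAS failed")
		return
	}

	// Each roundSummary records the columns requested in that round and the ones
	// still missing when the round ended.
	for i, summary := range summaries {
		log.WithFields(logrus.Fields{
			"round":     i + 1,
			"requested": summary.RequestedColumns,
			"missing":   sliceFromMap(summary.MissingColumns, true /*sorted*/),
		}).Debug("Incremental DAS round summary")
	}

	if !ok {
		log.Warn("Data availability sampling did not complete for this block")
	}
}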
|
||||
|
||||
func (d *dataColumnSampler1D) sampleDataColumns(
|
||||
ctx context.Context,
|
||||
blockProcessedData *statefeed.BlockProcessedData,
|
||||
columns []uint64,
|
||||
) (map[uint64]bool, error) {
|
||||
// Distribute samples to peers.
|
||||
peerToColumns, err := d.distributeSamplesToPeer(columns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "distribute samples to peer")
|
||||
}
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
res := make(map[uint64]bool)
|
||||
|
||||
sampleFromPeer := func(pid peer.ID, cols map[uint64]bool) {
|
||||
defer wg.Done()
|
||||
retrieved := d.sampleDataColumnsFromPeer(ctx, pid, blockProcessedData, cols)
|
||||
|
||||
mu.Lock()
|
||||
for col := range retrieved {
|
||||
res[col] = true
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// sample from peers in parallel
|
||||
for pid, cols := range peerToColumns {
|
||||
wg.Add(1)
|
||||
go sampleFromPeer(pid, cols)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// distributeSamplesToPeer distributes samples to peers based on the columns they are responsible for.
|
||||
// Currently it randomizes peer selection for each column and does not take the overall peer load balance into account. It could be improved if needed.
|
||||
func (d *dataColumnSampler1D) distributeSamplesToPeer(columns []uint64) (map[peer.ID]map[uint64]bool, error) {
|
||||
dist := make(map[peer.ID]map[uint64]bool)
|
||||
|
||||
for _, column := range columns {
|
||||
custodyGroup, err := peerdas.ComputeCustodyGroupForColumn(column)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute custody group for column")
|
||||
}
|
||||
|
||||
peers := d.peersByCustodyGroup[custodyGroup]
|
||||
if len(peers) == 0 {
|
||||
log.WithField("column", column).Warning("No peers responsible for custody of column")
|
||||
continue
|
||||
}
|
||||
|
||||
pid, err := selectRandomPeer(peers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select random peer")
|
||||
}
|
||||
|
||||
if _, ok := dist[pid]; !ok {
|
||||
dist[pid] = make(map[uint64]bool)
|
||||
}
|
||||
|
||||
dist[pid][column] = true
|
||||
}
|
||||
|
||||
return dist, nil
|
||||
}
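// Illustrative sketch (not part of the original change): columns whose custody group
// has no connected peer are simply absent from the distribution returned above, so
// they surface later as missing samples rather than as an error. This helper counts
// how many of the requested columns were actually assigned to a peer.
func exampleDistributedColumnCount(dist map[peer.ID]map[uint64]bool) int {
	total := 0
	for _, cols := range dist {
		total += len(cols)
	}
	return total // May be smaller than the number of requested columns.
}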
|
||||
|
||||
func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
|
||||
ctx context.Context,
|
||||
pid peer.ID,
|
||||
blockProcessedData *statefeed.BlockProcessedData,
|
||||
requestedColumns map[uint64]bool,
|
||||
) map[uint64]bool {
|
||||
retrievedColumns := make(map[uint64]bool)
|
||||
|
||||
cols := make([]uint64, 0, len(requestedColumns))
|
||||
for col := range requestedColumns {
|
||||
cols = append(cols, col)
|
||||
}
|
||||
req := &eth.DataColumnsByRootIdentifier{
|
||||
BlockRoot: blockProcessedData.BlockRoot[:],
|
||||
Columns: cols,
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
roDataColumns, err := SendDataColumnSidecarsByRootRequest(ctx, d.clock, d.p2p, pid, d.ctxMap, &types.DataColumnsByRootIdentifiers{req})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to send data column sidecar by root")
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Once peer sampling is used, we should verify all sampled data columns in a single batch instead of looping over columns.
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
if verifyColumn(roDataColumn, blockProcessedData, pid, requestedColumns, d.columnVerifier) {
|
||||
retrievedColumns[roDataColumn.Index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(retrievedColumns) == len(requestedColumns) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
|
||||
"requestedColumns": sliceFromMap(requestedColumns, true /*sorted*/),
|
||||
}).Debug("Sampled columns from peer successfully")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
|
||||
"requestedColumns": sliceFromMap(requestedColumns, true /*sorted*/),
|
||||
"retrievedColumns": sliceFromMap(retrievedColumns, true /*sorted*/),
|
||||
}).Debug("Sampled columns from peer with some errors")
|
||||
}
|
||||
|
||||
return retrievedColumns
|
||||
}
|
||||
|
||||
// randomizeColumns returns a slice containing, in random order, the columns belonging to the input `custodyGroups`.
|
||||
func randomizeColumns(custodyGroups map[uint64]bool) ([]uint64, error) {
|
||||
// Compute the number of columns per group.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
|
||||
|
||||
// Compute the number of columns.
|
||||
groupCount := uint64(len(custodyGroups))
|
||||
expectedColumnCount := groupCount * columnsPerGroup
|
||||
|
||||
// Compute the columns.
|
||||
columns := make([]uint64, 0, expectedColumnCount)
|
||||
for group := range custodyGroups {
|
||||
columnsGroup, err := peerdas.ComputeColumnsForCustodyGroup(group)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute columns for custody group")
|
||||
}
|
||||
|
||||
columns = append(columns, columnsGroup...)
|
||||
}
|
||||
|
||||
actualColumnCount := len(columns)
|
||||
|
||||
// Safety check.
|
||||
if uint64(actualColumnCount) != expectedColumnCount {
|
||||
return nil, errors.New("invalid number of columns, should never happen")
|
||||
}
|
||||
|
||||
// Shuffle the columns.
|
||||
rand.NewGenerator().Shuffle(actualColumnCount, func(i, j int) {
|
||||
columns[i], columns[j] = columns[j], columns[i]
|
||||
})
|
||||
|
||||
return columns, nil
|
||||
}
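// Illustrative sketch (not part of the original change): the column arithmetic used
// by randomizeColumns, assuming illustrative values of 128 columns and 128 custody
// groups (one column per group). A node passing its 124 non-custody groups gets back
// a shuffled slice of 124 column indices to draw samples from.
func exampleExpectedColumnCount(numberOfColumns, numberOfCustodyGroups, groupCount uint64) uint64 {
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
	return groupCount * columnsPerGroup
}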
|
||||
|
||||
// sliceFromMap returns the list of keys from a map, sorted if requested.
|
||||
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
if len(sorted) > 0 && sorted[0] {
|
||||
slices.Sort(result)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// selectRandomPeer returns a random peer from the given list of peers.
|
||||
func selectRandomPeer(peers map[peer.ID]bool) (peer.ID, error) {
|
||||
peersCount := uint64(len(peers))
|
||||
pick := rand.NewGenerator().Uint64() % peersCount
|
||||
|
||||
for peer := range peers {
|
||||
if pick == 0 {
|
||||
return peer, nil
|
||||
}
|
||||
|
||||
pick--
|
||||
}
|
||||
|
||||
// This should never be reached.
|
||||
return peer.ID(""), errors.New("failed to select random peer")
|
||||
}
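// Illustrative sketch (not part of the original change): selectRandomPeer assumes a
// non-empty peer set; with an empty map the modulo above would panic with a division
// by zero. Callers such as distributeSamplesToPeer check for emptiness first, as in
// this guarded pattern.
func exampleGuardedPeerSelection(peers map[peer.ID]bool) (peer.ID, bool) {
	if len(peers) == 0 {
		return "", false
	}
	pid, err := selectRandomPeer(peers)
	if err != nil {
		return "", false
	}
	return pid, true
}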
|
||||
|
||||
// verifyColumn verifies the retrieved column against the root, the index,
|
||||
// the KZG inclusion and the KZG proof.
|
||||
func verifyColumn(
|
||||
roDataColumn blocks.RODataColumn,
|
||||
blockProcessedData *statefeed.BlockProcessedData,
|
||||
pid peer.ID,
|
||||
requestedColumns map[uint64]bool,
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
) bool {
|
||||
retrievedColumn := roDataColumn.Index
|
||||
|
||||
// Filter out columns that were not requested.
|
||||
if !requestedColumns[retrievedColumn] {
|
||||
columnsToSampleList := sliceFromMap(requestedColumns, true /*sorted*/)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"requestedColumns": columnsToSampleList,
|
||||
"retrievedColumn": retrievedColumn,
|
||||
}).Debug("Retrieved column was not requested")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
roBlock, err := blocks.NewROBlock(blockProcessedData.SignedBlock)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to create ROBlock")
|
||||
return false
}
|
||||
|
||||
roDataColumns := []blocks.RODataColumn{roDataColumn}
|
||||
|
||||
if err := peerdas.DataColumnsAlignWithBlock(roBlock, roDataColumns); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
|
||||
verifier := newDataColumnsVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to verify data column")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to prove inclusion")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to verify KZG proof")
|
||||
}
|
||||
|
||||
_, err = verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to upgrade RODataColumns to VerifiedRODataColumns - should never happen")
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
beacon-chain/sync/data_columns_sampling_test.go (new file, 554 lines)
@@ -0,0 +1,554 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
func TestRandomizeColumns(t *testing.T) {
|
||||
const count uint64 = 128
|
||||
|
||||
// Generate groups.
|
||||
groups := make(map[uint64]bool, count)
|
||||
for i := uint64(0); i < count; i++ {
|
||||
groups[i] = true
|
||||
}
|
||||
|
||||
// Randomize columns.
|
||||
randomizedColumns, err := randomizeColumns(groups)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert back to a map.
|
||||
randomizedColumnsMap := make(map[uint64]bool, count)
|
||||
for _, column := range randomizedColumns {
|
||||
randomizedColumnsMap[column] = true
|
||||
}
|
||||
|
||||
// Check duplicates and missing columns.
|
||||
require.Equal(t, len(groups), len(randomizedColumnsMap))
|
||||
|
||||
// Check the values.
|
||||
for column := range randomizedColumnsMap {
|
||||
require.Equal(t, true, column < count)
|
||||
}
|
||||
}
|
||||
|
||||
// createAndConnectPeer creates a peer whose private key is deterministically derived from `offset`.
|
||||
// The peer is added and connected to `p2pService`.
|
||||
// If a `RPCDataColumnSidecarsByRootTopicV1` request is made with column index `i`,
|
||||
// then the peer will respond with the `dataColumnSidecars[i]` if it is not in `columnsNotToRespond`.
|
||||
// (If `len(dataColumnSidecars) <= i`, then this function will panic.)
|
||||
func createAndConnectPeer(
|
||||
t *testing.T,
|
||||
p2pService *p2ptest.TestP2P,
|
||||
chainService *mock.ChainService,
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar,
|
||||
custodyGroupCount uint64,
|
||||
columnsNotToRespond map[uint64]bool,
|
||||
offset int,
|
||||
) *p2ptest.TestP2P {
|
||||
// Create the private key, depending on the offset.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(offset + i)
|
||||
}
|
||||
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peer.
|
||||
peer := p2ptest.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
|
||||
peer.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1+"/ssz_snappy", func(stream network.Stream) {
|
||||
// Decode the request.
|
||||
req := new(p2pTypes.DataColumnsByRootIdentifiers)
|
||||
err := peer.Encoding().DecodeWithMaxLength(stream, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, identifier := range *req {
|
||||
for _, column := range identifier.Columns {
|
||||
// Filter out the columns not to respond.
|
||||
if columnsNotToRespond[column] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Create the response.
|
||||
resp := dataColumnSidecars[column]
|
||||
|
||||
// Send the response.
|
||||
err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), resp)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Close the stream.
|
||||
closeStream(stream, log)
|
||||
})
|
||||
|
||||
// Create the record and set the custody count.
|
||||
enr := &enr.Record{}
|
||||
enr.Set(peerdas.Cgc(custodyGroupCount))
|
||||
|
||||
// Add the peer and connect it.
|
||||
p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound)
|
||||
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
|
||||
p2pService.Connect(peer)
|
||||
|
||||
return peer
|
||||
}
|
||||
|
||||
type dataSamplerTest struct {
|
||||
ctx context.Context
|
||||
p2pSvc *p2ptest.TestP2P
|
||||
peers []*p2ptest.TestP2P
|
||||
ctxMap map[[4]byte]int
|
||||
chainSvc *mock.ChainService
|
||||
blockProcessedData *statefeed.BlockProcessedData
|
||||
blobs []kzg.Blob
|
||||
kzgCommitments [][]byte
|
||||
kzgProofs [][]byte
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar
|
||||
}
|
||||
|
||||
func setupDefaultDataColumnSamplerTest(t *testing.T) (*dataSamplerTest, *dataColumnSampler1D) {
|
||||
const (
|
||||
blobCount uint64 = 3
|
||||
custodyRequirement uint64 = 4
|
||||
)
|
||||
|
||||
test, sampler := setupDataColumnSamplerTest(t, blobCount)
|
||||
|
||||
// Custody columns: [6, 38, 70, 102]
|
||||
p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 1)
|
||||
|
||||
// Custody columns: [3, 35, 67, 99]
|
||||
p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 2)
|
||||
|
||||
// Custody columns: [12, 44, 76, 108]
|
||||
p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 3)
|
||||
|
||||
test.peers = []*p2ptest.TestP2P{p1, p2, p3}
|
||||
|
||||
return test, sampler
|
||||
}
|
||||
|
||||
func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTest, *dataColumnSampler1D) {
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs, commitments and inclusion proofs.
|
||||
blobs := make([]kzg.Blob, blobCount)
|
||||
kzgCommitments := make([][]byte, blobCount)
|
||||
kzgProofs := make([][]byte, blobCount)
|
||||
|
||||
for i := uint64(0); i < blobCount; i++ {
|
||||
blob := getRandBlob(t, int64(i))
|
||||
|
||||
kzgCommitment, kzgProof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob
|
||||
kzgCommitments[i] = kzgCommitment[:]
|
||||
kzgProofs[i] = kzgProof[:]
|
||||
}
|
||||
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
dbBlock.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot, err := dataColumnSidecars[0].GetSignedBlockHeader().Header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blockProcessedData := &statefeed.BlockProcessedData{
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: sBlock,
|
||||
}
|
||||
|
||||
p2pSvc := p2ptest.NewTestP2P(t)
|
||||
chainSvc, clock := defaultMockChain(t, 0)
|
||||
|
||||
test := &dataSamplerTest{
|
||||
ctx: context.Background(),
|
||||
p2pSvc: p2pSvc,
|
||||
peers: []*p2ptest.TestP2P{},
|
||||
ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Fulu},
|
||||
chainSvc: chainSvc,
|
||||
blockProcessedData: blockProcessedData,
|
||||
blobs: blobs,
|
||||
kzgCommitments: kzgCommitments,
|
||||
kzgProofs: kzgProofs,
|
||||
dataColumnSidecars: dataColumnSidecars,
|
||||
}
|
||||
clockSync := startup.NewClockSynchronizer()
|
||||
require.NoError(t, clockSync.SetClock(clock))
|
||||
iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil)
|
||||
ini, err := iniWaiter.WaitForInitializer(context.Background())
|
||||
require.NoError(t, err)
|
||||
sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil, newDataColumnsVerifierFromInitializer(ini), &peerdas.CustodyInfo{})
|
||||
|
||||
return test, sampler
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
numPeers int
|
||||
custodyRequirement uint64
|
||||
expectedColumns [][]uint64
|
||||
prunePeers map[int]bool // Peers to prune.
|
||||
}{
|
||||
{
|
||||
name: "custodyRequirement=4",
|
||||
numPeers: 3,
|
||||
custodyRequirement: 4,
|
||||
expectedColumns: [][]uint64{
|
||||
{6, 37, 48, 113},
|
||||
{35, 79, 92, 109},
|
||||
{31, 44, 58, 97},
|
||||
},
|
||||
prunePeers: map[int]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "custodyRequirement=8",
|
||||
numPeers: 3,
|
||||
custodyRequirement: 8,
|
||||
expectedColumns: [][]uint64{
|
||||
{1, 6, 37, 48, 51, 87, 112, 113},
|
||||
{24, 25, 35, 52, 79, 92, 109, 126},
|
||||
{31, 44, 58, 64, 91, 97, 116, 127},
|
||||
},
|
||||
prunePeers: map[int]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.CustodyRequirement = tc.custodyRequirement
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
|
||||
for i := 0; i < tc.numPeers; i++ {
|
||||
p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
|
||||
test.peers = append(test.peers, p)
|
||||
}
|
||||
|
||||
// confirm everything works
|
||||
sampler.refreshPeerInfo()
|
||||
require.Equal(t, params.BeaconConfig().NumberOfColumns, uint64(len(sampler.peersByCustodyGroup)))
|
||||
|
||||
require.Equal(t, tc.numPeers, len(sampler.groupsByPeer))
|
||||
for i, peer := range test.peers {
|
||||
// confirm peer has the expected columns
|
||||
require.Equal(t, len(tc.expectedColumns[i]), len(sampler.groupsByPeer[peer.PeerID()]))
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
require.Equal(t, true, sampler.groupsByPeer[peer.PeerID()][column])
|
||||
}
|
||||
|
||||
// confirm column to peer mapping are correct
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
require.Equal(t, true, sampler.peersByCustodyGroup[column][peer.PeerID()])
|
||||
}
|
||||
}
|
||||
|
||||
// prune peers
|
||||
for peer := range tc.prunePeers {
|
||||
err := test.p2pSvc.Disconnect(test.peers[peer].PeerID())
|
||||
test.p2pSvc.Peers().SetConnectionState(test.peers[peer].PeerID(), peers.Disconnected)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
require.Equal(t, tc.numPeers-len(tc.prunePeers), len(sampler.groupsByPeer))
|
||||
for i, peer := range test.peers {
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
expected := true
|
||||
if tc.prunePeers[i] {
|
||||
expected = false
|
||||
}
|
||||
require.Equal(t, expected, sampler.peersByCustodyGroup[column][peer.PeerID()])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
|
||||
// TODO: Use `t.Run`.
|
||||
testCases := []struct {
|
||||
numPeers int
|
||||
custodyRequirement uint64
|
||||
columnsToDistribute [][]uint64
|
||||
expectedDistribution []map[int][]uint64
|
||||
}{
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 4,
|
||||
// peer custody maps
|
||||
// p0: {6, 37, 48, 113},
|
||||
// p1: {35, 79, 92, 109},
|
||||
// p2: {31, 44, 58, 97},
|
||||
columnsToDistribute: [][]uint64{
|
||||
{6, 35, 31},
|
||||
{6, 48, 79, 109, 31, 97},
|
||||
{6, 37, 113},
|
||||
{11},
|
||||
},
|
||||
expectedDistribution: []map[int][]uint64{
|
||||
{
|
||||
0: {6}, // p0
|
||||
1: {35}, // p1
|
||||
2: {31}, // p2
|
||||
},
|
||||
{
|
||||
0: {6, 48}, // p0
|
||||
1: {79, 109}, // p1
|
||||
2: {31, 97}, // p2
|
||||
},
|
||||
{
|
||||
0: {6, 37, 113}, // p0
|
||||
},
|
||||
{},
|
||||
},
|
||||
},
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 8,
|
||||
// peer custody maps
|
||||
// p0: {6, 37, 48, 113, 1, 112, 87, 51},
|
||||
// p1: {35, 79, 92, 109, 52, 126, 25, 24},
|
||||
// p2: {31, 44, 58, 97, 116, 91, 64, 127},
|
||||
columnsToDistribute: [][]uint64{
|
||||
{6, 48, 79, 25, 24, 97}, // all covered by peers
|
||||
{6, 35, 31, 32}, // `32` is not covered by any peer
|
||||
},
|
||||
expectedDistribution: []map[int][]uint64{
|
||||
{
|
||||
0: {6, 48}, // p0
|
||||
1: {79, 25, 24}, // p1
|
||||
2: {97}, // p2
|
||||
},
|
||||
{
|
||||
0: {6}, // p0
|
||||
1: {35}, // p1
|
||||
2: {31}, // p2
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
params.SetupTestConfigCleanup(t)
|
||||
for _, tc := range testCases {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.CustodyRequirement = tc.custodyRequirement
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
|
||||
for i := 0; i < tc.numPeers; i++ {
|
||||
p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
|
||||
test.peers = append(test.peers, p)
|
||||
}
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
for idx, columns := range tc.columnsToDistribute {
|
||||
result, err := sampler.distributeSamplesToPeer(columns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(tc.expectedDistribution[idx]), len(result), fmt.Sprintf("%v - %v", tc.expectedDistribution[idx], result))
|
||||
|
||||
for peerIdx, dist := range tc.expectedDistribution[idx] {
|
||||
for _, column := range dist {
|
||||
peerID := test.peers[peerIdx].PeerID()
|
||||
require.Equal(t, true, result[peerID][column])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) {
|
||||
test, sampler := setupDefaultDataColumnSamplerTest(t)
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
t.Run("sample all columns", func(t *testing.T) {
|
||||
sampleColumns := []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97}
|
||||
retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(retrieved))
|
||||
for _, column := range sampleColumns {
|
||||
require.Equal(t, true, retrieved[column])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("sample a subset of columns", func(t *testing.T) {
|
||||
sampleColumns := []uint64{35, 31, 79, 48, 113, 97}
|
||||
retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(retrieved))
|
||||
for _, column := range sampleColumns {
|
||||
require.Equal(t, true, retrieved[column])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("sample a subset of columns with missing columns", func(t *testing.T) {
|
||||
sampleColumns := []uint64{35, 31, 100, 79}
|
||||
retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(retrieved))
|
||||
require.DeepEqual(t, map[uint64]bool{35: true, 31: true, 79: true}, retrieved)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.DataColumnSidecarSubnetCount = 32
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
samplesCount uint64
|
||||
possibleColumnsToRequest []uint64
|
||||
columnsNotToRespond map[uint64]bool
|
||||
expectedSuccess bool
|
||||
expectedRoundSummaries []roundSummary
|
||||
}{
|
||||
{
|
||||
name: "All columns are correctly sampled in a single round",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
|
||||
columnsNotToRespond: map[uint64]bool{},
|
||||
expectedSuccess: true,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{6, 35, 31, 37, 79},
|
||||
MissingColumns: map[uint64]bool{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Two missing columns in the first round, ok in the second round",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
|
||||
columnsNotToRespond: map[uint64]bool{6: true, 31: true},
|
||||
expectedSuccess: true,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{6, 35, 31, 37, 79},
|
||||
MissingColumns: map[uint64]bool{6: true, 31: true},
|
||||
},
|
||||
{
|
||||
RequestedColumns: []uint64{44, 48, 92, 58, 113, 109},
|
||||
MissingColumns: map[uint64]bool{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Two missing columns in the first round, one missing in the second round. Fail to sample.",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
|
||||
columnsNotToRespond: map[uint64]bool{6: true, 31: true, 48: true},
|
||||
expectedSuccess: false,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{6, 35, 31, 37, 79},
|
||||
MissingColumns: map[uint64]bool{6: true, 31: true},
|
||||
},
|
||||
{
|
||||
RequestedColumns: []uint64{44, 48, 92, 58, 113, 109},
|
||||
MissingColumns: map[uint64]bool{48: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
test, sampler := setupDataColumnSamplerTest(t, 3)
|
||||
p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 1)
|
||||
p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 2)
|
||||
p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 3)
|
||||
test.peers = []*p2ptest.TestP2P{p1, p2, p3}
|
||||
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockProcessedData, tc.possibleColumnsToRequest, tc.samplesCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSuccess, success)
|
||||
require.DeepEqual(t, tc.expectedRoundSummaries, summaries)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func deterministicRandomness(t *testing.T, seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
require.NoError(t, err)
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(t *testing.T, seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(t, seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(t *testing.T, seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
for i := 0; i < len(blob); i += 32 {
|
||||
fieldElementBytes := getRandFieldElement(t, seed+int64(i))
|
||||
copy(blob[i:i+32], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
beacon-chain/sync/data_columns_test.go (new file, 1605 lines; diff not shown because it is too large)
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
@@ -46,6 +47,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -81,6 +83,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -125,6 +128,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -167,6 +171,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -211,6 +216,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -255,6 +261,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -274,6 +281,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV3+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -73,6 +74,7 @@ go_test(
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -23,7 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
"github.com/OffchainLabs/prysm/v6/math"
|
||||
mathPrysm "github.com/OffchainLabs/prysm/v6/math"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
p2ppb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
@@ -34,7 +36,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// maxPendingRequests limits how many concurrent fetch request one can initiate.
|
||||
maxPendingRequests = 64
|
||||
// peersPercentagePerRequest caps percentage of peers to be used in a request.
|
||||
@@ -78,6 +79,10 @@ type blocksFetcherConfig struct {
|
||||
peerFilterCapacityWeight float64
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// blocksFetcher is a service to fetch chain data from peers.
|
||||
@@ -94,6 +99,9 @@ type blocksFetcher struct {
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
@@ -102,6 +110,7 @@ type blocksFetcher struct {
|
||||
capacityWeight float64 // how remaining capacity affects peer selection
|
||||
mode syncMode // allows to use fetcher in different sync scenarios
|
||||
quit chan struct{} // termination notifier
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// peerLock restricts fetcher actions on per peer basis. Currently, used for rate limiting.
|
||||
@@ -124,7 +133,7 @@ type fetchRequestResponse struct {
|
||||
blobsFrom peer.ID
|
||||
start primitives.Slot
|
||||
count uint64
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -162,6 +171,9 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
bv: cfg.bv,
|
||||
cv: cfg.cv,
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
rateLimiter: rateLimiter,
|
||||
peerLocks: make(map[peer.ID]*peerLock),
|
||||
@@ -170,6 +182,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
capacityWeight: capacityWeight,
|
||||
mode: cfg.mode,
|
||||
quit: make(chan struct{}),
|
||||
custodyInfo: cfg.custodyInfo,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -181,7 +194,7 @@ func maxBatchLimit() int {
|
||||
if params.DenebEnabled() {
|
||||
maxLimit = params.BeaconConfig().MaxRequestBlocksDeneb
|
||||
}
|
||||
castedMaxLimit, err := math.Int(maxLimit)
|
||||
castedMaxLimit, err := mathPrysm.Int(maxLimit)
|
||||
if err != nil {
|
||||
// Should be impossible to hit this case.
|
||||
log.WithError(err).Error("Unable to calculate the max batch limit")
|
||||
@@ -298,7 +311,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
response := &fetchRequestResponse{
|
||||
start: start,
|
||||
count: count,
|
||||
bwb: []blocks.BlockWithROBlobs{},
|
||||
bwb: []blocks.BlockWithROSidecars{},
|
||||
err: nil,
|
||||
}
|
||||
|
||||
@@ -317,30 +330,96 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
if f.mode == modeStopOnFinalizedEpoch {
|
||||
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
|
||||
if start > highestFinalizedSlot {
|
||||
response.err = fmt.Errorf("%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot)
|
||||
response.err = fmt.Errorf(
|
||||
"%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot,
|
||||
)
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||
if response.err == nil {
|
||||
pid, bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.blocksFrom, peers)
|
||||
pid, err := f.fetchSidecars(ctx, response.blocksFrom, peers, response.bwb)
|
||||
if err != nil {
|
||||
response.err = err
|
||||
}
|
||||
response.bwb = bwb
|
||||
|
||||
response.blobsFrom = pid
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer.
|
||||
// fetchSidecars fetches the sidecars corresponding to the blocks in `bwScs`.
|
||||
// It mutates the `Blobs` and `Columns` fields of `bwScs` with the fetched sidecars.
|
||||
func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []peer.ID, bwScs []blocks.BlockWithROSidecars) (peer.ID, error) {
|
||||
const batchSize = 32
|
||||
|
||||
// Find the first block with a slot greater than or equal to the first Fulu slot.
|
||||
// (Blocks are sorted by slot.)
|
||||
firstFuluIndex := sort.Search(len(bwScs), func(i int) bool {
|
||||
return bwScs[i].Block.Version() >= version.Fulu
|
||||
})
|
||||
|
||||
blocksWithBlobs := bwScs[:firstFuluIndex]
|
||||
blocksWithDataColumns := bwScs[firstFuluIndex:]
|
||||
|
||||
if len(blocksWithBlobs) == 0 && len(blocksWithDataColumns) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var (
|
||||
blobsPid peer.ID
|
||||
err error
|
||||
)
|
||||
|
||||
if len(blocksWithBlobs) > 0 {
|
||||
// Fetch blob sidecars.
|
||||
blobsPid, err = f.fetchBlobsFromPeer(ctx, blocksWithBlobs, pid, peers)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch blobs from peer")
|
||||
}
|
||||
}
|
||||
|
||||
if len(blocksWithDataColumns) == 0 {
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// Extract blocks.
|
||||
dataColumnBlocks := make([]blocks.ROBlock, 0, len(blocksWithDataColumns))
|
||||
for _, blockWithSidecars := range blocksWithDataColumns {
|
||||
block := blockWithSidecars.Block
|
||||
dataColumnBlocks = append(dataColumnBlocks, block)
|
||||
}
|
||||
|
||||
// Fetch data column sidecars.
|
||||
actualGroupCount := f.custodyInfo.ActualGroupCount()
|
||||
fetchedDataColumnsByRoot, err := prysmsync.RequestMissingDataColumnsByRange(ctx, f.clock, f.ctxMap, f.p2p, f.rateLimiter, actualGroupCount, f.dcs, dataColumnBlocks, batchSize)
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "fetch missing data columns from peers")
|
||||
}
|
||||
|
||||
// Populate the response.
|
||||
for i := range bwScs {
|
||||
bwSc := &bwScs[i]
|
||||
root := bwSc.Block.Root()
|
||||
if columns, ok := fetchedDataColumnsByRoot[root]; ok {
|
||||
bwSc.Columns = columns
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Return the (multiple) peer IDs that provided the data columns and not only the one for blobs.
|
||||
return blobsPid, nil
|
||||
}
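// Illustrative sketch (not part of the original change): fetchSidecars relies on the
// slot-sorted invariant of `bwScs` so that sort.Search cleanly splits the slice into
// pre-Fulu blocks (served by blob sidecars) and Fulu blocks (served by data column
// sidecars).
func exampleSplitAtFulu(bwScs []blocks.BlockWithROSidecars) (preFulu, fulu []blocks.BlockWithROSidecars) {
	firstFuluIndex := sort.Search(len(bwScs), func(i int) bool {
		return bwScs[i].Block.Version() >= version.Fulu
	})
	return bwScs[:firstFuluIndex], bwScs[firstFuluIndex:]
}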
|
||||
|
||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer, sorted by slot.
|
||||
func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
ctx context.Context,
|
||||
start primitives.Slot, count uint64,
|
||||
peers []peer.ID,
|
||||
) ([]blocks.BlockWithROBlobs, peer.ID, error) {
|
||||
) ([]blocks.BlockWithROSidecars, peer.ID, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
|
||||
defer span.End()
|
||||
|
||||
@@ -355,39 +434,38 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
// peers are dialed first.
|
||||
peers = append(bestPeers, peers...)
|
||||
peers = dedupPeers(peers)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blocks, err := f.requestBlocks(ctx, req, p)
|
||||
for _, peer := range peers {
|
||||
blocks, err := f.requestBlocks(ctx, req, peer)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
log.WithField("peer", peer).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(peer)
|
||||
robs, err := sortedBlockWithVerifiedBlobSlice(blocks)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("invalid BeaconBlocksByRange response")
|
||||
log.WithField("peer", peer).WithError(err).Debug("invalid BeaconBlocksByRange response")
|
||||
continue
|
||||
}
|
||||
if len(features.Get().BlacklistedRoots) > 0 {
|
||||
for _, b := range robs {
|
||||
if features.BlacklistedBlock(b.Block.Root()) {
|
||||
return nil, p, prysmsync.ErrInvalidFetchedData
|
||||
return nil, peer, prysmsync.ErrInvalidFetchedData
|
||||
}
|
||||
}
|
||||
}
|
||||
return robs, p, err
|
||||
return robs, peer, err
|
||||
}
|
||||
return nil, "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
func sortedBlockWithVerifiedBlobSlice(bs []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROBlobs, error) {
|
||||
rb := make([]blocks.BlockWithROBlobs, len(bs))
|
||||
for i, b := range bs {
|
||||
func sortedBlockWithVerifiedBlobSlice(blks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROSidecars, error) {
|
||||
rb := make([]blocks.BlockWithROSidecars, len(blks))
|
||||
for i, b := range blks {
|
||||
ro, err := blocks.NewROBlock(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb[i] = blocks.BlockWithROBlobs{Block: ro}
|
||||
rb[i] = blocks.BlockWithROSidecars{Block: ro}
|
||||
}
|
||||
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
|
||||
return rb, nil
|
||||
@@ -403,7 +481,7 @@ type commitmentCountList []commitmentCount
|
||||
|
||||
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
||||
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
||||
func countCommitments(bwb []blocks.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
|
||||
func countCommitments(bwb []blocks.BlockWithROSidecars, retentionStart primitives.Slot) commitmentCountList {
|
||||
if len(bwb) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -485,7 +563,9 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
|
||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks.BlockWithROBlobs, error) {
|
||||
// verifyAndPopulateBlobs mutate the input `bwb` argument by adding verified blobs.
|
||||
// This function mutates the input `bwb` argument.
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
|
||||
for i := range blobs {
|
||||
if blobs[i].Slot() < req.StartSlot {
|
||||
@@ -495,46 +575,53 @@ func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob
|
||||
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
|
||||
}
|
||||
for i := range bwb {
|
||||
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
err := populateBlock(&bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
if err != nil {
|
||||
if errors.Is(err, errDidntPopulate) {
|
||||
continue
|
||||
}
|
||||
return bwb, err
|
||||
return err
|
||||
}
|
||||
bwb[i] = bwi
|
||||
}
|
||||
return bwb, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var errDidntPopulate = errors.New("skipping population of block")
|
||||
|
||||
func populateBlock(bw blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks.BlockWithROBlobs, error) {
|
||||
// populateBlock verifies and populates blobs for a block.
|
||||
// This function mutates the input `bw` argument.
|
||||
func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blk := bw.Block
|
||||
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
commits, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) == 0 {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
// Drop blobs on the floor if we already have them.
|
||||
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) != len(blobs) {
|
||||
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
return missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
}
|
||||
|
||||
for ci := range commits {
|
||||
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
|
||||
return bw, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bw.Blobs = blobs
|
||||
return bw, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
||||
@@ -547,29 +634,34 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
|
||||
}
|
||||
|
||||
// fetchBlobsFromPeer fetches blocks from a single randomly selected peer.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROBlobs, pid peer.ID, peers []peer.ID) (peer.ID, []blocks.BlockWithROBlobs, error) {
|
||||
// This function mutates the input `bwb` argument.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROSidecars, pid peer.ID, peers []peer.ID) (peer.ID, error) {
|
||||
if len(bwb) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||
defer span.End()
|
||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", err
|
||||
}
|
||||
// Construct request message based on observed interval of blocks in need of blobs.
|
||||
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
|
||||
if req == nil {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
||||
peers = append([]peer.ID{pid}, peers...)
|
||||
peers = f.hasSufficientBandwidth(peers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first. If all of them fail, we fallback to the
|
||||
// initial peer we wanted to request blobs from.
|
||||
peers = append(bestPeers, pid)
|
||||
peers = append(peers, pid)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blobs, err := f.requestBlobs(ctx, req, p)
|
||||
@@ -578,16 +670,31 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
|
||||
if err != nil {
|
||||
if err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs); err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
||||
continue
|
||||
}
|
||||
return p, robs, err
|
||||
return p, err
|
||||
}
|
||||
return "", nil, errNoPeersAvailable
|
||||
return "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
// sortedSliceFromMap returns a sorted slice of keys from a map.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i] < result[j]
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// waitForPeersFo
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
func (f *blocksFetcher) requestBlocks(
|
||||
ctx context.Context,
|
||||
@@ -642,6 +749,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
|
||||
}
|
||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||
l.Unlock()
|
||||
|
||||
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||
}
|
||||
|
||||
@@ -682,7 +790,7 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
// Exit early if we have sufficient capacity
|
||||
return nil
|
||||
}
|
||||
intCount, err := math.Int(count)
|
||||
intCount, err := mathPrysm.Int(count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -699,7 +807,8 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
||||
filteredPeers := []peer.ID{}
|
||||
var filteredPeers []peer.ID
|
||||
|
||||
for _, p := range peers {
|
||||
if uint64(f.rateLimiter.Remaining(p.String())) < count {
|
||||
continue
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.