Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: ba2333069a ... backfill-d (157 commits)
Commits (SHA1):

b114d5cf99 61207bd3ac 0b6fcd7d17 fe2766e716 9135d765e1 eca87f29d1 00821c8f55 4b9e92bcd7 b01d9005b8 8d812d5f0e
24a3cb2a8b 66d1d3e248 99933678ea 34f8e1e92b a6a41a8755 f110b94fac 33023aa282 eeb3cdc99e 1e7147f060 8936beaff3
c00283f247 a4269cf308 91f3c8a4d0 30c7ee9c7b 456d8b9eb9 4fe3e6d31a 01ee1c80b4 c14fe47a81 b9deabbf0a 5d66a98e78
2d46d6ffae 57107e50a7 47271254f6 f304028874 8abc5e159a b1ac53c4dd 27ab68c856 ddf5a3953b 92d2fc101d 8996000d2b
a2fcba2349 abe8638991 0b5064b474 da9d4cf5b9 a62cca15dd ac04246a2a 0923145bd7 a216cb4105 01705d1f3d 14f93b4e9d
ad11036c36 632a06076b 242c2b0268 19662da905 7faee5af35 805ee1bf31 bea46fdfa1 f6b1fb1c88 6fb349ea76 e5a425f5c7
f157d37e4c 5f08559bef a082d2aecd bcfaff8504 d8e09c346f 876519731b de05b83aca 56c73e7193 859ac008a8 f882bd27c8
361e5759c1 34ef0da896 726e8b962f 453ea01deb 6537f8011e 5f17317c1c 3432ffa4a3 9dac67635b 9be69fbd07 e21261e893
da53a8fc48 a14634e656 43761a8066 01dbc337c0 92f9b55fcb f65f12f58b f2b61a3dcf 77a6d29a2e 31d16da3a0 19221b77bd
83df293647 c20c09ce36 2191faaa3f 2de1e6f3e4 db44df3964 f92eb44c89 a26980b64d f58cf7e626 68da7dabe2 d1e43a2c02
3652bec2f8 81b7a1725f 0c917079c4 a732fe7021 d75a7aae6a e788a46e82 199543125a ca63efa770 345e6edd9c 6403064126
0517d76631 000d480f77 b40a8ed37e d21c2bd63e 7a256e93f7 07fe76c2da 54affa897f ac4c5fae3c 2845d87077 dc2c90b8ed
b469157e1f 2697794e58 48cf24edb4 78f90db90b d0a3b9bc1d bfdb6dab86 7dd2fd52af b6bad9331b 6e2122085d 7a847292aa
81f4db0afa a7dc2e6c8b 0a010b5088 1e335e2cf2 42f4c0f14e d3c12abe25 b0ba05b4f4 e206506489 013cb28663 496914cb39
c032e78888 5e4deff6fd 6daa91c465 32ce6423eb b0ea450df5 8bd10df423 dcbb543be2 be0580e1a9 1355178115 b78c3485b9
f503efc6ed 1bfbd3980e 3e722ea1bc d844026433 9ffc19d5ef 3e23f6e879 c688c84393
```diff
@@ -1,7 +1,7 @@
workspace(name = "prysm")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "rules_pkg",
@@ -16,8 +16,6 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "toolchains_protoc",
    sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
```
```diff
@@ -31,6 +31,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -109,22 +110,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
	sync.RWMutex
	notifiers map[[32]byte]chan uint64
	seenIndex map[[32]byte][]bool
	// TODO: Separate blobs from data columns
	// seenIndex map[[32]byte][]bool
	seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	if idx >= uint64(maxBlobsPerBlock) {
		return
	}
	// TODO: Separate blobs from data columns
	// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	// if idx >= uint64(maxBlobsPerBlock) {
	// return
	// }

	bn.Lock()
	seen := bn.seenIndex[root]
	if seen == nil {
		seen = make([]bool, maxBlobsPerBlock)
	}
	// TODO: Separate blobs from data columns
	// if seen == nil {
	// seen = make([]bool, maxBlobsPerBlock)
	// }
	if seen[idx] {
		bn.Unlock()
		return
@@ -135,7 +140,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
	// Retrieve or create the notifier channel for the given root.
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, maxBlobsPerBlock)
		// TODO: Separate blobs from data columns
		// c = make(chan uint64, maxBlobsPerBlock)
		c = make(chan uint64, fieldparams.NumberOfColumns)
		bn.notifiers[root] = c
	}

@@ -145,12 +152,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	// TODO: Separate blobs from data columns
	// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	bn.Lock()
	defer bn.Unlock()
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, maxBlobsPerBlock)
		// TODO: Separate blobs from data columns
		// c = make(chan uint64, maxBlobsPerBlock)
		c = make(chan uint64, fieldparams.NumberOfColumns)
		bn.notifiers[root] = c
	}
	return c
@@ -176,7 +186,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	bn := &blobNotifierMap{
		notifiers: make(map[[32]byte]chan uint64),
		seenIndex: make(map[[32]byte][]bool),
		// TODO: Separate blobs from data columns
		// seenIndex: make(map[[32]byte][]bool),
		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
	}
	srv := &Service{
		ctx: ctx,

@@ -587,7 +587,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
	// Initialize a blobNotifierMap
	bn := &blobNotifierMap{
		seenIndex: make(map[[32]byte][]bool),
		// TODO: Separate blobs from data columns
		// seenIndex: make(map[[32]byte][]bool),
		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
		notifiers: make(map[[32]byte]chan uint64),
	}
```
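The hunks above only show the producer side of the notifier map: notifyIndex records an index in the per-root seen array (now sized to fieldparams.NumberOfColumns) and pushes it onto a buffered channel. A minimal consumer-side sketch is shown below for orientation; it is not part of the diff, would live inside the same package (the type is unexported), and the waitForIndices helper and its parameters are hypothetical.

```go
// waitForIndices drains the per-root channel returned by forRoot until `want`
// distinct indices have been observed or the context is cancelled.
// Hypothetical helper for illustration only; assumes the package's existing imports.
func waitForIndices(ctx context.Context, bn *blobNotifierMap, root [32]byte, slot primitives.Slot, want int) error {
	c := bn.forRoot(root, slot) // buffered with fieldparams.NumberOfColumns capacity
	seen := make(map[uint64]bool, want)
	for len(seen) < want {
		select {
		case idx := <-c:
			seen[idx] = true // notifyIndex already de-duplicates per index
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}
```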
```diff
@@ -229,13 +229,16 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
	if body.Version() < version.Deneb {
		return nil
	}

	kzgs, err := body.BlobKzgCommitments()
	if err != nil {
		return err
	}
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	if len(kzgs) > maxBlobsPerBlock {
		return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))

	commitmentCount, maxBlobsPerBlock := len(kzgs), params.BeaconConfig().MaxBlobsPerBlock(slot)
	if commitmentCount > maxBlobsPerBlock {
		return fmt.Errorf("too many kzg commitments in block: actual count %d - max allowed %d", commitmentCount, maxBlobsPerBlock)
	}

	return nil
}
@@ -926,8 +926,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))

	b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
	maxCommitmentsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())

	b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, maxCommitmentsPerBlock+1)}}
	rb, err = consensusblocks.NewBeaconBlock(b)
	require.NoError(t, err)
	require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
	require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: actual count %d - max allowed %d", maxCommitmentsPerBlock+1, maxCommitmentsPerBlock), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
}
```
```diff
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
	currentEpoch := slots.ToEpoch(header.Header.Slot)
	fork, err := forks.Fork(currentEpoch)
	if err != nil {
		return err
	}
	domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
	if err != nil {
		return err
	}
	proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
	if err != nil {
		return err
	}
	proposerPubKey := proposer.PublicKey
	return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
```
```diff
@@ -37,8 +37,11 @@ const (
	// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
	SingleAttReceived = 9

	// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
	DataColumnSidecarReceived = 10

	// BlockGossipReceived is sent after a block has been received from gossip or API that passes validation rules.
	BlockGossipReceived = 10
	BlockGossipReceived = 11
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -90,6 +93,11 @@ type SingleAttReceivedData struct {
	Attestation ethpb.Att
}

// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.
type DataColumnSidecarReceivedData struct {
	DataColumn *blocks.VerifiedRODataColumn
}

// BlockGossipReceivedData is the data sent with BlockGossipReceived events.
type BlockGossipReceivedData struct {
	// SignedBlock is the block that was received.
```
```diff
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
	helpers.ClearCache()
	params.SetupTestConfigCleanup(t)

	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
	syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	helpers.ClearCache()

	validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
```
```diff
@@ -7,6 +7,7 @@ go_library(
        "info.go",
        "metrics.go",
        "p2p_interface.go",
        "peer_sampling.go",
        "reconstruction.go",
        "validator.go",
        "verification.go",
@@ -44,6 +45,7 @@ go_test(
        "das_core_test.go",
        "info_test.go",
        "p2p_interface_test.go",
        "peer_sampling_test.go",
        "reconstruction_test.go",
        "utils_test.go",
        "validator_test.go",
```
```diff
@@ -190,3 +190,29 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

	return key
}

// ColumnIndices is a map of column indices where the key is the column index and the value is a boolean.
// The boolean could indicate different things, eg whether the column is needed (in the context of satisfying custody requirements)
// or present (in the context of a custody check on disk or in cache).
type ColumnIndices map[uint64]bool

// CopyTrueIndices allows callers to get a copy of the given ColumnIndices, filtering out any keys
// where the value == `false`.
func CopyTrueIndices(src ColumnIndices) ColumnIndices {
	dst := make(ColumnIndices, len(src))
	for k, v := range src {
		if v {
			dst[k] = true
		}
	}
	return dst
}

// ColumnIndicesFromSlice converts a slice of uint64 indices into the ColumnIndices equivalent.
func ColumnIndicesFromSlice(indices []uint64) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for _, index := range indices {
		ci[index] = true
	}
	return ci
}
```
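A small, self-contained usage sketch of the two helpers added above; the program below is illustrative only and assumes the import path shown in the diff.

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
)

func main() {
	// Build a ColumnIndices set from a slice of column indices.
	needed := peerdas.ColumnIndicesFromSlice([]uint64{1, 17, 87})

	// Mark one column as no longer needed, then copy only the entries still set to true.
	needed[17] = false
	stillNeeded := peerdas.CopyTrueIndices(needed)

	fmt.Println(len(needed), len(stillNeeded)) // prints: 3 2
}
```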
beacon-chain/core/peerdas/peer_sampling.go (new file, 56 lines)

```diff
@@ -0,0 +1,56 @@
package peerdas

import (
	"math/big"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
	// Retrieve the columns count
	columnsCount := params.BeaconConfig().NumberOfColumns

	// If half of the columns are missing, we are able to reconstruct the data.
	// If half of the columns + 1 are missing, we are not able to reconstruct the data.
	// This is the smallest worst case.
	worstCaseMissing := columnsCount/2 + 1

	// Compute the false positive threshold.
	falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

	var sampleCount uint64

	// Finally, compute the extended sample count.
	for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
		if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
			break
		}
	}

	return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
	denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
	denominator := new(big.Float).SetInt(denominatorInt)

	rBig := big.NewFloat(0)

	for i := uint64(0); i < k+1; i++ {
		a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
		b := new(big.Int).Binomial(int64(M-n), int64(N-i))
		numeratorInt := new(big.Int).Mul(a, b)
		numerator := new(big.Float).SetInt(numeratorInt)
		item := new(big.Float).Quo(numerator, denominator)
		rBig.Add(rBig, item)
	}

	r, _ := rBig.Float64()

	return r
}
```
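For reference, HypergeomCDF sums the standard hypergeometric probability mass function; in the notation of the code (threshold k, population M, successes n, sample size N) it evaluates:

$$
P(X \le k) \;=\; \sum_{i=0}^{k} \frac{\binom{n}{i}\,\binom{M-n}{N-i}}{\binom{M}{N}}
$$

ExtendedSampleCount then raises the sample count from samplesPerSlot until P(X ≤ allowedFailures) is no larger than the zero-failure threshold computed for the smallest unrecoverable case of columnsCount/2 + 1 missing columns.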
beacon-chain/core/peerdas/peer_sampling_test.go (new file, 60 lines)

```diff
@@ -0,0 +1,60 @@
package peerdas_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestExtendedSampleCount(t *testing.T) {
	const samplesPerSlot = 16

	testCases := []struct {
		name                string
		allowedMissings     uint64
		extendedSampleCount uint64
	}{
		{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
		{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
		{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
		{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
		{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
		{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
		{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
		{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
		{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
		{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
		{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
		{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
		{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
		{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
		{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
		{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
		{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
		{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
		{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
		{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
		{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
			require.Equal(t, tc.extendedSampleCount, result)
		})
	}
}

func TestHypergeomCDF(t *testing.T) {
	// Test case from https://en.wikipedia.org/wiki/Hypergeometric_distribution
	// Population size: 1000, number of successes in population: 500, sample size: 10, number of successes in sample: 5
	// Expected result: 0.072
	const (
		expected = 0.0796665913283742
		margin   = 0.000001
	)

	actual := peerdas.HypergeomCDF(5, 128, 65, 16)
	require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}
```
```diff
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
	return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
	return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
```
```diff
@@ -4,21 +4,26 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability.go",
        "availability_columns.go",
        "cache.go",
        "data_column_cache.go",
        "iface.go",
        "mock.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -27,13 +32,18 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "availability_columns_test.go",
        "availability_test.go",
        "cache_test.go",
        "data_column_cache_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -41,6 +51,7 @@
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
```
```diff
@@ -53,10 +53,16 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
	if len(sc) == 0 {
func (s *LazilyPersistentStore) Persist(current primitives.Slot, scg ...blocks.ROSidecar) error {
	if len(scg) == 0 {
		return nil
	}

	sc, err := blocks.BlobSidecarsFromSidecars(scg)
	if err != nil {
		return errors.Wrap(err, "blob sidecars from sidecars")
	}

	if len(sc) > 1 {
		first := sc[0].BlockRoot()
		for i := 1; i < len(sc); i++ {
```
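Persist now accepts the generic blocks.ROSidecar type and converts back to blob sidecars via blocks.BlobSidecarsFromSidecars. A hedged caller-side sketch follows; the persistBlobs helper is hypothetical, and the wrapping call mirrors the updated tests later in this diff.

```go
package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// persistBlobs wraps concrete blob sidecars in the generic ROSidecar type
// before handing them to the availability store, which converts them back
// internally. Hypothetical helper, for illustration only.
func persistBlobs(store *das.LazilyPersistentStore, current primitives.Slot, blobSidecars []blocks.ROBlob) error {
	sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
	return store.Persist(current, sidecars...)
}
```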
beacon-chain/das/availability_columns.go (new file, 200 lines)

```diff
@@ -0,0 +1,200 @@
package das

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store                  *filesystem.DataColumnStorage
	nodeID                 enode.ID
	cache                  *dataColumnCache
	custodyInfo            *peerdas.CustodyInfo
	newDataColumnsVerifier verification.NewDataColumnsVerifier
}

var _ AvailabilityStore = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
	VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

func NewLazilyPersistentStoreColumn(store *filesystem.DataColumnStorage, nodeID enode.ID, newDataColumnsVerifier verification.NewDataColumnsVerifier, custodyInfo *peerdas.CustodyInfo) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store:                  store,
		nodeID:                 nodeID,
		cache:                  newDataColumnCache(),
		custodyInfo:            custodyInfo,
		newDataColumnsVerifier: newDataColumnsVerifier,
	}
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
	if len(sidecars) == 0 {
		return nil
	}

	dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
	if err != nil {
		return errors.Wrap(err, "blob sidecars from sidecars")
	}

	// It is safe to retrieve the first sidecar.
	firstSidecar := dataColumnSidecars[0]

	if len(sidecars) > 1 {
		firstRoot := firstSidecar.BlockRoot()
		for _, sidecar := range dataColumnSidecars[1:] {
			if sidecar.BlockRoot() != firstRoot {
				return errMixedRoots
			}
		}
	}

	firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
	if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
		return nil
	}

	key := dataColumnCacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
	entry := s.cache.ensure(key)

	for i := range sidecars {
		if err := entry.stash(&dataColumnSidecars[i]); err != nil {
			return errors.Wrap(err, "stash DataColumnSidecar")
		}
	}

	return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
	blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
	if err != nil {
		return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
	}

	// Return early for blocks that do not have any commitments.
	if blockCommitments.count() == 0 {
		return nil
	}

	// Get the root of the block.
	blockRoot := block.Root()

	// Build the cache key for the block.
	key := dataColumnCacheKey{slot: block.Block().Slot(), root: blockRoot}

	// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
	entry := s.cache.ensure(key)

	// Delete the cache entry for the block at the end.
	defer s.cache.delete(key)

	// Set the disk summary for the block in the cache entry.
	entry.setDiskSummary(s.store.Summary(blockRoot))

	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
	// ignore their response and decrease their peer score.
	roDataColumns, err := entry.filter(blockRoot, blockCommitments)
	if err != nil {
		return errors.Wrap(err, "filter")
	}

	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
	verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)

	if err := verifier.ValidFields(); err != nil {
		return errors.Wrap(err, "valid")
	}

	if err := verifier.SidecarInclusionProven(); err != nil {
		return errors.Wrap(err, "sidecar inclusion proven")
	}

	if err := verifier.SidecarKzgProofVerified(); err != nil {
		return errors.Wrap(err, "sidecar KZG proof verified")
	}

	verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
	if err != nil {
		return errors.Wrap(err, "verified RO data columns - should never happen")
	}

	if err := s.store.Save(verifiedRoDataColumns); err != nil {
		return errors.Wrap(err, "save data column sidecars")
	}

	return nil
}

// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
	// Return early for blocks that are pre-Fulu.
	if block.Version() < version.Fulu {
		return &safeCommitmentsArray{}, nil
	}

	// Compute the block epoch.
	blockSlot := block.Block().Slot()
	blockEpoch := slots.ToEpoch(blockSlot)

	// Compute the current spoch.
	currentEpoch := slots.ToEpoch(currentSlot)

	// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the KZG commitments for the block.
	kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Return early if there are no commitments in the block.
	if len(kzgCommitments) == 0 {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the groups count.
	custodyGroupCount := s.custodyInfo.ActualGroupCount()

	// Retrieve peer info.
	peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		return nil, errors.Wrap(err, "peer info")
	}

	// Create a safe commitments array for the custody columns.
	commitmentsArray := &safeCommitmentsArray{}
	for column := range peerInfo.CustodyColumns {
		commitmentsArray[column] = kzgCommitments
	}

	return commitmentsArray, nil
}
```
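A hedged sketch of how this store is meant to be driven during batch sync, based only on the methods shown above; the importBlockWithColumns wrapper and its parameters are hypothetical.

```go
package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// importBlockWithColumns stashes whatever column sidecars arrived for a block,
// then gates import on IsDataAvailable, which verifies the cached columns
// (inclusion proofs, KZG proofs) and saves them to disk. Hypothetical wrapper.
func importBlockWithColumns(ctx context.Context, store *das.LazilyPersistentStoreColumn, current primitives.Slot, block blocks.ROBlock, sidecars []blocks.ROSidecar) error {
	if err := store.Persist(current, sidecars...); err != nil {
		return err
	}
	return store.IsDataAvailable(ctx, current, block)
}
```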
beacon-chain/das/availability_columns_test.go (new file, 303 lines)

```diff
@@ -0,0 +1,303 @@
package das

import (
	"context"
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, dataColumnParamsByBlockRoot verification.DataColumnsParamsByRoot) ([]blocks.ROSidecar, []blocks.RODataColumn) {
	roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)

	roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
	for _, roDataColumn := range roDataColumns {
		roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
	}

	return roSidecars, roDataColumns
}

func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
	sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
	require.NoError(t, err)

	rb, err := blocks.NewROBlock(sb)
	require.NoError(t, err)

	return rb
}

var commitments = [][]byte{
	bytesutil.PadTo([]byte("a"), 48),
	bytesutil.PadTo([]byte("b"), 48),
	bytesutil.PadTo([]byte("c"), 48),
	bytesutil.PadTo([]byte("d"), 48),
}

func TestPersist(t *testing.T) {
	t.Run("no sidecars", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
		err := lazilyPersistentStoreColumns.Persist(0)
		require.NoError(t, err)
		require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
	})

	t.Run("mixed roots", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

		dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
			{1}: {{ColumnIndex: 1}},
			{2}: {{ColumnIndex: 2}},
		}

		roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
		require.ErrorIs(t, err, errMixedRoots)
		require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
	})

	t.Run("outside DA period", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

		dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
			{1}: {{ColumnIndex: 1}},
		}

		roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
		require.NoError(t, err)
		require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
	})

	t.Run("nominal", func(t *testing.T) {
		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

		dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]verification.DataColumnParams{
			{}: {{ColumnIndex: 1}, {ColumnIndex: 5}},
		}

		roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
		require.NoError(t, err)
		require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))

		key := dataColumnCacheKey{slot: 0, root: [32]byte{}}
		entry := lazilyPersistentStoreColumns.cache.entries[key]

		// A call to Persist does NOT save the sidecars to disk.
		require.Equal(t, uint64(0), entry.diskSummary.Count())

		require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
		require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])

		for i, roDataColumn := range entry.scs {
			if map[int]bool{1: true, 5: true}[i] {
				continue
			}

			require.IsNil(t, roDataColumn)
		}
	})
}

func TestIsDataAvailable(t *testing.T) {
	newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
		return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
	}

	ctx := context.Background()

	t.Run("without commitments", func(t *testing.T) {
		signedBeaconBlockFulu := util.NewBeaconBlockFulu()
		signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)

		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

		err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
		require.NoError(t, err)
	})

	t.Run("with commitments", func(t *testing.T) {
		signedBeaconBlockFulu := util.NewBeaconBlockFulu()
		signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
		signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
		root := signedRoBlock.Root()

		dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
		lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})

		indices := [...]uint64{1, 17, 87, 102}
		dataColumnsParams := make([]verification.DataColumnParams, 0, len(indices))
		for _, index := range indices {
			dataColumnParams := verification.DataColumnParams{
				ColumnIndex:    index,
				KzgCommitments: commitments,
			}

			dataColumnsParams = append(dataColumnsParams, dataColumnParams)
		}

		dataColumnsParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: dataColumnsParams}
		_, verifiedRoDataColumns := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParamsByBlockRoot)

		key := dataColumnCacheKey{root: root}
		entry := lazilyPersistentStoreColumns.cache.ensure(key)
		defer lazilyPersistentStoreColumns.cache.delete(key)

		for _, verifiedRoDataColumn := range verifiedRoDataColumns {
			err := entry.stash(&verifiedRoDataColumn.RODataColumn)
			require.NoError(t, err)
		}

		err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
		require.NoError(t, err)

		actual, err := dataColumnStorage.Get(root, indices[:])
		require.NoError(t, err)

		summary := dataColumnStorage.Summary(root)
		require.Equal(t, uint64(len(indices)), summary.Count())
		require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
	})
}

func TestFullCommitmentsToCheck(t *testing.T) {
	windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
	require.NoError(t, err)

	testCases := []struct {
		name        string
		commitments [][]byte
		block       func(*testing.T) blocks.ROBlock
		slot        primitives.Slot
	}{
		{
			name: "Pre-Fulu block",
			block: func(t *testing.T) blocks.ROBlock {
				return newSignedRoBlock(t, util.NewBeaconBlockElectra())
			},
		},
		{
			name: "Commitments outside data availability window",
			block: func(t *testing.T) blocks.ROBlock {
				beaconBlockElectra := util.NewBeaconBlockElectra()

				// Block is from slot 0, "current slot" is window size +1 (so outside the window)
				beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments

				return newSignedRoBlock(t, beaconBlockElectra)
			},
			slot: windowSlots + 1,
		},
		{
			name: "Commitments within data availability window",
			block: func(t *testing.T) blocks.ROBlock {
				signedBeaconBlockFulu := util.NewBeaconBlockFulu()
				signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
				signedBeaconBlockFulu.Block.Slot = 100

				return newSignedRoBlock(t, signedBeaconBlockFulu)
			},
			commitments: commitments,
			slot:        100,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			resetFlags := flags.Get()
			gFlags := new(flags.GlobalFlags)
			gFlags.SubscribeAllDataSubnets = true
			flags.Init(gFlags)
			defer flags.Init(resetFlags)

			b := tc.block(t)
			s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, &peerdas.CustodyInfo{})

			commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
			require.NoError(t, err)

			for _, commitments := range commitmentsArray {
				require.DeepEqual(t, tc.commitments, commitments)
			}
		})
	}
}

type mockDataColumnsVerifier struct {
	t                  *testing.T
	dataColumnSidecars []blocks.RODataColumn
	validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}

var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}

func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
	require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)

	verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
	for _, dataColumnSidecar := range m.dataColumnSidecars {
		verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
		verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
	}

	return verifiedDataColumnSidecars, nil
}

func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}

func (m *mockDataColumnsVerifier) ValidFields() error {
	m.validCalled = true
	return nil
}

func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
	return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error                          { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error                         { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error  { return nil }

func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
	return nil
}

func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
	return nil
}

func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error        { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error  { return nil }

func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
	m.SidecarInclusionProvenCalled = true
	return nil
}

func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
	m.SidecarKzgProofVerifiedCalled = true
	return nil
}

func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
```
```diff
@@ -116,9 +116,11 @@ func TestLazilyPersistent_Missing(t *testing.T) {
	ctx := context.Background()
	store := filesystem.NewEphemeralBlobStorage(t)

	blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
	blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

	mbv := &mockBlobBatchVerifier{t: t, scs: scs}
	scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

	mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
	as := NewLazilyPersistentStore(store, mbv)

	// Only one commitment persisted, should return error with other indices
@@ -141,12 +143,14 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
	ctx := context.Background()
	store := filesystem.NewEphemeralBlobStorage(t)

	blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
	blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

	mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
	scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
	blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
	as := NewLazilyPersistentStore(store, mbv)

	scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

	// Only one commitment persisted, should return error with other indices
	require.NoError(t, as.Persist(1, scs[0]))
	err := as.IsDataAvailable(ctx, 1, blk)
@@ -155,7 +159,10 @@
}

func TestLazyPersistOnceCommitted(t *testing.T) {
	_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
	_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)

	scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

	as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
	// stashes as expected
	require.NoError(t, as.Persist(1, scs...))
@@ -163,10 +170,13 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
	require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)

	// ignores index out of bound
	scs[0].Index = 6
	require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds)
	blobSidecars[0].Index = 6
	require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)

	_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)

	more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)

	_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
	// ignores sidecars before the retention period
	slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
	require.NoError(t, err)
```
```diff
@@ -10,12 +10,7 @@ import (
	"github.com/pkg/errors"
)

var (
	ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
	errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
	errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
	errMissingSidecar = errors.New("no sidecar in cache for block commitment")
)
var errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")

// cacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
```
beacon-chain/das/data_column_cache.go (new file, 135 lines)

```diff
@@ -0,0 +1,135 @@
package das

import (
	"reflect"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/pkg/errors"
)

var (
	ErrDuplicateSidecar   = errors.New("duplicate sidecar stashed in AvailabilityStore")
	errColumnIndexTooHigh = errors.New("column index too high")
	errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
	errMissingSidecar     = errors.New("no sidecar in cache for block commitment")
)

// dataColumnCacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
type dataColumnCacheKey struct {
	slot primitives.Slot
	root [32]byte
}

type dataColumnCache struct {
	entries map[dataColumnCacheKey]*dataColumnCacheEntry
}

func newDataColumnCache() *dataColumnCache {
	return &dataColumnCache{entries: make(map[dataColumnCacheKey]*dataColumnCacheEntry)}
}

// ensure returns the entry for the given key, creating it if it isn't already present.
func (c *dataColumnCache) ensure(key dataColumnCacheKey) *dataColumnCacheEntry {
	entry, ok := c.entries[key]
	if !ok {
		entry = &dataColumnCacheEntry{}
		c.entries[key] = entry
	}

	return entry
}

// delete removes the cache entry from the cache.
func (c *dataColumnCache) delete(key dataColumnCacheKey) {
	delete(c.entries, key)
}

// dataColumnCacheEntry holds a fixed-length cache of BlobSidecars.
type dataColumnCacheEntry struct {
	scs         [fieldparams.NumberOfColumns]*blocks.RODataColumn
	diskSummary filesystem.DataColumnStorageSummary
}

func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
	e.diskSummary = sum
}

// stash adds an item to the in-memory cache of DataColumnSidecars.
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
// stash will return an error if the given data colunn is already in the cache, or if the Index is out of bounds.
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
	if sc.Index >= fieldparams.NumberOfColumns {
		return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
	}

	if e.scs[sc.Index] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
	}

	e.scs[sc.Index] = sc

	return nil
}

func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
	nonEmptyIndices := commitmentsArray.nonEmptyIndices()
	if e.diskSummary.AllAvailable(nonEmptyIndices) {
		return nil, nil
	}

	commitmentsCount := commitmentsArray.count()
	sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)

	for i := range nonEmptyIndices {
		if e.diskSummary.HasIndex(i) {
			continue
		}

		if e.scs[i] == nil {
			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
		}

		if !reflect.DeepEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
		}

		sidecars = append(sidecars, *e.scs[i])
	}

	return sidecars, nil
}

// safeCommitmentsArray is a fixed size array of commitments.
// This is helpful for avoiding gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

// count returns the number of commitments in the array.
func (s *safeCommitmentsArray) count() int {
	count := 0

	for i := range s {
		if s[i] != nil {
			count++
		}
	}

	return count
}

// nonEmptyIndices returns a map of indices that are non-nil in the array.
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
	columns := make(map[uint64]bool)

	for i := range s {
		if s[i] != nil {
			columns[uint64(i)] = true
		}
	}

	return columns
}
```
beacon-chain/das/data_column_cache_test.go (new file, 124 lines)

```diff
@@ -0,0 +1,124 @@
package das

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestEnsureDeleteSetDiskSummary(t *testing.T) {
	c := newDataColumnCache()
	key := dataColumnCacheKey{}
	entry := c.ensure(key)
	require.DeepEqual(t, dataColumnCacheEntry{}, *entry)

	diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
	entry.setDiskSummary(diskSummary)
	entry = c.ensure(key)
	require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)

	c.delete(key)
	entry = c.ensure(key)
	require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
}

func TestStash(t *testing.T) {
	t.Run("Index too high", func(t *testing.T) {
		dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 10_000}}}
		roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)

		var entry dataColumnCacheEntry
		err := entry.stash(&roDataColumns[0])
		require.NotNil(t, err)
	})

	t.Run("Nominal and already existing", func(t *testing.T) {
		dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 1}}}
		roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)

		var entry dataColumnCacheEntry
		err := entry.stash(&roDataColumns[0])
		require.NoError(t, err)

		require.DeepEqual(t, roDataColumns[0], entry.scs[1])

		err = entry.stash(&roDataColumns[0])
		require.NotNil(t, err)
	})
}

func TestFilterDataColumns(t *testing.T) {
	t.Run("All available", func(t *testing.T) {
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}

		diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})

		dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}

		actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
		require.NoError(t, err)
		require.IsNil(t, actual)
	})

	t.Run("Some scs missing", func(t *testing.T) {
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}

		diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})

		dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}

		_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
		require.NotNil(t, err)
	})

	t.Run("Commitments not equal", func(t *testing.T) {
		root := [fieldparams.RootLength]byte{}
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}

		dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 1}}}
		roDataColumns, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)

		var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
		scs[1] = &roDataColumns[0]

		dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}

		_, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
		require.NotNil(t, err)
	})

	t.Run("Nominal", func(t *testing.T) {
		root := [fieldparams.RootLength]byte{}
		commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}

		diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})

		dataColumnParamsByBlockRoot := verification.DataColumnsParamsByRoot{root: {{ColumnIndex: 3, KzgCommitments: [][]byte{[]byte{3}}}}}
		expected, _ := verification.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)

		var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
		scs[3] = &expected[0]

		dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}

		actual, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
		require.NoError(t, err)

		require.DeepEqual(t, expected, actual)
	})
}

func TestCount(t *testing.T) {
	s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
	require.Equal(t, 2, s.count())
}

func TestNonEmptyIndices(t *testing.T) {
	s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
	actual := s.nonEmptyIndices()
	require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
}
```
@@ -14,6 +14,12 @@ import (
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
    IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
    Persist(current primitives.Slot, sc ...blocks.ROBlob) error
    AvailabilityChecker
    Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
}

// AvailabilityChecker is the minimum interface needed to check if data is available for a block.
// We should prefer this interface over AvailabilityStore in places where we don't need to persist blob data.
type AvailabilityChecker interface {
    IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
}

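A brief sketch of the narrowing that the comment above recommends: a component that only needs to wait for data availability can depend on AvailabilityChecker instead of the full AvailabilityStore. The dataWaiter type and waitForData method are hypothetical; only the interface methods in the hunk above are assumed, and the sketch compiles only inside the same package.

// Hypothetical consumer that only needs to wait for a block's data, never persist it.
// Accepting the narrow AvailabilityChecker keeps Persist out of reach by construction.
type dataWaiter struct {
    checker AvailabilityChecker
}

func (w *dataWaiter) waitForData(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
    // IsDataAvailable blocks until every commitment in the block is durably stored.
    return w.checker.IsDataAvailable(ctx, current, b)
}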
@@ -5,6 +5,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    errors "github.com/pkg/errors"
)

// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
@@ -24,9 +25,13 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}

// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
    blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
    if err != nil {
        return errors.Wrap(err, "blob sidecars from sidecars")
    }
    if m.PersistBlobsCallback != nil {
        return m.PersistBlobsCallback(current, sc...)
        return m.PersistBlobsCallback(current, blobSidecars...)
    }
    return nil
}

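For tests that want to observe what the code under test persists, the mock's PersistBlobsCallback receives the blob sidecars converted from the generic sidecar arguments. A minimal sketch of that wiring, assuming the callback keeps the blob-variadic signature implied by the hunk above; the newRecordingStore helper is hypothetical.

// Hypothetical test helper: record every blob sidecar the code under test persists.
func newRecordingStore(persisted *[]blocks.ROBlob) *MockAvailabilityStore {
    return &MockAvailabilityStore{
        PersistBlobsCallback: func(current primitives.Slot, sc ...blocks.ROBlob) error {
            *persisted = append(*persisted, sc...)
            return nil
        },
    }
}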
@@ -31,6 +31,7 @@ go_library(
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/execution/types:go_default_library",
@@ -97,6 +98,7 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",

@@ -7,6 +7,7 @@ import (
    "strings"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -44,11 +45,16 @@ var (
        GetPayloadMethodV3,
        GetPayloadBodiesByHashV1,
        GetPayloadBodiesByRangeV1,
        GetBlobsV1,
    }
    electraEngineEndpoints = []string{
        NewPayloadMethodV4,
        GetPayloadMethodV4,
    }
    fuluEngineEndpoints = []string{
        GetPayloadMethodV5,
        GetBlobsV2,
    }
)

const (
@@ -73,6 +79,8 @@ const (
    GetPayloadMethodV3 = "engine_getPayloadV3"
    // GetPayloadMethodV4 is the get payload method added for electra
    GetPayloadMethodV4 = "engine_getPayloadV4"
    // GetPayloadMethodV5 is the get payload method added for fulu
    GetPayloadMethodV5 = "engine_getPayloadV5"
    // BlockByHashMethod request string for JSON-RPC.
    BlockByHashMethod = "eth_getBlockByHash"
    // BlockByNumberMethod request string for JSON-RPC.
@@ -85,11 +93,21 @@ const (
    ExchangeCapabilities = "engine_exchangeCapabilities"
    // GetBlobsV1 request string for JSON-RPC.
    GetBlobsV1 = "engine_getBlobsV1"
    // GetBlobsV2 request string for JSON-RPC.
    GetBlobsV2 = "engine_getBlobsV2"
    // Defines the seconds before timing out engine endpoints with non-block execution semantics.
    defaultEngineTimeout = time.Second
    // TODO: Remove this temporarily needed hack: geth takes as input blob txs with blob proofs and
    // does the heavy lifting of building cell proofs, while normally this is done by the tx sender.
    // This is a cool hack because it lets the CL act as if the tx sender actually computed the cell proofs.
    // The only counterpart is that `engine_getPayloadV<x>` takes a lot of time.
    // defaultEngineTimeout = time.Second
    defaultEngineTimeout = 2 * time.Second
)

var errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
var (
    errInvalidPayloadBodyResponse = errors.New("engine api payload body response is invalid")
    errMissingBlobsAndProofsFromEL = errors.New("engine api payload body response is missing blobs and proofs")
)

// ForkchoiceUpdatedResponse is the response kind received by the
// engine_forkchoiceUpdatedV1 endpoint.
@@ -108,6 +126,7 @@ type Reconstructor interface {
        ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
    ) ([]interfaces.SignedBeaconBlock, error)
    ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
    ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error)
}

// EngineCaller defines a client that can interact with an Ethereum
@@ -257,6 +276,9 @@ func (s *Service) ForkchoiceUpdated(

func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
    pe := slots.ToEpoch(slot)
    if pe >= params.BeaconConfig().FuluForkEpoch {
        return GetPayloadMethodV5, &pb.ExecutionBundleFulu{}
    }
    if pe >= params.BeaconConfig().ElectraForkEpoch {
        return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
    }
@@ -289,7 +311,7 @@ func (s *Service) GetPayload(ctx context.Context, payloadId [8]byte, slot primit
    }
    res, err := blocks.NewGetPayloadResponse(result)
    if err != nil {
        return nil, err
        return nil, errors.Wrap(err, "new get payload response")
    }
    return res, nil
}
@@ -302,6 +324,9 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
    if params.ElectraEnabled() {
        supportedEngineEndpoints = append(supportedEngineEndpoints, electraEngineEndpoints...)
    }
    if params.FuluEnabled() {
        supportedEngineEndpoints = append(supportedEngineEndpoints, fuluEngineEndpoints...)
    }
    var result []string
    err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
    if err != nil {
@@ -495,9 +520,10 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
    ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
    defer span.End()

    // If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
    if !s.capabilityCache.has(GetBlobsV1) {
        return nil, nil
        return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV1))
    }

    result := make([]*pb.BlobAndProof, len(versionedHashes))
@@ -505,6 +531,19 @@ func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) (
    return result, handleRPCError(err)
}

func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
    ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
    defer span.End()

    if !s.capabilityCache.has(GetBlobsV2) {
        return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
    }

    result := make([]*pb.BlobAndProofV2, len(versionedHashes))
    err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)
    return result, handleRPCError(err)
}

// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -615,6 +654,73 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
    return verifiedBlobs, nil
}

// ReconstructDataColumnSidecars reconstructs the verified data column sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
    block := signedROBlock.Block()

    log := log.WithFields(logrus.Fields{
        "root": fmt.Sprintf("%#x", blockRoot),
        "slot": block.Slot(),
    })

    kzgCommitments, err := block.Body().BlobKzgCommitments()
    if err != nil {
        return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
    }

    // Collect KZG hashes for all blobs
    var kzgHashes []common.Hash
    for _, commitment := range kzgCommitments {
        kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
    }

    // Fetch all blobsAndCellsProofs from EL
    blobAndProofV2s, err := s.GetBlobsV2(ctx, kzgHashes)
    if err != nil {
        return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
    }

    // Return early if nothing is returned from the EL.
    if len(blobAndProofV2s) == 0 {
        log.Debug("No blobs returned from EL")
        return nil, nil
    }

    // Extract the blobs and proofs from the blobAndProofV2s.
    blobs := make([][]byte, 0, len(blobAndProofV2s))
    cellProofs := make([][]byte, 0, len(blobAndProofV2s))
    for _, blobsAndProofs := range blobAndProofV2s {
        if blobsAndProofs == nil {
            return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
        }
        blobs = append(blobs, blobsAndProofs.Blob)
        cellProofs = append(cellProofs, blobsAndProofs.KzgProofs...)
    }

    dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
    if err != nil {
        return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
    }

    verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
    for _, dataColumnSidecar := range dataColumnSidecars {
        roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
        if err != nil {
            return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
        }

        // We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
        verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
        verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
    }

    log.Debug("Data columns successfully reconstructed from EL")

    return verifiedRODataColumns, nil
}

|
||||
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
|
||||
) (interfaces.ExecutionData, error) {
|
||||
@@ -902,3 +1008,8 @@ func toBlockNumArg(number *big.Int) string {
|
||||
}
|
||||
return hexutil.EncodeBig(number)
|
||||
}
|
||||
|
||||
// wrapWithBlockRoot returns a new error with the given block root.
|
||||
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
|
||||
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
@@ -167,6 +168,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run(GetPayloadMethod, func(t *testing.T) {
|
||||
@@ -317,11 +319,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
|
||||
})
|
||||
t.Run(GetPayloadMethodV4, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
@@ -372,11 +374,11 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.DeepEqual(t, uint64(2), g)
|
||||
|
||||
commitments := [][]byte{bytesutil.PadTo([]byte("commitment1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("commitment2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundle.KzgCommitments)
|
||||
require.DeepEqual(t, commitments, resp.BlobsBundler.GetKzgCommitments())
|
||||
proofs := [][]byte{bytesutil.PadTo([]byte("proof1"), fieldparams.BLSPubkeyLength), bytesutil.PadTo([]byte("proof2"), fieldparams.BLSPubkeyLength)}
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
|
||||
require.DeepEqual(t, proofs, resp.BlobsBundler.GetProofs())
|
||||
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
|
||||
require.DeepEqual(t, blobs, resp.BlobsBundler.GetBlobs())
|
||||
requests := &pb.ExecutionRequests{
|
||||
Deposits: []*pb.DepositRequest{
|
||||
{
|
||||
@@ -405,7 +407,52 @@ func TestClient_HTTP(t *testing.T) {
|
||||
|
||||
require.DeepEqual(t, requests, resp.ExecutionRequests)
|
||||
})
|
||||
t.Run(GetPayloadMethodV5, func(t *testing.T) {
|
||||
payloadId := [8]byte{1}
|
||||
want, ok := fix["ExecutionBundleFulu"].(*pb.GetPayloadV5ResponseJson)
|
||||
require.Equal(t, true, ok)
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
reqArg, err := json.Marshal(pb.PayloadIDBytes(payloadId))
|
||||
require.NoError(t, err)
|
||||
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, string(reqArg),
|
||||
))
|
||||
resp := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
client := &Service{}
|
||||
client.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := client.GetPayload(ctx, payloadId, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
_, ok = resp.BlobsBundler.(*pb.BlobsBundleV2)
|
||||
if !ok {
|
||||
t.Logf("resp.BlobsBundler has unexpected type: %T", resp.BlobsBundler)
|
||||
}
|
||||
require.Equal(t, ok, true)
|
||||
})
|
||||
t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
|
||||
forkChoiceState := &pb.ForkchoiceState{
|
||||
HeadBlockHash: []byte("head"),
|
||||
@@ -1539,6 +1586,7 @@ func fixtures() map[string]interface{} {
|
||||
"ExecutionPayloadCapellaWithValue": s.ExecutionPayloadWithValueCapella,
|
||||
"ExecutionPayloadDenebWithValue": s.ExecutionPayloadWithValueDeneb,
|
||||
"ExecutionBundleElectra": s.ExecutionBundleElectra,
|
||||
"ExecutionBundleFulu": s.ExecutionBundleFulu,
|
||||
"ValidPayloadStatus": s.ValidPayloadStatus,
|
||||
"InvalidBlockHashStatus": s.InvalidBlockHashStatus,
|
||||
"AcceptedStatus": s.AcceptedStatus,
|
||||
@@ -1774,6 +1822,36 @@ func fixturesStruct() *payloadFixtures {
|
||||
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
|
||||
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
|
||||
}
|
||||
executionBundleFixtureFulu := &pb.GetPayloadV5ResponseJson{
|
||||
ShouldOverrideBuilder: true,
|
||||
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
|
||||
ParentHash: &common.Hash{'a'},
|
||||
FeeRecipient: &common.Address{'b'},
|
||||
StateRoot: &common.Hash{'c'},
|
||||
ReceiptsRoot: &common.Hash{'d'},
|
||||
LogsBloom: &hexutil.Bytes{'e'},
|
||||
PrevRandao: &common.Hash{'f'},
|
||||
BaseFeePerGas: "0x123",
|
||||
BlockHash: &common.Hash{'g'},
|
||||
Transactions: []hexutil.Bytes{{'h'}},
|
||||
Withdrawals: []*pb.Withdrawal{},
|
||||
BlockNumber: &hexUint,
|
||||
GasLimit: &hexUint,
|
||||
GasUsed: &hexUint,
|
||||
Timestamp: &hexUint,
|
||||
BlobGasUsed: &bgu,
|
||||
ExcessBlobGas: &ebg,
|
||||
},
|
||||
BlockValue: "0x11fffffffff",
|
||||
BlobsBundle: &pb.BlobBundleV2JSON{
|
||||
Commitments: []hexutil.Bytes{[]byte("commitment1"), []byte("commitment2")},
|
||||
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
|
||||
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
|
||||
},
|
||||
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
|
||||
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
|
||||
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
|
||||
}
|
||||
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
|
||||
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
|
||||
miner := bytesutil.PadTo([]byte("miner"), fieldparams.FeeRecipientLength)
|
||||
@@ -1868,6 +1946,7 @@ func fixturesStruct() *payloadFixtures {
|
||||
ExecutionPayloadWithValueCapella: executionPayloadWithValueFixtureCapella,
|
||||
ExecutionPayloadWithValueDeneb: executionPayloadWithValueFixtureDeneb,
|
||||
ExecutionBundleElectra: executionBundleFixtureElectra,
|
||||
ExecutionBundleFulu: executionBundleFixtureFulu,
|
||||
ValidPayloadStatus: validStatus,
|
||||
InvalidBlockHashStatus: inValidBlockHashStatus,
|
||||
AcceptedStatus: acceptedStatus,
|
||||
@@ -1892,6 +1971,7 @@ type payloadFixtures struct {
|
||||
ExecutionPayloadWithValueCapella *pb.GetPayloadV2ResponseJson
|
||||
ExecutionPayloadWithValueDeneb *pb.GetPayloadV3ResponseJson
|
||||
ExecutionBundleElectra *pb.GetPayloadV4ResponseJson
|
||||
ExecutionBundleFulu *pb.GetPayloadV5ResponseJson
|
||||
ValidPayloadStatus *pb.PayloadStatus
|
||||
InvalidBlockHashStatus *pb.PayloadStatus
|
||||
AcceptedStatus *pb.PayloadStatus
|
||||
@@ -2424,7 +2504,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
t.Run("get-blobs end point is not supported", func(t *testing.T) {
|
||||
hi := mockSummary(t, []bool{true, true, true, true, true, false})
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "engine_getBlobsV1 is not supported", err)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
@@ -2476,6 +2556,76 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup right fork epoch
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
client := &Service{capabilityCache: &capabilityCache{}}
|
||||
b := util.NewBeaconBlockFulu()
|
||||
b.Block.Slot = 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
kzgCommitments := createRandomKzgCommitments(t, 6)
|
||||
b.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, "get blobs V2 for block", err)
|
||||
})
|
||||
|
||||
t.Run("nothing received", func(t *testing.T) {
|
||||
srv := createBlobServerV2(t, 0, []bool{})
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("receiving all blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{true, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
|
||||
t.Run("missing some blobs", func(t *testing.T) {
|
||||
blobMasks := []bool{false, true, true, true, true, true}
|
||||
srv := createBlobServerV2(t, 6, blobMasks)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
|
||||
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
kzgCommitments := make([][]byte, num)
|
||||
for i := range kzgCommitments {
|
||||
@@ -2511,6 +2661,42 @@ func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *http
|
||||
}))
|
||||
}
|
||||
|
||||
func createBlobServerV2(t *testing.T, numBlobs int, blobMasks []bool) *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
require.Equal(t, len(blobMasks), numBlobs)
|
||||
|
||||
blobAndCellProofs := make([]*pb.BlobAndProofV2Json, numBlobs)
|
||||
for i := range blobAndCellProofs {
|
||||
if !blobMasks[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
blobAndCellProofs[i] = &pb.BlobAndProofV2Json{
|
||||
Blob: []byte("0xblob"),
|
||||
KzgProofs: []hexutil.Bytes{},
|
||||
}
|
||||
for j := 0; j < int(params.BeaconConfig().NumberOfColumns); j++ {
|
||||
cellProof := make([]byte, 48)
|
||||
blobAndCellProofs[i].KzgProofs = append(blobAndCellProofs[i].KzgProofs, cellProof)
|
||||
}
|
||||
}
|
||||
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": blobAndCellProofs,
|
||||
}
|
||||
|
||||
err := json.NewEncoder(w).Encode(respJSON)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
}
|
||||
|
||||
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, err := rpc.DialHTTP(url)
|
||||
require.NoError(t, err)
|
||||
@@ -2522,6 +2708,12 @@ func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Se
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func setupRpcClientV2(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, client := setupRpcClient(t, url, client)
|
||||
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV2: nil}}
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return &verification.MockBlobVerifier{
|
||||
|
||||
@@ -38,6 +38,8 @@ type EngineClient struct {
|
||||
ErrGetPayload error
|
||||
BlobSidecars []blocks.VerifiedROBlob
|
||||
ErrorBlobSidecars error
|
||||
DataColumnSidecars []blocks.VerifiedRODataColumn
|
||||
ErrorDataColumnSidecars error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -113,6 +115,10 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
func (e *EngineClient) ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
|
||||
@@ -23,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
@@ -86,44 +87,47 @@ type serviceFlagOpts struct {
|
||||
// full PoS node. It handles the lifecycle of the entire system and registers
|
||||
// services to a service registry.
|
||||
type BeaconNode struct {
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
cliCtx *cli.Context
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
services *runtime.ServiceRegistry
|
||||
lock sync.RWMutex
|
||||
stop chan struct{} // Channel to wait for termination notifications.
|
||||
db db.Database
|
||||
slasherDB db.SlasherDatabase
|
||||
attestationCache *cache.AttestationCache
|
||||
attestationPool attestations.Pool
|
||||
exitPool voluntaryexits.PoolManager
|
||||
slashingsPool slashings.PoolManager
|
||||
syncCommitteePool synccommittee.Pool
|
||||
blsToExecPool blstoexec.PoolManager
|
||||
depositCache cache.DepositCache
|
||||
trackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
payloadIDCache *cache.PayloadIDCache
|
||||
stateFeed *event.Feed
|
||||
blockFeed *event.Feed
|
||||
opFeed *event.Feed
|
||||
stateGen *stategen.State
|
||||
collector *bcnodeCollector
|
||||
slasherBlockHeadersFeed *event.Feed
|
||||
slasherAttestationsFeed *event.Feed
|
||||
finalizedStateAtStartUp state.BeaconState
|
||||
serviceFlagOpts *serviceFlagOpts
|
||||
GenesisInitializer genesis.Initializer
|
||||
CheckpointInitializer checkpoint.Initializer
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
BackfillOpts []backfill.ServiceOption
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
BlobStorageOptions []filesystem.BlobStorageOption
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
DataColumnStorageOptions []filesystem.DataColumnStorageOption
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
verifyInitWaiter *verification.InitializerWaiter
|
||||
syncChecker *initialsync.SyncChecker
|
||||
slasherEnabled bool
|
||||
lcStore *lightclient.Store
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -161,6 +165,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
serviceFlagOpts: &serviceFlagOpts{},
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
syncChecker: &initialsync.SyncChecker{},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
slasherEnabled: cliCtx.Bool(flags.SlasherFlag.Name),
|
||||
lcStore: &lightclient.Store{},
|
||||
}
|
||||
@@ -190,6 +195,15 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BlobStorage = blobs
|
||||
}
|
||||
|
||||
if beacon.DataColumnStorage == nil {
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(cliCtx.Context, beacon.DataColumnStorageOptions...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new data column storage")
|
||||
}
|
||||
|
||||
beacon.DataColumnStorage = dataColumnStorage
|
||||
}
|
||||
|
||||
bfs, err := startBaseServices(cliCtx, beacon, depositAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not start modules")
|
||||
@@ -202,6 +216,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.BackfillOpts,
|
||||
backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
|
||||
backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
|
||||
backfill.WithCustodyInfo(beacon.custodyInfo),
|
||||
)
|
||||
|
||||
if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
|
||||
@@ -280,7 +295,9 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, errors.Wrap(err, "could not start DB")
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx); err != nil {
|
||||
@@ -491,6 +508,10 @@ func (b *BeaconNode) clearDB(clearDB, forceClearDB bool, d *kv.Store, dbPath str
|
||||
return nil, errors.Wrap(err, "could not clear blob storage")
|
||||
}
|
||||
|
||||
if err := b.DataColumnStorage.Clear(); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear data column storage")
|
||||
}
|
||||
|
||||
d, err = kv.NewKVStore(b.ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new database")
|
||||
@@ -697,6 +718,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
StateNotifier: b,
|
||||
DB: b.db,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -775,9 +797,11 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
blockchain.WithDataColumnStorage(b.DataColumnStorage),
|
||||
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
blockchain.WithPayloadIDCache(b.payloadIDCache),
|
||||
blockchain.WithSyncChecker(b.syncChecker),
|
||||
blockchain.WithCustodyInfo(b.custodyInfo),
|
||||
blockchain.WithSlasherEnabled(b.slasherEnabled),
|
||||
blockchain.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -862,8 +886,11 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
regularsync.WithDataColumnStorage(b.DataColumnStorage),
|
||||
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
|
||||
regularsync.WithAvailableBlocker(bFillStore),
|
||||
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
|
||||
regularsync.WithCustodyInfo(b.custodyInfo),
|
||||
regularsync.WithSlasherEnabled(b.slasherEnabled),
|
||||
regularsync.WithLightClientStore(b.lcStore),
|
||||
)
|
||||
@@ -889,6 +916,8 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
CustodyInfo: b.custodyInfo,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -983,6 +1012,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -1010,6 +1040,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
@@ -1151,7 +1182,8 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
// TODO: Add backfill for data column storage
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.DataColumnStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
|
||||
ctx, cancel := newCliContextWithCancel(&app, set)
|
||||
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
node, err := New(ctx, cancel, WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
|
||||
node.Close()
|
||||
@@ -75,7 +75,7 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
node.services = &runtime.ServiceRegistry{}
|
||||
go func() {
|
||||
@@ -99,7 +99,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
node, err := New(ctx, cancel, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)))
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
node.Start()
|
||||
@@ -130,7 +130,7 @@ func TestClearDB(t *testing.T) {
|
||||
context, cancel := newCliContextWithCancel(&app, set)
|
||||
options := []Option{
|
||||
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)), WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
|
||||
}
|
||||
_, err = New(context, cancel, options...)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -50,3 +50,20 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
|
||||
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.DataColumnStorage = bs
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDataColumnStorageOptions appends 1 or more filesystem.DataColumnStorageOption on the beacon node,
|
||||
// to be used when initializing data column storage.
|
||||
func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Option {
|
||||
return func(bn *BeaconNode) error {
|
||||
bn.DataColumnStorageOptions = append(bn.DataColumnStorageOptions, opt...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,7 +222,6 @@ func (s *Service) RefreshPersistentSubnets() {
|
||||
// Get the sync subnet bitfield in our metadata.
|
||||
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()
|
||||
|
||||
// Is our sync bitvector record up to date?
|
||||
isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)
|
||||
|
||||
// Compare current epoch with the Fulu fork epoch.
|
||||
|
||||
@@ -42,7 +42,7 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
    if flags.Get().MinimumSyncPeers < required {
        required = flags.Get().MinimumSyncPeers
    }
    _, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
    _, peers := a.ps.BestFinalized(-1, a.fc.FinalizedCheckpoint().Epoch)
    if len(peers) < required {
        log.WithFields(logrus.Fields{
            "suitable": len(peers),
@@ -52,27 +52,33 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
    return peers, nil
}

type AssignmentFilter func([]peer.ID) []peer.ID

// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
func (a *Assigner) Assign(filter AssignmentFilter) ([]peer.ID, error) {
    best, err := a.freshPeers()
    if err != nil {
        return nil, err
    }
    return pickBest(busy, n, best), nil
    return filter(best), nil
}

func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
    ps := make([]peer.ID, 0, n)
    for _, p := range best {
        if len(ps) == n {
            return ps
        }
        if !busy[p] {
            ps = append(ps, p)
// NotBusy is a filter that returns a list of peer.IDs with len() <= n, which are not in the `busy` map.
// n == -1 will return all peers that are not busy.
func NotBusy(busy map[peer.ID]bool, n int) AssignmentFilter {
    return func(peers []peer.ID) []peer.ID {
        ps := make([]peer.ID, 0)
        for _, p := range peers {
            if n > 0 && len(ps) == n {
                return ps
            }
            if !busy[p] {
                ps = append(ps, p)
            }
        }
        return ps
    }
    return ps
}

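With the new signature, call sites compose Assign with a filter rather than passing busy/n directly. A hedged sketch of the migration at a call site, assuming it lives in the same peers package; the assignNotBusy wrapper and its arguments are illustrative only, not code from this change.

// Before this change: peers, err := assigner.Assign(busy, 5)
// After: the same behavior is expressed by passing the NotBusy filter explicitly.
func assignNotBusy(assigner *Assigner, busy map[peer.ID]bool) ([]peer.ID, error) {
    // NotBusy(busy, 5) keeps at most 5 peers that are not currently marked busy;
    // NotBusy(busy, -1) would return every non-busy peer.
    return assigner.Assign(NotBusy(busy, 5))
}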
@@ -18,8 +18,9 @@ func TestPickBest(t *testing.T) {
        expected []peer.ID
    }{
        {
            name: "",
            n:    0,
            name:     "don't limit",
            n:        0,
            expected: best,
        },
        {
            name: "none busy",
@@ -88,7 +89,8 @@ func TestPickBest(t *testing.T) {
        if c.best == nil {
            c.best = best
        }
        pb := pickBest(c.busy, c.n, c.best)
        filt := NotBusy(c.busy, c.n)
        pb := filt(c.best)
        require.Equal(t, len(c.expected), len(pb))
        for i := range c.expected {
            require.Equal(t, c.expected[i], pb[i])

@@ -5,7 +5,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var _ Scorer = (*BadResponsesScorer)(nil)
|
||||
@@ -129,13 +128,14 @@ func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
|
||||
|
||||
// isBadPeerNoLock is lock-free version of IsBadPeer.
|
||||
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
if peerData.BadResponses >= s.config.Threshold {
|
||||
return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
}
|
||||
// if peerData, ok := s.store.PeerData(pid); ok {
|
||||
// TODO: Remove this once out of devnet
|
||||
// if peerData.BadResponses >= s.config.Threshold {
|
||||
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
// return nil
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
@@ -13,39 +12,41 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
const pid = "peer1"
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_Score(t *testing.T) {
|
||||
// const pid = "peer1"
|
||||
|
||||
ctx := t.Context()
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 4,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
|
||||
assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -2.5, scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
// scorer.Increment(pid)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
|
||||
|
||||
scorer.Increment(pid)
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
}
|
||||
// scorer.Increment(pid)
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid))
|
||||
// assert.Equal(t, -100.0, scorer.Score(pid))
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
@@ -137,56 +138,60 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
|
||||
assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
|
||||
}
|
||||
|
||||
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pid := peer.ID("peer1")
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pid := peer.ID("peer1")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid))
|
||||
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pid)
|
||||
if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
} else {
|
||||
assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
}
|
||||
}
|
||||
}
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pid)
|
||||
// if i == scorers.DefaultBadResponsesThreshold-1 {
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// } else {
|
||||
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
for i := 0; i < len(pids); i++ {
|
||||
peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
}
|
||||
for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
scorer.Increment(pids[1])
|
||||
scorer.Increment(pids[2])
|
||||
scorer.Increment(pids[4])
|
||||
}
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
badPeers := scorer.BadPeers()
|
||||
sort.Slice(badPeers, func(i, j int) bool {
|
||||
return badPeers[i] < badPeers[j]
|
||||
})
|
||||
assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
}
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// scorer := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
|
||||
// for i := 0; i < len(pids); i++ {
|
||||
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
|
||||
// }
|
||||
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
|
||||
// scorer.Increment(pids[1])
|
||||
// scorer.Increment(pids[2])
|
||||
// scorer.Increment(pids[4])
|
||||
// }
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
|
||||
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
|
||||
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
|
||||
// want := []peer.ID{pids[1], pids[2], pids[4]}
|
||||
// badPeers := scorer.BadPeers()
|
||||
// sort.Slice(badPeers, func(i, j int) bool {
|
||||
// return badPeers[i] < badPeers[j]
|
||||
// })
|
||||
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
|
||||
// }
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -211,99 +211,102 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestScorers_Service_loop(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_loop(t *testing.T) {
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
// defer cancel()
|
||||
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
DecayInterval: 25 * time.Millisecond,
|
||||
Decay: 64,
|
||||
},
|
||||
},
|
||||
})
|
||||
s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 5,
|
||||
// DecayInterval: 50 * time.Millisecond,
|
||||
// },
|
||||
// BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
// DecayInterval: 25 * time.Millisecond,
|
||||
// Decay: 64,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
// s1 := peerStatuses.Scorers().BadResponsesScorer()
|
||||
// s2 := peerStatuses.Scorers().BlockProviderScorer()
|
||||
|
||||
pid1 := peer.ID("peer1")
|
||||
peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
s1.Increment(pid1)
|
||||
}
|
||||
assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
// pid1 := peer.ID("peer1")
|
||||
// peerStatuses.Add(nil, pid1, nil, network.DirUnknown)
|
||||
// for i := 0; i < s1.Params().Threshold+5; i++ {
|
||||
// s1.Increment(pid1)
|
||||
// }
|
||||
// assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
|
||||
|
||||
s2.IncrementProcessedBlocks("peer1", 221)
|
||||
assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
// s2.IncrementProcessedBlocks("peer1", 221)
|
||||
// assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
|
||||
|
||||
done := make(chan struct{}, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
done <- struct{}{}
|
||||
}()
|
||||
ticker := time.NewTicker(50 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Error("Timed out")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
// done := make(chan struct{}, 1)
|
||||
// go func() {
|
||||
// defer func() {
|
||||
// done <- struct{}{}
|
||||
// }()
|
||||
// ticker := time.NewTicker(50 * time.Millisecond)
|
||||
// defer ticker.Stop()
|
||||
// for {
|
||||
// select {
|
||||
// case <-ticker.C:
|
||||
// if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
|
||||
// return
|
||||
// }
|
||||
// case <-ctx.Done():
|
||||
// t.Error("Timed out")
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
// }()
|
||||
|
||||
<-done
|
||||
assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
// <-done
|
||||
// assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
// assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
// }
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// }
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
// peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: 2,
|
||||
// DecayInterval: 50 * time.Second,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
// for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
// assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
// assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
// assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
// }
|
||||
|
||||
@@ -705,31 +705,47 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
|
||||
// upon by the majority of peers. This method may not return the absolute highest finalized, but
|
||||
// the finalized epoch in which most peers can serve blocks (plurality voting).
|
||||
// Ideally, all peers would be reporting the same finalized epoch but some may be behind due to their
|
||||
// own latency, or because of their finalized epoch at the time we queried them.
|
||||
// Returns epoch number and list of peers that are at or beyond that epoch.
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them. Returns epoch number and list of peers that are at or beyond
|
||||
// that epoch.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch >= ourFinalizedEpoch {
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
continue
|
||||
}
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
var targetEpoch primitives.Epoch
|
||||
var mostVotes uint64
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
@@ -737,11 +753,12 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized epoch, in decreasing order.
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
})
|
||||
|
||||
@@ -754,7 +771,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
}
|
||||
|
||||
// Trim potential peers to at most maxPeers.
|
||||
if len(potentialPIDs) > maxPeers {
|
||||
if maxPeers > 0 && len(potentialPIDs) > maxPeers {
|
||||
potentialPIDs = potentialPIDs[:maxPeers]
|
||||
}
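// Illustrative sketch (not part of the diff): the target-epoch selection in BestFinalized above is
// plurality voting with a highest-epoch tie-break. A minimal standalone restatement of that rule,
// using plain uint64 epochs instead of primitives.Epoch, looks like this:
package main

import "fmt"

// pickTargetEpoch returns the epoch with the most votes; ties go to the higher epoch.
func pickTargetEpoch(votes map[uint64]uint64) uint64 {
	var targetEpoch, mostVotes uint64
	for epoch, count := range votes {
		if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
			mostVotes = count
			targetEpoch = epoch
		}
	}
	return targetEpoch
}

func main() {
	// Epochs 120 and 121 tie at three votes each; the tie-break picks 121.
	fmt.Println(pickTargetEpoch(map[uint64]uint64{119: 2, 120: 3, 121: 3})) // 121
}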
@@ -764,26 +781,42 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
|
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
// and is shared by at least minPeers.
|
||||
func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
connected := p.Connected()
|
||||
|
||||
// Calculate our head slot.
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
ourHeadSlot := slotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
|
||||
// key: head epoch, value: number of peers that support this epoch.
|
||||
epochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: head epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
|
||||
ourHeadSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(ourHeadEpoch))
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.HeadSlot > ourHeadSlot {
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
// Skip if the peer's head epoch is not defined, or if the peer's head slot is
|
||||
// lower or equal than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.HeadSlot <= ourHeadSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
epoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
epochVotes[epoch]++
|
||||
pidEpoch[pid] = epoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which has enough peers' votes (>= minPeers).
|
||||
var targetEpoch primitives.Epoch
|
||||
targetEpoch := primitives.Epoch(0)
|
||||
for epoch, votes := range epochVotes {
|
||||
if votes >= uint64(minPeers) && targetEpoch < epoch {
|
||||
targetEpoch = epoch
|
||||
@@ -1012,16 +1045,23 @@ func (p *Status) isfromBadIP(pid peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
ip, err := manet.ToIP(peerData.Address)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "to ip")
|
||||
}
|
||||
// ip, err := manet.ToIP(peerData.Address)
|
||||
// if err != nil {
|
||||
// return errors.Wrap(err, "to ip")
|
||||
// }
|
||||
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > CollocationLimit {
|
||||
return errors.Errorf("collocation limit exceeded: got %d - limit %d", val, CollocationLimit)
|
||||
}
|
||||
}
|
||||
// if val, ok := p.ipTracker[ip.String()]; ok {
// if val > CollocationLimit {
// TODO: Remove this once out of devnet.
// return errors.Errorf("colocation limit exceeded: got %d - limit %d", val, CollocationLimit)
// log.WithFields(logrus.Fields{
// "pid": pid,
// "ip": ip.String(),
// "colocationCount": val,
// "colocationLimit": CollocationLimit,
// }).Debug("Colocation limit exceeded. Peer should be banned.")
// }
// }
|
||||
|
||||
return nil
|
||||
}
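// Illustrative sketch (not part of the diff): the commented-out guard above is a per-IP peer
// counter compared against CollocationLimit. The ipTracker map and limit value below are
// hypothetical stand-ins that restate the check in isolation:
package main

import "fmt"

const collocationLimit = 5

// exceedsCollocation reports whether the given IP already hosts more peers than the limit allows.
func exceedsCollocation(ipTracker map[string]uint64, ip string) bool {
	return ipTracker[ip] > collocationLimit
}

func main() {
	tracker := map[string]uint64{"211.227.218.116": 6}
	fmt.Println(exceedsCollocation(tracker, "211.227.218.116")) // true: over the limit
	fmt.Println(exceedsCollocation(tracker, "10.0.0.1"))        // false: unknown IP, count 0
}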
@@ -3,7 +3,6 @@ package peers_test
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -329,55 +328,56 @@ func TestPeerWithNilChainState(t *testing.T) {
|
||||
require.Equal(t, resChainState, nothing)
|
||||
}
|
||||
|
||||
func TestPeerBadResponses(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerBadResponses(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
require.NoError(t, err)
|
||||
{
|
||||
_, err := id.MarshalBinary()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// id, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
|
||||
// require.NoError(t, err)
|
||||
// {
|
||||
// _, err := id.MarshalBinary()
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
require.NoError(t, err, "Failed to create address")
|
||||
direction := network.DirInbound
|
||||
p.Add(new(enr.Record), id, address, direction)
|
||||
// address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
|
||||
// require.NoError(t, err, "Failed to create address")
|
||||
// direction := network.DirInbound
|
||||
// p.Add(new(enr.Record), id, address, direction)
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
resBadResponses, err := scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
// resBadResponses, err := scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
|
||||
scorer.Increment(id)
|
||||
resBadResponses, err = scorer.Count(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
}
|
||||
// scorer.Increment(id)
|
||||
// resBadResponses, err = scorer.Count(id)
|
||||
// require.NoError(t, err)
|
||||
// assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
|
||||
// assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
|
||||
// }
|
||||
|
||||
func TestAddMetaData(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
@@ -496,100 +496,102 @@ func TestPeerValidTime(t *testing.T) {
|
||||
assert.Equal(t, numPeersConnected, len(p.Connected()), "Unexpected number of connected peers")
|
||||
}
|
||||
|
||||
func TestPrune(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPrune(t *testing.T) {
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
if i%7 == 0 {
|
||||
// Peer added as disconnected.
|
||||
_ = addPeer(t, p, peers.Disconnected)
|
||||
}
|
||||
// Peer added to peer handler.
|
||||
_ = addPeer(t, p, peers.Connected)
|
||||
}
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// if i%7 == 0 {
|
||||
// // Peer added as disconnected.
|
||||
// _ = addPeer(t, p, peers.PeerDisconnected)
|
||||
// }
|
||||
// // Peer added to peer handler.
|
||||
// _ = addPeer(t, p, peers.PeerConnected)
|
||||
// }
|
||||
|
||||
disPeers := p.Disconnected()
|
||||
firstPID := disPeers[0]
|
||||
secondPID := disPeers[1]
|
||||
thirdPID := disPeers[2]
|
||||
// disPeers := p.Disconnected()
|
||||
// firstPID := disPeers[0]
|
||||
// secondPID := disPeers[1]
|
||||
// thirdPID := disPeers[2]
|
||||
|
||||
scorer := p.Scorers().BadResponsesScorer()
|
||||
// scorer := p.Scorers().BadResponsesScorer()
|
||||
|
||||
// Make first peer a bad peer
|
||||
scorer.Increment(firstPID)
|
||||
scorer.Increment(firstPID)
|
||||
// // Make first peer a bad peer
|
||||
// scorer.Increment(firstPID)
|
||||
// scorer.Increment(firstPID)
|
||||
|
||||
// Add bad response for p2.
|
||||
scorer.Increment(secondPID)
|
||||
// // Add bad response for p2.
|
||||
// scorer.Increment(secondPID)
|
||||
|
||||
// Prune peers
|
||||
p.Prune()
|
||||
// // Prune peers
|
||||
// p.Prune()
|
||||
|
||||
// Bad peer is expected to still be kept in handler.
|
||||
badRes, err := scorer.Count(firstPID)
|
||||
assert.NoError(t, err, "error is supposed to be nil")
|
||||
assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
// // Bad peer is expected to still be kept in handler.
|
||||
// badRes, err := scorer.Count(firstPID)
|
||||
// assert.NoError(t, err, "error is supposed to be nil")
|
||||
// assert.Equal(t, 2, badRes, "Did not get expected amount")
|
||||
|
||||
// Not so good peer is pruned away so that we can reduce the
|
||||
// total size of the handler.
|
||||
_, err = scorer.Count(secondPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
// // Not so good peer is pruned away so that we can reduce the
|
||||
// // total size of the handler.
|
||||
// _, err = scorer.Count(secondPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
|
||||
// Last peer has been removed.
|
||||
_, err = scorer.Count(thirdPID)
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
}
|
||||
// // Last peer has been removed.
|
||||
// _, err = scorer.Count(thirdPID)
|
||||
// assert.ErrorContains(t, "peer unknown", err)
|
||||
// }
|
||||
|
||||
func TestPeerIPTracker(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: false,
|
||||
})
|
||||
defer resetCfg()
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestPeerIPTracker(t *testing.T) {
|
||||
// resetCfg := features.InitWithReset(&features.Flags{
|
||||
// EnablePeerScorer: false,
|
||||
// })
|
||||
// defer resetCfg()
|
||||
// maxBadResponses := 2
|
||||
// p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// PeerLimit: 30,
|
||||
// ScorerParams: &scorers.Config{
|
||||
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
// Threshold: maxBadResponses,
|
||||
// },
|
||||
// },
|
||||
// })
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
var badPeers []peer.ID
|
||||
for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.ConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
}
|
||||
for _, pr := range badPeers {
|
||||
assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
}
|
||||
// badIP := "211.227.218.116"
|
||||
// var badPeers []peer.ID
|
||||
// for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
// port := strconv.Itoa(3000 + i)
|
||||
// addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
|
||||
// }
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
// }
|
||||
|
||||
// Add in bad peers, so that our records are trimmed out
|
||||
// from the peer store.
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// Peer added to peer handler.
|
||||
pid := addPeer(t, p, peers.Disconnected)
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
p.Prune()
|
||||
// // Add in bad peers, so that our records are trimmed out
|
||||
// // from the peer store.
|
||||
// for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// // Peer added to peer handler.
|
||||
// pid := addPeer(t, p, peers.PeerDisconnected)
|
||||
// p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
// }
|
||||
// p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
// for _, pr := range badPeers {
|
||||
// assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
|
||||
@@ -61,6 +61,12 @@ const LightClientFinalityUpdateName = "/light_client_finality_update"
|
||||
// LightClientOptimisticUpdateName is the name for the LightClientOptimisticUpdate topic.
|
||||
const LightClientOptimisticUpdateName = "/light_client_optimistic_update"
|
||||
|
||||
// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
|
||||
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
|
||||
|
||||
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
|
||||
const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
|
||||
|
||||
const (
|
||||
// V1 RPC Topics
|
||||
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
|
||||
@@ -92,6 +98,12 @@ const (
|
||||
RPCLightClientFinalityUpdateTopicV1 = protocolPrefix + LightClientFinalityUpdateName + SchemaVersionV1
|
||||
// RPCLightClientOptimisticUpdateTopicV1 is a topic for requesting a light client Optimistic update.
|
||||
RPCLightClientOptimisticUpdateTopicV1 = protocolPrefix + LightClientOptimisticUpdateName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1 - New in Fulu.
|
||||
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1 - New in Fulu.
|
||||
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
|
||||
@@ -139,6 +151,11 @@ var RPCTopicMappings = map[string]interface{}{
|
||||
RPCLightClientUpdatesByRangeTopicV1: new(pb.LightClientUpdatesByRangeRequest),
|
||||
RPCLightClientFinalityUpdateTopicV1: new(interface{}),
|
||||
RPCLightClientOptimisticUpdateTopicV1: new(interface{}),
|
||||
|
||||
// DataColumnSidecarsByRange v1 Message
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
|
||||
}
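// Illustrative sketch (not part of the diff): full RPC topic strings are built by concatenating the
// protocol prefix, the message name, and the schema version, which is how the two new data column
// topics end up at the paths quoted in the comments above. The constant values here simply restate
// those comments and are assumptions, not imports from the package:
package main

import "fmt"

const (
	protocolPrefix                = "/eth2/beacon_chain/req"
	dataColumnSidecarsByRootName  = "/data_column_sidecars_by_root"
	dataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
	schemaVersionV1               = "/1"
)

func main() {
	fmt.Println(protocolPrefix + dataColumnSidecarsByRootName + schemaVersionV1)  // /eth2/beacon_chain/req/data_column_sidecars_by_root/1
	fmt.Println(protocolPrefix + dataColumnSidecarsByRangeName + schemaVersionV1) // /eth2/beacon_chain/req/data_column_sidecars_by_range/1
}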
// Maps all registered protocol prefixes.
|
||||
@@ -161,6 +178,8 @@ var messageMapping = map[string]bool{
|
||||
LightClientUpdatesByRangeName: true,
|
||||
LightClientFinalityUpdateName: true,
|
||||
LightClientOptimisticUpdateName: true,
|
||||
DataColumnSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRangeName: true,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to updated in altair.
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -358,48 +356,49 @@ func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.C
|
||||
return fd
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
info peer.AddrInfo
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "bad peer",
|
||||
peers: func() *peers.Status {
|
||||
ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
for i := 0; i < 10; i++ {
|
||||
ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
}
|
||||
return ps
|
||||
}(),
|
||||
info: peer.AddrInfo{ID: "bad"},
|
||||
wantErr: "refused to connect to bad peer",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _, _ := createHost(t, 34567)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
host: h,
|
||||
peers: tt.peers,
|
||||
}
|
||||
err := s.connectWithPeer(ctx, tt.info)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// TODO: Uncomment when out of devnet
|
||||
// func TestService_connectWithPeer(t *testing.T) {
|
||||
// params.SetupTestConfigCleanup(t)
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// peers *peers.Status
|
||||
// info peer.AddrInfo
|
||||
// wantErr string
|
||||
// }{
|
||||
// {
|
||||
// name: "bad peer",
|
||||
// peers: func() *peers.Status {
|
||||
// ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
// ScorerParams: &scorers.Config{},
|
||||
// })
|
||||
// for i := 0; i < 10; i++ {
|
||||
// ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
// }
|
||||
// return ps
|
||||
// }(),
|
||||
// info: peer.AddrInfo{ID: "bad"},
|
||||
// wantErr: "refused to connect to bad peer",
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// h, _, _ := createHost(t, 34567)
|
||||
// defer func() {
|
||||
// if err := h.Close(); err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
// }()
|
||||
// ctx := context.Background()
|
||||
// s := &Service{
|
||||
// host: h,
|
||||
// peers: tt.peers,
|
||||
// }
|
||||
// err := s.connectWithPeer(ctx, tt.info)
|
||||
// if len(tt.wantErr) > 0 {
|
||||
// require.ErrorContains(t, tt.wantErr, err)
|
||||
// } else {
|
||||
// require.NoError(t, err)
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -9,10 +9,13 @@ var (
|
||||
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
|
||||
ErrGeneric = errors.New("internal service error")
|
||||
|
||||
ErrRateLimited = errors.New("rate limited")
|
||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
|
||||
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
|
||||
ErrRateLimited = errors.New("rate limited")
|
||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")
|
||||
|
||||
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
|
||||
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
|
||||
|
||||
ErrResourceUnavailable = errors.New("resource requested unavailable")
|
||||
)
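// Illustrative sketch (not part of the diff): package-level sentinel errors like the new
// ErrMaxDataColumnReqExceeded above can be wrapped by callers and matched with errors.Is. The
// lowercase variable and the limit value below are illustrative stand-ins, not the package's own:
package main

import (
	"errors"
	"fmt"
)

var errMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")

// validateCount rejects requests for more sidecars than the configured maximum.
func validateCount(requested, max uint64) error {
	if requested > max {
		return fmt.Errorf("validate request: %w", errMaxDataColumnReqExceeded)
	}
	return nil
}

func main() {
	err := validateCount(20_000, 16_384)
	fmt.Println(errors.Is(err, errMaxDataColumnReqExceeded)) // true
}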
|
||||
|
||||
@@ -206,8 +206,8 @@ func (s BlobSidecarsByRootReq) Swap(i, j int) {
|
||||
}
|
||||
|
||||
// Len is the number of elements in the collection.
|
||||
func (s BlobSidecarsByRootReq) Len() int {
|
||||
return len(s)
|
||||
func (s *BlobSidecarsByRootReq) Len() int {
|
||||
return len(*s)
|
||||
}
|
||||
|
||||
// ====================================
|
||||
|
||||
@@ -221,7 +221,6 @@ func generateDataColumnIdentifiers(n int) []*eth.DataColumnsByRootIdentifier {
|
||||
|
||||
func TestDataColumnSidecarsByRootReq_Marshal(t *testing.T) {
|
||||
/*
|
||||
|
||||
SSZ encoding of DataColumnsByRootIdentifiers is tested in spectests.
|
||||
However, encoding a list of DataColumnsByRootIdentifier is not.
|
||||
We are testing it here.
|
||||
|
||||
@@ -201,6 +201,11 @@ func ConvertPeerIDToNodeID(pid peer.ID) (enode.ID, error) {
|
||||
return [32]byte{}, errors.Wrap(err, "parse public key")
|
||||
}
|
||||
|
||||
newPubkey := &ecdsa.PublicKey{Curve: gCrypto.S256(), X: pubKeyObjSecp256k1.X(), Y: pubKeyObjSecp256k1.Y()}
|
||||
newPubkey := &ecdsa.PublicKey{
|
||||
Curve: gCrypto.S256(),
|
||||
X: pubKeyObjSecp256k1.X(),
|
||||
Y: pubKeyObjSecp256k1.Y(),
|
||||
}
|
||||
|
||||
return enode.PubkeyToIDV4(newPubkey), nil
|
||||
}
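// Illustrative usage (not part of the diff): a caller converts a libp2p peer ID into a discv5 node ID
// roughly like this; the peer ID string is the example used in the tests above and the package
// qualifier is assumed:
//
//	pid, err := peer.Decode("16Uiu2HAkyWZ4Ni1TpvDS8dPxsozmHY85KaiFjodQuV6Tz5tkHVeR")
//	if err != nil { /* handle */ }
//	nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
//	if err != nil { /* handle */ }
//	// nodeID can now be compared against ENR records obtained from discovery.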
|
||||
|
||||
@@ -97,18 +97,19 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// parseIndices filters out invalid and duplicate blob indices
|
||||
func parseIndices(url *url.URL, s primitives.Slot) ([]uint64, error) {
|
||||
func parseIndices(url *url.URL, s primitives.Slot) ([]int, error) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s)
|
||||
rawIndices := url.Query()["indices"]
|
||||
indices := make([]uint64, 0, params.BeaconConfig().MaxBlobsPerBlock(s))
|
||||
indices := make([]int, 0, maxBlobsPerBlock)
|
||||
invalidIndices := make([]string, 0)
|
||||
loop:
|
||||
for _, raw := range rawIndices {
|
||||
ix, err := strconv.ParseUint(raw, 10, 64)
|
||||
ix, err := strconv.Atoi(raw)
|
||||
if err != nil {
|
||||
invalidIndices = append(invalidIndices, raw)
|
||||
continue
|
||||
}
|
||||
if ix >= uint64(params.BeaconConfig().MaxBlobsPerBlock(s)) {
|
||||
if !(0 <= ix && ix < maxBlobsPerBlock) {
|
||||
invalidIndices = append(invalidIndices, raw)
|
||||
continue
|
||||
}
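// Illustrative sketch (not part of the diff): the behaviour exercised by Test_parseIndices below —
// parse each raw query value, reject malformed or out-of-range indices, and drop duplicates — can be
// restated with a small helper. maxBlobsPerBlock stands in for params.BeaconConfig().MaxBlobsPerBlock(s).
package main

import (
	"fmt"
	"strconv"
)

func parseIndicesSketch(raw []string, maxBlobsPerBlock int) (valid []int, invalid []string) {
	seen := make(map[int]bool)
	for _, r := range raw {
		ix, err := strconv.Atoi(r)
		if err != nil || ix < 0 || ix >= maxBlobsPerBlock {
			invalid = append(invalid, r)
			continue
		}
		if seen[ix] {
			continue // duplicate index, keep only the first occurrence
		}
		seen[ix] = true
		valid = append(valid, ix)
	}
	return valid, invalid
}

func main() {
	got, bad := parseIndicesSketch([]string{"1", "2", "1", "3", "x"}, 6)
	fmt.Println(got, bad) // [1 2 3] [x]
}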
@@ -520,13 +520,13 @@ func Test_parseIndices(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
query string
|
||||
want []uint64
|
||||
want []int
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "happy path with duplicate indices within bound and other query parameters ignored",
|
||||
query: "indices=1&indices=2&indices=1&indices=3&bar=bar",
|
||||
want: []uint64{1, 2, 3},
|
||||
want: []int{1, 2, 3},
|
||||
},
|
||||
{
|
||||
name: "out of bounds indices throws error",
|
||||
|
||||
@@ -10,11 +10,13 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -36,6 +38,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
@@ -47,12 +50,16 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,12 +3,15 @@ package lookup
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -40,7 +43,7 @@ func (e BlockIdParseError) Error() string {
|
||||
// Blocker is responsible for retrieving blocks.
|
||||
type Blocker interface {
|
||||
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
Blobs(ctx context.Context, id string, indices []uint64) ([]*blocks.VerifiedROBlob, *core.RpcError)
|
||||
Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError)
|
||||
}
|
||||
|
||||
// BeaconDbBlocker is an implementation of Blocker. It retrieves blocks from the beacon chain database.
|
||||
@@ -49,6 +52,7 @@ type BeaconDbBlocker struct {
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
}
|
||||
|
||||
// Block returns the beacon block for a given identifier. The identifier can be one of:
|
||||
@@ -129,6 +133,137 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// blobsFromStoredBlobs retrieves the blobs corresponding to `indices` and `root` from the store.
// This function expects blobs to be stored directly (i.e. no data columns).
func (p *BeaconDbBlocker) blobsFromStoredBlobs(indices []int, root []byte, commitments [][]byte) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
sum := p.BlobStorage.Summary(bytesutil.ToBytes32(root))
|
||||
|
||||
if len(indices) == 0 {
|
||||
for index := range commitments {
|
||||
if sum.HasIndex(uint64(index)) {
|
||||
indices = append(indices, index)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, index := range indices {
|
||||
if uint64(index) >= sum.MaxBlobsForEpoch() {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", index, sum.MaxBlobsForEpoch()),
|
||||
Reason: core.BadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
if !sum.HasIndex(uint64(index)) {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d not found", index),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
blobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
vblob, err := p.BlobStorage.Get(bytesutil.ToBytes32(root), uint64(index))
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
blobs = append(blobs, &vblob)
|
||||
}
|
||||
|
||||
return blobs, nil
|
||||
}
|
||||
|
||||
// blobsFromStoredDataColumns retrieves data columns from the store, reconstructs the whole matrix if needed,
// converts the matrix to blobs, and then returns the blobs corresponding to `indices` and `root`.
// This function expects data columns to be stored (i.e. no blobs).
// If not enough data columns are available to extract blobs from (either directly or after reconstruction), an error is returned.
func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int, rootBytes []byte) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
root := bytesutil.ToBytes32(rootBytes)
|
||||
|
||||
// Use all indices if none are provided.
|
||||
if len(indices) == 0 {
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Wrap(err, "could not retrieve blob commitments"),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
|
||||
for index := range commitments {
|
||||
indices = append(indices, index)
|
||||
}
|
||||
}
|
||||
|
||||
// Count how many columns we have in the store.
|
||||
summary := p.DataColumnStorage.Summary(root)
|
||||
stored := summary.Stored()
|
||||
count := uint64(len(stored))
|
||||
|
||||
if count < peerdas.MinimumColumnsCountToReconstruct() {
|
||||
// There is no way to reconstruct the data columns.
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs. Please start the beacon node with the `--%s` flag to ensure this call to success, or retry later if it is already the case.", flags.SubscribeAllDataSubnets.Name),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve from the database needed data columns.
|
||||
verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Wrap(err, "needed data column sidecars"),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
|
||||
verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Wrap(err, "blobs from data columns"),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
}
|
||||
|
||||
return verifiedRoBlobSidecars, nil
|
||||
}
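// Illustrative sketch (not part of the diff): the guard above refuses to serve blobs when fewer data
// columns are stored than the reconstruction threshold. With an erasure-extended matrix, holding at
// least half of the columns is enough to rebuild the rest; the column count and threshold below are
// assumptions standing in for the values behind peerdas.MinimumColumnsCountToReconstruct.
package main

import "fmt"

const (
	numberOfColumns         = 128
	minColumnsToReconstruct = numberOfColumns / 2
)

// canServeBlobs reports whether enough columns are stored to extract or reconstruct every blob.
func canServeBlobs(storedColumns int) bool {
	return storedColumns >= minColumnsToReconstruct
}

func main() {
	fmt.Println(canServeBlobs(63)) // false: reconstruction is impossible, return NotFound
	fmt.Println(canServeBlobs(64)) // true: reconstruct if needed, then convert columns to blobs
}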
func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]byte, stored map[uint64]bool) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if we have all the non-extended data columns.
|
||||
cellsPerBlob := fieldparams.CellsPerBlob
|
||||
blobIndices := make([]uint64, 0, cellsPerBlob)
|
||||
hasAllBlobColumns := true
|
||||
for i := range uint64(cellsPerBlob) {
|
||||
if !stored[i] {
|
||||
hasAllBlobColumns = false
|
||||
break
|
||||
}
|
||||
blobIndices = append(blobIndices, i)
|
||||
}
|
||||
|
||||
if hasAllBlobColumns {
|
||||
// Retrieve only the non-extended data columns.
|
||||
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, blobIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns storage get")
|
||||
}
|
||||
|
||||
return verifiedRoSidecars, nil
|
||||
}
|
||||
|
||||
// Retrieve all the data columns.
|
||||
verifiedRoSidecars, err := p.DataColumnStorage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns storage get")
|
||||
}
|
||||
|
||||
return verifiedRoSidecars, nil
|
||||
}
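// Illustrative sketch (not part of the diff): neededDataColumnSidecars above prefers fetching only the
// non-extended columns (indices 0..cellsPerBlob-1) when every one of them is stored, and falls back to
// fetching all stored columns otherwise. The presence check reduces to:
package main

import "fmt"

// hasAllNonExtended reports whether every column index in [0, cellsPerBlob) is present in stored.
func hasAllNonExtended(stored map[uint64]bool, cellsPerBlob uint64) bool {
	for i := uint64(0); i < cellsPerBlob; i++ {
		if !stored[i] {
			return false
		}
	}
	return true
}

func main() {
	stored := map[uint64]bool{0: true, 1: true, 2: true}
	fmt.Println(hasAllNonExtended(stored, 3)) // true: fetch only the non-extended columns
	fmt.Println(hasAllNonExtended(stored, 4)) // false: fetch everything that is stored instead
}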
// Blobs returns the blobs for a given block id identifier and blob indices. The identifier can be one of:
|
||||
// - "head" (canonical head in node's view)
|
||||
// - "genesis"
|
||||
@@ -144,7 +279,7 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
|
||||
// - block exists, has commitments, inside retention period (greater of protocol- or user-specified): serve them w/ 200 unless we hit an error reading them.
|
||||
// we are technically not supposed to import a block to forkchoice unless we have the blobs, so the nuance here is if we can't find the file and we are inside the protocol-defined retention period, then it's actually a 500.
|
||||
// - block exists, has commitments, outside retention period (greater of protocol- or user-specified) - ie just like block exists, no commitment
|
||||
func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
var rootSlice []byte
|
||||
switch id {
|
||||
case "genesis":
|
||||
@@ -212,64 +347,55 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
|
||||
|
||||
root := bytesutil.ToBytes32(rootSlice)
|
||||
|
||||
b, err := p.BeaconDB.Block(ctx, root)
|
||||
roSignedBlock, err := p.BeaconDB.Block(ctx, root)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice), Reason: core.Internal}
|
||||
}
|
||||
if b == nil {
|
||||
|
||||
if roSignedBlock == nil {
|
||||
return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
|
||||
}
|
||||
|
||||
// if block is not in the retention window, return 200 w/ empty list
|
||||
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
|
||||
// If block is not in the retention window, return 200 w/ empty list
|
||||
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(roSignedBlock.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
|
||||
return make([]*blocks.VerifiedROBlob, 0), nil
|
||||
}
|
||||
|
||||
commitments, err := b.Block().Body().BlobKzgCommitments()
|
||||
roBlock := roSignedBlock.Block()
|
||||
|
||||
commitments, err := roBlock.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
|
||||
}
|
||||
|
||||
// if there are no commitments return 200 w/ empty list
|
||||
if len(commitments) == 0 {
|
||||
return make([]*blocks.VerifiedROBlob, 0), nil
|
||||
}
|
||||
|
||||
sum := p.BlobStorage.Summary(root)
|
||||
// Get the slot of the block.
|
||||
blockSlot := roBlock.Slot()
|
||||
|
||||
if len(indices) == 0 {
|
||||
for i := range commitments {
|
||||
if sum.HasIndex(uint64(i)) {
|
||||
indices = append(indices, uint64(i))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, ix := range indices {
|
||||
if ix >= sum.MaxBlobsForEpoch() {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", ix, sum.MaxBlobsForEpoch()),
|
||||
Reason: core.BadRequest,
|
||||
}
|
||||
}
|
||||
if !sum.HasIndex(ix) {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("requested index %d not found", ix),
|
||||
Reason: core.NotFound,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Get the first peerDAS epoch.
|
||||
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
|
||||
|
||||
blobs := make([]*blocks.VerifiedROBlob, len(indices))
|
||||
for i, index := range indices {
|
||||
vblob, err := p.BlobStorage.Get(root, index)
|
||||
// Compute the first peerDAS slot.
|
||||
fuluForkSlot := primitives.Slot(math.MaxUint64)
|
||||
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
|
||||
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{
|
||||
Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", rootSlice, index),
|
||||
Reason: core.Internal,
|
||||
}
|
||||
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate peerDAS start slot"), Reason: core.Internal}
|
||||
}
|
||||
blobs[i] = &vblob
|
||||
}
|
||||
|
||||
return blobs, nil
|
||||
if blockSlot >= fuluForkSlot {
|
||||
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root)
|
||||
if err != nil {
|
||||
return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to create roBlock with root %#x", root), Reason: core.Internal}
|
||||
}
|
||||
|
||||
return p.blobsFromStoredDataColumns(roBlock, indices, rootSlice)
|
||||
}
|
||||
|
||||
return p.blobsFromStoredBlobs(indices, rootSlice, commitments)
|
||||
}
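// Illustrative sketch (not part of the diff): the dispatch above amounts to "blocks at or after the
// Fulu fork slot have their data stored as columns, earlier blocks store blobs directly". A hedged,
// standalone restatement of that decision, with 32 slots per epoch assumed and math.MaxUint64 used
// for an unscheduled fork:
package main

import (
	"fmt"
	"math"
)

const slotsPerEpoch = 32

// storageKind reports where blob data for a block at blockSlot is expected to live.
func storageKind(blockSlot, fuluForkEpoch uint64) string {
	fuluForkSlot := uint64(math.MaxUint64) // fork not scheduled
	if fuluForkEpoch != math.MaxUint64 {
		fuluForkSlot = fuluForkEpoch * slotsPerEpoch
	}
	if blockSlot >= fuluForkSlot {
		return "data columns (reconstruct blobs on demand)"
	}
	return "blobs (served directly from blob storage)"
}

func main() {
	fmt.Println(storageKind(1_000_000, math.MaxUint64)) // blobs (served directly from blob storage)
	fmt.Println(storageKind(640, 10))                   // data columns (reconstruct blobs on demand)
}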
@@ -1,7 +1,10 @@
|
||||
package lookup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
@@ -9,6 +12,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
@@ -17,12 +21,16 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpbalpha "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestGetBlock(t *testing.T) {
|
||||
@@ -51,7 +59,7 @@ func TestGetBlock(t *testing.T) {
|
||||
b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
|
||||
util.SaveBlock(t, ctx, beaconDB, b4)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
fetcher := &BeaconDbBlocker{
|
||||
@@ -60,7 +68,7 @@ func TestGetBlock(t *testing.T) {
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
},
|
||||
}
|
||||
@@ -71,13 +79,13 @@ func TestGetBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want *ethpbalpha.SignedBeaconBlock
|
||||
want *ethpb.SignedBeaconBlock
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
want: blkContainers[30].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
@@ -87,7 +95,7 @@ func TestGetBlock(t *testing.T) {
|
||||
{
|
||||
name: "canonical",
|
||||
blockID: []byte("30"),
|
||||
want: blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
want: blkContainers[30].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non canonical",
|
||||
@@ -97,12 +105,12 @@ func TestGetBlock(t *testing.T) {
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
want: headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
want: blkContainers[64].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
@@ -117,7 +125,7 @@ func TestGetBlock(t *testing.T) {
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
want: blkContainers[20].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
@@ -127,7 +135,7 @@ func TestGetBlock(t *testing.T) {
|
||||
{
|
||||
name: "hex",
|
||||
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
|
||||
want: blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
want: blkContainers[20].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
|
||||
},
|
||||
{
|
||||
name: "no block",
|
||||
@@ -149,7 +157,7 @@ func TestGetBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
pb, err := result.Proto()
|
||||
require.NoError(t, err)
|
||||
pbBlock, ok := pb.(*ethpbalpha.SignedBeaconBlock)
|
||||
pbBlock, ok := pb.(*ethpb.SignedBeaconBlock)
|
||||
require.Equal(t, true, ok)
|
||||
if !reflect.DeepEqual(pbBlock, tt.want) {
|
||||
t.Error("Expected blocks to equal")
|
||||
@@ -158,6 +166,118 @@ func TestGetBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
for i := 0; i < len(blob); i += 32 {
|
||||
fieldElementBytes := getRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+32], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
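// Illustrative usage (not part of the diff): inside a test, the helpers above compose as follows,
// assuming the KZG trusted setup has already been loaded (e.g. via kzg.Start or the test's own setup):
//
//	blob := getRandBlob(42)
//	commitment, proof, err := generateCommitmentAndProof(&blob)
//	require.NoError(t, err)
//	// commitment[:] and proof[:] can then be dropped into an ethpb.BlobSidecar alongside the blob.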
func generateRandomBlocSignedBeaconBlockkAndVerifiedRoBlobs(t *testing.T, blobCount int) (interfaces.SignedBeaconBlock, []*blocks.VerifiedROBlob) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Generate random blobs and their corresponding commitments and proofs.
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
|
||||
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
// Create a random blob.
|
||||
blob := getRandBlob(int64(blobIndex))
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
// Generate a blobKZGCommitment for the blob.
|
||||
blobKZGCommitment, proof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
|
||||
blobKzgProofs = append(blobKzgProofs, proof)
|
||||
}
|
||||
|
||||
// Set the commitments into the block.
|
||||
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
|
||||
for _, blobKZGCommitment := range blobKzgCommitments {
|
||||
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
|
||||
}
|
||||
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
|
||||
|
||||
// Generate verified RO blobs.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
|
||||
require.NoError(t, err)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
blob := blobs[blobIndex]
|
||||
blobKZGCommitment := blobKzgCommitments[blobIndex]
|
||||
blobKzgProof := blobKzgProofs[blobIndex]
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
blobSidecar := &ethpb.BlobSidecar{
|
||||
Index: uint64(blobIndex),
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: signedBeaconBlockHeader,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
return signedBeaconBlock, verifiedROBlobs
|
||||
}
|
||||
|
||||
func TestGetBlob(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -218,7 +338,7 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
t.Run("finalized", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
@@ -232,7 +352,7 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
t.Run("justified", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
|
||||
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
@@ -270,14 +390,14 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
t.Run("one blob only", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []uint64{2})
|
||||
verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []int{2})
|
||||
assert.Equal(t, rpcErr == nil, true)
|
||||
require.Equal(t, 1, len(verifiedBlobs))
|
||||
sidecar := verifiedBlobs[0].BlobSidecar
|
||||
@@ -289,7 +409,7 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
t.Run("no blobs returns an empty array", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
@@ -302,28 +422,28 @@ func TestGetBlob(t *testing.T) {
|
||||
})
|
||||
t.Run("no blob at index", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
noBlobIndex := uint64(len(blobs)) + 1
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", []uint64{0, noBlobIndex})
|
||||
noBlobIndex := len(blobs) + 1
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
|
||||
require.NotNil(t, rpcErr)
|
||||
assert.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
|
||||
})
|
||||
t.Run("index too big", func(t *testing.T) {
|
||||
blocker := &BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", []uint64{0, math.MaxUint})
|
||||
_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
|
||||
require.NotNil(t, rpcErr)
|
||||
assert.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
|
||||
})
|
||||
|
||||
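With Blobs now taking []int indices, out-of-range values such as math.MaxInt are rejected as a bad request (see the "index too big" case above). Below is a small, purely illustrative sketch of the kind of bounds check this implies; the maximum constant and helper name are hypothetical, not the Prysm API.

```go
package main

import (
	"fmt"
	"math"
)

const maxBlobsPerBlock = 6 // hypothetical value for illustration

// validateBlobIndices rejects negative or too-large indices before converting to uint64.
func validateBlobIndices(indices []int) ([]uint64, error) {
	out := make([]uint64, 0, len(indices))
	for _, idx := range indices {
		if idx < 0 || idx >= maxBlobsPerBlock {
			return nil, fmt.Errorf("blob index %d out of range", idx)
		}
		out = append(out, uint64(idx))
	}
	return out, nil
}

func main() {
	fmt.Println(validateBlobIndices([]int{0, 2}))           // [0 2] <nil>
	fmt.Println(validateBlobIndices([]int{0, math.MaxInt})) // rejected with an error
}
```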
@@ -109,6 +109,8 @@ func (ds *Server) getPeer(pid peer.ID) (*ethpb.DebugPeerResponse, error) {
|
||||
peerInfo.MetadataV0 = metadata.MetadataObjV0()
|
||||
case metadata.MetadataObjV1() != nil:
|
||||
peerInfo.MetadataV1 = metadata.MetadataObjV1()
|
||||
case metadata.MetadataObjV2() != nil:
|
||||
peerInfo.MetadataV2 = metadata.MetadataObjV2()
|
||||
}
|
||||
}
|
||||
addresses := peerStore.Addrs(pid)
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
# gazelle:ignore
|
||||
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
@@ -36,6 +38,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -46,6 +49,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -179,7 +183,6 @@ common_deps = [
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
]
|
||||
|
||||
# gazelle:ignore
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
timeout = "moderate",
|
||||
|
||||
@@ -8,13 +8,18 @@ import (
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// constructGenericBeaconBlock constructs a `GenericBeaconBlock` based on the block version and other parameters.
|
||||
func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock, blobsBundle *enginev1.BlobsBundle, winningBid primitives.Wei) (*ethpb.GenericBeaconBlock, error) {
|
||||
func (vs *Server) constructGenericBeaconBlock(
|
||||
sBlk interfaces.SignedBeaconBlock,
|
||||
blobsBundler enginev1.BlobsBundler,
|
||||
winningBid primitives.Wei,
|
||||
) (*ethpb.GenericBeaconBlock, error) {
|
||||
if sBlk == nil || sBlk.Block() == nil {
|
||||
return nil, fmt.Errorf("block cannot be nil")
|
||||
return nil, errors.New("block cannot be nil")
|
||||
}
|
||||
|
||||
blockProto, err := sBlk.Block().Proto()
|
||||
@@ -34,12 +39,21 @@ func (vs *Server) constructGenericBeaconBlock(sBlk interfaces.SignedBeaconBlock,
|
||||
return vs.constructBellatrixBlock(blockProto, isBlinded, bidStr), nil
|
||||
case version.Capella:
|
||||
return vs.constructCapellaBlock(blockProto, isBlinded, bidStr), nil
|
||||
case version.Deneb:
|
||||
return vs.constructDenebBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
|
||||
case version.Electra:
|
||||
return vs.constructElectraBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
|
||||
case version.Deneb, version.Electra:
|
||||
bundle, ok := blobsBundler.(*enginev1.BlobsBundle)
|
||||
if blobsBundler != nil && !ok {
|
||||
return nil, fmt.Errorf("expected *BlobsBundler, got %T", blobsBundler)
|
||||
}
|
||||
if sBlk.Version() == version.Deneb {
|
||||
return vs.constructDenebBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
}
|
||||
return vs.constructElectraBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
case version.Fulu:
|
||||
return vs.constructFuluBlock(blockProto, isBlinded, bidStr, blobsBundle), nil
|
||||
bundle, ok := blobsBundler.(*enginev1.BlobsBundleV2)
|
||||
if blobsBundler != nil && !ok {
|
||||
return nil, fmt.Errorf("expected *BlobsBundleV2, got %T", blobsBundler)
|
||||
}
|
||||
return vs.constructFuluBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown block version: %d", sBlk.Version())
|
||||
}
|
||||
@@ -92,7 +106,7 @@ func (vs *Server) constructElectraBlock(blockProto proto.Message, isBlinded bool
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_Electra{Electra: electraContents}, IsBlinded: false, PayloadValue: payloadValue}
|
||||
}
|
||||
|
||||
func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundle) *ethpb.GenericBeaconBlock {
|
||||
func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundleV2) *ethpb.GenericBeaconBlock {
|
||||
if isBlinded {
|
||||
return &ethpb.GenericBeaconBlock{Block: &ethpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blockProto.(*ethpb.BlindedBeaconBlockFulu)}, IsBlinded: true, PayloadValue: payloadValue}
|
||||
}
|
||||
|
||||
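The Deneb/Electra and Fulu cases above rely on a nil-tolerant type assertion: a nil BlobsBundler is allowed through, while a non-nil bundler of the wrong concrete type is rejected. The following self-contained sketch illustrates only that pattern; Bundler, BundleV1, and BundleV2 are hypothetical stand-ins, not the real enginev1 types.

```go
package main

import "fmt"

type Bundler interface{ GetKzgCommitments() [][]byte }

type BundleV1 struct{ commitments [][]byte }

func (b *BundleV1) GetKzgCommitments() [][]byte { return b.commitments }

type BundleV2 struct{ commitments [][]byte }

func (b *BundleV2) GetKzgCommitments() [][]byte { return b.commitments }

// pickV2 accepts the interface but insists on the V2 concrete type, while still
// allowing a nil interface (no blobs at all) to pass through as a nil bundle.
func pickV2(b Bundler) (*BundleV2, error) {
	v2, ok := b.(*BundleV2)
	if b != nil && !ok {
		return nil, fmt.Errorf("expected *BundleV2, got %T", b)
	}
	return v2, nil
}

func main() {
	if _, err := pickV2(nil); err != nil {
		fmt.Println("unexpected:", err)
	}
	if _, err := pickV2(&BundleV1{}); err != nil {
		fmt.Println("rejected as expected:", err)
	}
	v2, _ := pickV2(&BundleV2{commitments: [][]byte{{0x01}}})
	fmt.Println("accepted, commitments:", len(v2.GetKzgCommitments()))
}
```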
@@ -29,12 +29,19 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r1, err := eb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
|
||||
bundle := &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{{1, 2, 3}},
|
||||
Proofs: [][]byte{{4, 5, 6}},
|
||||
Blobs: [][]byte{{7, 8, 9}},
|
||||
}
|
||||
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
|
||||
require.NoError(t, err)
|
||||
r2, err := result.GetFulu().Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r1, r2)
|
||||
require.Equal(t, result.IsBlinded, false)
|
||||
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
|
||||
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
|
||||
})
|
||||
|
||||
// Test for Electra version
|
||||
|
||||
@@ -15,9 +15,13 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -58,28 +62,31 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert slot to time")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
}).Info("Begin building block")
|
||||
|
||||
log := log.WithField("slot", req.Slot)
|
||||
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
|
||||
|
||||
// A syncing validator should not produce a block.
|
||||
if vs.SyncChecker.Syncing() {
|
||||
log.Error("Fail to build block: node is syncing")
|
||||
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
log.WithError(err).Error("Fail to build block: node is optimistic")
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get parent state")
|
||||
return nil, err
|
||||
}
|
||||
sBlk, err := getEmptyBlock(req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get empty block")
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
|
||||
}
|
||||
// Set slot, graffiti, randao reveal, and parent root.
|
||||
@@ -91,6 +98,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
// Set proposer index.
|
||||
idx, err := helpers.BeaconProposerIndex(ctx, head)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not calculate proposer index")
|
||||
return nil, fmt.Errorf("could not calculate proposer index %w", err)
|
||||
}
|
||||
sBlk.SetProposerIndex(idx)
|
||||
@@ -101,7 +109,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
|
||||
log := log.WithFields(logrus.Fields{
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
@@ -232,7 +240,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
}()
|
||||
|
||||
winningBid := primitives.ZeroWei()
|
||||
var bundle *enginev1.BlobsBundle
|
||||
var bundle enginev1.BlobsBundler
|
||||
if sBlk.Version() >= version.Bellatrix {
|
||||
local, err := vs.getLocalPayload(ctx, sBlk.Block(), head)
|
||||
if err != nil {
|
||||
@@ -274,7 +282,13 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
// TODO: Add tests
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -286,12 +300,12 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
|
||||
}
|
||||
isPeerDASEnabled := coreTime.PeerDASIsActive(block.Block().Slot())
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block, isPeerDASEnabled)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
|
||||
blobSidecars, dataColumnSideCars, err = vs.handleUnblindedBlock(block, req, isPeerDASEnabled)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
@@ -302,9 +316,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
slot := block.Block().Slot()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -315,8 +330,14 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
if isPeerDASEnabled {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, slot); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
@@ -328,46 +349,75 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock, isPeerDASEnabled bool) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
}
|
||||
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return nil, nil, errors.New("unconfigured block builder")
|
||||
return nil, nil, nil, errors.New("unconfigured block builder")
|
||||
}
|
||||
|
||||
copiedBlock, err := block.Copy()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, errors.Wrap(err, "block copy")
|
||||
}
|
||||
|
||||
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "submit blinded block failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "submit blinded block")
|
||||
}
|
||||
|
||||
if err := copiedBlock.Unblind(payload); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind")
|
||||
}
|
||||
|
||||
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if isPeerDASEnabled {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, bundle.GetBlobs(), bundle.GetProofs())
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return copiedBlock, nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind blobs sidecars: commitment value doesn't match block")
|
||||
}
|
||||
|
||||
return copiedBlock, sidecars, nil
|
||||
return copiedBlock, blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
isPeerDASEnabled bool,
|
||||
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
|
||||
if isPeerDASEnabled {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "protobuf conversion failed")
|
||||
@@ -383,7 +433,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
|
||||
}
|
||||
|
||||
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
for i, sc := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
@@ -412,6 +462,69 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
root [fieldparams.RootLength]byte,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, sd := range sidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
sidecar := sd
|
||||
eg.Go(func() error {
|
||||
if sidecar.Index < dataColumnsWithholdCount {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"index": sidecar.Index,
|
||||
}).Warning("Withholding data column")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
if err := vs.P2P.BroadcastDataColumn(root, subnet, sidecar); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return errors.Wrap(err, "wait for data columns to be broadcasted")
|
||||
}
|
||||
|
||||
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
|
||||
return errors.Wrap(err, "receive data column")
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.DataColumnSidecarReceived,
|
||||
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
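broadcastAndReceiveDataColumns above fans the sidecars out over an errgroup, optionally withholding the first N columns for testing before broadcasting the rest and waiting for the first error. The sketch below shows only that concurrency shape, under the assumption of placeholder types and a placeholder broadcast function; it is not the Prysm API.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type sidecar struct{ Index uint64 }

func broadcast(_ context.Context, sc sidecar) error {
	fmt.Println("broadcasting column", sc.Index)
	return nil
}

func broadcastAll(ctx context.Context, sidecars []sidecar, withholdCount uint64) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, sc := range sidecars {
		sc := sc // per-goroutine copy, mirroring the pre-Go 1.22 loop pattern used above
		eg.Go(func() error {
			// Withholding is a testing feature: silently skip the first N columns.
			if sc.Index < withholdCount {
				return nil
			}
			return broadcast(ctx, sc)
		})
	}
	// Wait returns the first non-nil error from any goroutine.
	return eg.Wait()
}

func main() {
	cols := []sidecar{{0}, {1}, {2}, {3}}
	if err := broadcastAll(context.Background(), cols, 1); err != nil {
		fmt.Println("broadcast failed:", err)
	}
}
```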
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
|
||||
@@ -54,7 +54,7 @@ const blockBuilderTimeout = 1 * time.Second
|
||||
const gasLimitAdjustmentFactor = 1024
|
||||
|
||||
// Sets the execution data for the block. Execution data can come from local EL client or remote builder depends on validator registration and circuit breaker conditions.
|
||||
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse, bid builder.Bid, builderBoostFactor primitives.Gwei) (primitives.Wei, *enginev1.BlobsBundle, error) {
|
||||
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse, bid builder.Bid, builderBoostFactor primitives.Gwei) (primitives.Wei, enginev1.BlobsBundler, error) {
|
||||
_, span := trace.StartSpan(ctx, "ProposerServer.setExecutionData")
|
||||
defer span.End()
|
||||
|
||||
@@ -69,13 +69,13 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
|
||||
// Use local payload if builder payload is nil.
|
||||
if bid == nil {
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
builderPayload, err := bid.Header()
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to retrieve header from BuilderBid")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -84,7 +84,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Warn("Proposer: failed to match withdrawals root")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
// Compare payload values between local and builder. Default to the local value if it is higher.
|
||||
@@ -97,7 +97,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
"minBuilderBid": minBid,
|
||||
"builderGweiValue": builderValueGwei,
|
||||
}).Warn("Proposer: using local execution payload because min bid not attained")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
// Use local block if min difference is not attained
|
||||
@@ -108,7 +108,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
"minBidDiff": minDiff,
|
||||
"builderGweiValue": builderValueGwei,
|
||||
}).Warn("Proposer: using local execution payload because min difference with local value was not attained")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
}
|
||||
|
||||
// Use builder payload if the following is true:
|
||||
@@ -133,7 +133,7 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
bidDeneb, ok := bid.(builder.BidDeneb)
|
||||
if !ok {
|
||||
log.Warnf("bid type %T does not implement builder.BidDeneb", bid)
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
builderKzgCommitments = bidDeneb.BlobKzgCommitments()
|
||||
}
|
||||
@@ -144,14 +144,14 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
bidElectra, ok := bid.(builder.BidElectra)
|
||||
if !ok {
|
||||
log.Warnf("bid type %T does not implement builder.BidElectra", bid)
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
executionRequests = bidElectra.ExecutionRequests()
|
||||
}
|
||||
}
|
||||
if err := setBuilderExecution(blk, builderPayload, builderKzgCommitments, executionRequests); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
return bid.Value(), nil, nil
|
||||
}
|
||||
@@ -171,11 +171,11 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
|
||||
trace.Int64Attribute("builderBoostFactor", int64(builderBoostFactor)), // lint:ignore uintcast -- This is OK for tracing.
|
||||
)
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
default: // Bellatrix case.
|
||||
if err := setBuilderExecution(blk, builderPayload, nil, nil); err != nil {
|
||||
log.WithError(err).Warn("Proposer: failed to set builder payload")
|
||||
return local.Bid, local.BlobsBundle, setLocalExecution(blk, local)
|
||||
return local.Bid, local.BlobsBundler, setLocalExecution(blk, local)
|
||||
} else {
|
||||
return bid.Value(), nil, nil
|
||||
}
|
||||
@@ -375,8 +375,8 @@ func matchingWithdrawalsRoot(local, builder interfaces.ExecutionData) (bool, err
|
||||
// It delegates to setExecution for the actual work.
|
||||
func setLocalExecution(blk interfaces.SignedBeaconBlock, local *blocks.GetPayloadResponse) error {
|
||||
var kzgCommitments [][]byte
|
||||
if local.BlobsBundle != nil {
|
||||
kzgCommitments = local.BlobsBundle.KzgCommitments
|
||||
if local.BlobsBundler != nil {
|
||||
kzgCommitments = local.BlobsBundler.GetKzgCommitments()
|
||||
}
|
||||
if local.ExecutionRequests != nil {
|
||||
if err := blk.SetExecutionRequests(local.ExecutionRequests); err != nil {
|
||||
|
||||
@@ -520,7 +520,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
PayloadIDBytes: id,
|
||||
GetPayloadResponse: &blocks.GetPayloadResponse{
|
||||
ExecutionData: ed,
|
||||
BlobsBundle: blobsBundle,
|
||||
BlobsBundler: blobsBundle,
|
||||
Bid: primitives.ZeroWei(),
|
||||
},
|
||||
}
|
||||
@@ -528,7 +528,7 @@ func TestServer_setExecutionData(t *testing.T) {
|
||||
res, err := vs.getLocalPayload(ctx, blk.Block(), capellaTransitionState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(4), res.ExecutionData.BlockNumber())
|
||||
require.DeepEqual(t, res.BlobsBundle, blobsBundle)
|
||||
require.DeepEqual(t, res.BlobsBundler, blobsBundle)
|
||||
})
|
||||
t.Run("Can get builder payload and blobs in Deneb", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
|
||||
@@ -529,7 +529,7 @@ func TestServer_GetBeaconBlock_Deneb(t *testing.T) {
|
||||
PayloadIDBytes: &enginev1.PayloadIDBytes{1},
|
||||
GetPayloadResponse: &blocks.GetPayloadResponse{
|
||||
ExecutionData: ed,
|
||||
BlobsBundle: bundle,
|
||||
BlobsBundler: bundle,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -89,6 +89,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -120,6 +121,7 @@ type Config struct {
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
@@ -196,6 +198,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
DataColumnStorage: s.cfg.DataColumnStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
@@ -236,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -36,6 +36,6 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
|
||||
}
|
||||
|
||||
// Blobs --
|
||||
func (m *MockBlocker) Blobs(_ context.Context, _ string, _ []uint64) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
panic("implement me") // lint:nopanic -- Test code.
|
||||
func (*MockBlocker) Blobs(_ context.Context, _ string, _ []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
return nil, &core.RpcError{}
|
||||
}
|
||||
|
||||
@@ -259,7 +259,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
defer span.End()
|
||||
|
||||
if s.isFinalizedRoot(blockRoot) {
|
||||
finalizedState := s.finalizedState()
|
||||
finalizedState := s.FinalizedState()
|
||||
if finalizedState != nil {
|
||||
return finalizedState, nil
|
||||
}
|
||||
@@ -297,7 +297,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
|
||||
// Does the state exist in finalized info cache.
|
||||
if s.isFinalizedRoot(parentRoot) {
|
||||
return s.finalizedState(), nil
|
||||
return s.FinalizedState(), nil
|
||||
}
|
||||
|
||||
// Does the state exist in epoch boundary cache.
|
||||
|
||||
@@ -196,7 +196,7 @@ func (s *State) isFinalizedRoot(r [32]byte) bool {
|
||||
}
|
||||
|
||||
// Returns the cached and copied finalized state.
|
||||
func (s *State) finalizedState() state.BeaconState {
|
||||
func (s *State) FinalizedState() state.BeaconState {
|
||||
s.finalizedInfo.lock.RLock()
|
||||
defer s.finalizedInfo.lock.RUnlock()
|
||||
return s.finalizedInfo.state.Copy()
|
||||
|
||||
@@ -33,5 +33,5 @@ func TestResume(t *testing.T) {
|
||||
require.DeepSSZEqual(t, beaconState.ToProtoUnsafe(), resumeState.ToProtoUnsafe())
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch, service.finalizedInfo.slot, "Did not get wanted slot")
|
||||
assert.Equal(t, service.finalizedInfo.root, root, "Did not get wanted root")
|
||||
assert.NotNil(t, service.finalizedState(), "Wanted a non nil finalized state")
|
||||
assert.NotNil(t, service.FinalizedState(), "Wanted a non nil finalized state")
|
||||
}
|
||||
|
||||
@@ -7,6 +7,10 @@ go_library(
|
||||
"block_batcher.go",
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"data_column_assignment.go",
|
||||
"data_columns.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"data_columns_sampling.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
@@ -25,6 +29,8 @@ go_library(
|
||||
"rpc_blob_sidecars_by_range.go",
|
||||
"rpc_blob_sidecars_by_root.go",
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_light_client.go",
|
||||
"rpc_metadata.go",
|
||||
@@ -38,6 +44,7 @@ go_library(
|
||||
"subscriber_beacon_blocks.go",
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_light_client.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
@@ -55,6 +62,7 @@ go_library(
|
||||
"validate_sync_committee_message.go",
|
||||
"validate_sync_contribution_proof.go",
|
||||
"validate_voluntary_exit.go",
|
||||
"validators_custody.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync",
|
||||
visibility = [
|
||||
@@ -76,7 +84,9 @@ go_library(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -130,6 +140,7 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -153,13 +164,15 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"batch_verifier_test.go",
|
||||
"blobs_test.go",
|
||||
"block_batcher_test.go",
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"data_columns_sampling_test.go",
|
||||
"data_columns_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
@@ -197,19 +210,23 @@ go_test(
|
||||
"validate_sync_committee_message_test.go",
|
||||
"validate_sync_contribution_proof_test.go",
|
||||
"validate_voluntary_exit_test.go",
|
||||
"validators_custody_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 4,
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -247,6 +264,7 @@ go_test(
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/ecdsa:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/equality:go_default_library",
|
||||
@@ -261,13 +279,17 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
|
||||
@@ -6,6 +6,8 @@ go_library(
|
||||
"batch.go",
|
||||
"batcher.go",
|
||||
"blobs.go",
|
||||
"columns.go",
|
||||
"fulu_transition.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"pool.go",
|
||||
@@ -18,6 +20,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -41,6 +44,7 @@ go_library(
|
||||
"//runtime:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
@@ -67,6 +71,7 @@ go_test(
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -38,8 +37,10 @@ func (s batchState) String() string {
|
||||
return "import_complete"
|
||||
case batchEndSequence:
|
||||
return "end_sequence"
|
||||
case batchBlobSync:
|
||||
return "blob_sync"
|
||||
case batchSyncBlobs:
|
||||
return "sync_blobs"
|
||||
case batchSyncColumns:
|
||||
return "sync_columns"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
@@ -50,7 +51,9 @@ const (
|
||||
batchInit
|
||||
batchSequenced
|
||||
batchErrRetryable
|
||||
batchBlobSync
|
||||
batchErrFatal
|
||||
batchSyncBlobs
|
||||
batchSyncColumns
|
||||
batchImportable
|
||||
batchImportComplete
|
||||
batchEndSequence
|
||||
@@ -72,9 +75,12 @@ type batch struct {
|
||||
err error
|
||||
state batchState
|
||||
busy peer.ID
|
||||
nextReqCols []uint64
|
||||
blockPid peer.ID
|
||||
blobPid peer.ID
|
||||
columnPid peer.ID
|
||||
bs *blobSync
|
||||
cs *columnSync
|
||||
}
|
||||
|
||||
func (b batch) logFields() logrus.Fields {
|
||||
@@ -93,6 +99,9 @@ func (b batch) logFields() logrus.Fields {
|
||||
if b.retries > 0 {
|
||||
f["retryAfter"] = b.retryAfter.String()
|
||||
}
|
||||
if b.state == batchSyncColumns {
|
||||
f["nextColumns"] = fmt.Sprintf("%v", b.nextReqCols)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
@@ -136,22 +145,29 @@ func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
|
||||
}
|
||||
}
|
||||
|
||||
func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
|
||||
func (b batch) postBlockSync(results verifiedROBlocks, bs *blobSync, cs *columnSync) batch {
|
||||
b.results = results
|
||||
b.bs = bs
|
||||
b.cs = cs
|
||||
if bs.blobsNeeded() > 0 {
|
||||
return b.withState(batchBlobSync)
|
||||
return b.withState(batchSyncBlobs)
|
||||
}
|
||||
if len(cs.columnsNeeded()) > 0 {
|
||||
return b.withState(batchSyncColumns)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
|
||||
func (b batch) postBlobSync() batch {
|
||||
func (b batch) postSidecarSync() batch {
|
||||
if b.blobsNeeded() > 0 {
|
||||
log.WithFields(b.logFields()).WithField("blobsMissing", b.blobsNeeded()).Error("Batch still missing blobs after downloading from peer")
|
||||
b.bs = nil
|
||||
b.results = []blocks.ROBlock{}
|
||||
return b.withState(batchErrRetryable)
|
||||
}
|
||||
if len(b.cs.columnsNeeded()) > 0 {
|
||||
return b.withState(batchSyncColumns)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
|
||||
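postBlockSync and postSidecarSync above encode a fixed ordering: after the blocks themselves are downloaded, a batch first fills in any missing blobs, then any missing columns, and only then becomes importable. A compact, purely illustrative sketch of that ordering, using hypothetical state names rather than the unexported batch states:

```go
package main

import "fmt"

type state string

// nextState mirrors the sidecar-sync ordering: blobs first, then columns, then import.
func nextState(blobsNeeded, columnsNeeded int) state {
	if blobsNeeded > 0 {
		return "sync_blobs"
	}
	if columnsNeeded > 0 {
		return "sync_columns"
	}
	return "importable"
}

func main() {
	fmt.Println(nextState(3, 2)) // sync_blobs
	fmt.Println(nextState(0, 2)) // sync_columns
	fmt.Println(nextState(0, 0)) // importable
}
```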
@@ -187,6 +203,11 @@ func (b batch) withRetryableError(err error) batch {
|
||||
return b.withState(batchErrRetryable)
|
||||
}
|
||||
|
||||
func (b batch) withFatalError(err error) batch {
|
||||
b.err = errors.Wrap(err, "fatal error in batch")
|
||||
return b.withState(batchErrFatal)
|
||||
}
|
||||
|
||||
func (b batch) blobsNeeded() int {
|
||||
return b.bs.blobsNeeded()
|
||||
}
|
||||
@@ -195,8 +216,8 @@ func (b batch) blobResponseValidator() sync.BlobResponseValidation {
|
||||
return b.bs.validateNext
|
||||
}
|
||||
|
||||
func (b batch) availabilityStore() das.AvailabilityStore {
|
||||
return b.bs.store
|
||||
func (b batch) validatingColumnRequest() *validatingColumnRequest {
|
||||
return b.cs.newValidatingColumnRequest(b.nextReqCols)
|
||||
}
|
||||
|
||||
var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
|
||||
@@ -223,6 +244,21 @@ func (b batch) waitUntilReady(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b batch) workComplete() bool {
|
||||
if b.state == batchImportable {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b batch) selectPeer(matrix *sync.ColumnPeerRank, busy map[peer.ID]bool) (peer.ID, []uint64, error) {
|
||||
if b.state == batchSyncColumns {
|
||||
return matrix.HighestForIndices(b.cs.columnsNeeded(), busy)
|
||||
}
|
||||
peer, err := matrix.Lowest(busy)
|
||||
return peer, nil, err
|
||||
}
|
||||
|
||||
func sortBatchDesc(bb []batch) {
|
||||
sort.Slice(bb, func(i, j int) bool {
|
||||
return bb[i].end > bb[j].end
|
||||
|
||||
@@ -90,7 +90,10 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
|
||||
if err := v.SidecarKzgProofVerified(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bs.store.Persist(bs.current, rb); err != nil {
|
||||
|
||||
sc := blocks.NewSidecarFromBlobSidecar(rb)
|
||||
|
||||
if err := bs.store.Persist(bs.current, sc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
beacon-chain/sync/backfill/columns.go (new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidResponseOrder = errors.New("out of order DataColumnSidecar response")
|
||||
errColumnResponseSlotOutOfRange = errors.New("slot out of range for DataColumnSidecar response")
|
||||
errColumnIndexNotRequested = errors.New("index in DataColumnSidecar response not requested")
|
||||
)
|
||||
|
||||
type columnBatch struct {
|
||||
first primitives.Slot
|
||||
last primitives.Slot
|
||||
custodyRequirement peerdas.ColumnIndices
|
||||
blockColumnsByRoot map[[32]byte]*blockColumns
|
||||
peerRank *sync.ColumnPeerRank
|
||||
}
|
||||
|
||||
type blockColumns struct {
|
||||
remaining peerdas.ColumnIndices
|
||||
commitments [][]byte
|
||||
}
|
||||
|
||||
func (cs *columnBatch) needed() peerdas.ColumnIndices {
|
||||
if len(cs.custodyRequirement) == 0 {
|
||||
return nil
|
||||
}
|
||||
search := peerdas.CopyTrueIndices(cs.custodyRequirement)
|
||||
ci := make(peerdas.ColumnIndices, len(search))
|
||||
// avoid iterating every single block+index by only searching for indices
|
||||
// we haven't found yet.
|
||||
for _, v := range cs.blockColumnsByRoot {
|
||||
for col := range search {
|
||||
if v.remaining[col] {
|
||||
ci[col] = true
|
||||
// We found the column, so we can delete it from the search.
|
||||
delete(search, col)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ci
|
||||
}
|
||||
|
||||
type columnSync struct {
|
||||
*columnBatch
|
||||
store das.AvailabilityStore
|
||||
current primitives.Slot
|
||||
}
|
||||
|
||||
func newColumnSync(b batch, blks verifiedROBlocks, current primitives.Slot, p p2p.P2P, vbs verifiedROBlocks, cfg *workerCfg) (*columnSync, error) {
|
||||
cb, err := buildColumnBatch(b, blks, p.NodeID(), cfg.custodyInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cb == nil {
|
||||
return &columnSync{}, nil
|
||||
}
|
||||
return &columnSync{
|
||||
columnBatch: cb,
|
||||
current: current,
|
||||
store: das.NewLazilyPersistentStoreColumn(cfg.cfs, p.NodeID(), cfg.ndcv, cfg.custodyInfo),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cs *columnSync) blockColumns(root [32]byte) *blockColumns {
|
||||
if cs.columnBatch == nil {
|
||||
return nil
|
||||
}
|
||||
return cs.columnBatch.blockColumnsByRoot[root]
|
||||
}
|
||||
|
||||
func (cs *columnSync) columnsNeeded() peerdas.ColumnIndices {
|
||||
if cs.columnBatch == nil {
|
||||
return nil
|
||||
}
|
||||
return cs.columnBatch.needed()
|
||||
}
|
||||
|
||||
func (cs *columnSync) request(reqCols []uint64) *ethpb.DataColumnSidecarsByRangeRequest {
|
||||
return sync.DataColumnSidecarsByRangeRequest(reqCols, cs.first, cs.last)
|
||||
}
|
||||
|
||||
func (cs *columnSync) newValidatingColumnRequest(cols []uint64) *validatingColumnRequest {
|
||||
req := cs.request(cols)
|
||||
if req == nil {
|
||||
return nil
|
||||
}
|
||||
return &validatingColumnRequest{
|
||||
req: req,
|
||||
columns: peerdas.ColumnIndicesFromSlice(cols),
|
||||
cs: cs,
|
||||
}
|
||||
}
|
||||
|
||||
type validatingColumnRequest struct {
|
||||
last primitives.Slot
|
||||
req *ethpb.DataColumnSidecarsByRangeRequest
|
||||
columns map[uint64]bool
|
||||
cs *columnSync
|
||||
}
|
||||
|
||||
func (v *validatingColumnRequest) validate(cd blocks.RODataColumn) bool {
|
||||
return recordColumnSidecarDownload(cd, v.countedValidation(cd))
|
||||
}
|
||||
|
||||
func recordColumnSidecarDownload(cd blocks.RODataColumn, valid bool) bool {
|
||||
validity := "invalid"
|
||||
if valid {
|
||||
validity = "valid"
|
||||
}
|
||||
backfillDataColumnSidecarDownloaded.WithLabelValues(fmt.Sprintf("%d", cd.Index), validity).Inc()
|
||||
backfillBytesDataColumnSidecar.Add(float64(cd.SizeSSZ()))
|
||||
return valid
|
||||
}
|
||||
|
||||
// When we call Persist we'll get the verification checks that are provided by the availability store.
|
||||
// In addition to those checks this function calls rpcValidity which maintains a state machine across
|
||||
// response values to ensure that the response is valid in the context of the overall request,
|
||||
// like making sure that the block roots is one of the ones we expect based on the blocks we used to
|
||||
// construct the request. It also does cheap sanity checks on the DataColumnSidecar values like
|
||||
// ensuring that the commitments line up with the block.
|
||||
func (v *validatingColumnRequest) countedValidation(cd blocks.RODataColumn) bool {
|
||||
if err := v.rpcValidity(cd); err != nil {
|
||||
log.WithError(err).WithField("slot", cd.Slot()).WithField("index", cd.Index).Error("invalid data column sidecar response")
|
||||
return false
|
||||
}
|
||||
root := cd.BlockRoot()
|
||||
expected := v.cs.blockColumns(root)
|
||||
if expected == nil {
|
||||
return false
|
||||
}
|
||||
// We don't need this column, but we trust the column state machine verified we asked for it as part of a range request.
|
||||
// So we can just skip over it and not try to persist it.
|
||||
if !expected.remaining[cd.Index] {
|
||||
return true
|
||||
}
|
||||
if len(cd.KzgCommitments) != len(expected.commitments) {
|
||||
log.WithField("slot", cd.Slot()).WithField("index", cd.Index).Error("unexpected number of commitments in data column sidecar")
|
||||
return false
|
||||
}
|
||||
for i, cmt := range cd.KzgCommitments {
|
||||
if !bytes.Equal(cmt, expected.commitments[i]) {
|
||||
log.WithField("slot", cd.Slot()).WithField("index", cd.Index).WithField("cmtIndex", i).Error("commitment in data column sidecar does not match expected commitment")
|
||||
return false
|
||||
}
|
||||
}
|
||||
if err := v.cs.store.Persist(v.cs.current, blocks.NewSidecarFromDataColumnSidecar(cd)); err != nil {
|
||||
log.WithError(err).Error("failed to persist data column")
|
||||
return false
|
||||
}
|
||||
delete(expected.remaining, cd.Index)
|
||||
return true
|
||||
}
|
||||
|
||||
// rpcValidity checks that the individual DataColumnSidecar value is valid in the context of the overall response
|
||||
// respecting the p2p spec rules for DataColumnSidecarByRange responses:
|
||||
// - values are in the requested slot range
|
||||
// - values are in slot order
|
||||
// - block roots are canonical wrt the blocks we believe are canonical
|
||||
// (assuming previous block response from another peer was honest)
|
||||
// - there are not too many values in the response
|
||||
// - the column index is one of the requested columns
|
||||
func (v *validatingColumnRequest) rpcValidity(col blocks.RODataColumn) error {
|
||||
slot := col.Slot()
|
||||
if v.last > slot {
|
||||
return errInvalidResponseOrder
|
||||
}
|
||||
if slot < v.req.StartSlot {
|
||||
return errors.Wrap(errColumnResponseSlotOutOfRange, "sidecar slot before request start")
|
||||
}
|
||||
if slot >= v.req.StartSlot+primitives.Slot(v.req.Count) {
|
||||
return errors.Wrap(errColumnResponseSlotOutOfRange, "sidecar slot after request end")
|
||||
}
|
||||
// This is an important check because we may have already satisfied this column for a given
|
||||
// block root but still requested it for the benefit of other blocks in the batch. So this check ensures
|
||||
// that it was part of the overall batch request.
|
||||
if !v.columns[col.Index] {
|
||||
return errColumnIndexNotRequested
|
||||
}
|
||||
v.last = col.Slot()
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildColumnBatch(b batch, fuluBlocks verifiedROBlocks, nodeID enode.ID, ci *peerdas.CustodyInfo) (*columnBatch, error) {
|
||||
if len(fuluBlocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
fuluStart := params.BeaconConfig().FuluForkEpoch
|
||||
// If the batch end slot or the last result block is pre-Fulu, so are all the other blocks.
|
||||
if slots.ToEpoch(b.end) < fuluStart || slots.ToEpoch(fuluBlocks[len(fuluBlocks)-1].Block().Slot()) < fuluStart {
|
||||
return nil, nil
|
||||
}
|
||||
// The last block in the batch is in fulu, but the first one is not.
|
||||
// Find the index of the first fulu block to exclude the pre-fulu blocks.
|
||||
if slots.ToEpoch(fuluBlocks[0].Block().Slot()) < fuluStart {
|
||||
fuluStart := sort.Search(len(fuluBlocks), func(i int) bool {
|
||||
return slots.ToEpoch(fuluBlocks[i].Block().Slot()) >= fuluStart
|
||||
})
|
||||
fuluBlocks = fuluBlocks[fuluStart:]
|
||||
}
|
||||
// Get the custody group sampling size for the node.
|
||||
custodyGroupSamplingSize := ci.CustodyGroupSamplingSize(peerdas.Actual)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupSamplingSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
indices := peerdas.CopyTrueIndices(peerInfo.CustodyColumns)
|
||||
|
||||
summary := &columnBatch{
|
||||
custodyRequirement: indices,
|
||||
blockColumnsByRoot: make(map[[32]byte]*blockColumns, len(fuluBlocks)),
|
||||
}
|
||||
for _, b := range fuluBlocks {
|
||||
cmts, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get blob kzg commitments")
|
||||
}
|
||||
if len(cmts) == 0 {
|
||||
continue
|
||||
}
|
||||
slot := b.Block().Slot()
|
||||
if len(summary.blockColumnsByRoot) == 0 {
|
||||
summary.first = slot
|
||||
}
|
||||
summary.blockColumnsByRoot[b.Root()] = &blockColumns{
|
||||
remaining: peerdas.CopyTrueIndices(indices),
|
||||
commitments: cmts,
|
||||
}
|
||||
summary.last = slot
|
||||
}
|
||||
|
||||
return summary, nil
|
||||
}
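// Illustrative sketch, not part of this change: sort.Search, as used above, returns the
// first index in the slot-sorted slice whose epoch reaches the fork epoch, so everything
// before that index can be sliced off. The values below are made up and assume 32 slots
// per epoch.
//
//	blockSlots := []uint64{60, 70, 95, 96, 100, 130} // sorted block slots
//	forkEpoch := uint64(3)
//	first := sort.Search(len(blockSlots), func(i int) bool {
//		return blockSlots[i]/32 >= forkEpoch
//	})
//	// first == 3; blockSlots[first:] == [96 100 130], the blocks at or after the fork.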
|
||||
54
beacon-chain/sync/backfill/fulu_transition.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var errAvailabilityCheckerInvalid = errors.New("invalid availability checker state")
|
||||
|
||||
type multiStore struct {
|
||||
fuluStart primitives.Slot
|
||||
columnStore das.AvailabilityStore
|
||||
blobStore das.AvailabilityStore
|
||||
}
|
||||
|
||||
// multiStore implements das.AvailabilityChecker.
|
||||
var _ das.AvailabilityChecker = &multiStore{}
|
||||
|
||||
// IsDataAvailable implements the das.AvailabilityChecker interface.
|
||||
func (m *multiStore) IsDataAvailable(ctx context.Context, current primitives.Slot, blk blocks.ROBlock) error {
|
||||
if blk.Block().Slot() < m.fuluStart {
|
||||
return m.checkAvailabilityWithFallback(ctx, m.blobStore, current, blk)
|
||||
}
|
||||
return m.checkAvailabilityWithFallback(ctx, m.columnStore, current, blk)
|
||||
}
|
||||
|
||||
func (m *multiStore) checkAvailabilityWithFallback(ctx context.Context, ac das.AvailabilityChecker, current primitives.Slot, blk blocks.ROBlock) error {
|
||||
if ac != nil {
|
||||
return ac.IsDataAvailable(ctx, current, blk)
|
||||
}
|
||||
cmts, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(cmts) > 0 {
|
||||
return errAvailabilityCheckerInvalid
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newMultiStore(fuluStart primitives.Slot, b batch) *multiStore {
|
||||
s := &multiStore{fuluStart: fuluStart}
|
||||
if b.bs != nil && b.bs.store != nil {
|
||||
s.blobStore = b.bs.store
|
||||
}
|
||||
if b.cs != nil && b.cs.store != nil {
|
||||
s.columnStore = b.cs.store
|
||||
}
|
||||
return s
|
||||
}
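// Worked example, not part of this change: fuluStart is the first slot of the Fulu fork
// epoch, so with mainnet's 32 slots per epoch and a hypothetical fork epoch of 5, blocks
// in slots 0-159 are checked against the blob store and blocks in slots 160 and later
// against the data column store. A nil checker is tolerated only for blocks that carry no
// KZG commitments.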
|
||||
@@ -92,6 +92,33 @@ var (
|
||||
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
|
||||
},
|
||||
)
|
||||
backfillBatchTimeDownloadingColumns = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "backfill_batch_columns_time_download",
|
||||
Help: "Time, in milliseconds, batch spent downloading DataColumnSidecars from peer.",
|
||||
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
|
||||
},
|
||||
)
|
||||
backfillBatchTimeVerifyingColumns = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "backfill_batch_columns_time_verify",
|
||||
Help: "Time, in milliseconds, batch spent verifying DataColumnSidecars.",
|
||||
Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
|
||||
},
|
||||
)
|
||||
backfillBytesDataColumnSidecar = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "backfill_data_column_sidecar_bytes_downloaded",
|
||||
Help: "DataColumnSidecar bytes downloaded from peers for backfill.",
|
||||
},
|
||||
)
|
||||
backfillDataColumnSidecarDownloaded = promauto.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "backfill_data_column_sidecar_downloaded",
|
||||
Help: "Number of DataColumnSidecar values downloaded from peers for backfill.",
|
||||
},
|
||||
[]string{"index", "validity"},
|
||||
)
|
||||
)
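// Illustrative sketch, not part of this change: workers record the column metrics the same
// way as the existing block and blob metrics, by timing each phase and observing the
// elapsed milliseconds. Variable names and label values below are hypothetical.
//
//	start := time.Now()
//	// ... download DataColumnSidecars from the assigned peer ...
//	backfillBatchTimeDownloadingColumns.Observe(float64(time.Since(start).Milliseconds()))
//	backfillBytesDataColumnSidecar.Add(float64(sidecarBytes))
//	backfillDataColumnSidecarDownloaded.WithLabelValues("4", "valid").Inc()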
|
||||
|
||||
func blobValidationMetrics(_ blocks.ROBlob) error {
|
||||
|
||||
@@ -2,22 +2,18 @@ package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type batchWorkerPool interface {
|
||||
spawn(ctx context.Context, n int, clock *startup.Clock, a PeerAssigner, v *verifier, cm sync.ContextByteVersions, blobVerifier verification.NewBlobVerifier, bfs *filesystem.BlobStorage)
|
||||
spawn(ctx context.Context, n int, a PeerAssigner, cfg *workerCfg)
|
||||
todo(b batch)
|
||||
complete() (batch, error)
|
||||
}
|
||||
@@ -26,11 +22,11 @@ type worker interface {
|
||||
run(context.Context)
|
||||
}
|
||||
|
||||
type newWorker func(id workerId, in, out chan batch, c *startup.Clock, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) worker
|
||||
type newWorker func(id workerId, in, out chan batch, cfg *workerCfg) worker
|
||||
|
||||
func defaultNewWorker(p p2p.P2P) newWorker {
|
||||
return func(id workerId, in, out chan batch, c *startup.Clock, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) worker {
|
||||
return newP2pWorker(id, p, in, out, c, v, cm, nbv, bfs)
|
||||
return func(id workerId, in, out chan batch, cfg *workerCfg) worker {
|
||||
return newP2pWorker(id, p, in, out, cfg)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +41,8 @@ type p2pBatchWorkerPool struct {
|
||||
endSeq []batch
|
||||
ctx context.Context
|
||||
cancel func()
|
||||
p2p p2p.P2P
|
||||
earliest primitives.Slot
|
||||
}
|
||||
|
||||
var _ batchWorkerPool = &p2pBatchWorkerPool{}
|
||||
@@ -59,14 +57,15 @@ func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
|
||||
fromWorkers: make(chan batch),
|
||||
maxBatches: maxBatches,
|
||||
shutdownErr: make(chan error),
|
||||
p2p: p,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *p2pBatchWorkerPool) spawn(ctx context.Context, n int, c *startup.Clock, a PeerAssigner, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) {
|
||||
func (p *p2pBatchWorkerPool) spawn(ctx context.Context, n int, a PeerAssigner, cfg *workerCfg) {
|
||||
p.ctx, p.cancel = context.WithCancel(ctx)
|
||||
go p.batchRouter(a)
|
||||
for i := 0; i < n; i++ {
|
||||
go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, c, v, cm, nbv, bfs).run(p.ctx)
|
||||
go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, cfg).run(p.ctx)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,7 +102,6 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
|
||||
busy := make(map[peer.ID]bool)
|
||||
todo := make([]batch, 0)
|
||||
rt := time.NewTicker(time.Second)
|
||||
earliest := primitives.Slot(math.MaxUint64)
|
||||
for {
|
||||
select {
|
||||
case b := <-p.toRouter:
|
||||
@@ -116,50 +114,72 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
|
||||
// to retry failed assignments.
|
||||
case b := <-p.fromWorkers:
|
||||
pid := b.busy
|
||||
busy[pid] = false
|
||||
if b.state == batchBlobSync {
|
||||
todo = append(todo, b)
|
||||
sortBatchDesc(todo)
|
||||
} else {
|
||||
delete(busy, pid)
|
||||
if b.workComplete() {
|
||||
p.fromRouter <- b
|
||||
break
|
||||
}
|
||||
todo = append(todo, b)
|
||||
sortBatchDesc(todo)
|
||||
case <-p.ctx.Done():
|
||||
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
|
||||
p.shutdown(p.ctx.Err())
|
||||
return
|
||||
}
|
||||
if len(todo) == 0 {
|
||||
continue
|
||||
}
|
||||
// Try to assign as many outstanding batches as possible to peers and feed the assigned batches to workers.
|
||||
assigned, err := pa.Assign(busy, len(todo))
|
||||
var err error
|
||||
todo, err = p.processTodo(todo, pa, busy)
|
||||
if err != nil {
|
||||
if errors.Is(err, peers.ErrInsufficientSuitable) {
|
||||
// Transient error resulting from insufficient number of connected peers. Leave batches in
|
||||
// queue and get to them whenever the peer situation is resolved.
|
||||
continue
|
||||
}
|
||||
p.shutdown(err)
|
||||
return
|
||||
}
|
||||
for _, pid := range assigned {
|
||||
if err := todo[0].waitUntilReady(p.ctx); err != nil {
|
||||
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
|
||||
p.shutdown(p.ctx.Err())
|
||||
return
|
||||
}
|
||||
busy[pid] = true
|
||||
todo[0].busy = pid
|
||||
p.toWorkers <- todo[0].withPeer(pid)
|
||||
if todo[0].begin < earliest {
|
||||
earliest = todo[0].begin
|
||||
oldestBatch.Set(float64(earliest))
|
||||
}
|
||||
todo = todo[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *p2pBatchWorkerPool) processTodo(todo []batch, pa PeerAssigner, busy map[peer.ID]bool) ([]batch, error) {
|
||||
if len(todo) == 0 {
|
||||
return todo, nil
|
||||
}
|
||||
notBusy, err := pa.Assign(peers.NotBusy(busy, -1))
|
||||
if err != nil {
|
||||
if errors.Is(err, peers.ErrInsufficientSuitable) {
|
||||
// Transient error resulting from insufficient number of connected peers. Leave batches in
|
||||
// queue and get to them whenever the peer situation is resolved.
|
||||
return todo, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
peerRank, err := sync.ComputeColumnPeerRank(notBusy, p.p2p)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to compute column matrix for peer assignment")
|
||||
return todo, nil
|
||||
}
|
||||
if len(notBusy) == 0 {
|
||||
log.Warn("No suitable peers available for batch assignment")
|
||||
return todo, nil
|
||||
}
|
||||
for i, b := range todo {
|
||||
pid, cols, err := b.selectPeer(peerRank, busy)
|
||||
if err != nil {
|
||||
log.WithField("not_busy", len(notBusy)).WithError(err).WithFields(b.logFields()).Error("Failed to select peer for batch")
|
||||
// Return the remaining todo items and allow the outer loop to control when we try again.
|
||||
return todo[i:], nil
|
||||
}
|
||||
busy[pid] = true
|
||||
b.busy = pid
|
||||
b.nextReqCols = cols
|
||||
p.toWorkers <- b.withPeer(pid)
|
||||
p.updateEarliest(b.begin)
|
||||
}
|
||||
return []batch{}, nil
|
||||
}
|
||||
|
||||
func (p *p2pBatchWorkerPool) updateEarliest(current primitives.Slot) {
|
||||
if current >= p.earliest {
|
||||
return
|
||||
}
|
||||
p.earliest = current
|
||||
oldestBatch.Set(float64(p.earliest))
|
||||
}
|
||||
|
||||
func (p *p2pBatchWorkerPool) shutdown(err error) {
|
||||
p.cancel()
|
||||
p.shutdownErr <- err
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
@@ -24,7 +25,7 @@ type mockAssigner struct {
|
||||
|
||||
// Assign satisfies the PeerAssigner interface so that mockAssigner can be used in tests
|
||||
// in place of the concrete p2p implementation of PeerAssigner.
|
||||
func (m mockAssigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
|
||||
func (m mockAssigner) Assign(filter peers.AssignmentFilter) ([]peer.ID, error) {
|
||||
if m.err != nil {
|
||||
return nil, m.err
|
||||
}
|
||||
@@ -53,7 +54,8 @@ func TestPoolDetectAllEnded(t *testing.T) {
|
||||
ctxMap, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(st.GenesisValidatorsRoot()))
|
||||
require.NoError(t, err)
|
||||
bfs := filesystem.NewEphemeralBlobStorage(t)
|
||||
pool.spawn(ctx, nw, startup.NewClock(time.Now(), [32]byte{}), ma, v, ctxMap, mockNewBlobVerifier, bfs)
|
||||
wcfg := &workerCfg{c: startup.NewClock(time.Now(), [32]byte{}), nbv: mockNewBlobVerifier, v: v, cm: ctxMap, bfs: bfs}
|
||||
pool.spawn(ctx, nw, ma, wcfg)
|
||||
br := batcher{min: 10, size: 10}
|
||||
endSeq := br.before(0)
|
||||
require.Equal(t, batchEndSequence, endSeq.state)
|
||||
@@ -72,7 +74,7 @@ type mockPool struct {
|
||||
todoChan chan batch
|
||||
}
|
||||
|
||||
func (m *mockPool) spawn(_ context.Context, _ int, _ *startup.Clock, _ PeerAssigner, _ *verifier, _ sync.ContextByteVersions, _ verification.NewBlobVerifier, _ *filesystem.BlobStorage) {
|
||||
func (m *mockPool) spawn(_ context.Context, _ int, _ PeerAssigner, _ *workerCfg) {
|
||||
}
|
||||
|
||||
func (m *mockPool) todo(b batch) {
|
||||
|
||||
@@ -4,11 +4,14 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -32,14 +35,16 @@ type Service struct {
|
||||
batchSeq *batchSequencer
|
||||
batchSize uint64
|
||||
pool batchWorkerPool
|
||||
verifier *verifier
|
||||
ctxMap sync.ContextByteVersions
|
||||
p2p p2p.P2P
|
||||
pa PeerAssigner
|
||||
batchImporter batchImporter
|
||||
blobStore *filesystem.BlobStorage
|
||||
dcStore *filesystem.DataColumnStorage
|
||||
initSyncWaiter func() error
|
||||
complete chan struct{}
|
||||
workerCfg *workerCfg
|
||||
fuluStart primitives.Slot
|
||||
}
|
||||
|
||||
var _ runtime.Service = (*Service)(nil)
|
||||
@@ -48,23 +53,13 @@ var _ runtime.Service = (*Service)(nil)
|
||||
// to service an RPC blockRequest. The Assign method takes a map of peers that should be excluded,
|
||||
// allowing the caller to avoid making multiple concurrent requests to the same peer.
|
||||
type PeerAssigner interface {
|
||||
Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
|
||||
//Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
|
||||
Assign(filter peers.AssignmentFilter) ([]peer.ID, error)
|
||||
}
|
||||
|
||||
type minimumSlotter func(primitives.Slot) primitives.Slot
|
||||
type batchImporter func(ctx context.Context, current primitives.Slot, b batch, su *Store) (*dbval.BackfillStatus, error)
|
||||
|
||||
func defaultBatchImporter(ctx context.Context, current primitives.Slot, b batch, su *Store) (*dbval.BackfillStatus, error) {
|
||||
status := su.status()
|
||||
if err := b.ensureParent(bytesutil.ToBytes32(status.LowParentRoot)); err != nil {
|
||||
return status, err
|
||||
}
|
||||
// Import blocks to db and update db state to reflect the newly imported blocks.
|
||||
// Other parts of the beacon node may use the same StatusUpdater instance
|
||||
// via the coverage.AvailableBlocker interface to safely determine if a given slot has been backfilled.
|
||||
return su.fillBack(ctx, current, b.results, b.availabilityStore())
|
||||
}
|
||||
|
||||
// ServiceOption represents a functional option for the backfill service constructor.
|
||||
type ServiceOption func(*Service) error
|
||||
|
||||
@@ -137,48 +132,43 @@ func WithMinimumSlot(s primitives.Slot) ServiceOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithCustodyInfo(custodyInfo *peerdas.CustodyInfo) ServiceOption {
|
||||
return func(s *Service) error {
|
||||
if s.workerCfg == nil {
|
||||
s.workerCfg = &workerCfg{}
|
||||
}
|
||||
s.workerCfg.custodyInfo = custodyInfo
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewService initializes the backfill Service. Like all implementations of the Service interface,
|
||||
// the service won't begin its runloop until Start() is called.
|
||||
func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
|
||||
func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage, dcStore *filesystem.DataColumnStorage, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
store: su,
|
||||
blobStore: bStore,
|
||||
cw: cw,
|
||||
ms: minimumBackfillSlot,
|
||||
p2p: p,
|
||||
pa: pa,
|
||||
batchImporter: defaultBatchImporter,
|
||||
complete: make(chan struct{}),
|
||||
ctx: ctx,
|
||||
store: su,
|
||||
blobStore: bStore,
|
||||
dcStore: dcStore,
|
||||
cw: cw,
|
||||
ms: minimumBackfillSlot,
|
||||
p2p: p,
|
||||
pa: pa,
|
||||
complete: make(chan struct{}),
|
||||
fuluStart: slots.SafeEpochStartOrMax(params.BeaconConfig().FuluForkEpoch),
|
||||
}
|
||||
s.batchImporter = s.defaultBatchImporter
|
||||
for _, o := range opts {
|
||||
if err := o(s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
s.pool = newP2PBatchWorkerPool(p, s.nWorkers)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Service) initVerifier(ctx context.Context) (*verifier, sync.ContextByteVersions, error) {
|
||||
cps, err := s.store.originState(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
keys, err := cps.PublicKeys()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unable to retrieve public keys for all validators in the origin state")
|
||||
}
|
||||
vr := cps.GenesisValidatorsRoot()
|
||||
ctxMap, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(vr))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root %#x", vr)
|
||||
}
|
||||
v, err := newBackfillVerifier(vr, keys)
|
||||
return v, ctxMap, err
|
||||
}
|
||||
|
||||
func (s *Service) updateComplete() bool {
|
||||
b, err := s.pool.complete()
|
||||
if err != nil {
|
||||
@@ -229,6 +219,18 @@ func (s *Service) importBatches(ctx context.Context) {
|
||||
backfillRemainingBatches.Set(float64(nt))
|
||||
}
|
||||
|
||||
func (s *Service) defaultBatchImporter(ctx context.Context, current primitives.Slot, b batch, su *Store) (*dbval.BackfillStatus, error) {
|
||||
status := su.status()
|
||||
if err := b.ensureParent(bytesutil.ToBytes32(status.LowParentRoot)); err != nil {
|
||||
return status, err
|
||||
}
|
||||
// Import blocks to db and update db state to reflect the newly imported blocks.
|
||||
// Other parts of the beacon node may use the same StatusUpdater instance
|
||||
// via the coverage.AvailableBlocker interface to safely determine if a given slot has been backfilled.
|
||||
|
||||
return su.fillBack(ctx, current, b.results, newMultiStore(s.fuluStart, b))
|
||||
}
|
||||
|
||||
func (s *Service) scheduleTodos() {
|
||||
batches, err := s.batchSeq.sequence()
|
||||
if err != nil {
|
||||
@@ -260,25 +262,19 @@ func (s *Service) Start() {
|
||||
log.Info("Backfill service is shutting down")
|
||||
cancel()
|
||||
}()
|
||||
clock, err := s.cw.WaitForClock(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Backfill service failed to start while waiting for genesis data")
|
||||
return
|
||||
}
|
||||
s.clock = clock
|
||||
v, err := s.verifierWaiter.WaitForInitializer(ctx)
|
||||
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
||||
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not initialize blob verifier in backfill service")
|
||||
return
|
||||
}
|
||||
|
||||
if s.store.isGenesisSync() {
|
||||
log.Info("Backfill short-circuit; node synced from genesis")
|
||||
s.markComplete()
|
||||
return
|
||||
}
|
||||
|
||||
clock, err := s.cw.WaitForClock(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Backfill service failed to start while waiting for genesis data")
|
||||
return
|
||||
}
|
||||
s.clock = clock
|
||||
status := s.store.status()
|
||||
// Exit early if there aren't going to be any batches to backfill.
|
||||
if primitives.Slot(status.LowSlot) <= s.ms(s.clock.CurrentSlot()) {
|
||||
@@ -288,11 +284,6 @@ func (s *Service) Start() {
|
||||
s.markComplete()
|
||||
return
|
||||
}
|
||||
s.verifier, s.ctxMap, err = s.initVerifier(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Unable to initialize backfill verifier")
|
||||
return
|
||||
}
|
||||
|
||||
if s.initSyncWaiter != nil {
|
||||
log.Info("Backfill service waiting for initial-sync to reach head before starting")
|
||||
@@ -301,7 +292,14 @@ func (s *Service) Start() {
|
||||
return
|
||||
}
|
||||
}
|
||||
s.pool.spawn(ctx, s.nWorkers, clock, s.pa, s.verifier, s.ctxMap, s.newBlobVerifier, s.blobStore)
|
||||
|
||||
wc, err := initWorkerCfg(ctx, s.workerCfg, s.clock, s.verifierWaiter, s.store, s.blobStore, s.dcStore)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not initialize blob verifier in backfill service")
|
||||
return
|
||||
}
|
||||
|
||||
s.pool.spawn(ctx, s.nWorkers, s.pa, wc)
|
||||
s.batchSeq = newBatchSequencer(s.nWorkers, s.ms(s.clock.CurrentSlot()), primitives.Slot(status.LowSlot), primitives.Slot(s.batchSize))
|
||||
if err = s.initBatches(); err != nil {
|
||||
log.WithError(err).Error("Non-recoverable error in backfill service")
|
||||
@@ -370,6 +368,12 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
|
||||
}
|
||||
}
|
||||
|
||||
func newDataColumnVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
|
||||
return func(cols []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return ini.NewDataColumnsVerifier(cols, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) markComplete() {
|
||||
close(s.complete)
|
||||
log.Info("Backfill service marked as complete")
|
||||
|
||||
@@ -57,7 +57,8 @@ func TestServiceInit(t *testing.T) {
|
||||
pool := &mockPool{todoChan: make(chan batch, nWorkers), finishedChan: make(chan batch, nWorkers)}
|
||||
p2pt := p2ptest.NewTestP2P(t)
|
||||
bfs := filesystem.NewEphemeralBlobStorage(t)
|
||||
srv, err := NewService(ctx, su, bfs, cw, p2pt, &mockAssigner{},
|
||||
dcs := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
srv, err := NewService(ctx, su, bfs, dcs, cw, p2pt, &mockAssigner{},
|
||||
WithBatchSize(batchSize), WithWorkerCount(nWorkers), WithEnableBackfill(true), WithVerifierWaiter(&mockInitalizerWaiter{}))
|
||||
require.NoError(t, err)
|
||||
srv.ms = mockMinimumSlotter{min: primitives.Slot(high - batchSize*uint64(nBatches))}.minimumSlot
|
||||
|
||||
@@ -74,7 +74,7 @@ func (s *Store) status() *dbval.BackfillStatus {
|
||||
// fillBack saves the slice of blocks and updates the BackfillStatus LowSlot/Root/ParentRoot tracker to the values
|
||||
// from the first block in the slice. This method assumes that the block slice has been fully validated and
|
||||
// sorted in slot order by the calling function.
|
||||
func (s *Store) fillBack(ctx context.Context, current primitives.Slot, blocks []blocks.ROBlock, store das.AvailabilityStore) (*dbval.BackfillStatus, error) {
|
||||
func (s *Store) fillBack(ctx context.Context, current primitives.Slot, blocks []blocks.ROBlock, store das.AvailabilityChecker) (*dbval.BackfillStatus, error) {
|
||||
status := s.status()
|
||||
if len(blocks) == 0 {
|
||||
return status, nil
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -23,18 +22,34 @@ var errUnknownDomain = errors.New("runtime error looking up signing domain for f
|
||||
type verifiedROBlocks []blocks.ROBlock
|
||||
|
||||
func (v verifiedROBlocks) blobIdents(retentionStart primitives.Slot) ([]blobSummary, error) {
|
||||
// early return if the newest block is outside the retention window
|
||||
if len(v) > 0 && v[len(v)-1].Block().Slot() < retentionStart {
|
||||
if len(v) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
latest := v[len(v)-1].Block().Slot()
|
||||
// early return if the newest block is outside the retention window
|
||||
if latest < retentionStart {
|
||||
return nil, nil
|
||||
}
|
||||
fuluStart := params.BeaconConfig().FuluForkEpoch
|
||||
// If the batch end slot or last result block are pre-fulu, so are the rest.
|
||||
if slots.ToEpoch(latest) < fuluStart {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
bs := make([]blobSummary, 0)
|
||||
for i := range v {
|
||||
if v[i].Block().Slot() < retentionStart {
|
||||
slot := v[i].Block().Slot()
|
||||
if slot < retentionStart {
|
||||
continue
|
||||
}
|
||||
if v[i].Block().Version() < version.Deneb {
|
||||
continue
|
||||
}
|
||||
// Assuming blocks are sorted, as soon as we see 1 fulu block we know the rest will also be fulu.
|
||||
if slots.ToEpoch(slot) >= fuluStart {
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
c, err := v[i].Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unexpected error checking commitments for block root %#x", v[i].Root())
|
||||
@@ -57,37 +72,31 @@ type verifier struct {
|
||||
domain *domainCache
|
||||
}
|
||||
|
||||
// TODO: rewrite this to use ROBlock.
|
||||
func (vr verifier) verify(blks []interfaces.ReadOnlySignedBeaconBlock) (verifiedROBlocks, error) {
|
||||
func (vr verifier) verify(blks []blocks.ROBlock) (verifiedROBlocks, error) {
|
||||
var err error
|
||||
result := make([]blocks.ROBlock, len(blks))
|
||||
sigSet := bls.NewSet()
|
||||
for i := range blks {
|
||||
result[i], err = blocks.NewROBlock(blks[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if i > 0 && result[i-1].Root() != result[i].Block().ParentRoot() {
|
||||
p, b := result[i-1], result[i]
|
||||
if i > 0 && blks[i-1].Root() != blks[i].Block().ParentRoot() {
|
||||
p, b := blks[i-1], blks[i]
|
||||
return nil, errors.Wrapf(errInvalidBatchChain,
|
||||
"slot %d parent_root=%#x, slot %d root=%#x",
|
||||
b.Block().Slot(), b.Block().ParentRoot(),
|
||||
p.Block().Slot(), p.Root())
|
||||
}
|
||||
set, err := vr.blockSignatureBatch(result[i])
|
||||
set, err := vr.blockSignatureBatch(blks[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "block signature batch")
|
||||
}
|
||||
sigSet.Join(set)
|
||||
}
|
||||
v, err := sigSet.Verify()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "block signature verification error")
|
||||
return nil, errors.Wrap(err, "SignatureBatch Verify")
|
||||
}
|
||||
if !v {
|
||||
return nil, errors.New("batch block signature verification failed")
|
||||
return nil, errors.New("SignatureBatch Verify invalid")
|
||||
}
|
||||
return result, nil
|
||||
return blks, nil
|
||||
}
|
||||
|
||||
func (vr verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, error) {
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -72,12 +71,7 @@ func TestVerify(t *testing.T) {
|
||||
}
|
||||
v, err := newBackfillVerifier(vr, pubkeys)
|
||||
require.NoError(t, err)
|
||||
notrob := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
// We have to unwrap the ROBlocks for this code because that's what it expects (for now).
|
||||
for i := range blks {
|
||||
notrob[i] = blks[i].ReadOnlySignedBeaconBlock
|
||||
}
|
||||
vbs, err := v.verify(notrob)
|
||||
vbs, err := v.verify(blks)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(blks), len(vbs))
|
||||
}
|
||||
|
||||
@@ -4,14 +4,63 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type workerCfg struct {
|
||||
c *startup.Clock
|
||||
v *verifier
|
||||
cm sync.ContextByteVersions
|
||||
nbv verification.NewBlobVerifier
|
||||
ndcv verification.NewDataColumnsVerifier
|
||||
bfs *filesystem.BlobStorage
|
||||
cfs *filesystem.DataColumnStorage
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
func initWorkerCfg(ctx context.Context, cfg *workerCfg, c *startup.Clock, vw InitializerWaiter, store *Store, bfs *filesystem.BlobStorage, cfs *filesystem.DataColumnStorage) (*workerCfg, error) {
|
||||
vi, err := vw.WaitForInitializer(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cps, err := store.originState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keys, err := cps.PublicKeys()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve public keys for all validators in the origin state")
|
||||
}
|
||||
vr := cps.GenesisValidatorsRoot()
|
||||
cm, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(vr))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root %#x", vr)
|
||||
}
|
||||
v, err := newBackfillVerifier(vr, keys)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "newBackfillVerifier failed")
|
||||
}
|
||||
if cfg == nil {
|
||||
cfg = &workerCfg{}
|
||||
}
|
||||
cfg.c = c
|
||||
cfg.v = v
|
||||
cfg.cm = cm
|
||||
cfg.bfs = bfs
|
||||
cfg.cfs = cfs
|
||||
cfg.nbv = newBlobVerifierFromInitializer(vi)
|
||||
cfg.ndcv = newDataColumnVerifierFromInitializer(vi)
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
type workerId int
|
||||
|
||||
type p2pWorker struct {
|
||||
@@ -19,23 +68,38 @@ type p2pWorker struct {
|
||||
todo chan batch
|
||||
done chan batch
|
||||
p2p p2p.P2P
|
||||
v *verifier
|
||||
c *startup.Clock
|
||||
cm sync.ContextByteVersions
|
||||
nbv verification.NewBlobVerifier
|
||||
bfs *filesystem.BlobStorage
|
||||
cfg *workerCfg
|
||||
}
|
||||
|
||||
func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch, cfg *workerCfg) *p2pWorker {
|
||||
return &p2pWorker{
|
||||
id: id,
|
||||
todo: todo,
|
||||
done: done,
|
||||
p2p: p,
|
||||
cfg: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *p2pWorker) run(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case b := <-w.todo:
|
||||
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
|
||||
if b.state == batchBlobSync {
|
||||
w.done <- w.handleBlobs(ctx, b)
|
||||
} else {
|
||||
w.done <- w.handleBlocks(ctx, b)
|
||||
if err := b.waitUntilReady(ctx); err != nil {
|
||||
log.WithField("batch_id", b.id()).WithError(ctx.Err()).Info("worker context canceled while waiting to retry")
|
||||
continue
|
||||
}
|
||||
log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
|
||||
if b.state == batchSyncBlobs {
|
||||
w.done <- w.handleSidecars(ctx, b)
|
||||
continue
|
||||
}
|
||||
if b.state == batchSyncColumns {
|
||||
w.done <- w.handleColumns(ctx, b)
|
||||
continue
|
||||
}
|
||||
|
||||
w.done <- w.handleBlocks(ctx, b)
|
||||
case <-ctx.Done():
|
||||
log.WithField("backfillWorker", w.id).Info("Backfill worker exiting after context canceled")
|
||||
return
|
||||
@@ -44,21 +108,27 @@ func (w *p2pWorker) run(ctx context.Context) {
|
||||
}
|
||||
|
||||
func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
|
||||
cs := w.c.CurrentSlot()
|
||||
blobRetentionStart, err := sync.BlobRPCMinValidSlot(cs)
|
||||
current := w.cfg.c.CurrentSlot()
|
||||
blobRetentionStart, err := sync.BlobRPCMinValidSlot(current)
|
||||
if err != nil {
|
||||
return b.withRetryableError(errors.Wrap(err, "configuration issue, could not compute minimum blob retention slot"))
|
||||
}
|
||||
b.blockPid = b.busy
|
||||
start := time.Now()
|
||||
results, err := sync.SendBeaconBlocksByRangeRequest(ctx, w.c, w.p2p, b.blockPid, b.blockRequest(), blockValidationMetrics)
|
||||
dlt := time.Now()
|
||||
backfillBatchTimeDownloadingBlocks.Observe(float64(dlt.Sub(start).Milliseconds()))
|
||||
results, err := sync.SendBeaconBlocksByRangeRequest(ctx, w.cfg.c, w.p2p, b.blockPid, b.blockRequest(), blockValidationMetrics)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(b.logFields()).Debug("Batch requesting failed")
|
||||
return b.withRetryableError(err)
|
||||
}
|
||||
vb, err := w.v.verify(results)
|
||||
dlt := time.Now()
|
||||
backfillBatchTimeDownloadingBlocks.Observe(float64(dlt.Sub(start).Milliseconds()))
|
||||
toVerify, err := blocks.NewROBlockSlice(results)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(b.logFields()).Debug("Batch conversion to ROBlock failed")
|
||||
return b.withRetryableError(err)
|
||||
}
|
||||
|
||||
vb, err := w.cfg.v.verify(toVerify)
|
||||
backfillBatchTimeVerifying.Observe(float64(time.Since(dlt).Milliseconds()))
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(b.logFields()).Debug("Batch validation failed")
|
||||
@@ -73,19 +143,26 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
|
||||
}
|
||||
backfillBlocksApproximateBytes.Add(float64(bdl))
|
||||
log.WithFields(b.logFields()).WithField("dlbytes", bdl).Debug("Backfill batch block bytes downloaded")
|
||||
bs, err := newBlobSync(cs, vb, &blobSyncConfig{retentionStart: blobRetentionStart, nbv: w.nbv, store: w.bfs})
|
||||
bscfg := &blobSyncConfig{retentionStart: blobRetentionStart, nbv: w.cfg.nbv, store: w.cfg.bfs}
|
||||
bs, err := newBlobSync(current, vb, bscfg)
|
||||
if err != nil {
|
||||
return b.withRetryableError(err)
|
||||
}
|
||||
return b.withResults(vb, bs)
|
||||
w.cfg.custodyInfo.Mut.RLock()
|
||||
defer w.cfg.custodyInfo.Mut.RUnlock()
|
||||
cs, err := newColumnSync(b, vb, current, w.p2p, vb, w.cfg)
|
||||
if err != nil {
|
||||
return b.withFatalError(err)
|
||||
}
|
||||
return b.postBlockSync(vb, bs, cs)
|
||||
}
|
||||
|
||||
func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
|
||||
func (w *p2pWorker) handleSidecars(ctx context.Context, b batch) batch {
|
||||
b.blobPid = b.busy
|
||||
start := time.Now()
|
||||
// we don't need to use the response for anything other than metrics, because blobResponseValidation
|
||||
// adds each of them to a batch AvailabilityStore once it is checked.
|
||||
blobs, err := sync.SendBlobsByRangeRequest(ctx, w.c, w.p2p, b.blobPid, w.cm, b.blobRequest(), b.blobResponseValidator(), blobValidationMetrics)
|
||||
blobs, err := sync.SendBlobsByRangeRequest(ctx, w.cfg.c, w.p2p, b.blobPid, w.cfg.cm, b.blobRequest(), b.blobResponseValidator(), blobValidationMetrics)
|
||||
if err != nil {
|
||||
b.bs = nil
|
||||
return b.withRetryableError(err)
|
||||
@@ -98,19 +175,19 @@ func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
|
||||
backfillBlobsApproximateBytes.Add(float64(sz))
|
||||
log.WithFields(b.logFields()).WithField("dlbytes", sz).Debug("Backfill batch blob bytes downloaded")
|
||||
}
|
||||
return b.postBlobSync()
|
||||
return b.postSidecarSync()
|
||||
}
|
||||
|
||||
func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch, c *startup.Clock, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) *p2pWorker {
|
||||
return &p2pWorker{
|
||||
id: id,
|
||||
todo: todo,
|
||||
done: done,
|
||||
p2p: p,
|
||||
v: v,
|
||||
c: c,
|
||||
cm: cm,
|
||||
nbv: nbv,
|
||||
bfs: bfs,
|
||||
func (w *p2pWorker) handleColumns(ctx context.Context, b batch) batch {
|
||||
b.columnPid = b.busy
|
||||
start := time.Now()
|
||||
vr := b.validatingColumnRequest()
|
||||
// Response is dropped because the validation code adds the columns to the columnSync AvailabilityStore under the hood.
|
||||
_, err := sync.SendDataColumnSidecarsByRangeRequest(ctx, w.cfg.c, w.p2p, b.busy, w.cfg.cm, vr.req, vr.validate)
|
||||
if err != nil {
|
||||
return b.withRetryableError(errors.Wrap(err, "failed to request data column sidecars"))
|
||||
}
|
||||
dlt := time.Now()
|
||||
backfillBatchTimeDownloadingColumns.Observe(float64(dlt.Sub(start).Milliseconds()))
|
||||
return b.postSidecarSync()
|
||||
}
|
||||
|
||||
@@ -181,7 +181,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
}
|
||||
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
|
||||
chain, clock := defaultMockChain(t)
|
||||
chain, clock := defaultMockChain(t, 0)
|
||||
if c.chain == nil {
|
||||
c.chain = chain
|
||||
}
|
||||
@@ -279,7 +279,7 @@ func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
|
||||
}
|
||||
}
|
||||
|
||||
func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
func defaultMockChain(t *testing.T, currentSlot uint64) (*mock.ChainService, *startup.Clock) {
|
||||
de := params.BeaconConfig().DenebForkEpoch
|
||||
df, err := forks.Fork(de)
|
||||
require.NoError(t, err)
|
||||
@@ -290,8 +290,14 @@ func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
|
||||
require.NoError(t, err)
|
||||
now := time.Now()
|
||||
genOffset := types.Slot(params.BeaconConfig().SecondsPerSlot) * cs
|
||||
genesis := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
clock := startup.NewClock(genesis, [32]byte{})
|
||||
genesisTime := now.Add(-1 * time.Second * time.Duration(int64(genOffset)))
|
||||
|
||||
clock := startup.NewClock(genesisTime, [32]byte{}, startup.WithNower(
|
||||
func() time.Time {
|
||||
return genesisTime.Add(time.Duration(currentSlot*params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
},
|
||||
))
|
||||
|
||||
chain := &mock.ChainService{
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Epoch: fe},
|
||||
Fork: df,
|
||||
|
||||
@@ -78,9 +78,10 @@ func (bb *blockRangeBatcher) next(ctx context.Context, stream libp2pcore.Stream)
|
||||
if !more {
|
||||
return blockBatch{}, false
|
||||
}
|
||||
if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
}
|
||||
// TODO: Uncomment out of devnet.
|
||||
// if err := bb.limiter.validateRequest(stream, bb.size); err != nil {
|
||||
// return blockBatch{err: errors.Wrap(err, "throttled by rate limiter")}, false
|
||||
// }
|
||||
|
||||
// Wait for the ticker before doing anything expensive, unless this is the first batch.
|
||||
if bb.ticker != nil && bb.current != nil {
|
||||
|
||||
211
beacon-chain/sync/data_column_assignment.go
Normal file
@@ -0,0 +1,211 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type columnRankedPeer struct {
|
||||
peerID peer.ID
|
||||
nodeID enode.ID
|
||||
custodied []uint64
|
||||
cov float64
|
||||
}
|
||||
|
||||
func (p *columnRankedPeer) covered(needed peerdas.ColumnIndices) []uint64 {
|
||||
covered := make([]uint64, 0, len(p.custodied))
|
||||
for col, want := range needed {
|
||||
if want && p.custodied[col] == 1 {
|
||||
covered = append(covered, uint64(col))
|
||||
}
|
||||
}
|
||||
return covered
|
||||
}
|
||||
|
||||
func (p *columnRankedPeer) coverageScore(rarity []float64) float64 {
|
||||
if p.cov == 0 {
|
||||
p.cov = coverageScore(p.custodied, rarity)
|
||||
}
|
||||
return p.cov
|
||||
}
|
||||
|
||||
type ColumnPeerRank struct {
|
||||
peers map[peer.ID]*columnRankedPeer
|
||||
freq []colFreq
|
||||
rarity []float64
|
||||
rg rand.Rand
|
||||
covScoreRank []*columnRankedPeer
|
||||
}
|
||||
|
||||
func coverageScore(covered []uint64, rarity []float64) float64 {
|
||||
score := 0.0
|
||||
for _, col := range covered {
|
||||
if col >= uint64(len(rarity)) {
|
||||
continue
|
||||
}
|
||||
score += rarity[col]
|
||||
}
|
||||
return score
|
||||
}
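// Worked example, not part of this change, with made-up frequencies: if column 0 is
// custodied by 1 connected peer, column 1 by 4, and column 2 by 2, then
// rarity = [1.0, 0.25, 0.5]. A peer covering columns {1, 2} scores 0.25 + 0.5 = 0.75,
// while a peer covering only the rare column {0} scores 1.0 and is preferred.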
|
||||
|
||||
func (m *ColumnPeerRank) HighestForIndices(needed peerdas.ColumnIndices, busy map[peer.ID]bool) (peer.ID, []uint64, error) {
|
||||
// - find the custodied column with the lowest frequency
|
||||
// - collect all the peers that have custody of that column
|
||||
// - score the peers by how many other of the needed columns they have
|
||||
// -- or, score them by the rank of the columns they have??
|
||||
for _, cf := range m.freq {
|
||||
if !needed[cf.col] {
|
||||
continue
|
||||
}
|
||||
if cf.freq() == 0 {
|
||||
continue
|
||||
}
|
||||
var best *columnRankedPeer
|
||||
bestScore, bestCoverage := 0.0, make([]uint64, 1)
|
||||
for _, p := range cf.custodians {
|
||||
if busy[p.peerID] {
|
||||
continue
|
||||
}
|
||||
coverage := p.covered(needed)
|
||||
if len(coverage) == 0 {
|
||||
continue
|
||||
}
|
||||
pscore := coverageScore(coverage, m.rarity)
|
||||
if pscore > bestScore {
|
||||
best, bestScore, bestCoverage = p, pscore, coverage
|
||||
}
|
||||
}
|
||||
if best != nil {
|
||||
return best.peerID, bestCoverage, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil, errors.New("no peers able to cover needed columns")
|
||||
}
|
||||
|
||||
func NeededCoveredIntersection(needed peerdas.ColumnIndices, covered []uint64) []uint64 {
|
||||
intersection := make([]uint64, 0, len(covered))
|
||||
for _, col := range covered {
|
||||
if needed[col] {
|
||||
intersection = append(intersection, col)
|
||||
}
|
||||
}
|
||||
return intersection
|
||||
}
|
||||
|
||||
// Lowest returns the lowest scoring peer in the set. This can be used to pick a peer
|
||||
// for block requests, preserving the peers that have the highest coverage scores
|
||||
// for column requests.
|
||||
func (m *ColumnPeerRank) Lowest(busy map[peer.ID]bool) (peer.ID, error) {
|
||||
for i := len(m.covScoreRank) - 1; i >= 0; i-- {
|
||||
p := m.covScoreRank[i]
|
||||
if !busy[p.peerID] {
|
||||
return p.peerID, nil
|
||||
}
|
||||
}
|
||||
return "", errors.New("no peers available")
|
||||
}
|
||||
|
||||
type colFreq struct {
|
||||
col uint64
|
||||
custodians []*columnRankedPeer
|
||||
}
|
||||
|
||||
func (f colFreq) rarity() float64 {
|
||||
if f.freq() == 0 {
|
||||
return 1
|
||||
}
|
||||
return 1 / float64(f.freq())
|
||||
}
|
||||
|
||||
func (f colFreq) freq() int {
|
||||
return len(f.custodians)
|
||||
}
|
||||
|
||||
type colFreqs []colFreq
|
||||
|
||||
func (s colFreqs) rarity() []float64 {
|
||||
ra := make([]float64, len(s))
|
||||
for _, f := range s {
|
||||
ra[f.col] = f.rarity()
|
||||
}
|
||||
return ra
|
||||
}
|
||||
|
||||
// ComputeColumnPeerRank computes a grid of column custody by peer and ranks peers by how well they cover rare columns.
|
||||
func ComputeColumnPeerRank(peers []peer.ID, p2pSvc p2p.P2P) (*ColumnPeerRank, error) {
|
||||
nc := params.BeaconConfig().NumberOfColumns
|
||||
grid := make(map[peer.ID]*columnRankedPeer, len(peers))
|
||||
freqByColumn := make([]colFreq, nc)
|
||||
for i := range freqByColumn {
|
||||
freqByColumn[i].col = uint64(i)
|
||||
}
|
||||
for _, peer := range peers {
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
log.WithField("peerID", peer).WithError(err).Debug("Failed to convert peer ID to node ID.")
|
||||
continue
|
||||
}
|
||||
dasInfo, _, err := peerdas.Info(nodeID, p2pSvc.CustodyGroupCountFromPeer(peer))
|
||||
if err != nil {
|
||||
log.WithField("peerID", peer).WithField("nodeID", nodeID).WithError(err).Debug("Failed to derive custody groups from peer.")
|
||||
return nil, errors.Wrap(err, "custody groups")
|
||||
}
|
||||
p := &columnRankedPeer{
|
||||
peerID: peer,
|
||||
nodeID: nodeID,
|
||||
custodied: make([]uint64, nc),
|
||||
}
|
||||
for c, v := range dasInfo.CustodyColumns {
|
||||
if c > nc-1 {
|
||||
return nil, errors.Errorf("column %d is out of bounds", c)
|
||||
}
|
||||
if v {
|
||||
p.custodied[c] = 1
|
||||
freqByColumn[c].custodians = append(freqByColumn[c].custodians, p)
|
||||
}
|
||||
}
|
||||
grid[peer] = p
|
||||
}
|
||||
|
||||
var colByFreq colFreqs
|
||||
colByFreq = slices.SortedFunc(slices.Values(freqByColumn), func(a, b colFreq) int {
|
||||
if a.freq() == b.freq() {
|
||||
return 0
|
||||
}
|
||||
if a.freq() < b.freq() {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
})
|
||||
rarity := colByFreq.rarity()
|
||||
|
||||
covScoreRank := make([]*columnRankedPeer, 0, len(grid))
|
||||
for _, p := range grid {
|
||||
covScoreRank = append(covScoreRank, p)
|
||||
}
|
||||
slices.SortFunc(covScoreRank, func(a, b *columnRankedPeer) int {
|
||||
if a.coverageScore(rarity) == b.coverageScore(rarity) {
|
||||
return 0
|
||||
}
|
||||
if a.coverageScore(rarity) < b.coverageScore(rarity) {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
})
|
||||
|
||||
return &ColumnPeerRank{
|
||||
peers: grid,
|
||||
freq: colByFreq,
|
||||
rg: *rand.NewGenerator(),
|
||||
rarity: rarity,
|
||||
covScoreRank: covScoreRank,
|
||||
}, nil
|
||||
}
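// Illustrative usage sketch, not part of this change: rank the not-busy peers once per
// assignment pass, then draw the best column peer and the cheapest block peer from the
// same ranking. notBusy, p2pSvc, needed, and busy are placeholders supplied by the caller.
//
//	rank, err := ComputeColumnPeerRank(notBusy, p2pSvc)
//	if err != nil {
//		return err
//	}
//	colPeer, covered, err := rank.HighestForIndices(needed, busy) // strongest custody coverage
//	if err != nil {
//		return err // no connected peer covers any of the needed columns
//	}
//	blockPeer, err := rank.Lowest(busy) // weakest coverage; keeps strong peers free for columns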
|
||||
940
beacon-chain/sync/data_columns.go
Normal file
@@ -0,0 +1,940 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RequestDataColumnSidecarsByRoot is an opinionated, high level function which, for each data column in `dataColumnsToFetch`:
|
||||
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
|
||||
// - Request the data column sidecars from the selected peers.
|
||||
// - In case of peers unable to actually provide all the requested data columns, retry with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars in `dataColumnsToFetch` are retrieved, or
|
||||
// - returns an error if all peers in `peers` are exhausted and at least one data column sidecar is still missing.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after peer exhaustion,
|
||||
//
|
||||
// but `peers` custody more than 64 columns, then try to fetch enough columns to reconstruct needed ones.
|
||||
func RequestDataColumnSidecarsByRoot(
|
||||
ctx context.Context,
|
||||
dataColumnsToFetch []uint64,
|
||||
block blocks.ROBlock,
|
||||
peers []core.PeerID,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
ctxMap ContextByteVersions,
|
||||
newColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(dataColumnsToFetch) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Assemble the peers who can provide the needed data columns.
|
||||
dataColumnsByAdmissiblePeer, _, _, err := AdmissiblePeersForDataColumns(peers, dataColumnsToFetch, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't get admissible peers for data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnsToFetch))
|
||||
remainingMissingColumns := make(map[uint64]bool, len(dataColumnsToFetch))
|
||||
for _, column := range dataColumnsToFetch {
|
||||
remainingMissingColumns[column] = true
|
||||
}
|
||||
|
||||
blockRoot := block.Root()
|
||||
|
||||
for len(dataColumnsByAdmissiblePeer) > 0 {
|
||||
peersToFetchFrom, err := SelectPeersToFetchDataColumnsFrom(sliceFromMap(remainingMissingColumns, true /*sorted*/), dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
// Request the data columns from each peer.
|
||||
successfulColumns := make(map[uint64]bool, len(remainingMissingColumns))
|
||||
for peer, peerRequestedColumns := range peersToFetchFrom {
|
||||
log := log.WithFields(logrus.Fields{"peer": peer.String(), "blockRoot": fmt.Sprintf("%#x", blockRoot)})
|
||||
|
||||
// Build the requests for the data columns.
|
||||
byRootRequest := ð.DataColumnsByRootIdentifier{BlockRoot: blockRoot[:], Columns: peerRequestedColumns}
|
||||
|
||||
// Send the requests to the peer.
|
||||
peerSidecars, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p2p, peer, ctxMap, &types.DataColumnsByRootIdentifiers{byRootRequest})
|
||||
if err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": peer.String(),
|
||||
"blockRoot": fmt.Sprintf("%#x", block.Root()),
|
||||
}).WithError(err).Debug("Failed to request data columns from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if returned data columns align with the block.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, peerSidecars); err != nil {
|
||||
// Remove this peer since it failed to respond correctly.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Align with block failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the received sidecars.
|
||||
verifier := newColumnsVerifier(peerSidecars, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Valid verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar inclusion proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
// Remove this peer if the verification failed.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithError(err).Debug("Sidecar KZG proof verification failed")
|
||||
continue
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
verifiedPeerSidecars, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "verified data columns")
|
||||
}
|
||||
|
||||
// Mark columns as successful
|
||||
for _, sidecar := range verifiedPeerSidecars {
|
||||
successfulColumns[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Check if all requested columns were successfully returned.
|
||||
peerMissingColumns := make(map[uint64]bool)
|
||||
for _, index := range peerRequestedColumns {
|
||||
if !successfulColumns[index] {
|
||||
peerMissingColumns[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(peerMissingColumns) > 0 {
|
||||
// Remove this peer if some requested columns were not correctly returned.
|
||||
delete(dataColumnsByAdmissiblePeer, peer)
|
||||
log.WithField("missingColumns", sliceFromMap(peerMissingColumns, true /*sorted*/)).Debug("Peer did not provide all requested data columns")
|
||||
}
|
||||
|
||||
verifiedSidecars = append(verifiedSidecars, verifiedPeerSidecars...)
|
||||
}
|
||||
|
||||
// Update remaining columns for the next retry.
|
||||
for col := range successfulColumns {
|
||||
delete(remainingMissingColumns, col)
|
||||
}
|
||||
|
||||
if len(remainingMissingColumns) > 0 {
|
||||
// Some columns are still missing, retry with the remaining peers.
|
||||
continue
|
||||
}
|
||||
|
||||
return verifiedSidecars, nil
|
||||
}
|
||||
|
||||
// If we still have remaining columns after all retries, return error
|
||||
return nil, errors.Errorf("failed to retrieve all requested data columns after retries for block root=%#x, missing columns=%v", blockRoot, sliceFromMap(remainingMissingColumns, true /*sorted*/))
|
||||
}
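// Illustrative usage sketch, not part of this change: every variable below (blk, peerList,
// clk, p2pSvc, ctxMap, newVerifier) is a placeholder for wiring the caller already holds.
//
//	missing := []uint64{4, 17, 63} // column indices still needed for blk
//	sidecars, err := RequestDataColumnSidecarsByRoot(ctx, missing, blk, peerList, clk, p2pSvc, ctxMap, newVerifier)
//	if err != nil {
//		return err // admissible peers were exhausted with at least one column still missing
//	}
//	// sidecars holds verified columns for blk's root; persist or import them as needed.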
|
||||
|
||||
// RequestMissingDataColumnsByRange is an opinionated, high level function which, for each block in `blks`:
|
||||
// - Computes all data column sidecars we should store and which are missing (according to our node ID and `groupCount`),
|
||||
// - Builds an optimized set of data column sidecars by range requests in order to never request a data column that is already stored in the DB,
|
||||
// and in order to minimize the number of total requests, while not exceeding `batchSize` sidecars per requests.
|
||||
// - Greedily selects, among `peers`, the peers that can provide the requested data columns, to minimize the number of requests.
|
||||
// - Request the data column sidecars from the selected peers.
|
||||
// - In case of peers unable to actually provide all the requested data columns, retry with other peers.
|
||||
//
|
||||
// This function:
|
||||
// - returns on success when all the initially missing sidecars for `blks` are retrieved, or
|
||||
// - returns an error if no progress at all is made after 5 consecutive trials.
|
||||
// (If at least one additional data column sidecar is retrieved between two trials, the counter is reset.)
|
||||
//
|
||||
// In case of success, initially missing data columns grouped by block root are returned.
|
||||
// This function expects blocks to be sorted by slot.
|
||||
//
|
||||
// TODO: In case at least one column is still missing after all allowed retries,
|
||||
//
|
||||
// but `peers` custody more than 64 columns, then try to fetch enough columns to reconstruct needed ones.
|
||||
func RequestMissingDataColumnsByRange(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
groupCount uint64,
|
||||
dataColumnsStorage filesystem.DataColumnStorageSummarizer,
|
||||
blks []blocks.ROBlock,
|
||||
batchSize int,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.RODataColumn, error) {
|
||||
const maxAllowedStall = 5 // Number of trials before giving up.
|
||||
|
||||
if len(blks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the current slot.
|
||||
currentSlot := clock.CurrentSlot()
|
||||
|
||||
// Compute the minimum slot for which we should serve data columns.
|
||||
minimumSlot, err := DataColumnsRPCMinValidSlot(currentSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data columns RPC min valid slot")
|
||||
}
|
||||
|
||||
// Get blocks by root and compute all missing columns by root.
|
||||
blockByRoot := make(map[[fieldparams.RootLength]byte]blocks.ROBlock, len(blks))
|
||||
missingColumnsByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(blks))
|
||||
for _, blk := range blks {
|
||||
// Extract the block root and the block slot
|
||||
blockRoot, blockSlot := blk.Root(), blk.Block().Slot()
|
||||
|
||||
// Populate the block by root.
|
||||
blockByRoot[blockRoot] = blk
|
||||
|
||||
// Skip blocks that are not in the retention period.
|
||||
if blockSlot < minimumSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
missingColumns, err := MissingDataColumns(blk, p2p.NodeID(), groupCount, dataColumnsStorage)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
for _, column := range missingColumns {
|
||||
if _, ok := missingColumnsByRoot[blockRoot]; !ok {
|
||||
missingColumnsByRoot[blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
missingColumnsByRoot[blockRoot][column] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if there are no missing data columns.
|
||||
if len(missingColumnsByRoot) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the number of missing data columns.
|
||||
previousMissingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
|
||||
// Count the number of retries for the same amount of missing data columns.
|
||||
stallCount := 0
|
||||
|
||||
// Add log fields.
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"initialMissingColumnsCount": previousMissingDataColumnsCount,
|
||||
"blockCount": len(blks),
|
||||
"firstSlot": blks[0].Block().Slot(),
|
||||
"lastSlot": blks[len(blks)-1].Block().Slot(),
|
||||
})
|
||||
|
||||
// Log the start of the process.
|
||||
start := time.Now()
|
||||
log.Debug("Requesting data column sidecars - start")
|
||||
|
||||
alignedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn, len(blks))
|
||||
for len(missingColumnsByRoot) > 0 {
|
||||
// Build requests.
|
||||
requests, err := buildDataColumnByRangeRequests(blks, missingColumnsByRoot, batchSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "build data column by range requests")
|
||||
}
|
||||
|
||||
// Requests data column sidecars from peers.
|
||||
retrievedDataColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.RODataColumn)
|
||||
for _, request := range requests {
|
||||
roDataColumns, err := fetchDataColumnsFromPeers(ctx, clock, p2p, rateLimiter, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetch data columns from peers")
|
||||
}
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
root := roDataColumn.BlockRoot()
|
||||
if _, ok := blockByRoot[root]; !ok {
|
||||
// It may happen if the peer which sent the data columns is on a different fork.
|
||||
continue
|
||||
}
|
||||
|
||||
retrievedDataColumnsByRoot[root] = append(retrievedDataColumnsByRoot[root], roDataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
for root, dataColumns := range retrievedDataColumnsByRoot {
|
||||
// Retrieve the block from the root.
|
||||
block, ok := blockByRoot[root]
|
||||
if !ok {
|
||||
return nil, errors.New("block not found - this should never happen")
|
||||
}
|
||||
|
||||
// Check if the data columns align with blocks.
|
||||
if err := peerdas.DataColumnsAlignWithBlock(block, dataColumns); err != nil {
|
||||
log.WithField("root", root).WithError(err).Debug("Data columns do not align with block")
|
||||
continue
|
||||
}
|
||||
|
||||
alignedDataColumnsByRoot[root] = append(alignedDataColumnsByRoot[root], dataColumns...)
|
||||
|
||||
// Remove aligned data columns from the missing columns.
|
||||
for _, dataColumn := range dataColumns {
|
||||
delete(missingColumnsByRoot[root], dataColumn.Index)
|
||||
if len(missingColumnsByRoot[root]) == 0 {
|
||||
delete(missingColumnsByRoot, root)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
missingDataColumnsCount := itemsCount(missingColumnsByRoot)
|
||||
if missingDataColumnsCount == previousMissingDataColumnsCount {
|
||||
stallCount++
|
||||
} else {
|
||||
stallCount = 0
|
||||
}
|
||||
|
||||
previousMissingDataColumnsCount = missingDataColumnsCount
|
||||
|
||||
if missingDataColumnsCount > 0 {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
"maxAllowedStall": maxAllowedStall,
|
||||
})
|
||||
|
||||
if stallCount >= maxAllowedStall {
|
||||
// It is very likely `blks` contains orphaned blocks, for which no peer has the data columns.
|
||||
// We give up and let the state machine handle the situation.
|
||||
const message = "Requesting data column sidecars - no progress, giving up"
|
||||
log.Warning(message)
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"remainingMissingColumnsCount": missingDataColumnsCount,
|
||||
"stallCount": stallCount,
|
||||
}).Debug("Requesting data column sidecars - continue")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("duration", time.Since(start)).Debug("Requesting data column sidecars - success")
|
||||
return alignedDataColumnsByRoot, nil
|
||||
}
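// Illustrative walkthrough of the stall logic above; the numbers are hypothetical.
// Suppose 10 columns are initially missing:
//   - Round 1 retrieves 4 of them: the missing count drops to 6, so stallCount is reset to 0.
//   - Rounds 2 to 6 retrieve nothing: the count stays at 6, stallCount reaches maxAllowedStall (5),
//     and the function returns an error instead of looping forever on unretrievable (e.g. orphaned) blocks.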
|
||||
|
||||
// MissingDataColumns looks at the data columns we should store for a given block regarding `custodyGroupCount`,
|
||||
// and returns the indices of the missing ones.
|
||||
func MissingDataColumns(block blocks.ROBlock, nodeID enode.ID, custodyGroupCount uint64, dataColumnStorage filesystem.DataColumnStorageSummarizer) ([]uint64, error) {
|
||||
// Blocks before Fulu have no data columns.
|
||||
if block.Version() < version.Fulu {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the blob commitments from the block.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Nothing to build if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute the expected columns.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
expectedColumns := peerInfo.CustodyColumns
|
||||
|
||||
// Get the stored columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
summary := dataColumnStorage.Summary(block.Root())
|
||||
|
||||
storedColumns := make(map[uint64]bool, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
if summary.HasIndex(i) {
|
||||
storedColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the missing columns.
|
||||
missingColumns := make([]uint64, 0, len(expectedColumns))
|
||||
for column := range expectedColumns {
|
||||
if !storedColumns[column] {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
return missingColumns, nil
|
||||
}
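// Example with hypothetical indices: if the node's custody columns for this block are {3, 17, 42, 96}
// and the storage summary already contains columns 3 and 96, MissingDataColumns returns [17 42]
// (in no particular order, since the custody set is iterated as a map).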
|
||||
|
||||
// SelectPeersToFetchDataColumnsFrom implements greedy algorithm in order to select peers to fetch data columns from.
|
||||
// https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm
|
||||
func SelectPeersToFetchDataColumnsFrom(neededDataColumns []uint64, dataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID][]uint64, error) {
|
||||
// Copy the provided needed data columns into a set that we will remove elements from.
|
||||
remainingDataColumns := make(map[uint64]bool, len(neededDataColumns))
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
remainingDataColumns[dataColumn] = true
|
||||
}
|
||||
|
||||
dataColumnsFromSelectedPeers := make(map[peer.ID][]uint64)
|
||||
|
||||
// Filter `dataColumnsByPeer` to only contain needed data columns.
|
||||
neededDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(dataColumnsByPeer))
|
||||
for pid, dataColumns := range dataColumnsByPeer {
|
||||
for dataColumn := range dataColumns {
|
||||
if remainingDataColumns[dataColumn] {
|
||||
if _, ok := neededDataColumnsByPeer[pid]; !ok {
|
||||
neededDataColumnsByPeer[pid] = make(map[uint64]bool, len(neededDataColumns))
|
||||
}
|
||||
|
||||
neededDataColumnsByPeer[pid][dataColumn] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
|
||||
for len(remainingDataColumns) > 0 {
|
||||
// Check if at least one peer remains. If not, it means that we don't have enough peers to fetch all needed data columns.
|
||||
if len(neededDataColumnsByPeer) == 0 {
|
||||
missingDataColumnsSortedSlice := sliceFromMap(remainingDataColumns, true /*sorted*/)
|
||||
return dataColumnsFromSelectedPeers, errors.Errorf("no peer to fetch the following data columns: %v", missingDataColumnsSortedSlice)
|
||||
}
|
||||
|
||||
// Select the peer that custodies the most needed data columns (greedy selection).
|
||||
var bestPeer peer.ID
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
if len(dataColumns) > len(neededDataColumnsByPeer[bestPeer]) {
|
||||
bestPeer = peer
|
||||
}
|
||||
}
|
||||
|
||||
dataColumnsSortedSlice := sliceFromMap(neededDataColumnsByPeer[bestPeer], true /*sorted*/)
|
||||
if uint64(len(dataColumnsSortedSlice)) > maxRequestDataColumnSidecars {
|
||||
dataColumnsSortedSlice = dataColumnsSortedSlice[:maxRequestDataColumnSidecars]
|
||||
}
|
||||
dataColumnsFromSelectedPeers[bestPeer] = dataColumnsSortedSlice
|
||||
|
||||
// Remove the selected peer from the list of peers.
|
||||
delete(neededDataColumnsByPeer, bestPeer)
|
||||
|
||||
// Remove the selected peer's data columns from the list of remaining data columns.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
delete(remainingDataColumns, dataColumn)
|
||||
}
|
||||
|
||||
// Remove the selected peer's data columns from the list of needed data columns by peer.
|
||||
for _, dataColumn := range dataColumnsSortedSlice {
|
||||
for peer, dataColumns := range neededDataColumnsByPeer {
|
||||
delete(dataColumns, dataColumn)
|
||||
|
||||
if len(dataColumns) == 0 {
|
||||
delete(neededDataColumnsByPeer, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsFromSelectedPeers, nil
|
||||
}
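// exampleGreedySelection is a minimal sketch showing how the greedy set-cover selection above behaves.
// The function name, peer IDs and column indices are hypothetical and only serve as an illustration.
func exampleGreedySelection() (map[peer.ID][]uint64, error) {
	needed := []uint64{1, 2, 3, 4}
	byPeer := map[peer.ID]map[uint64]bool{
		"peerA": {1: true, 2: true, 3: true}, // Covers three needed columns, so it is selected first.
		"peerB": {3: true, 4: true},
		"peerC": {4: true},
	}

	// The first pass selects "peerA" for columns [1 2 3]; the second pass selects "peerB" or "peerC"
	// (whichever still covers the most remaining columns) for column [4].
	return SelectPeersToFetchDataColumnsFrom(needed, byPeer)
}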
|
||||
|
||||
// AdmissiblePeersForDataColumns returns a map of peers that custody at least one data column listed in `neededDataColumns`.
|
||||
//
|
||||
// It returns:
|
||||
// - A map, where the key of the map is the peer, the value is the custody groups of the peer.
|
||||
// - A map, where the key of the map is the custody group, the value is a list of peers that custody the group.
|
||||
// - A slice of descriptions for non-admissible peers.
|
||||
// - An error if any.
|
||||
//
|
||||
// NOTE: distributeSamplesToPeer from the DataColumnSampler implements similar logic,
|
||||
// but with only one column queried in each request.
|
||||
func AdmissiblePeersForDataColumns(
|
||||
peers []peer.ID,
|
||||
neededDataColumns []uint64,
|
||||
p2p p2p.P2P,
|
||||
) (map[peer.ID]map[uint64]bool, map[uint64][]peer.ID, []string, error) {
|
||||
peerCount := len(peers)
|
||||
neededDataColumnsCount := uint64(len(neededDataColumns))
|
||||
|
||||
// Create the description slice for non-admissible peers.
|
||||
descriptions := make([]string, 0, peerCount)
|
||||
|
||||
// Compute custody columns for each peer.
|
||||
dataColumnsByPeer, err := custodyColumnsFromPeers(p2p, peers)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "custody columns from peers")
|
||||
}
|
||||
|
||||
// Filter peers which custody at least one needed data column.
|
||||
dataColumnsByAdmissiblePeer, localDescriptions := filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns, dataColumnsByPeer)
|
||||
descriptions = append(descriptions, localDescriptions...)
|
||||
|
||||
// Compute a map from needed data columns to their peers.
|
||||
admissiblePeersByDataColumn := make(map[uint64][]peer.ID, neededDataColumnsCount)
|
||||
for peerId, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
for _, dataColumn := range neededDataColumns {
|
||||
if peerDataColumns[dataColumn] {
|
||||
admissiblePeersByDataColumn[dataColumn] = append(admissiblePeersByDataColumn[dataColumn], peerId)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, descriptions, nil
|
||||
}
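// Example of the returned shapes, with hypothetical peers and indices: for neededDataColumns = [5, 6],
// a peerA custodying column 5 and a peerB custodying neither, the first map contains peerA with its
// full custody set, the second map contains 5 -> [peerA] (column 6 is absent because no admissible
// peer custodies it), and the descriptions explain why peerB was filtered out.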
|
||||
|
||||
// custodyColumnsFromPeers computes all the custody columns indexed by peer.
|
||||
func custodyColumnsFromPeers(p2pIface p2p.P2P, peers []peer.ID) (map[peer.ID]map[uint64]bool, error) {
|
||||
peerCount := len(peers)
|
||||
|
||||
custodyColumnsByPeer := make(map[peer.ID]map[uint64]bool, peerCount)
|
||||
for _, peer := range peers {
|
||||
// Get the node ID from the peer ID.
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "convert peer ID to node ID")
|
||||
}
|
||||
|
||||
// Get the custody group count of the peer.
|
||||
custodyGroupCount := p2pIface.CustodyGroupCountFromPeer(peer)
|
||||
|
||||
// Get peerdas info of the peer.
|
||||
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peerdas info")
|
||||
}
|
||||
|
||||
custodyColumnsByPeer[peer] = dasInfo.CustodyColumns
|
||||
}
|
||||
|
||||
return custodyColumnsByPeer, nil
|
||||
}
|
||||
|
||||
// `filterPeerWhichCustodyAtLeastOneDataColumn` filters peers which custody at least one data column
|
||||
// specified in `neededDataColumns`. It also returns a list of descriptions for non-admissible peers.
|
||||
func filterPeerWhichCustodyAtLeastOneDataColumn(neededDataColumns []uint64, inputDataColumnsByPeer map[peer.ID]map[uint64]bool) (map[peer.ID]map[uint64]bool, []string) {
|
||||
// Retrieve the total number of columns, used to pretty-print peer custody columns in the descriptions below.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
outputDataColumnsByPeer := make(map[peer.ID]map[uint64]bool, len(inputDataColumnsByPeer))
|
||||
descriptions := make([]string, 0)
|
||||
|
||||
outerLoop:
|
||||
for peer, peerCustodyDataColumns := range inputDataColumnsByPeer {
|
||||
for _, neededDataColumn := range neededDataColumns {
|
||||
if peerCustodyDataColumns[neededDataColumn] {
|
||||
outputDataColumnsByPeer[peer] = peerCustodyDataColumns
|
||||
|
||||
continue outerLoop
|
||||
}
|
||||
}
|
||||
|
||||
peerCustodyColumnsCount := uint64(len(peerCustodyDataColumns))
|
||||
var peerCustodyColumnsLog interface{} = "all"
|
||||
|
||||
if peerCustodyColumnsCount < numberOfColumns {
|
||||
peerCustodyColumnsLog = sliceFromMap(peerCustodyDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
description := fmt.Sprintf("peer %s: does not custody any needed column, custody columns: %v", peer, peerCustodyColumnsLog)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
|
||||
return outputDataColumnsByPeer, descriptions
|
||||
}
|
||||
|
||||
// buildDataColumnByRangeRequests builds an optimized slice of data column by range requests:
|
||||
// 1. It will never request a data column that is already stored in the DB if there is no "hole" in `roBlocks` other than missed slots.
|
||||
// 2. It will minimize the number of requests.
|
||||
// It expects blocks to be sorted by slot.
|
||||
func buildDataColumnByRangeRequests(roBlocks []blocks.ROBlock, missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool, batchSize int) ([]*eth.DataColumnSidecarsByRangeRequest, error) {
|
||||
batchSizeSlot := primitives.Slot(batchSize)
|
||||
|
||||
// Return early if there are no blocks to process.
|
||||
if len(roBlocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// It's safe to get the first item of the slice since we've already checked that it's not empty.
|
||||
firstROBlock, lastROBlock := roBlocks[0], roBlocks[len(roBlocks)-1]
|
||||
firstBlockSlot, lastBlockSlot := firstROBlock.Block().Slot(), lastROBlock.Block().Slot()
|
||||
firstBlockRoot := firstROBlock.Root()
|
||||
|
||||
previousMissingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[firstBlockRoot]))
|
||||
|
||||
if missing, ok := missingColumnsByRoot[firstBlockRoot]; ok {
|
||||
for key, value := range missing {
|
||||
previousMissingDataColumns[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
previousBlockSlot, previousStartBlockSlot := firstBlockSlot, firstBlockSlot
|
||||
|
||||
result := make([]*eth.DataColumnSidecarsByRangeRequest, 0, 1)
|
||||
for index := 1; index < len(roBlocks); index++ {
|
||||
roBlock := roBlocks[index]
|
||||
|
||||
// Extract the block from the RO-block.
|
||||
block := roBlock.Block()
|
||||
|
||||
// Extract the slot from the block.
|
||||
blockRoot, blockSlot := roBlock.Root(), block.Slot()
|
||||
|
||||
if blockSlot <= previousBlockSlot {
|
||||
return nil, errors.Errorf("blocks are not strictly sorted by slot. Previous block slot: %d, current block slot: %d", previousBlockSlot, blockSlot)
|
||||
}
|
||||
|
||||
// Extract KZG commitments count from the current block body
|
||||
blockKzgCommitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the count of KZG commitments.
|
||||
blockKzgCommitmentCount := len(blockKzgCommitments)
|
||||
|
||||
// Skip blocks without commitments.
|
||||
if blockKzgCommitmentCount == 0 {
|
||||
previousBlockSlot = blockSlot
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the missing data columns for the current block.
|
||||
missingDataColumns := make(map[uint64]bool, len(missingColumnsByRoot[blockRoot]))
|
||||
for key, value := range missingColumnsByRoot[blockRoot] {
|
||||
missingDataColumns[key] = value
|
||||
}
|
||||
|
||||
// Compute if the missing data columns differ.
|
||||
missingDataColumnsDiffer := uint64MapDiffer(previousMissingDataColumns, missingDataColumns)
|
||||
|
||||
// Compute if the batch size is reached.
|
||||
batchSizeReached := blockSlot-previousStartBlockSlot >= batchSizeSlot
|
||||
|
||||
if missingDataColumnsDiffer || batchSizeReached {
|
||||
// Append the slice to the result.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(blockSlot - previousStartBlockSlot),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, request)
|
||||
|
||||
previousStartBlockSlot, previousMissingDataColumns = blockSlot, missingDataColumns
|
||||
}
|
||||
|
||||
previousBlockSlot = blockSlot
|
||||
}
|
||||
|
||||
lastRequest := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: previousStartBlockSlot,
|
||||
Count: uint64(lastBlockSlot - previousStartBlockSlot + 1),
|
||||
Columns: sliceFromMap(previousMissingDataColumns, true /*sorted*/),
|
||||
}
|
||||
|
||||
result = append(result, lastRequest)
|
||||
|
||||
return result, nil
|
||||
}
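// Worked example with hypothetical slots and columns: blocks at slots 100, 101 and 103 (slot 102 is a
// missed slot), missing columns {7, 21} for slots 100 and 101 and {3} for slot 103, and a large batch size.
// The missing set only changes at slot 103, so two requests are built:
//   - {StartSlot: 100, Count: 3, Columns: [7 21]}, covering slots 100-102, and
//   - {StartSlot: 103, Count: 1, Columns: [3]}.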
|
||||
|
||||
// fetchDataColumnsFromPeers requests data columns by range from relevant peers.
|
||||
func fetchDataColumnsFromPeers(
|
||||
ctx context.Context,
|
||||
clock *startup.Clock,
|
||||
p2p p2p.P2P,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
ctxMap ContextByteVersions,
|
||||
targetRequest *eth.DataColumnSidecarsByRangeRequest,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
// Filter out requests with no data columns.
|
||||
if len(targetRequest.Columns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get all admissible peers with the data columns they custody.
|
||||
dataColumnsByAdmissiblePeer, err := waitForPeersForDataColumns(p2p, rateLimiter, targetRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "wait for peers for data columns")
|
||||
}
|
||||
|
||||
// Select the peers that will be requested.
|
||||
dataColumnsToFetchByPeer, err := SelectPeersToFetchDataColumnsFrom(targetRequest.Columns, dataColumnsByAdmissiblePeer)
|
||||
if err != nil {
|
||||
// This should never happen.
|
||||
return nil, errors.Wrap(err, "select peers to fetch data columns from")
|
||||
}
|
||||
|
||||
var roDataColumns []blocks.RODataColumn
|
||||
for peer, columnsToFetch := range dataColumnsToFetchByPeer {
|
||||
// Build the request.
|
||||
request := ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: targetRequest.StartSlot,
|
||||
Count: targetRequest.Count,
|
||||
Columns: columnsToFetch,
|
||||
}
|
||||
|
||||
peerRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p2p, peer, ctxMap, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "send data column sidecars by range request")
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, peerRoDataColumns...)
|
||||
}
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// waitForPeersForDataColumns returns a map, where the key of the map is the peer, the value is the custody columns of the peer.
|
||||
// It uses only peers that:
|
||||
// - are synced up to `lastSlot`, and
|
||||
// - have enough bandwidth to serve `blockCount` blocks.
|
||||
// It waits until at least one peer per data column is available.
|
||||
func waitForPeersForDataColumns(p2p p2p.P2P, rateLimiter *leakybucket.Collector, request *eth.DataColumnSidecarsByRangeRequest) (map[peer.ID]map[uint64]bool, error) {
|
||||
const delay = 5 * time.Second
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Build nice log fields.
|
||||
lastSlot := request.StartSlot.Add(request.Count).Sub(1)
|
||||
|
||||
var neededDataColumnsLog interface{} = "all"
|
||||
neededDataColumnCount := uint64(len(request.Columns))
|
||||
if neededDataColumnCount < numberOfColumns {
|
||||
neededDataColumnsLog = request.Columns
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"start": request.StartSlot,
|
||||
"targetSlot": lastSlot,
|
||||
"neededDataColumns": neededDataColumnsLog,
|
||||
})
|
||||
|
||||
// Keep only peers with head epoch greater than or equal to the epoch corresponding to the target slot, and
|
||||
// keep only peers with enough bandwidth.
|
||||
filteredPeers, descriptions, err := filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter eers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err := AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers := computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
|
||||
// Wait if no suitable peers are available.
|
||||
for len(dataColumnsWithoutPeers) > 0 {
|
||||
// Build nice log fields.
|
||||
var dataColumnsWithoutPeersLog interface{} = "all"
|
||||
dataColumnsWithoutPeersCount := uint64(len(dataColumnsWithoutPeers))
|
||||
if dataColumnsWithoutPeersCount < numberOfColumns {
|
||||
dataColumnsWithoutPeersLog = sliceFromMap(dataColumnsWithoutPeers, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithField("columnsWithoutPeer", dataColumnsWithoutPeersLog).Warning("Fetch data columns from peers - no available peers, retrying later")
|
||||
for _, description := range descriptions {
|
||||
log.Debug(description)
|
||||
}
|
||||
|
||||
for pid, peerDataColumns := range dataColumnsByAdmissiblePeer {
|
||||
var peerDataColumnsLog interface{} = "all"
|
||||
peerDataColumnsCount := uint64(len(peerDataColumns))
|
||||
if peerDataColumnsCount < numberOfColumns {
|
||||
peerDataColumnsLog = sliceFromMap(peerDataColumns, true /*sorted*/)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"peerDataColumns": peerDataColumnsLog,
|
||||
}).Debug("Peer data columns")
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
|
||||
// Filter for peers with head epoch greater than or equal to our target epoch for ByRange requests.
|
||||
filteredPeers, descriptions, err = filterPeersByTargetSlotAndBandwidth(p2p, rateLimiter, lastSlot, request.Count)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter peers by target slot and bandwidth")
|
||||
}
|
||||
|
||||
// Get the peers that are admissible for the data columns.
|
||||
dataColumnsByAdmissiblePeer, admissiblePeersByDataColumn, moreDescriptions, err = AdmissiblePeersForDataColumns(filteredPeers, request.Columns, p2p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "admissible peers for data columns")
|
||||
}
|
||||
|
||||
descriptions = append(descriptions, moreDescriptions...)
|
||||
|
||||
// Compute data columns without any peer.
|
||||
dataColumnsWithoutPeers = computeDataColumnsWithoutPeers(request.Columns, admissiblePeersByDataColumn)
|
||||
}
|
||||
|
||||
return dataColumnsByAdmissiblePeer, nil
|
||||
}
|
||||
|
||||
// Filter peers to ensure they are synced to the target slot and have sufficient bandwidth to serve the request.
|
||||
func filterPeersByTargetSlotAndBandwidth(p2p p2p.P2P, rateLimiter *leakybucket.Collector, lastSlot primitives.Slot, blockCount uint64) ([]peer.ID, []string, error) {
|
||||
peers := p2p.Peers().Connected()
|
||||
|
||||
slotPeers, descriptions, err := filterPeersByTargetSlot(p2p, peers, lastSlot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "peers with slot and data columns")
|
||||
}
|
||||
|
||||
// Filter for peers with sufficient bandwidth to serve the request.
|
||||
slotAndBandwidthPeers := hasSufficientBandwidth(rateLimiter, slotPeers, blockCount)
|
||||
|
||||
// Add debugging logs for the filtered peers.
|
||||
peerWithSufficientBandwidthMap := make(map[peer.ID]bool, len(peers))
|
||||
for _, peer := range slotAndBandwidthPeers {
|
||||
peerWithSufficientBandwidthMap[peer] = true
|
||||
}
|
||||
|
||||
for _, peer := range slotPeers {
|
||||
if !peerWithSufficientBandwidthMap[peer] {
|
||||
description := fmt.Sprintf("peer %s: does not have sufficient bandwidth", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
}
|
||||
}
|
||||
return slotAndBandwidthPeers, descriptions, nil
|
||||
}
|
||||
|
||||
func hasSufficientBandwidth(rateLimiter *leakybucket.Collector, peers []peer.ID, count uint64) []peer.ID {
|
||||
var filteredPeers []peer.ID
|
||||
|
||||
for _, p := range peers {
|
||||
if uint64(rateLimiter.Remaining(p.String())) < count {
|
||||
continue
|
||||
}
|
||||
copiedP := p
|
||||
filteredPeers = append(filteredPeers, copiedP)
|
||||
}
|
||||
return filteredPeers
|
||||
}
|
||||
|
||||
func computeDataColumnsWithoutPeers(neededColumns []uint64, peersByColumn map[uint64][]peer.ID) map[uint64]bool {
|
||||
result := make(map[uint64]bool)
|
||||
for _, column := range neededColumns {
|
||||
if _, ok := peersByColumn[column]; !ok {
|
||||
result[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Filter peers with head epoch lower than our target epoch for ByRange requests.
|
||||
func filterPeersByTargetSlot(p2p p2p.P2P, peers []peer.ID, targetSlot primitives.Slot) ([]peer.ID, []string, error) {
|
||||
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||
descriptions := make([]string, 0, len(peers))
|
||||
// Compute the target epoch from the target slot.
|
||||
targetEpoch := slots.ToEpoch(targetSlot)
|
||||
|
||||
for _, peer := range peers {
|
||||
peerChainState, err := p2p.Peers().ChainState(peer)
|
||||
if err != nil {
|
||||
description := fmt.Sprintf("peer %s: error: %s", peer, err)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
if peerChainState == nil {
|
||||
description := fmt.Sprintf("peer %s: chain state is nil", peer)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
peerHeadEpoch := slots.ToEpoch(peerChainState.HeadSlot)
|
||||
|
||||
if peerHeadEpoch < targetEpoch {
|
||||
description := fmt.Sprintf("peer %s: peer head epoch %d < our target epoch %d", peer, peerHeadEpoch, targetEpoch)
|
||||
descriptions = append(descriptions, description)
|
||||
continue
|
||||
}
|
||||
|
||||
filteredPeers = append(filteredPeers, peer)
|
||||
}
|
||||
|
||||
return filteredPeers, descriptions, nil
|
||||
}
|
||||
|
||||
// itemsCount returns the total number of missing columns across all block roots.
|
||||
func itemsCount(missingColumnsByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
count := 0
|
||||
for _, columns := range missingColumnsByRoot {
|
||||
count += len(columns)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// uint64MapDiffer returns true if the two maps differ.
|
||||
func uint64MapDiffer(left, right map[uint64]bool) bool {
|
||||
if len(left) != len(right) {
|
||||
return true
|
||||
}
|
||||
|
||||
for k := range left {
|
||||
if !right[k] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func DataColumnSidecarsByRangeRequest(columns []uint64, start, end primitives.Slot) *eth.DataColumnSidecarsByRangeRequest {
|
||||
sort.Slice(columns, func(i, j int) bool {
|
||||
return columns[i] < columns[j]
|
||||
})
|
||||
return ð.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: start,
|
||||
Count: uint64(end-start) + 1,
|
||||
Columns: columns,
|
||||
}
|
||||
}
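// Example (illustrative only): DataColumnSidecarsByRangeRequest([]uint64{9, 2}, 32, 47) returns a request
// with StartSlot 32, Count 16 and Columns [2 9]. Note that the provided slice is sorted in place.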
|
||||
|
||||
func RequestDataColumnsByRoot(ctx context.Context, ctxMap ContextByteVersions, pid peer.ID, reqs types.DataColumnsByRootIdentifiers) ([]blocks.RODataColumn, error) {
|
||||
return nil, nil
|
||||
}
|
||||
beacon-chain/sync/data_columns_reconstruct.go (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const broadCastMissingDataColumnsTimeIntoSlot = 3 * time.Second
|
||||
|
||||
func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColumn blocks.VerifiedRODataColumn) error {
|
||||
blockRoot := verifiedRODataColumn.BlockRoot()
|
||||
proposerIndex := verifiedRODataColumn.ProposerIndex()
|
||||
slot := verifiedRODataColumn.Slot()
|
||||
|
||||
// Get the columns we store.
|
||||
storedDataColumns := s.cfg.dataColumnStorage.Summary(blockRoot)
|
||||
storedColumnsCount := storedDataColumns.Count()
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If reconstruction is not possible or if all columns are already stored, exit early.
|
||||
if storedColumnsCount < peerdas.MinimumColumnsCountToReconstruct() || storedColumnsCount == numberOfColumns {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock to prevent concurrent reconstruction.
|
||||
if !s.dataColumsnReconstructionLock.TryLock() {
|
||||
// If the mutex is already locked, it means that another goroutine is already reconstructing the data columns.
|
||||
// In this case, no need to reconstruct again.
|
||||
// TODO: Implement the (pathological) case where we want to reconstruct data columns corresponding to different blocks at the same time.
|
||||
// This should be a rare case and we can ignore it for now, but it needs to be addressed in the future.
|
||||
return nil
|
||||
}
|
||||
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Retrieve the node ID.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
// Prevent the custody group count from changing during the rest of the function.
|
||||
s.cfg.custodyInfo.Mut.RLock()
|
||||
defer s.cfg.custodyInfo.Mut.RUnlock()
|
||||
|
||||
// Compute the custody group count.
|
||||
custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve our local node info.
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Load all available data column sidecars to minimize reconstruction time.
|
||||
verifiedSidecars, err := s.cfg.dataColumnStorage.Get(blockRoot, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get data column sidecars")
|
||||
}
|
||||
|
||||
// Recover cells and proofs.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(verifiedSidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "reconstruct data column sidecars")
|
||||
}
|
||||
|
||||
// Filter reconstructed sidecars to save.
|
||||
custodyColumns := localNodeInfo.CustodyColumns
|
||||
toSaveSidecars := make([]blocks.VerifiedRODataColumn, 0, len(custodyColumns))
|
||||
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if custodyColumns[sidecar.Index] {
|
||||
toSaveSidecars = append(toSaveSidecars, sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the data column sidecars in the database.
|
||||
// Note: We do not call `receiveDataColumn`, because it would cause
|
||||
// incoming data columns received via gossip to be ignored while the reconstructed data columns have not been broadcast yet.
|
||||
if err := s.cfg.dataColumnStorage.Save(toSaveSidecars); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
// Schedule the broadcast.
|
||||
if err := s.scheduleReconstructedDataColumnsBroadcast(ctx, blockRoot, proposerIndex, slot); err != nil {
|
||||
return errors.Wrap(err, "schedule reconstructed data columns broadcast")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"slot": slot,
|
||||
"fromColumnsCount": storedColumnsCount,
|
||||
}).Debug("Data columns reconstructed and saved")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) scheduleReconstructedDataColumnsBroadcast(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%x", root),
|
||||
"slot": slot,
|
||||
})
|
||||
|
||||
// Get the time corresponding to the start of the slot.
|
||||
genesisTime := uint64(s.cfg.chain.GenesisTime().Unix())
|
||||
slotStartTime, err := slots.ToTime(genesisTime, slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "to time")
|
||||
}
|
||||
|
||||
// Compute when to broadcast the missing data columns.
|
||||
broadcastTime := slotStartTime.Add(broadCastMissingDataColumnsTimeIntoSlot)
|
||||
|
||||
// Compute the waiting time. This could be negative. In such a case, broadcast immediately.
|
||||
waitingTime := time.Until(broadcastTime)
|
||||
|
||||
time.AfterFunc(waitingTime, func() {
|
||||
s.dataColumsnReconstructionLock.Lock()
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Get the node ID.
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
// Prevent the custody group count from changing during the rest of the function.
|
||||
s.cfg.custodyInfo.Mut.RLock()
|
||||
defer s.cfg.custodyInfo.Mut.RUnlock()
|
||||
|
||||
// Get the custody group count.
|
||||
custodyGroupCount := s.cfg.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve the local node info.
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Peer info")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the data columns we actually store.
|
||||
summary := s.cfg.dataColumnStorage.Summary(root)
|
||||
|
||||
// Compute the missing data columns (data columns we should custody but have not received via gossip).
|
||||
missingColumns := make([]uint64, 0, len(localNodeInfo.CustodyColumns))
|
||||
for column := range localNodeInfo.CustodyColumns {
|
||||
if !s.hasSeenDataColumnIndex(slot, proposerIndex, column) {
|
||||
missingColumns = append(missingColumns, column)
|
||||
}
|
||||
}
|
||||
|
||||
// Exit early if there are no missing data columns.
|
||||
// This is the happy path.
|
||||
if len(missingColumns) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, column := range missingColumns {
|
||||
if !summary.HasIndex(column) {
|
||||
// This column was neither received nor reconstructed. This should not happen.
|
||||
log.WithField("column", column).Error("Data column not received nor reconstructed")
|
||||
}
|
||||
}
|
||||
|
||||
// Get the data columns that were not received but were reconstructed.
|
||||
verifiedRODataColumnSidecars, err := s.cfg.dataColumnStorage.Get(root, missingColumns)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("get data column sidecars")
|
||||
return
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumnSidecars {
|
||||
// Compute the subnet for this column.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)
|
||||
|
||||
// Broadcast the missing data column.
|
||||
if err := s.cfg.p2p.BroadcastDataColumn(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
|
||||
log.WithError(err).Error("Broadcast data column")
|
||||
}
|
||||
|
||||
// Now, we can set the data column as seen.
|
||||
s.setSeenDataColumnIndex(slot, proposerIndex, verifiedRODataColumn.Index)
|
||||
}
|
||||
|
||||
// Sort the missing data columns.
|
||||
slices.Sort[[]uint64](missingColumns)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"timeIntoSlot": broadCastMissingDataColumnsTimeIntoSlot,
|
||||
"columns": missingColumns,
|
||||
}).Debug("Start broadcasting not seen via gossip but reconstructed data columns")
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
beacon-chain/sync/data_columns_sampling.go (new file, 627 lines)
@@ -0,0 +1,627 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const PeerRefreshInterval = 1 * time.Minute
|
||||
|
||||
type roundSummary struct {
|
||||
RequestedColumns []uint64
|
||||
MissingColumns map[uint64]bool
|
||||
}
|
||||
|
||||
// DataColumnSampler defines the interface for sampling data columns from peers for a requested block root and sample count.
|
||||
type DataColumnSampler interface {
|
||||
// Run starts the data column sampling service.
|
||||
Run(ctx context.Context)
|
||||
}
|
||||
|
||||
var _ DataColumnSampler = (*dataColumnSampler1D)(nil)
|
||||
|
||||
// dataColumnSampler1D implements the DataColumnSampler interface for PeerDAS 1D.
|
||||
type dataColumnSampler1D struct {
|
||||
sync.RWMutex
|
||||
|
||||
p2p p2p.P2P
|
||||
clock *startup.Clock
|
||||
ctxMap ContextByteVersions
|
||||
stateNotifier statefeed.Notifier
|
||||
|
||||
// nonCustodyGroups is a set of groups that are not custodied by the node.
|
||||
nonCustodyGroups map[uint64]bool
|
||||
|
||||
// groupsByPeer maps a peer to the groups it is responsible for custody.
|
||||
groupsByPeer map[peer.ID]map[uint64]bool
|
||||
|
||||
// peersByCustodyGroup maps a group to the peer responsible for custody.
|
||||
peersByCustodyGroup map[uint64]map[peer.ID]bool
|
||||
|
||||
// columnVerifier verifies a column according to the specified requirements.
|
||||
columnVerifier verification.NewDataColumnsVerifier
|
||||
|
||||
// custodyInfo contains the custody information of the node.
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// newDataColumnSampler1D creates a new 1D data column sampler.
|
||||
func newDataColumnSampler1D(
|
||||
p2p p2p.P2P,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
stateNotifier statefeed.Notifier,
|
||||
colVerifier verification.NewDataColumnsVerifier,
|
||||
custodyInfo *peerdas.CustodyInfo,
|
||||
) *dataColumnSampler1D {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
peersByCustodyGroup := make(map[uint64]map[peer.ID]bool, numberOfCustodyGroups)
|
||||
|
||||
for i := range numberOfCustodyGroups {
|
||||
peersByCustodyGroup[i] = make(map[peer.ID]bool)
|
||||
}
|
||||
|
||||
return &dataColumnSampler1D{
|
||||
p2p: p2p,
|
||||
clock: clock,
|
||||
ctxMap: ctxMap,
|
||||
stateNotifier: stateNotifier,
|
||||
groupsByPeer: make(map[peer.ID]map[uint64]bool),
|
||||
peersByCustodyGroup: peersByCustodyGroup,
|
||||
columnVerifier: colVerifier,
|
||||
custodyInfo: custodyInfo,
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements DataColumnSampler.
|
||||
func (d *dataColumnSampler1D) Run(ctx context.Context) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
|
||||
// Get the node ID.
|
||||
nodeID := d.p2p.NodeID()
|
||||
|
||||
// Check whether we need to run sampling; if not, return directly.
|
||||
// TODO: Rework this part to take into account dynamic custody group count with peer sampling.
|
||||
custodyGroupCount := d.custodyInfo.ActualGroupCount()
|
||||
|
||||
// Retrieve our local node info.
|
||||
localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("peer info")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: custody group count != data column group count
|
||||
if custodyGroupCount >= peerdas.MinimumColumnsCountToReconstruct() {
|
||||
log.WithFields(logrus.Fields{
|
||||
"custodyGroupCount": custodyGroupCount,
|
||||
"totalGroups": numberOfCustodyGroups,
|
||||
}).Debug("The node custodies at least the half of the groups, no need to sample")
|
||||
return
|
||||
}
|
||||
|
||||
// Initialize non custody groups.
|
||||
d.nonCustodyGroups = make(map[uint64]bool)
|
||||
for i := range numberOfCustodyGroups {
|
||||
if !localNodeInfo.CustodyGroups[i] {
|
||||
d.nonCustodyGroups[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize peer info first.
|
||||
d.refreshPeerInfo()
|
||||
|
||||
// periodically refresh peer info to keep peer <-> column mapping up to date.
|
||||
async.RunEvery(ctx, PeerRefreshInterval, d.refreshPeerInfo)
|
||||
|
||||
// start the sampling loop.
|
||||
d.samplingRoutine(ctx)
|
||||
}
|
||||
|
||||
func (d *dataColumnSampler1D) samplingRoutine(ctx context.Context) {
|
||||
stateCh := make(chan *feed.Event, 1)
|
||||
stateSub := d.stateNotifier.StateFeed().Subscribe(stateCh)
|
||||
defer stateSub.Unsubscribe()
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt := <-stateCh:
|
||||
d.handleStateNotification(ctx, evt)
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("DataColumnSampler1D subscription to state feed failed")
|
||||
case <-ctx.Done():
|
||||
log.Debug("Context canceled, exiting data column sampling loop.")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Refresh peer information.
|
||||
func (d *dataColumnSampler1D) refreshPeerInfo() {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
|
||||
activePeers := d.p2p.Peers().Active()
|
||||
d.prunePeerInfo(activePeers)
|
||||
|
||||
for _, pid := range activePeers {
|
||||
// Retrieve the custody group count of the peer.
|
||||
retrievedCustodyGroupCount := d.p2p.CustodyGroupCountFromPeer(pid)
|
||||
|
||||
// Look up the custody groups we have stored for this peer.
|
||||
storedGroups, ok := d.groupsByPeer[pid]
|
||||
storedGroupsCount := uint64(len(storedGroups))
|
||||
|
||||
if ok && storedGroupsCount == retrievedCustodyGroupCount {
|
||||
// No change for this peer.
|
||||
continue
|
||||
}
|
||||
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to convert peer ID to node ID")
|
||||
continue
|
||||
}
|
||||
|
||||
// Retrieve the peer info.
|
||||
peerInfo, _, err := peerdas.Info(nodeID, retrievedCustodyGroupCount)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid.String()).Error("Failed to determine peer info")
|
||||
// Skip this peer: its peerdas info could not be determined.
continue
}
|
||||
|
||||
d.groupsByPeer[pid] = peerInfo.CustodyGroups
|
||||
for group := range peerInfo.CustodyGroups {
|
||||
d.peersByCustodyGroup[group][pid] = true
|
||||
}
|
||||
}
|
||||
|
||||
groupsWithoutPeers := make([]uint64, 0)
|
||||
for group, peers := range d.peersByCustodyGroup {
|
||||
if len(peers) == 0 {
|
||||
groupsWithoutPeers = append(groupsWithoutPeers, group)
|
||||
}
|
||||
}
|
||||
|
||||
if len(groupsWithoutPeers) > 0 {
|
||||
slices.Sort[[]uint64](groupsWithoutPeers)
|
||||
log.WithField("groups", groupsWithoutPeers).Warn("Some groups have no peers responsible for custody")
|
||||
}
|
||||
}
|
||||
|
||||
// prunePeerInfo prunes inactive peers from peersByCustodyGroup and groupsByPeer.
|
||||
// This should not be called outside of refreshPeerInfo without being locked.
|
||||
func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) {
|
||||
active := make(map[peer.ID]bool)
|
||||
for _, pid := range activePeers {
|
||||
active[pid] = true
|
||||
}
|
||||
|
||||
for pid := range d.groupsByPeer {
|
||||
if !active[pid] {
|
||||
d.prunePeer(pid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// prunePeer removes a peer from stored peer info map, it should be called with lock held.
|
||||
func (d *dataColumnSampler1D) prunePeer(pid peer.ID) {
|
||||
delete(d.groupsByPeer, pid)
|
||||
for _, peers := range d.peersByCustodyGroup {
|
||||
delete(peers, pid)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event *feed.Event) {
|
||||
if event.Type != statefeed.BlockProcessed {
|
||||
return
|
||||
}
|
||||
|
||||
data, ok := event.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok {
|
||||
log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
|
||||
return
|
||||
}
|
||||
|
||||
if !data.Verified {
|
||||
// We only process blocks that have been verified
|
||||
log.Error("Data is not verified")
|
||||
return
|
||||
}
|
||||
|
||||
if data.SignedBlock.Version() < version.Fulu {
|
||||
log.Debug("Pre Fulu block, skipping data column sampling")
|
||||
return
|
||||
}
|
||||
|
||||
// Determine if we need to sample data columns for this block.
|
||||
beaconConfig := params.BeaconConfig()
|
||||
samplesPerSlots := beaconConfig.SamplesPerSlot
|
||||
halfOfCustodyGroups := beaconConfig.NumberOfCustodyGroups / 2
|
||||
nonCustodyGroupsCount := uint64(len(d.nonCustodyGroups))
|
||||
|
||||
if nonCustodyGroupsCount <= halfOfCustodyGroups {
|
||||
// Nothing to sample.
|
||||
return
|
||||
}
|
||||
|
||||
// Get the commitments for this block.
|
||||
commitments, err := data.SignedBlock.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
return
|
||||
}
|
||||
|
||||
// Skip if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
log.Debug("No commitments in block, skipping data column sampling")
|
||||
return
|
||||
}
|
||||
|
||||
// Randomize columns for sample selection.
|
||||
randomizedColumns, err := randomizeColumns(d.nonCustodyGroups)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to randomize columns")
|
||||
return
|
||||
}
|
||||
|
||||
samplesCount := min(samplesPerSlots, nonCustodyGroupsCount-halfOfCustodyGroups)
|
||||
|
||||
// TODO: Use the first output of `incrementalDAS` as input of the fork choice rule.
|
||||
_, _, err = d.incrementalDAS(ctx, data, randomizedColumns, samplesCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to run incremental DAS")
|
||||
}
|
||||
}
|
||||
|
||||
// incrementalDAS samples data columns from active peers using incremental DAS.
|
||||
// https://ethresear.ch/t/lossydas-lossy-incremental-and-diagonal-sampling-for-data-availability/18963#incrementaldas-dynamically-increase-the-sample-size-10
|
||||
// According to https://github.com/ethereum/consensus-specs/issues/3825, we're going to select query samples exclusively from the non custody columns.
|
||||
func (d *dataColumnSampler1D) incrementalDAS(
|
||||
ctx context.Context,
|
||||
blockProcessedData *statefeed.BlockProcessedData,
|
||||
columns []uint64,
|
||||
sampleCount uint64,
|
||||
) (bool, []roundSummary, error) {
|
||||
allowedFailures := uint64(0)
|
||||
firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
|
||||
roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary.
|
||||
blockRoot := blockProcessedData.BlockRoot
|
||||
columnsCount := uint64(len(columns))
|
||||
|
||||
start := time.Now()
|
||||
|
||||
for round := 1; ; /*No exit condition */ round++ {
|
||||
if extendedSampleCount > columnsCount {
|
||||
// We already tried to sample all possible columns, this is the unhappy path.
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"round": round - 1,
|
||||
}).Warning("Some columns are still missing after trying to sample all possible columns")
|
||||
return false, roundSummaries, nil
|
||||
}
|
||||
|
||||
// Get the columns to sample for this round.
|
||||
columnsToSample := columns[firstColumnToSample:extendedSampleCount]
|
||||
columnsToSampleCount := extendedSampleCount - firstColumnToSample
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"columns": columnsToSample,
|
||||
"round": round,
|
||||
}).Debug("Start data columns sampling")
|
||||
|
||||
// Sample data columns from peers in parallel.
|
||||
retrievedSamples, err := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample)
|
||||
if err != nil {
|
||||
return false, nil, errors.Wrap(err, "sample data columns")
|
||||
}
|
||||
|
||||
missingSamples := make(map[uint64]bool)
|
||||
for _, column := range columnsToSample {
|
||||
if !retrievedSamples[column] {
|
||||
missingSamples[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
roundSummaries = append(roundSummaries, roundSummary{
|
||||
RequestedColumns: columnsToSample,
|
||||
MissingColumns: missingSamples,
|
||||
})
|
||||
|
||||
retrievedSampleCount := uint64(len(retrievedSamples))
|
||||
if retrievedSampleCount == columnsToSampleCount {
|
||||
// All columns were correctly sampled, this is the happy path.
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"neededRounds": round,
|
||||
"duration": time.Since(start),
|
||||
}).Debug("All columns were successfully sampled")
|
||||
return true, roundSummaries, nil
|
||||
}
|
||||
|
||||
if retrievedSampleCount > columnsToSampleCount {
|
||||
// This should never happen.
|
||||
return false, nil, errors.New("retrieved more columns than requested")
|
||||
}
|
||||
|
||||
// There are still some missing columns, extend the sample.
|
||||
allowedFailures += columnsToSampleCount - retrievedSampleCount
|
||||
oldExtendedSampleCount := extendedSampleCount
|
||||
firstColumnToSample = extendedSampleCount
|
||||
extendedSampleCount = peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", blockRoot),
|
||||
"round": round,
|
||||
"missingColumnsCount": allowedFailures,
|
||||
"currentSampleIndex": oldExtendedSampleCount,
|
||||
"nextSampleIndex": extendedSampleCount,
|
||||
}).Debug("Some columns are still missing after sampling this round.")
|
||||
}
|
||||
}
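// Illustrative walkthrough with hypothetical numbers: with sampleCount = 8, round 1 queries the first
// ExtendedSampleCount(8, 0) columns of the shuffled list. If two of them are missing, allowedFailures
// becomes 2 and round 2 queries only the columns between indices ExtendedSampleCount(8, 0) and
// ExtendedSampleCount(8, 2). The loop ends as soon as a round is fully retrieved, or gives up once the
// extended count exceeds the number of available columns.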
|
||||
|
||||
func (d *dataColumnSampler1D) sampleDataColumns(
|
||||
ctx context.Context,
|
||||
blockProcessedData *statefeed.BlockProcessedData,
|
||||
columns []uint64,
|
||||
) (map[uint64]bool, error) {
|
||||
// Distribute samples to peers.
|
||||
peerToColumns, err := d.distributeSamplesToPeer(columns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "distribute samples to peer")
|
||||
}
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
|
||||
res := make(map[uint64]bool)
|
||||
|
||||
sampleFromPeer := func(pid peer.ID, cols map[uint64]bool) {
|
||||
defer wg.Done()
|
||||
retrieved := d.sampleDataColumnsFromPeer(ctx, pid, blockProcessedData, cols)
|
||||
|
||||
mu.Lock()
|
||||
for col := range retrieved {
|
||||
res[col] = true
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// sample from peers in parallel
|
||||
for pid, cols := range peerToColumns {
|
||||
wg.Add(1)
|
||||
go sampleFromPeer(pid, cols)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// distributeSamplesToPeer distributes samples to peers based on the columns they are responsible for.
|
||||
// Currently it randomizes peer selection for each column and does not take overall peer load balance into account. It could be improved if needed.
|
||||
func (d *dataColumnSampler1D) distributeSamplesToPeer(columns []uint64) (map[peer.ID]map[uint64]bool, error) {
|
||||
dist := make(map[peer.ID]map[uint64]bool)
|
||||
|
||||
for _, column := range columns {
|
||||
custodyGroup, err := peerdas.ComputeCustodyGroupForColumn(column)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute custody group for column")
|
||||
}
|
||||
|
||||
peers := d.peersByCustodyGroup[custodyGroup]
|
||||
if len(peers) == 0 {
|
||||
log.WithField("column", column).Warning("No peers responsible for custody of column")
|
||||
continue
|
||||
}
|
||||
|
||||
pid, err := selectRandomPeer(peers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select random peer")
|
||||
}
|
||||
|
||||
if _, ok := dist[pid]; !ok {
|
||||
dist[pid] = make(map[uint64]bool)
|
||||
}
|
||||
|
||||
dist[pid][column] = true
|
||||
}
|
||||
|
||||
return dist, nil
|
||||
}
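// Example with hypothetical indices: for columns [10, 11] mapping to custody groups g10 and g11, one
// peer is picked at random from peersByCustodyGroup[g10] for column 10 and, independently, one
// (possibly the same) peer from peersByCustodyGroup[g11] for column 11. Columns whose custody group
// currently has no peers are skipped with a warning.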
|
||||
|
||||
func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
	ctx context.Context,
	pid peer.ID,
	blockProcessedData *statefeed.BlockProcessedData,
	requestedColumns map[uint64]bool,
) map[uint64]bool {
	retrievedColumns := make(map[uint64]bool)

	cols := make([]uint64, 0, len(requestedColumns))
	for col := range requestedColumns {
		cols = append(cols, col)
	}
	req := &eth.DataColumnsByRootIdentifier{
		BlockRoot: blockProcessedData.BlockRoot[:],
		Columns:   cols,
	}

	// Send the request to the peer.
	roDataColumns, err := SendDataColumnSidecarsByRootRequest(ctx, d.clock, d.p2p, pid, d.ctxMap, &types.DataColumnsByRootIdentifiers{req})
	if err != nil {
		log.WithError(err).Error("Failed to send data column sidecar by root")
		return nil
	}

	// TODO: Once peer sampling is used, we should verify all sampled data columns in a single batch instead of looping over columns.
	for _, roDataColumn := range roDataColumns {
		if verifyColumn(roDataColumn, blockProcessedData, pid, requestedColumns, d.columnVerifier) {
			retrievedColumns[roDataColumn.Index] = true
		}
	}

	if len(retrievedColumns) == len(requestedColumns) {
		log.WithFields(logrus.Fields{
			"peerID":           pid,
			"root":             fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
			"requestedColumns": sliceFromMap(requestedColumns, true /*sorted*/),
		}).Debug("Sampled columns from peer successfully")
	} else {
		log.WithFields(logrus.Fields{
			"peerID":           pid,
			"root":             fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
			"requestedColumns": sliceFromMap(requestedColumns, true /*sorted*/),
			"retrievedColumns": sliceFromMap(retrievedColumns, true /*sorted*/),
		}).Debug("Sampled columns from peer with some errors")
	}

	return retrievedColumns
}

// randomizeColumns returns a slice containing randomly ordered columns belonging to the input `custodyGroups`.
func randomizeColumns(custodyGroups map[uint64]bool) ([]uint64, error) {
	// Compute the number of columns per group.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups

	// Compute the expected number of columns.
	groupCount := uint64(len(custodyGroups))
	expectedColumnCount := groupCount * columnsPerGroup

	// Compute the columns.
	columns := make([]uint64, 0, expectedColumnCount)
	for group := range custodyGroups {
		columnsGroup, err := peerdas.ComputeColumnsForCustodyGroup(group)
		if err != nil {
			return nil, errors.Wrap(err, "compute columns for custody group")
		}

		columns = append(columns, columnsGroup...)
	}

	actualColumnCount := len(columns)

	// Safety check.
	if uint64(actualColumnCount) != expectedColumnCount {
		return nil, errors.New("invalid number of columns, should never happen")
	}

	// Shuffle the columns.
	rand.NewGenerator().Shuffle(actualColumnCount, func(i, j int) {
		columns[i], columns[j] = columns[j], columns[i]
	})

	return columns, nil
}

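randomizeColumns expands each custody group into its columns and then shuffles the flattened slice. A minimal sketch of the same shuffle over an already-expanded column slice, using the standard library shuffler rather than prysm's crypto/rand generator:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// An already-expanded set of columns (one column per custody group in this toy example).
	columns := []uint64{0, 1, 2, 3, 4, 5, 6, 7}

	// Fisher-Yates shuffle: the same swap callback shape used by randomizeColumns above.
	rand.Shuffle(len(columns), func(i, j int) {
		columns[i], columns[j] = columns[j], columns[i]
	})

	fmt.Println(columns) // e.g. [3 0 6 1 7 5 2 4]
}
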
// sliceFromMap returns the keys of a map as a slice, sorted in ascending order if `sorted` is set to true.
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
	result := make([]uint64, 0, len(m))
	for k := range m {
		result = append(result, k)
	}

	if len(sorted) > 0 && sorted[0] {
		slices.Sort(result)
	}

	return result
}

// selectRandomPeer returns a random peer from the given set of peers.
func selectRandomPeer(peers map[peer.ID]bool) (peer.ID, error) {
	peersCount := uint64(len(peers))
	if peersCount == 0 {
		// Guard against a modulo by zero on an empty peer set.
		return peer.ID(""), errors.New("no peers to select from")
	}

	pick := rand.NewGenerator().Uint64() % peersCount

	for peer := range peers {
		if pick == 0 {
			return peer, nil
		}

		pick--
	}

	// This should never be reached.
	return peer.ID(""), errors.New("failed to select random peer")
}

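The countdown over a map range above is a standard way to pick a uniformly random element from an unordered set without materializing a slice first. A self-contained sketch of the same technique using math/rand rather than the prysm crypto/rand generator:

package main

import (
	"fmt"
	"math/rand"
)

// pickRandomKey returns a uniformly random key of a non-empty map by
// decrementing a random offset while ranging over the map.
func pickRandomKey(set map[string]bool) (string, error) {
	if len(set) == 0 {
		return "", fmt.Errorf("empty set")
	}

	pick := rand.Intn(len(set))
	for k := range set {
		if pick == 0 {
			return k, nil
		}
		pick--
	}

	// Unreachable: the loop always returns before the map is exhausted.
	return "", fmt.Errorf("failed to pick a key")
}

func main() {
	k, err := pickRandomKey(map[string]bool{"a": true, "b": true, "c": true})
	fmt.Println(k, err)
}
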
// verifyColumn verifies the retrieved column against the root, the index,
// the KZG commitment inclusion proof and the KZG proof.
func verifyColumn(
	roDataColumn blocks.RODataColumn,
	blockProcessedData *statefeed.BlockProcessedData,
	pid peer.ID,
	requestedColumns map[uint64]bool,
	newDataColumnsVerifier verification.NewDataColumnsVerifier,
) bool {
	retrievedColumn := roDataColumn.Index

	// Filter out columns that were not requested.
	if !requestedColumns[retrievedColumn] {
		columnsToSampleList := sliceFromMap(requestedColumns, true /*sorted*/)

		log.WithFields(logrus.Fields{
			"peerID":           pid,
			"requestedColumns": columnsToSampleList,
			"retrievedColumn":  retrievedColumn,
		}).Debug("Retrieved column was not requested")

		return false
	}

	roBlock, err := blocks.NewROBlock(blockProcessedData.SignedBlock)
	if err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to create ROBlock")
	}

	roDataColumns := []blocks.RODataColumn{roDataColumn}

	if err := peerdas.DataColumnsAlignWithBlock(roBlock, roDataColumns); err != nil {
		return false
	}

	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
	verifier := newDataColumnsVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)

	if err := verifier.ValidFields(); err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to verify data column")
	}

	if err := verifier.SidecarInclusionProven(); err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to prove inclusion")
	}

	if err := verifier.SidecarKzgProofVerified(); err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to verify KZG proof")
	}

	_, err = verifier.VerifiedRODataColumns()
	if err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to upgrade RODataColumns to VerifiedRODataColumns - should never happen")
	}

	return true
}

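Note that verifyColumn logs the verifier failures above but still returns true once the column passed the request and alignment checks. If a stricter policy were wanted, the checks could be folded into the result with a small helper; a sketch of that aggregation pattern (not the behavior of the code above):

package main

import (
	"errors"
	"fmt"
)

// allChecksPass runs the checks in order and returns false on the first failure.
func allChecksPass(checks ...func() error) bool {
	for i, check := range checks {
		if err := check(); err != nil {
			fmt.Printf("check %d failed: %v\n", i, err)
			return false
		}
	}
	return true
}

func main() {
	ok := allChecksPass(
		func() error { return nil },
		func() error { return errors.New("kzg proof mismatch") },
	)
	fmt.Println(ok) // false
}
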
beacon-chain/sync/data_columns_sampling_test.go (new file, 554 lines)
@@ -0,0 +1,554 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
)
|
||||
|
||||
func TestRandomizeColumns(t *testing.T) {
|
||||
const count uint64 = 128
|
||||
|
||||
// Generate groups.
|
||||
groups := make(map[uint64]bool, count)
|
||||
for i := uint64(0); i < count; i++ {
|
||||
groups[i] = true
|
||||
}
|
||||
|
||||
// Randomize columns.
|
||||
randomizedColumns, err := randomizeColumns(groups)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert back to a map.
|
||||
randomizedColumnsMap := make(map[uint64]bool, count)
|
||||
for _, column := range randomizedColumns {
|
||||
randomizedColumnsMap[column] = true
|
||||
}
|
||||
|
||||
// Check duplicates and missing columns.
|
||||
require.Equal(t, len(groups), len(randomizedColumnsMap))
|
||||
|
||||
// Check the values.
|
||||
for column := range randomizedColumnsMap {
|
||||
require.Equal(t, true, column < count)
|
||||
}
|
||||
}
|
||||
|
||||
// createAndConnectPeer creates a peer whose private key is derived from `offset`.
// The peer is added and connected to `p2pService`.
// If a `RPCDataColumnSidecarsByRootTopicV1` request is made with column index `i`,
// then the peer responds with `dataColumnSidecars[i]` unless `i` is in `columnsNotToRespond`.
// (If `i >= len(dataColumnSidecars)`, this function panics.)
|
||||
func createAndConnectPeer(
|
||||
t *testing.T,
|
||||
p2pService *p2ptest.TestP2P,
|
||||
chainService *mock.ChainService,
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar,
|
||||
custodyGroupCount uint64,
|
||||
columnsNotToRespond map[uint64]bool,
|
||||
offset int,
|
||||
) *p2ptest.TestP2P {
|
||||
// Create the private key, depending on the offset.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(offset + i)
|
||||
}
|
||||
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peer.
|
||||
peer := p2ptest.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
|
||||
peer.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1+"/ssz_snappy", func(stream network.Stream) {
|
||||
// Decode the request.
|
||||
req := new(p2pTypes.DataColumnsByRootIdentifiers)
|
||||
err := peer.Encoding().DecodeWithMaxLength(stream, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, identifier := range *req {
|
||||
for _, column := range identifier.Columns {
|
||||
// Filter out the columns not to respond.
|
||||
if columnsNotToRespond[column] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Create the response.
|
||||
resp := dataColumnSidecars[column]
|
||||
|
||||
// Send the response.
|
||||
err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), resp)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Close the stream.
|
||||
closeStream(stream, log)
|
||||
})
|
||||
|
||||
// Create the record and set the custody count.
|
||||
enr := &enr.Record{}
|
||||
enr.Set(peerdas.Cgc(custodyGroupCount))
|
||||
|
||||
// Add the peer and connect it.
|
||||
p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound)
|
||||
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
|
||||
p2pService.Connect(peer)
|
||||
|
||||
return peer
|
||||
}
|
||||
|
||||
type dataSamplerTest struct {
|
||||
ctx context.Context
|
||||
p2pSvc *p2ptest.TestP2P
|
||||
peers []*p2ptest.TestP2P
|
||||
ctxMap map[[4]byte]int
|
||||
chainSvc *mock.ChainService
|
||||
blockProcessedData *statefeed.BlockProcessedData
|
||||
blobs []kzg.Blob
|
||||
kzgCommitments [][]byte
|
||||
kzgProofs [][]byte
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar
|
||||
}
|
||||
|
||||
func setupDefaultDataColumnSamplerTest(t *testing.T) (*dataSamplerTest, *dataColumnSampler1D) {
|
||||
const (
|
||||
blobCount uint64 = 3
|
||||
custodyRequirement uint64 = 4
|
||||
)
|
||||
|
||||
test, sampler := setupDataColumnSamplerTest(t, blobCount)
|
||||
|
||||
// Custody columns: [6, 38, 70, 102]
|
||||
p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 1)
|
||||
|
||||
// Custody columns: [3, 35, 67, 99]
|
||||
p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 2)
|
||||
|
||||
// Custody columns: [12, 44, 76, 108]
|
||||
p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 3)
|
||||
|
||||
test.peers = []*p2ptest.TestP2P{p1, p2, p3}
|
||||
|
||||
return test, sampler
|
||||
}
|
||||
|
||||
func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTest, *dataColumnSampler1D) {
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs, commitments and inclusion proofs.
|
||||
blobs := make([]kzg.Blob, blobCount)
|
||||
kzgCommitments := make([][]byte, blobCount)
|
||||
kzgProofs := make([][]byte, blobCount)
|
||||
|
||||
for i := uint64(0); i < blobCount; i++ {
|
||||
blob := getRandBlob(t, int64(i))
|
||||
|
||||
kzgCommitment, kzgProof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob
|
||||
kzgCommitments[i] = kzgCommitment[:]
|
||||
kzgProofs[i] = kzgProof[:]
|
||||
}
|
||||
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
dbBlock.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot, err := dataColumnSidecars[0].GetSignedBlockHeader().Header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blockProcessedData := &statefeed.BlockProcessedData{
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: sBlock,
|
||||
}
|
||||
|
||||
p2pSvc := p2ptest.NewTestP2P(t)
|
||||
chainSvc, clock := defaultMockChain(t, 0)
|
||||
|
||||
test := &dataSamplerTest{
|
||||
ctx: context.Background(),
|
||||
p2pSvc: p2pSvc,
|
||||
peers: []*p2ptest.TestP2P{},
|
||||
ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Fulu},
|
||||
chainSvc: chainSvc,
|
||||
blockProcessedData: blockProcessedData,
|
||||
blobs: blobs,
|
||||
kzgCommitments: kzgCommitments,
|
||||
kzgProofs: kzgProofs,
|
||||
dataColumnSidecars: dataColumnSidecars,
|
||||
}
|
||||
clockSync := startup.NewClockSynchronizer()
|
||||
require.NoError(t, clockSync.SetClock(clock))
|
||||
iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil)
|
||||
ini, err := iniWaiter.WaitForInitializer(context.Background())
|
||||
require.NoError(t, err)
|
||||
sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil, newDataColumnsVerifierFromInitializer(ini), &peerdas.CustodyInfo{})
|
||||
|
||||
return test, sampler
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
numPeers int
|
||||
custodyRequirement uint64
|
||||
expectedColumns [][]uint64
|
||||
prunePeers map[int]bool // Peers to prune.
|
||||
}{
|
||||
{
|
||||
name: "custodyRequirement=4",
|
||||
numPeers: 3,
|
||||
custodyRequirement: 4,
|
||||
expectedColumns: [][]uint64{
|
||||
{6, 37, 48, 113},
|
||||
{35, 79, 92, 109},
|
||||
{31, 44, 58, 97},
|
||||
},
|
||||
prunePeers: map[int]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "custodyRequirement=8",
|
||||
numPeers: 3,
|
||||
custodyRequirement: 8,
|
||||
expectedColumns: [][]uint64{
|
||||
{1, 6, 37, 48, 51, 87, 112, 113},
|
||||
{24, 25, 35, 52, 79, 92, 109, 126},
|
||||
{31, 44, 58, 64, 91, 97, 116, 127},
|
||||
},
|
||||
prunePeers: map[int]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.CustodyRequirement = tc.custodyRequirement
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
|
||||
for i := 0; i < tc.numPeers; i++ {
|
||||
p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
|
||||
test.peers = append(test.peers, p)
|
||||
}
|
||||
|
||||
// confirm everything works
|
||||
sampler.refreshPeerInfo()
|
||||
require.Equal(t, params.BeaconConfig().NumberOfColumns, uint64(len(sampler.peersByCustodyGroup)))
|
||||
|
||||
require.Equal(t, tc.numPeers, len(sampler.groupsByPeer))
|
||||
for i, peer := range test.peers {
|
||||
// confirm peer has the expected columns
|
||||
require.Equal(t, len(tc.expectedColumns[i]), len(sampler.groupsByPeer[peer.PeerID()]))
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
require.Equal(t, true, sampler.groupsByPeer[peer.PeerID()][column])
|
||||
}
|
||||
|
||||
// confirm column to peer mapping are correct
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
require.Equal(t, true, sampler.peersByCustodyGroup[column][peer.PeerID()])
|
||||
}
|
||||
}
|
||||
|
||||
// prune peers
|
||||
for peer := range tc.prunePeers {
|
||||
err := test.p2pSvc.Disconnect(test.peers[peer].PeerID())
|
||||
test.p2pSvc.Peers().SetConnectionState(test.peers[peer].PeerID(), peers.Disconnected)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
require.Equal(t, tc.numPeers-len(tc.prunePeers), len(sampler.groupsByPeer))
|
||||
for i, peer := range test.peers {
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
expected := true
|
||||
if tc.prunePeers[i] {
|
||||
expected = false
|
||||
}
|
||||
require.Equal(t, expected, sampler.peersByCustodyGroup[column][peer.PeerID()])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
|
||||
// TODO: Use `t.Run`.
|
||||
testCases := []struct {
|
||||
numPeers int
|
||||
custodyRequirement uint64
|
||||
columnsToDistribute [][]uint64
|
||||
expectedDistribution []map[int][]uint64
|
||||
}{
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 4,
|
||||
// peer custody maps
|
||||
// p0: {6, 37, 48, 113},
|
||||
// p1: {35, 79, 92, 109},
|
||||
// p2: {31, 44, 58, 97},
|
||||
columnsToDistribute: [][]uint64{
|
||||
{6, 35, 31},
|
||||
{6, 48, 79, 109, 31, 97},
|
||||
{6, 37, 113},
|
||||
{11},
|
||||
},
|
||||
expectedDistribution: []map[int][]uint64{
|
||||
{
|
||||
0: {6}, // p0
|
||||
1: {35}, // p1
|
||||
2: {31}, // p2
|
||||
},
|
||||
{
|
||||
0: {6, 48}, // p0
|
||||
1: {79, 109}, // p1
|
||||
2: {31, 97}, // p2
|
||||
},
|
||||
{
|
||||
0: {6, 37, 113}, // p0
|
||||
},
|
||||
{},
|
||||
},
|
||||
},
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 8,
|
||||
// peer custody maps
|
||||
// p0: {6, 37, 48, 113, 1, 112, 87, 51},
|
||||
// p1: {35, 79, 92, 109, 52, 126, 25, 24},
|
||||
// p2: {31, 44, 58, 97, 116, 91, 64, 127},
|
||||
columnsToDistribute: [][]uint64{
|
||||
{6, 48, 79, 25, 24, 97}, // all covered by peers
|
||||
{6, 35, 31, 32}, // `32` is not covered by any peer
|
||||
},
|
||||
expectedDistribution: []map[int][]uint64{
|
||||
{
|
||||
0: {6, 48}, // p0
|
||||
1: {79, 25, 24}, // p1
|
||||
2: {97}, // p2
|
||||
},
|
||||
{
|
||||
0: {6}, // p0
|
||||
1: {35}, // p1
|
||||
2: {31}, // p2
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
params.SetupTestConfigCleanup(t)
|
||||
for _, tc := range testCases {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.CustodyRequirement = tc.custodyRequirement
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
|
||||
for i := 0; i < tc.numPeers; i++ {
|
||||
p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
|
||||
test.peers = append(test.peers, p)
|
||||
}
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
for idx, columns := range tc.columnsToDistribute {
|
||||
result, err := sampler.distributeSamplesToPeer(columns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(tc.expectedDistribution[idx]), len(result), fmt.Sprintf("%v - %v", tc.expectedDistribution[idx], result))
|
||||
|
||||
for peerIdx, dist := range tc.expectedDistribution[idx] {
|
||||
for _, column := range dist {
|
||||
peerID := test.peers[peerIdx].PeerID()
|
||||
require.Equal(t, true, result[peerID][column])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) {
|
||||
test, sampler := setupDefaultDataColumnSamplerTest(t)
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
t.Run("sample all columns", func(t *testing.T) {
|
||||
sampleColumns := []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97}
|
||||
retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(retrieved))
|
||||
for _, column := range sampleColumns {
|
||||
require.Equal(t, true, retrieved[column])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("sample a subset of columns", func(t *testing.T) {
|
||||
sampleColumns := []uint64{35, 31, 79, 48, 113, 97}
|
||||
retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(retrieved))
|
||||
for _, column := range sampleColumns {
|
||||
require.Equal(t, true, retrieved[column])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("sample a subset of columns with missing columns", func(t *testing.T) {
|
||||
sampleColumns := []uint64{35, 31, 100, 79}
|
||||
retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(retrieved))
|
||||
require.DeepEqual(t, map[uint64]bool{35: true, 31: true, 79: true}, retrieved)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.DataColumnSidecarSubnetCount = 32
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
samplesCount uint64
|
||||
possibleColumnsToRequest []uint64
|
||||
columnsNotToRespond map[uint64]bool
|
||||
expectedSuccess bool
|
||||
expectedRoundSummaries []roundSummary
|
||||
}{
|
||||
{
|
||||
name: "All columns are correctly sampled in a single round",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
|
||||
columnsNotToRespond: map[uint64]bool{},
|
||||
expectedSuccess: true,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{6, 35, 31, 37, 79},
|
||||
MissingColumns: map[uint64]bool{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Two missing columns in the first round, ok in the second round",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
|
||||
columnsNotToRespond: map[uint64]bool{6: true, 31: true},
|
||||
expectedSuccess: true,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{6, 35, 31, 37, 79},
|
||||
MissingColumns: map[uint64]bool{6: true, 31: true},
|
||||
},
|
||||
{
|
||||
RequestedColumns: []uint64{44, 48, 92, 58, 113, 109},
|
||||
MissingColumns: map[uint64]bool{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Two missing columns in the first round, one missing in the second round. Fail to sample.",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
|
||||
columnsNotToRespond: map[uint64]bool{6: true, 31: true, 48: true},
|
||||
expectedSuccess: false,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{6, 35, 31, 37, 79},
|
||||
MissingColumns: map[uint64]bool{6: true, 31: true},
|
||||
},
|
||||
{
|
||||
RequestedColumns: []uint64{44, 48, 92, 58, 113, 109},
|
||||
MissingColumns: map[uint64]bool{48: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
test, sampler := setupDataColumnSamplerTest(t, 3)
|
||||
p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 1)
|
||||
p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 2)
|
||||
p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 3)
|
||||
test.peers = []*p2ptest.TestP2P{p1, p2, p3}
|
||||
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockProcessedData, tc.possibleColumnsToRequest, tc.samplesCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSuccess, success)
|
||||
require.DeepEqual(t, tc.expectedRoundSummaries, summaries)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func deterministicRandomness(t *testing.T, seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
require.NoError(t, err)
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func getRandFieldElement(t *testing.T, seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(t, seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func getRandBlob(t *testing.T, seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
for i := 0; i < len(blob); i += 32 {
|
||||
fieldElementBytes := getRandFieldElement(t, seed+int64(i))
|
||||
copy(blob[i:i+32], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
beacon-chain/sync/data_columns_test.go (new file, 1605 lines)
File diff suppressed because it is too large
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
@@ -46,6 +47,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -81,6 +83,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -125,6 +128,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -167,6 +171,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -211,6 +216,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -255,6 +261,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
@@ -274,6 +281,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV3+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -73,6 +74,7 @@ go_test(
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -23,7 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
"github.com/OffchainLabs/prysm/v6/math"
|
||||
mathPrysm "github.com/OffchainLabs/prysm/v6/math"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
p2ppb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
@@ -34,7 +36,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// maxPendingRequests limits how many concurrent fetch request one can initiate.
|
||||
maxPendingRequests = 64
|
||||
// peersPercentagePerRequest caps percentage of peers to be used in a request.
|
||||
@@ -78,6 +79,10 @@ type blocksFetcherConfig struct {
|
||||
peerFilterCapacityWeight float64
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// blocksFetcher is a service to fetch chain data from peers.
|
||||
@@ -94,6 +99,9 @@ type blocksFetcher struct {
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
@@ -102,6 +110,7 @@ type blocksFetcher struct {
|
||||
capacityWeight float64 // how remaining capacity affects peer selection
|
||||
mode syncMode // allows to use fetcher in different sync scenarios
|
||||
quit chan struct{} // termination notifier
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// peerLock restricts fetcher actions on per peer basis. Currently, used for rate limiting.
|
||||
@@ -124,7 +133,7 @@ type fetchRequestResponse struct {
|
||||
blobsFrom peer.ID
|
||||
start primitives.Slot
|
||||
count uint64
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -162,6 +171,9 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
bv: cfg.bv,
|
||||
cv: cfg.cv,
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
rateLimiter: rateLimiter,
|
||||
peerLocks: make(map[peer.ID]*peerLock),
|
||||
@@ -170,6 +182,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
capacityWeight: capacityWeight,
|
||||
mode: cfg.mode,
|
||||
quit: make(chan struct{}),
|
||||
custodyInfo: cfg.custodyInfo,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -181,7 +194,7 @@ func maxBatchLimit() int {
|
||||
if params.DenebEnabled() {
|
||||
maxLimit = params.BeaconConfig().MaxRequestBlocksDeneb
|
||||
}
|
||||
castedMaxLimit, err := math.Int(maxLimit)
|
||||
castedMaxLimit, err := mathPrysm.Int(maxLimit)
|
||||
if err != nil {
|
||||
// Should be impossible to hit this case.
|
||||
log.WithError(err).Error("Unable to calculate the max batch limit")
|
||||
@@ -298,7 +311,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
response := &fetchRequestResponse{
|
||||
start: start,
|
||||
count: count,
|
||||
bwb: []blocks.BlockWithROBlobs{},
|
||||
bwb: []blocks.BlockWithROSidecars{},
|
||||
err: nil,
|
||||
}
|
||||
|
||||
@@ -317,30 +330,96 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
if f.mode == modeStopOnFinalizedEpoch {
|
||||
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
|
||||
if start > highestFinalizedSlot {
|
||||
response.err = fmt.Errorf("%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot)
|
||||
response.err = fmt.Errorf(
|
||||
"%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot,
|
||||
)
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||
if response.err == nil {
|
||||
pid, bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.blocksFrom, peers)
|
||||
pid, err := f.fetchSidecars(ctx, response.blocksFrom, peers, response.bwb)
|
||||
if err != nil {
|
||||
response.err = err
|
||||
}
|
||||
response.bwb = bwb
|
||||
|
||||
response.blobsFrom = pid
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer.
|
||||
// fetchSidecars fetches the sidecars corresponding to the blocks in `bwScs`.
// It mutates the `Blobs` and `Columns` fields of the elements of `bwScs` with the fetched sidecars.
|
||||
func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []peer.ID, bwScs []blocks.BlockWithROSidecars) (peer.ID, error) {
|
||||
const batchSize = 32
|
||||
|
||||
// Find the first block with a slot greater than or equal to the first Fulu slot.
|
||||
// (Blocks are sorted by slot.)
|
||||
firstFuluIndex := sort.Search(len(bwScs), func(i int) bool {
|
||||
return bwScs[i].Block.Version() >= version.Fulu
|
||||
})
|
||||
|
||||
blocksWithBlobs := bwScs[:firstFuluIndex]
|
||||
blocksWithDataColumns := bwScs[firstFuluIndex:]
|
||||
|
||||
if len(blocksWithBlobs) == 0 && len(blocksWithDataColumns) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var (
|
||||
blobsPid peer.ID
|
||||
err error
|
||||
)
|
||||
|
||||
if len(blocksWithBlobs) > 0 {
|
||||
// Fetch blob sidecars.
|
||||
blobsPid, err = f.fetchBlobsFromPeer(ctx, blocksWithBlobs, pid, peers)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch blobs from peer")
|
||||
}
|
||||
}
|
||||
|
||||
if len(blocksWithDataColumns) == 0 {
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// Extract blocks.
|
||||
dataColumnBlocks := make([]blocks.ROBlock, 0, len(blocksWithDataColumns))
|
||||
for _, blockWithSidecars := range blocksWithDataColumns {
|
||||
block := blockWithSidecars.Block
|
||||
dataColumnBlocks = append(dataColumnBlocks, block)
|
||||
}
|
||||
|
||||
// Fetch data column sidecars.
|
||||
actualGroupCount := f.custodyInfo.ActualGroupCount()
|
||||
fetchedDataColumnsByRoot, err := prysmsync.RequestMissingDataColumnsByRange(ctx, f.clock, f.ctxMap, f.p2p, f.rateLimiter, actualGroupCount, f.dcs, dataColumnBlocks, batchSize)
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "fetch missing data columns from peers")
|
||||
}
|
||||
|
||||
// Populate the response.
|
||||
for i := range bwScs {
|
||||
bwSc := &bwScs[i]
|
||||
root := bwSc.Block.Root()
|
||||
if columns, ok := fetchedDataColumnsByRoot[root]; ok {
|
||||
bwSc.Columns = columns
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Return the (multiple) peer IDs that provided the data columns and not only the one for blobs.
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
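fetchSidecars relies on sort.Search to split the slot-sorted batch at the first post-Fulu block: everything before the boundary gets blob sidecars, everything at or after it gets data column sidecars. A minimal, self-contained sketch of that split (the version constants here are stand-ins, not the real `runtime/version` values):

package main

import (
	"fmt"
	"sort"
)

const (
	versionDeneb = 4 // placeholder ordering, not the actual runtime/version constants
	versionFulu  = 6
)

func main() {
	// Versions of a slot-sorted batch of blocks: pre-Fulu first, Fulu after.
	versions := []int{versionDeneb, versionDeneb, versionFulu, versionFulu}

	// sort.Search returns the smallest index for which the predicate is true,
	// i.e. the first Fulu block (or len(versions) if there is none).
	firstFulu := sort.Search(len(versions), func(i int) bool {
		return versions[i] >= versionFulu
	})

	preFulu, postFulu := versions[:firstFulu], versions[firstFulu:]
	fmt.Println(firstFulu, preFulu, postFulu) // 2 [4 4] [6 6]
}
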
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer, sorted by slot.
|
||||
func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
ctx context.Context,
|
||||
start primitives.Slot, count uint64,
|
||||
peers []peer.ID,
|
||||
) ([]blocks.BlockWithROBlobs, peer.ID, error) {
|
||||
) ([]blocks.BlockWithROSidecars, peer.ID, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
|
||||
defer span.End()
|
||||
|
||||
@@ -355,39 +434,38 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
// peers are dialed first.
|
||||
peers = append(bestPeers, peers...)
|
||||
peers = dedupPeers(peers)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blocks, err := f.requestBlocks(ctx, req, p)
|
||||
for _, peer := range peers {
|
||||
blocks, err := f.requestBlocks(ctx, req, peer)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
log.WithField("peer", peer).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(peer)
|
||||
robs, err := sortedBlockWithVerifiedBlobSlice(blocks)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("invalid BeaconBlocksByRange response")
|
||||
log.WithField("peer", peer).WithError(err).Debug("invalid BeaconBlocksByRange response")
|
||||
continue
|
||||
}
|
||||
if len(features.Get().BlacklistedRoots) > 0 {
|
||||
for _, b := range robs {
|
||||
if features.BlacklistedBlock(b.Block.Root()) {
|
||||
return nil, p, prysmsync.ErrInvalidFetchedData
|
||||
return nil, peer, prysmsync.ErrInvalidFetchedData
|
||||
}
|
||||
}
|
||||
}
|
||||
return robs, p, err
|
||||
return robs, peer, err
|
||||
}
|
||||
return nil, "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
func sortedBlockWithVerifiedBlobSlice(bs []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROBlobs, error) {
|
||||
rb := make([]blocks.BlockWithROBlobs, len(bs))
|
||||
for i, b := range bs {
|
||||
func sortedBlockWithVerifiedBlobSlice(blks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROSidecars, error) {
|
||||
rb := make([]blocks.BlockWithROSidecars, len(blks))
|
||||
for i, b := range blks {
|
||||
ro, err := blocks.NewROBlock(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb[i] = blocks.BlockWithROBlobs{Block: ro}
|
||||
rb[i] = blocks.BlockWithROSidecars{Block: ro}
|
||||
}
|
||||
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
|
||||
return rb, nil
|
||||
@@ -403,7 +481,7 @@ type commitmentCountList []commitmentCount
|
||||
|
||||
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
||||
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
||||
func countCommitments(bwb []blocks.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
|
||||
func countCommitments(bwb []blocks.BlockWithROSidecars, retentionStart primitives.Slot) commitmentCountList {
|
||||
if len(bwb) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -485,7 +563,9 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
|
||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks.BlockWithROBlobs, error) {
|
||||
// verifyAndPopulateBlobs mutates the input `bwb` argument by adding verified blobs to the matching blocks.
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
|
||||
for i := range blobs {
|
||||
if blobs[i].Slot() < req.StartSlot {
|
||||
@@ -495,46 +575,53 @@ func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob
|
||||
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
|
||||
}
|
||||
for i := range bwb {
|
||||
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
err := populateBlock(&bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
if err != nil {
|
||||
if errors.Is(err, errDidntPopulate) {
|
||||
continue
|
||||
}
|
||||
return bwb, err
|
||||
return err
|
||||
}
|
||||
bwb[i] = bwi
|
||||
}
|
||||
return bwb, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var errDidntPopulate = errors.New("skipping population of block")
|
||||
|
||||
func populateBlock(bw blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks.BlockWithROBlobs, error) {
|
||||
// populateBlock verifies and populates blobs for a block.
|
||||
// This function mutates the input `bw` argument.
|
||||
func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blk := bw.Block
|
||||
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
commits, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) == 0 {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
// Drop blobs on the floor if we already have them.
|
||||
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) != len(blobs) {
|
||||
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
return missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
}
|
||||
|
||||
for ci := range commits {
|
||||
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
|
||||
return bw, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bw.Blobs = blobs
|
||||
return bw, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
||||
@@ -547,29 +634,34 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
|
||||
}
|
||||
|
||||
// fetchBlobsFromPeer fetches blob sidecars from a single randomly selected peer.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROBlobs, pid peer.ID, peers []peer.ID) (peer.ID, []blocks.BlockWithROBlobs, error) {
|
||||
// This function mutates the input `bwb` argument.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROSidecars, pid peer.ID, peers []peer.ID) (peer.ID, error) {
|
||||
if len(bwb) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||
defer span.End()
|
||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", err
|
||||
}
|
||||
// Construct request message based on observed interval of blocks in need of blobs.
|
||||
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
|
||||
if req == nil {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
||||
peers = append([]peer.ID{pid}, peers...)
|
||||
peers = f.hasSufficientBandwidth(peers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first. If all of them fail, we fallback to the
|
||||
// initial peer we wanted to request blobs from.
|
||||
peers = append(bestPeers, pid)
|
||||
peers = append(peers, pid)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blobs, err := f.requestBlobs(ctx, req, p)
|
||||
@@ -578,16 +670,31 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
|
||||
if err != nil {
|
||||
if err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs); err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
||||
continue
|
||||
}
|
||||
return p, robs, err
|
||||
return p, err
|
||||
}
|
||||
return "", nil, errNoPeersAvailable
|
||||
return "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
// sortedSliceFromMap returns a sorted slice of keys from a map.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i] < result[j]
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// waitForPeersFo
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
func (f *blocksFetcher) requestBlocks(
|
||||
ctx context.Context,
|
||||
@@ -642,6 +749,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
|
||||
}
|
||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||
l.Unlock()
|
||||
|
||||
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||
}
|
||||
|
||||
@@ -682,7 +790,7 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
// Exit early if we have sufficient capacity
|
||||
return nil
|
||||
}
|
||||
intCount, err := math.Int(count)
|
||||
intCount, err := mathPrysm.Int(count)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -699,7 +807,8 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
||||
filteredPeers := []peer.ID{}
|
||||
var filteredPeers []peer.ID
|
||||
|
||||
for _, p := range peers {
|
||||
if uint64(f.rateLimiter.Remaining(p.String())) < count {
|
||||
continue
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pm "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
@@ -266,7 +266,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
|
||||
p := p2pt.NewTestP2P(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
connectPeers(t, p, tt.peers, p.Peers())
|
||||
cache.RLock()
|
||||
genesisRoot := cache.rootCache[0]
|
||||
@@ -307,9 +307,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
fetcher.stop()
|
||||
}()
|
||||
|
||||
processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) {
|
||||
processFetchedBlocks := func() ([]blocks.BlockWithROSidecars, error) {
|
||||
defer cancel()
|
||||
var unionRespBlocks []blocks.BlockWithROBlobs
|
||||
var unionRespBlocks []blocks.BlockWithROSidecars
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -398,6 +398,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) {
|
||||
fetcher.scheduleRequest(context.Background(), 1, blockBatchLimit))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
blockBatchLimit := flags.Get().BlockBatchLimit
|
||||
chainConfig := struct {
|
||||
@@ -455,7 +456,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
var bwb []blocks.BlockWithROBlobs
|
||||
var bwb []blocks.BlockWithROSidecars
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Error(ctx.Err())
|
||||
@@ -531,9 +532,9 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p3 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p3 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
p1.Connect(p3)
|
||||
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
@@ -543,7 +544,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -602,15 +603,15 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
req := &ethpb.BeaconBlocksByRangeRequest{
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -638,7 +639,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
req *ethpb.BeaconBlocksByRangeRequest
|
||||
@@ -883,7 +884,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -893,7 +894,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
|
||||
p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
|
||||
@@ -993,7 +994,7 @@ func TestBlobRangeForBlocks(t *testing.T) {
|
||||
func TestBlobRequest(t *testing.T) {
|
||||
var nilReq *ethpb.BlobSidecarsByRangeRequest
|
||||
// no blocks
|
||||
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
|
||||
req := countCommitments([]blocks.BlockWithROSidecars{}, 0).blobRange(nil).Request()
|
||||
require.Equal(t, nilReq, req)
|
||||
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
@@ -1026,22 +1027,16 @@ func TestBlobRequest(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCountCommitments(t *testing.T) {
|
||||
// no blocks
|
||||
// blocks before retention start filtered
|
||||
// blocks without commitments filtered
|
||||
// pre-deneb filtered
|
||||
// variety of commitment counts are accurate, from 1 to max
|
||||
type testcase struct {
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
|
||||
numBlocks int
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROSidecars
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
}
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "nil blocks is safe",
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROSidecars {
|
||||
return nil
|
||||
},
|
||||
retStart: 0,
|
||||
@@ -1179,7 +1174,7 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROSidecars, []blocks.ROBlob) {
|
||||
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
for i := range blks {
|
||||
@@ -1190,7 +1185,7 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
|
||||
return bwb, blobs
|
||||
}
|
||||
|
||||
func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
|
||||
func testReqFromResp(bwb []blocks.BlockWithROSidecars) *ethpb.BlobSidecarsByRangeRequest {
|
||||
return &ethpb.BlobSidecarsByRangeRequest{
|
||||
StartSlot: bwb[0].Block.Block().Slot(),
|
||||
Count: uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
|
||||
@@ -1207,7 +1202,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
}
require.Equal(t, len(blobs), len(expectedCommits))

bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
require.NoError(t, err)
for _, bw := range bwb {
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
@@ -1228,7 +1223,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
})
t.Run("missing blobs", func(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
})
t.Run("no blobs for last block", func(t *testing.T) {
@@ -1240,7 +1235,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
blobs = blobs[0 : len(blobs)-len(cmts)]
lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
bwb[lastIdx].Block = lastBlk
_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
require.NoError(t, err)
})
t.Run("blobs not copied if all locally available", func(t *testing.T) {
@@ -1254,7 +1249,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
r7: {0, 1, 2, 3, 4, 5},
}
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
require.NoError(t, err)
require.Equal(t, 6, len(bwb[i1].Blobs))
require.Equal(t, 0, len(bwb[i7].Blobs))
@@ -1302,3 +1297,11 @@ func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
}
assert.Equal(t, 2, len(receivedPeers))
}

func TestSortedSliceFromMap(t *testing.T) {
m := map[uint64]bool{1: true, 3: true, 2: true, 4: true}
expected := []uint64{1, 2, 3, 4}

actual := sortedSliceFromMap(m)
require.DeepSSZEqual(t, expected, actual)
}
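
The expectation above fully pins down what sortedSliceFromMap has to do: return the keys of a map[uint64]bool in ascending order. A minimal sketch of such a helper (hypothetical; the real implementation in this package may differ in details) could look like:

package main

import (
	"fmt"
	"sort"
)

// sortedSliceFromMap returns the keys of m in ascending order.
// Only the behaviour checked by the test above is assumed here.
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
	keys := make([]uint64, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	return keys
}

func main() {
	fmt.Println(sortedSliceFromMap(map[uint64]bool{1: true, 3: true, 2: true, 4: true})) // [1 2 3 4]
}
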
@@ -24,7 +24,7 @@ import (
type forkData struct {
blocksFrom peer.ID
blobsFrom peer.ID
bwb []blocks.BlockWithROBlobs
bwb []blocks.BlockWithROSidecars
}

// nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.
@@ -188,7 +188,7 @@ func (f *blocksFetcher) findFork(ctx context.Context, slot primitives.Slot) (*fo
|
||||
"peer": pid,
|
||||
"step": fmt.Sprintf("%d/%d", i+1, len(peers)),
|
||||
}).Debug("Searching for alternative blocks")
|
||||
fork, err := f.findForkWithPeer(ctx, pid, slot)
|
||||
fork, err := f.findForkWithPeer(ctx, pid, peers, slot)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
@@ -208,7 +208,7 @@ func findForkReqRangeSize() uint64 {
|
||||
}
|
||||
|
||||
// findForkWithPeer loads some blocks from a peer in an attempt to find alternative blocks.
|
||||
func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot primitives.Slot) (*forkData, error) {
|
||||
func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, peers []peer.ID, slot primitives.Slot) (*forkData, error) {
|
||||
reqCount := findForkReqRangeSize()
|
||||
// Safe-guard, since previous epoch is used when calculating.
|
||||
if uint64(slot) < reqCount {
|
||||
@@ -237,21 +237,21 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
Count: reqCount,
|
||||
Step: 1,
|
||||
}
|
||||
blocks, err := f.requestBlocks(ctx, req, pid)
|
||||
reqBlocks, err := f.requestBlocks(ctx, req, pid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot fetch blocks: %w", err)
|
||||
}
|
||||
if len(blocks) == 0 {
|
||||
if len(reqBlocks) == 0 {
|
||||
return nil, errNoAlternateBlocks
|
||||
}
|
||||
|
||||
// If the first block is not connected to the current canonical chain, we'll stop processing this batch.
|
||||
// Instead, we'll work backwards from the first block until we find a common ancestor,
|
||||
// and then begin processing from there.
|
||||
first := blocks[0]
|
||||
first := reqBlocks[0]
|
||||
if !f.chain.HasBlock(ctx, first.Block().ParentRoot()) {
|
||||
// Backtrack on a root, to find a common ancestor from which we can resume syncing.
|
||||
fork, err := f.findAncestor(ctx, pid, first)
|
||||
fork, err := f.findAncestor(ctx, pid, peers, first)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find common ancestor: %w", err)
|
||||
}
|
||||
@@ -261,8 +261,8 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
// Traverse blocks, and if we've got one that doesn't have parent in DB, backtrack on it.
|
||||
// Note that we start from the second element in the array, because we know that the first element is in the db,
|
||||
// otherwise we would have gone into the findAncestor early return path above.
|
||||
for i := 1; i < len(blocks); i++ {
|
||||
block := blocks[i]
|
||||
for i := 1; i < len(reqBlocks); i++ {
|
||||
block := reqBlocks[i]
|
||||
parentRoot := block.Block().ParentRoot()
|
||||
// Step through blocks until we find one that is not in the chain. The goal is to find the point where the
|
||||
// chain observed in the peer diverges from the locally known chain, and then collect up the remainder of the
|
||||
@@ -275,25 +275,27 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
"slot": block.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", parentRoot),
|
||||
}).Debug("Block with unknown parent root has been found")
|
||||
altBlocks, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
|
||||
bwb, err := sortedBlockWithVerifiedBlobSlice(reqBlocks[i-1:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid blocks received in findForkWithPeer")
|
||||
}
|
||||
|
||||
sidecarsPid, err := f.fetchSidecars(ctx, pid, peers, bwb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "fetch sidecars")
|
||||
}
|
||||
|
||||
// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
|
||||
// the blocks.
|
||||
bpid, bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
}
|
||||
// The caller will use the BlocksWith VerifiedBlobs in bwb as the starting point for
|
||||
// round-robin syncing the alternate chain.
|
||||
return &forkData{blocksFrom: pid, blobsFrom: bpid, bwb: bwb}, nil
|
||||
return &forkData{blocksFrom: pid, blobsFrom: sidecarsPid, bwb: bwb}, nil
|
||||
}
|
||||
return nil, errNoAlternateBlocks
|
||||
}
|
||||
|
||||
// findAncestor tries to figure out common ancestor slot that connects a given root to known block.
|
||||
func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfaces.ReadOnlySignedBeaconBlock) (*forkData, error) {
|
||||
func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, peers []peer.ID, b interfaces.ReadOnlySignedBeaconBlock) (*forkData, error) {
|
||||
outBlocks := []interfaces.ReadOnlySignedBeaconBlock{b}
|
||||
for i := uint64(0); i < backtrackingMaxHops; i++ {
|
||||
parentRoot := outBlocks[len(outBlocks)-1].Block().ParentRoot()
|
||||
@@ -303,15 +305,14 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
|
||||
}
|
||||
var bpid peer.ID
|
||||
bpid, bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
sidecarsPid, err := f.fetchSidecars(ctx, pid, peers, bwb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
|
||||
return nil, errors.Wrap(err, "fetch sidecars")
|
||||
}
|
||||
return &forkData{
|
||||
blocksFrom: pid,
|
||||
bwb: bwb,
|
||||
blobsFrom: bpid,
|
||||
blobsFrom: sidecarsPid,
|
||||
}, nil
|
||||
}
|
||||
// Request block's parent.
|
||||
@@ -350,9 +351,12 @@ func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch p
|
||||
cp := f.chain.FinalizedCheckpt()
|
||||
headEpoch = cp.Epoch
|
||||
targetEpoch, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
|
||||
} else {
|
||||
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
|
||||
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
|
||||
|
||||
return headEpoch, targetEpoch, peers
|
||||
}
|
||||
|
||||
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
|
||||
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
|
||||
|
||||
return headEpoch, targetEpoch, peers
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pm "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
@@ -188,10 +189,11 @@ func TestBlocksFetcher_findFork(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(
|
||||
ctx,
|
||||
&blocksFetcherConfig{
|
||||
chain: mc,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
chain: mc,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
)
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, 1*time.Second, false)
|
||||
@@ -356,10 +358,11 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(
|
||||
ctx,
|
||||
&blocksFetcherConfig{
|
||||
chain: mc,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
p2p: p1,
|
||||
db: beaconDB,
|
||||
chain: mc,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
p2p: p1,
|
||||
db: beaconDB,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
)
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, 1*time.Second, false)
|
||||
@@ -371,13 +374,13 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
|
||||
t.Run("slot is too early", func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 0)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 0)
|
||||
assert.ErrorContains(t, "slot is too low to backtrack", err)
|
||||
})
|
||||
|
||||
t.Run("no peer status", func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 64)
|
||||
assert.ErrorContains(t, "cannot obtain peer's status", err)
|
||||
})
|
||||
|
||||
@@ -391,7 +394,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
HeadRoot: nil,
|
||||
HeadSlot: 0,
|
||||
})
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2.PeerID(), nil, 64)
|
||||
assert.ErrorContains(t, "cannot locate non-empty slot for a peer", err)
|
||||
})
|
||||
|
||||
@@ -401,7 +404,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
assert.ErrorContains(t, "no alternative blocks exist within scanned range", err)
|
||||
})
|
||||
|
||||
@@ -413,7 +416,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(fork.bwb))
|
||||
assert.Equal(t, forkedSlot, fork.bwb[0].Block.Block().Slot(), "Expected slot %d to be ancestor", forkedSlot)
|
||||
@@ -426,7 +429,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
_, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
require.ErrorContains(t, "failed to find common ancestor", err)
|
||||
})
|
||||
|
||||
@@ -438,7 +441,7 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
|
||||
defer func() {
|
||||
assert.NoError(t, p1.Disconnect(p2))
|
||||
}()
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, 64)
|
||||
fork, err := fetcher.findForkWithPeer(ctx, p2, nil, 64)
|
||||
require.NoError(t, err)
|
||||
|
||||
reqEnd := testForkStartSlot(t, 64) + primitives.Slot(findForkReqRangeSize())
|
||||
@@ -512,7 +515,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[4])
|
||||
require.NoError(t, err)
|
||||
_, err = fetcher.findAncestor(ctx, p2.PeerID(), wsb)
|
||||
_, err = fetcher.findAncestor(ctx, p2.PeerID(), nil, wsb)
|
||||
assert.ErrorContains(t, "protocols not supported", err)
|
||||
})
|
||||
|
||||
@@ -525,7 +528,7 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(knownBlocks[4])
|
||||
require.NoError(t, err)
|
||||
|
||||
fork, err := fetcher.findAncestor(ctx, p2.PeerID(), wsb)
|
||||
fork, err := fetcher.findAncestor(ctx, p2.PeerID(), nil, wsb)
|
||||
assert.ErrorContains(t, "no common ancestor found", err)
|
||||
assert.Equal(t, (*forkData)(nil), fork)
|
||||
})
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
@@ -72,6 +73,10 @@ type blocksQueueConfig struct {
|
||||
db db.ReadOnlyDatabase
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageSummarizer
|
||||
bv verification.NewBlobVerifier
|
||||
cv verification.NewDataColumnsVerifier
|
||||
custodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// blocksQueue is a priority queue that serves as a intermediary between block fetchers (producers)
|
||||
@@ -96,7 +101,7 @@ type blocksQueue struct {
|
||||
type blocksQueueFetchedData struct {
|
||||
blocksFrom peer.ID
|
||||
blobsFrom peer.ID
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
}
|
||||
|
||||
// newBlocksQueue creates initialized priority queue.
|
||||
@@ -109,12 +114,16 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
|
||||
log.Warn("rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
|
||||
}
|
||||
blocksFetcher = newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
ctxMap: cfg.ctxMap,
|
||||
chain: cfg.chain,
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
clock: cfg.clock,
|
||||
bs: cfg.bs,
|
||||
ctxMap: cfg.ctxMap,
|
||||
chain: cfg.chain,
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
clock: cfg.clock,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
bv: cfg.bv,
|
||||
cv: cfg.cv,
|
||||
custodyInfo: cfg.custodyInfo,
|
||||
})
|
||||
}
|
||||
highestExpectedSlot := cfg.highestExpectedSlot
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
@@ -36,9 +37,10 @@ func TestBlocksQueue_InitStartStop(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
|
||||
t.Run("stop without start", func(t *testing.T) {
|
||||
@@ -253,9 +255,10 @@ func TestBlocksQueue_Loop(t *testing.T) {
|
||||
defer cancel()
|
||||
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -263,7 +266,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
|
||||
highestExpectedSlot: tt.highestExpectedSlot,
|
||||
})
|
||||
assert.NoError(t, queue.start())
|
||||
processBlock := func(b blocks.BlockWithROBlobs) error {
|
||||
processBlock := func(b blocks.BlockWithROSidecars) error {
|
||||
block := b.Block
|
||||
if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) {
|
||||
return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
|
||||
@@ -275,7 +278,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
|
||||
return mc.ReceiveBlock(ctx, block, root, nil)
|
||||
}
|
||||
|
||||
var blocks []blocks.BlockWithROBlobs
|
||||
var blocks []blocks.BlockWithROSidecars
|
||||
for data := range queue.fetchedData {
|
||||
for _, b := range data.bwb {
|
||||
if err := processBlock(b); err != nil {
|
||||
@@ -311,8 +314,9 @@ func TestBlocksQueue_onScheduleEvent(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
|
||||
t.Run("expired context", func(t *testing.T) {
|
||||
@@ -406,8 +410,9 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
|
||||
t.Run("expired context", func(t *testing.T) {
|
||||
@@ -538,7 +543,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
response := &fetchRequestResponse{
|
||||
blocksFrom: "abc",
|
||||
bwb: []blocks.BlockWithROBlobs{
|
||||
bwb: []blocks.BlockWithROSidecars{
|
||||
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}},
|
||||
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}},
|
||||
},
|
||||
@@ -565,8 +570,9 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
|
||||
t.Run("expired context", func(t *testing.T) {
|
||||
@@ -624,8 +630,9 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
const pidDataParsed = "abc"
|
||||
t.Run("send from the first machine", func(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -640,7 +647,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
queue.smm.machines[256].fetched.blocksFrom = pidDataParsed
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROBlobs{
|
||||
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROSidecars{
|
||||
{Block: rwsb},
|
||||
}
|
||||
|
||||
@@ -653,8 +660,9 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
|
||||
t.Run("previous machines are not processed - do not send", func(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -674,7 +682,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
|
||||
{Block: rwsb},
|
||||
}
|
||||
|
||||
@@ -688,8 +696,9 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
|
||||
t.Run("previous machines are processed - send", func(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -705,7 +714,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
|
||||
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
|
||||
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
|
||||
{Block: rwsb},
|
||||
}
|
||||
|
||||
@@ -723,8 +732,9 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
|
||||
t.Run("expired context", func(t *testing.T) {
|
||||
@@ -852,8 +862,9 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
{blocks: makeSequence(1, 160), finalizedEpoch: 5, headSlot: 128},
|
||||
}, p.Peers())
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p,
|
||||
chain: mc,
|
||||
p2p: p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -879,8 +890,9 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
{blocks: makeSequence(200, 320), finalizedEpoch: 8, headSlot: 300},
|
||||
}, p.Peers())
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p,
|
||||
chain: mc,
|
||||
p2p: p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -929,8 +941,9 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
{blocks: makeSequence(200, 320), finalizedEpoch: 8, headSlot: 320},
|
||||
}, p.Peers())
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p,
|
||||
chain: mc,
|
||||
p2p: p,
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
@@ -1060,10 +1073,11 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(
|
||||
ctx,
|
||||
&blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
)
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, 1*time.Second, false)
|
||||
@@ -1282,10 +1296,11 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
|
||||
fetcher := newBlocksFetcher(
|
||||
ctx,
|
||||
&blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
|
||||
custodyInfo: &peerdas.CustodyInfo{},
|
||||
},
|
||||
)
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, 1*time.Second, false)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/paulbellamy/ratecounter"
|
||||
"github.com/pkg/errors"
|
||||
@@ -77,6 +79,9 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
|
||||
highestExpectedSlot: highestSlot,
|
||||
mode: mode,
|
||||
bs: s.cfg.BlobStorage,
|
||||
dcs: s.cfg.DataColumnStorage,
|
||||
cv: s.newDataColumnsVerifier,
|
||||
custodyInfo: s.cfg.CustodyInfo,
|
||||
}
|
||||
queue := newBlocksQueue(ctx, cfg)
|
||||
if err := queue.start(); err != nil {
|
||||
@@ -156,30 +161,75 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
|
||||
log.WithError(err).Debug("batch did not contain a valid sequence of unprocessed blocks")
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if len(bwb) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
batchFields := logrus.Fields{
|
||||
"firstSlot": data.bwb[0].Block.Block().Slot(),
|
||||
"firstUnprocessed": bwb[0].Block.Block().Slot(),
|
||||
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
// Separate blocks with blobs from blocks with data columns.
firstDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
return bwb[i].Block.Version() >= version.Fulu
})

blocksWithBlobs := bwb[:firstDataColumnIndex]
blocksWithDataColumns := bwb[firstDataColumnIndex:]

blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)
lazilyPersistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, nodeID, s.newDataColumnsVerifier, s.cfg.CustodyInfo)

log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
logBlobs, logDataColumns := log, log

if len(blocksWithBlobs) > 0 {
logBlobs = logBlobs.WithField("firstUnprocessed", blocksWithBlobs[0].Block.Block().Slot())
}
for i, b := range bwb {
|
||||
if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")
|
||||
|
||||
for i, b := range blocksWithBlobs {
|
||||
sidecars := blocks.NewSidecarsFromBlobSidecars(b.Blobs)
|
||||
|
||||
if err := lazilyPersistentStoreBlobs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
|
||||
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warning("Batch failure due to BlobSidecar issues")
|
||||
return uint64(i), err
|
||||
}
|
||||
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
|
||||
|
||||
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreBlobs); err != nil {
|
||||
if errors.Is(err, errParentDoesNotExist) {
|
||||
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
|
||||
logBlobs.WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
|
||||
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
|
||||
} else {
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
|
||||
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warn("Block processing failure")
|
||||
}
|
||||
|
||||
return uint64(i), err
|
||||
}
|
||||
}
|
||||
|
||||
for i, b := range blocksWithDataColumns {
|
||||
logDataColumns := logDataColumns.WithFields(syncFields(b.Block))
|
||||
|
||||
sidecars := blocks.NewSidecarsFromDataColumnSidecars(b.Columns)

if err := lazilyPersistentStoreColumn.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
|
||||
logDataColumns.WithError(err).Warning("Batch failure due to DataColumnSidecar issues")
|
||||
return uint64(i), err
|
||||
}
|
||||
|
||||
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreColumn); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, errParentDoesNotExist):
|
||||
logDataColumns.
|
||||
WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
|
||||
Debug("Could not process batch blocks due to missing parent")
|
||||
return uint64(i), err
|
||||
default:
|
||||
logDataColumns.WithError(err).Warning("Block processing failure")
|
||||
return uint64(i), err
|
||||
}
|
||||
}
|
||||
}
|
||||
return uint64(len(bwb)), nil
|
||||
}
|
||||
|
||||
@@ -191,12 +241,18 @@ func syncFields(b blocks.ROBlock) logrus.Fields {
|
||||
}
|
||||
|
||||
// highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers.
|
||||
// Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us.
|
||||
// It returns `0` if no peers are connected.
|
||||
// Note this can be lower than our finalized epoch if our connected peers are all behind us.
|
||||
func (s *Service) highestFinalizedEpoch() primitives.Epoch {
|
||||
highest := primitives.Epoch(0)
|
||||
for _, pid := range s.cfg.P2P.Peers().Connected() {
|
||||
peerChainState, err := s.cfg.P2P.Peers().ChainState(pid)
|
||||
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest {
|
||||
|
||||
if err != nil || peerChainState == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if peerChainState.FinalizedEpoch > highest {
|
||||
highest = peerChainState.FinalizedEpoch
|
||||
}
|
||||
}
|
||||
@@ -248,7 +304,7 @@ func (s *Service) logBatchSyncStatus(firstBlk blocks.ROBlock, nBlocks int) {
|
||||
func (s *Service) processBlock(
|
||||
ctx context.Context,
|
||||
genesis time.Time,
|
||||
bwb blocks.BlockWithROBlobs,
|
||||
bwb blocks.BlockWithROSidecars,
|
||||
blockReceiver blockReceiverFn,
|
||||
avs das.AvailabilityStore,
|
||||
) error {
|
||||
@@ -267,7 +323,7 @@ func (s *Service) processBlock(
|
||||
|
||||
type processedChecker func(context.Context, blocks.ROBlock) bool
|
||||
|
||||
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) {
|
||||
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROSidecars, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROSidecars, error) {
|
||||
// use a pointer to avoid confusing the zero-value with the case where the first element is processed.
|
||||
var processed *int
|
||||
for i := range bwb {
|
||||
@@ -297,40 +353,100 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSl
|
||||
return bwb[nonProcessedIdx:], nil
|
||||
}
|
||||
|
||||
func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) (uint64, error) {
|
||||
if len(bwb) == 0 {
|
||||
func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn) (uint64, error) {
|
||||
bwbCount := uint64(len(bwb))
|
||||
if bwbCount == 0 {
|
||||
return 0, errors.New("0 blocks provided into method")
|
||||
}
|
||||
|
||||
headSlot := s.cfg.Chain.HeadSlot()
|
||||
var err error
|
||||
bwb, err = validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
|
||||
bwb, err := validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(bwb) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
first := bwb[0].Block
|
||||
if !s.cfg.Chain.HasBlock(ctx, first.Block().ParentRoot()) {
|
||||
firstBlock := bwb[0].Block
|
||||
if !s.cfg.Chain.HasBlock(ctx, firstBlock.Block().ParentRoot()) {
|
||||
return 0, fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
|
||||
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
|
||||
errParentDoesNotExist, firstBlock.Block().ParentRoot(), firstBlock.Block().Slot())
|
||||
}
|
||||
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
s.logBatchSyncStatus(first, len(bwb))
|
||||
for _, bb := range bwb {
|
||||
if len(bb.Blobs) == 0 {
|
||||
// Separate blocks with blobs from blocks with data columns.
firstDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
|
||||
return bwb[i].Block.Version() >= version.Fulu
|
||||
})
|
||||
|
||||
blocksWithBlobs := bwb[:firstDataColumnIndex]
blocksWithDataColumns := bwb[firstDataColumnIndex:]
|
||||
|
||||
if err := s.processBlocksWithBlobs(ctx, blocksWithBlobs, bFunc, firstBlock); err != nil {
|
||||
return 0, errors.Wrap(err, "processing blocks with blobs")
|
||||
}
|
||||
|
||||
if err := s.processBlocksWithDataColumns(ctx, blocksWithDataColumns, bFunc, firstBlock); err != nil {
|
||||
return 0, errors.Wrap(err, "processing blocks with data columns")
|
||||
}
|
||||
|
||||
return bwbCount, nil
|
||||
}
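
Both processFetchedDataRegSync and processBatchedBlocks split the batch the same way: bwb is assumed to be sorted by slot, so the Version() >= version.Fulu predicate flips at most once and sort.Search finds the first post-Fulu block in O(log n). A self-contained sketch of that partition pattern, using a stand-in struct instead of blocks.BlockWithROSidecars and a placeholder fork-version constant:

package main

import (
	"fmt"
	"sort"
)

// item stands in for blocks.BlockWithROSidecars; version mirrors the block's fork version.
type item struct {
	slot    int
	version int
}

const fuluVersion = 7 // placeholder; the real constant lives in runtime/version

func main() {
	// Sorted by slot, so the fork version is non-decreasing across the batch.
	bwb := []item{{slot: 1, version: 6}, {slot: 2, version: 6}, {slot: 3, version: 7}, {slot: 4, version: 7}}

	// Smallest index whose block is Fulu or later.
	firstDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
		return bwb[i].version >= fuluVersion
	})

	blocksWithBlobs := bwb[:firstDataColumnIndex]       // handled via the blob availability store
	blocksWithDataColumns := bwb[firstDataColumnIndex:] // handled via the data column store

	fmt.Println(len(blocksWithBlobs), len(blocksWithDataColumns)) // 2 2
}
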
func (s *Service) processBlocksWithBlobs(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
|
||||
bwbCount := len(bwbs)
|
||||
if bwbCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
batchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||
persistentStore := das.NewLazilyPersistentStore(s.cfg.BlobStorage, batchVerifier)
|
||||
s.logBatchSyncStatus(firstBlock, bwbCount)
|
||||
|
||||
for _, bwb := range bwbs {
|
||||
if len(bwb.Blobs) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := avs.Persist(s.clock.CurrentSlot(), bb.Blobs...); err != nil {
|
||||
return 0, err
|
||||
|
||||
sidecars := blocks.NewSidecarsFromBlobSidecars(bwb.Blobs)
|
||||
|
||||
if err := persistentStore.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
|
||||
return errors.Wrap(err, "persisting blobs")
|
||||
}
|
||||
}
|
||||
|
||||
robs := blocks.BlockWithROBlobsSlice(bwb).ROBlocks()
|
||||
return uint64(len(bwb)), bFunc(ctx, robs, avs)
|
||||
robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
|
||||
if err := bFunc(ctx, robs, persistentStore); err != nil {
|
||||
return errors.Wrap(err, "processing blocks with blobs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) processBlocksWithDataColumns(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
|
||||
bwbCount := len(bwbs)
|
||||
if bwbCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
persistentStoreColumn := das.NewLazilyPersistentStoreColumn(s.cfg.DataColumnStorage, s.cfg.P2P.NodeID(), s.newDataColumnsVerifier, s.cfg.CustodyInfo)
|
||||
s.logBatchSyncStatus(firstBlock, bwbCount)
|
||||
|
||||
for _, bwb := range bwbs {
|
||||
if len(bwb.Columns) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
sidecars := blocks.NewSidecarsFromDataColumnSidecars(bwb.Columns)
|
||||
|
||||
if err := persistentStoreColumn.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
|
||||
return errors.Wrap(err, "persisting columns")
|
||||
}
|
||||
}
|
||||
|
||||
robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
|
||||
if err := bFunc(ctx, robs, persistentStoreColumn); err != nil {
|
||||
return errors.Wrap(err, "process post-Fulu blocks")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isPunishableError(err error) bool {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
@@ -309,7 +310,7 @@ func TestService_roundRobinSync(t *testing.T) {
|
||||
clock := startup.NewClock(gt, vr)
|
||||
s := &Service{
|
||||
ctx: context.Background(),
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB, CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.NewBool(true),
|
||||
clock: clock,
|
||||
@@ -373,7 +374,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
||||
return nil
|
||||
@@ -385,7 +386,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err = blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||
return nil
|
||||
}, nil)
|
||||
@@ -396,7 +397,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err = blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
||||
return nil
|
||||
@@ -432,7 +433,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
s.genesisTime = genesis
|
||||
|
||||
t.Run("process non-linear batch", func(t *testing.T) {
|
||||
var batch []blocks.BlockWithROBlobs
|
||||
var batch []blocks.BlockWithROSidecars
|
||||
currBlockRoot := genesisBlkRoot
|
||||
for i := primitives.Slot(1); i < 10; i++ {
|
||||
parentRoot := currBlockRoot
|
||||
@@ -446,11 +447,11 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
|
||||
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
|
||||
var batch2 []blocks.BlockWithROBlobs
|
||||
var batch2 []blocks.BlockWithROSidecars
|
||||
for i := primitives.Slot(10); i < 20; i++ {
|
||||
parentRoot := currBlockRoot
|
||||
blk1 := util.NewBeaconBlock()
|
||||
@@ -463,7 +464,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb})
|
||||
batch2 = append(batch2, blocks.BlockWithROSidecars{Block: rowsb})
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
|
||||
@@ -485,7 +486,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
assert.ErrorContains(t, "block is already processed", err)
|
||||
require.Equal(t, uint64(0), count)
|
||||
|
||||
var badBatch2 []blocks.BlockWithROBlobs
|
||||
var badBatch2 []blocks.BlockWithROSidecars
|
||||
for i, b := range batch2 {
|
||||
// create a non-linear batch
|
||||
if i%3 == 0 && i != 0 {
|
||||
@@ -569,7 +570,7 @@ func TestService_blockProviderScoring(t *testing.T) {
|
||||
clock := startup.NewClock(gt, vr)
|
||||
s := &Service{
|
||||
ctx: context.Background(),
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB, CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.NewBool(true),
|
||||
clock: clock,
|
||||
@@ -638,7 +639,7 @@ func TestService_syncToFinalizedEpoch(t *testing.T) {
|
||||
}
|
||||
s := &Service{
|
||||
ctx: context.Background(),
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB},
|
||||
cfg: &Config{Chain: mc, P2P: p, DB: beaconDB, CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
synced: abool.New(),
|
||||
chainStarted: abool.NewBool(true),
|
||||
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
|
||||
@@ -685,7 +686,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, context.Background(), beaconDB, genesisBlk)
|
||||
|
||||
var batch []blocks.BlockWithROBlobs
|
||||
var batch []blocks.BlockWithROSidecars
|
||||
currBlockRoot := genesisBlkRoot
|
||||
for i := primitives.Slot(1); i < 10; i++ {
|
||||
parentRoot := currBlockRoot
|
||||
@@ -699,7 +700,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
|
||||
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
@@ -53,22 +54,25 @@ type Config struct {
|
||||
ClockWaiter startup.ClockWaiter
|
||||
InitialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
CustodyInfo *peerdas.CustodyInfo
|
||||
}
|
||||
|
||||
// Service service.
|
||||
type Service struct {
|
||||
cfg *Config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
synced *abool.AtomicBool
|
||||
chainStarted *abool.AtomicBool
|
||||
counter *ratecounter.RateCounter
|
||||
genesisChan chan time.Time
|
||||
clock *startup.Clock
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
ctxMap sync.ContextByteVersions
|
||||
genesisTime time.Time
|
||||
cfg *Config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
synced *abool.AtomicBool
|
||||
chainStarted *abool.AtomicBool
|
||||
counter *ratecounter.RateCounter
|
||||
genesisChan chan time.Time
|
||||
clock *startup.Clock
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
ctxMap sync.ContextByteVersions
|
||||
genesisTime time.Time
|
||||
}
|
||||
|
||||
// Option is a functional option for the initial-sync Service.
|
||||
@@ -149,6 +153,7 @@ func (s *Service) Start() {
|
||||
return
|
||||
}
|
||||
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
||||
s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
|
||||
|
||||
gt := clock.GenesisTime()
|
||||
if gt.IsZero() {
|
||||
@@ -175,20 +180,22 @@ func (s *Service) Start() {
|
||||
}
|
||||
s.chainStarted.Set()
|
||||
log.Info("Starting initial chain sync...")
|
||||
|
||||
// Are we already in sync, or close to it?
|
||||
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
|
||||
log.Info("Already synced to the current chain head")
|
||||
s.markSynced()
|
||||
return
|
||||
}
|
||||
|
||||
peers, err := s.waitForMinimumPeers()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error waiting for minimum number of peers")
|
||||
return
|
||||
}
|
||||
if err := s.fetchOriginBlobs(peers); err != nil {
|
||||
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
|
||||
return
|
||||
|
||||
if err := s.fetchOriginSidecars(peers); err != nil {
|
||||
log.WithError(err).Error("Error fetching origin sidecars")
|
||||
}
|
||||
if err := s.roundRobinSync(); err != nil {
|
||||
if errors.Is(s.ctx.Err(), context.Canceled) {
|
||||
@@ -200,6 +207,47 @@ func (s *Service) Start() {
|
||||
s.markSynced()
|
||||
}
|
||||
|
||||
// fetchOriginSidecars fetches any blob and data column sidecars required for the checkpoint sync origin block, depending on its fork epoch.
|
||||
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
|
||||
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
block, err := s.cfg.DB.Block(s.ctx, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "block")
|
||||
}
|
||||
|
||||
currentSlot, blockSlot := s.clock.CurrentSlot(), block.Block().Slot()
|
||||
currentEpoch, blockEpoch := slots.ToEpoch(currentSlot), slots.ToEpoch(blockSlot)
|
||||
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(block, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new ro block with root")
|
||||
}
|
||||
|
||||
beaconConfig := params.BeaconConfig()
|
||||
|
||||
if blockEpoch >= beaconConfig.FuluForkEpoch {
|
||||
if err := s.fetchOriginColumns(peers, roBlock); err != nil {
|
||||
return errors.Wrap(err, "fetch origin columns")
|
||||
}
|
||||
}
|
||||
|
||||
if blockEpoch >= beaconConfig.DenebForkEpoch {
|
||||
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
|
||||
return errors.Wrap(err, "fetch origin blobs")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
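
fetchOriginSidecars gates the two fetch paths on the origin block's epoch: nothing outside the data-availability window, columns for Fulu-and-later origins, blobs for Deneb-and-later ones. A stripped-down sketch of that gating, with placeholder epoch constants standing in for params.BeaconConfig() and a simplified DA-window check in place of params.WithinDAPeriod:

package main

import "fmt"

type epoch uint64

// Placeholder fork and retention parameters; real values come from params.BeaconConfig().
const (
	denebForkEpoch epoch = 100
	fuluForkEpoch  epoch = 200
	daWindow       epoch = 4096 // stand-in for the blob/column retention window
)

// sidecarKindsToFetch mirrors the decision order in fetchOriginSidecars.
func sidecarKindsToFetch(blockEpoch, currentEpoch epoch) (columns, blobs bool) {
	// Outside the DA period nothing is fetched.
	if currentEpoch > blockEpoch && currentEpoch-blockEpoch > daWindow {
		return false, false
	}
	return blockEpoch >= fuluForkEpoch, blockEpoch >= denebForkEpoch
}

func main() {
	fmt.Println(sidecarKindsToFetch(250, 260)) // true true  (Fulu origin: columns and blobs)
	fmt.Println(sidecarKindsToFetch(150, 260)) // false true (Deneb origin: blobs only)
}
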
// Stop initial sync.
|
||||
func (s *Service) Stop() error {
|
||||
s.cancel()
|
||||
@@ -304,23 +352,9 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
return nil
|
||||
}
|
||||
blk, err := s.cfg.DB.Block(s.ctx, r)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
|
||||
return err
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
rob, err := blocks.NewROBlockWithRoot(blk, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
|
||||
r := rob.Root()
|
||||
|
||||
req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -331,10 +365,13 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
}
|
||||
shufflePeers(pids)
|
||||
for i := range pids {
|
||||
sidecars, err := sync.SendBlobSidecarByRoot(s.ctx, s.clock, s.cfg.P2P, pids[i], s.ctxMap, &req, rob.Block().Slot())
|
||||
blobSidecars, err := sync.SendBlobSidecarByRoot(s.ctx, s.clock, s.cfg.P2P, pids[i], s.ctxMap, &req, rob.Block().Slot())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
if len(sidecars) != len(req) {
|
||||
continue
|
||||
}
|
||||
@@ -344,6 +381,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
if err := avs.Persist(current, sidecars...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
|
||||
continue
|
||||
@@ -354,6 +392,29 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
|
||||
actualSamplingSize := s.cfg.CustodyInfo.CustodyGroupSamplingSize(peerdas.Actual)
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
storage := s.cfg.DataColumnStorage
|
||||
|
||||
missingColumns, err := sync.MissingDataColumns(roBlock, nodeID, actualSamplingSize, storage)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "missing data columns")
|
||||
}
|
||||
|
||||
sidecars, err := sync.RequestDataColumnSidecarsByRoot(s.ctx, missingColumns, roBlock, pids, s.clock, s.cfg.P2P, s.ctxMap, s.newDataColumnsVerifier)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "request data column sidecars")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
|
||||
"columnCount": len(sidecars),
|
||||
}).Info("Successfully downloaded data columns for checkpoint sync block")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func shufflePeers(pids []peer.ID) {
|
||||
rg := rand.NewGenerator()
|
||||
rg.Shuffle(len(pids), func(i, j int) {
|
||||
@@ -366,3 +427,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
|
||||
return ini.NewBlobVerifier(b, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
|
||||
return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return ini.NewDataColumnsVerifier(roDataColumns, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
@@ -161,6 +162,7 @@ func TestService_InitStartStop(t *testing.T) {
|
||||
ClockWaiter: gs,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
InitialSyncComplete: make(chan struct{}),
|
||||
CustodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
s.verifierWaiter = verification.NewInitializerWaiter(gs, nil, nil)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
@@ -197,7 +199,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
|
||||
cs := startup.NewClockSynchronizer()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
s := &Service{
|
||||
cfg: &Config{Chain: mc, StateNotifier: mc.StateNotifier(), ClockWaiter: cs, InitialSyncComplete: make(chan struct{})},
|
||||
cfg: &Config{Chain: mc, StateNotifier: mc.StateNotifier(), ClockWaiter: cs, InitialSyncComplete: make(chan struct{}), CustodyInfo: &peerdas.CustodyInfo{}},
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
synced: abool.New(),
|
||||
@@ -305,6 +307,7 @@ func TestService_markSynced(t *testing.T) {
|
||||
Chain: mc,
|
||||
StateNotifier: mc.StateNotifier(),
|
||||
InitialSyncComplete: make(chan struct{}),
|
||||
CustodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
require.NotNil(t, s)
|
||||
assert.Equal(t, false, s.chainStarted.IsSet())
|
||||
@@ -390,6 +393,7 @@ func TestService_Resync(t *testing.T) {
|
||||
Chain: mc,
|
||||
StateNotifier: mc.StateNotifier(),
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
|
||||
CustodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
assert.NotNil(t, s)
|
||||
assert.Equal(t, primitives.Slot(0), s.cfg.Chain.HeadSlot())
|
||||
@@ -409,6 +413,7 @@ func TestService_Resync(t *testing.T) {
|
||||
func TestService_Initialized(t *testing.T) {
|
||||
s := NewService(context.Background(), &Config{
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
CustodyInfo: &peerdas.CustodyInfo{},
|
||||
})
|
||||
s.chainStarted.Set()
|
||||
assert.Equal(t, true, s.Initialized())
|
||||
@@ -495,8 +500,8 @@ func TestOriginOutsideRetention(t *testing.T) {
|
||||
bdb := dbtest.SetupDB(t)
|
||||
genesis := time.Unix(0, 0)
|
||||
secsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
retentionPeriod := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch)
|
||||
outsideRetention := genesis.Add(retentionPeriod)
|
||||
retentionDuration := time.Second * time.Duration(uint64(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)*secsPerEpoch)
|
||||
outsideRetention := genesis.Add(retentionDuration)
|
||||
now := func() time.Time {
|
||||
return outsideRetention
|
||||
}
|
||||
@@ -509,5 +514,6 @@ func TestOriginOutsideRetention(t *testing.T) {
|
||||
require.NoError(t, concreteDB.SaveOriginCheckpointBlockRoot(ctx, blk.Root()))
|
||||
// This would break due to missing service dependencies, but will return nil fast due to being outside retention.
|
||||
require.Equal(t, false, params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(clock.CurrentSlot())))
|
||||
require.NoError(t, s.fetchOriginBlobs([]peer.ID{}))
|
||||
|
||||
require.NoError(t, s.fetchOriginSidecars([]peer.ID{}))
|
||||
}

Some files were not shown because too many files have changed in this diff.