Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 13:28:01 -05:00
Implement PeerDAS sync (#15564)
* PeerDAS: Implement sync
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Satyajit's comment.
* Partially fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Add tests for `sendDataColumnSidecarsRequest`.
* Fix Satyajit's comment.
* Implement `TestSendDataColumnSidecarsRequest`.
* Implement `TestFetchDataColumnSidecarsFromPeers`.
* Implement `TestUpdateResults`.
* Implement `TestSelectPeers`.
* Implement `TestCategorizeIndices`.
* Fix James' comment.
* Fix James's comment.
* Fix James' commit.
* Fix James' comment.
* Fix James' comment.
* Fix flakiness in `TestSelectPeers`.
* Update cmd/beacon-chain/flags/config.go
  Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
* Fix Preston's comment.
* Fix James's comment.
* Implement `TestFetchDataColumnSidecars`.
* Revert "Fix Potuz's comment." This reverts commit c45230b455.
* Fix Potuz's comment.
* Revert "Fix James' comment." This reverts commit a3f919205a.
* Fix James' comment.
* Fix Preston's comment.
* Fix James' comment.
* `selectPeers`: Avoid map with key but empty value.
* Fix typo.
* Fix Potuz's comment.
* Fix Potuz's comment.
* Fix James' comment.
* Add DataColumnStorage and SubscribeAllDataSubnets flag.
* Add extra flags
* Fix Potuz's and Preston's comment.
* Add rate limiter check.
---------
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
@@ -6,20 +6,20 @@ import (
 )
 
 // Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
-func Verify(sidecars ...blocks.ROBlob) error {
-    if len(sidecars) == 0 {
+func Verify(blobSidecars ...blocks.ROBlob) error {
+    if len(blobSidecars) == 0 {
         return nil
     }
-    if len(sidecars) == 1 {
+    if len(blobSidecars) == 1 {
         return kzgContext.VerifyBlobKZGProof(
-            bytesToBlob(sidecars[0].Blob),
-            bytesToCommitment(sidecars[0].KzgCommitment),
-            bytesToKZGProof(sidecars[0].KzgProof))
+            bytesToBlob(blobSidecars[0].Blob),
+            bytesToCommitment(blobSidecars[0].KzgCommitment),
+            bytesToKZGProof(blobSidecars[0].KzgProof))
     }
-    blobs := make([]GoKZG.Blob, len(sidecars))
-    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
-    proofs := make([]GoKZG.KZGProof, len(sidecars))
-    for i, sidecar := range sidecars {
+    blobs := make([]GoKZG.Blob, len(blobSidecars))
+    cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
+    proofs := make([]GoKZG.KZGProof, len(blobSidecars))
+    for i, sidecar := range blobSidecars {
         blobs[i] = *bytesToBlob(sidecar.Blob)
         cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
         proofs[i] = bytesToKZGProof(sidecar.KzgProof)
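Illustrative sketch, not part of this commit: a minimal call site for the renamed Verify helper, assuming it lives in Prysm's beacon-chain/blockchain/kzg package (the file path is not shown in this hunk). An empty argument list is a no-op, a single sidecar takes the single-proof path, and several sidecars take the batch path shown above.

    package example

    import (
        kzg "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
        "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    )

    // verifyAll forwards any number of blob sidecars to kzg.Verify.
    func verifyAll(blobSidecars []blocks.ROBlob) error {
        return kzg.Verify(blobSidecars...)
    }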
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
 }
 
 func TestVerify(t *testing.T) {
-    sidecars := make([]blocks.ROBlob, 0)
-    require.NoError(t, Verify(sidecars...))
+    blobSidecars := make([]blocks.ROBlob, 0)
+    require.NoError(t, Verify(blobSidecars...))
 }
 
 func TestBytesToAny(t *testing.T) {
@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
         }
     }
 
-    if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
-        return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
+    if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
+        return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
     }
 
     args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
         JustifiedCheckpoint: jCheckpoints[i],
         FinalizedCheckpoint: fCheckpoints[i]}
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
     return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
 }
 
+func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
+    blockVersion := roBlock.Version()
+    block := roBlock.Block()
+    slot := block.Slot()
+
+    if blockVersion >= version.Fulu {
+        if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
+            return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
+        }
+
+        return nil
+    }
+
+    if blockVersion >= version.Deneb {
+        if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
+            return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
+        }
+
+        return nil
+    }
+
+    return nil
+}
+
 func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
     e := coreTime.CurrentEpoch(st)
     if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
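The new areSidecarsAvailable helper dispatches the data-availability check on the block's fork version: Fulu and later blocks are checked against data column sidecars, Deneb-era blocks against blob sidecars, and older blocks need no check. Below is a minimal standalone sketch of that dispatch, with hypothetical version ordinals standing in for Prysm's version constants (which are ordered so that Deneb < Fulu).

    package main

    import "fmt"

    // Hypothetical ordinals for illustration only; Prysm's runtime/version
    // package defines the real constants.
    const (
        deneb = 4
        fulu  = 6
    )

    // availabilityCheck mirrors the dispatch above: highest fork checked first.
    func availabilityCheck(blockVersion int) string {
        switch {
        case blockVersion >= fulu:
            return "data column sidecars"
        case blockVersion >= deneb:
            return "blob sidecars"
        default:
            return "none"
        }
    }

    func main() {
        for _, v := range []int{3, deneb, fulu} {
            fmt.Printf("version %d -> %s\n", v, availabilityCheck(v))
        }
    }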
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
 
 func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
     helpers.ClearCache()
+    params.SetupTestConfigCleanup(t)
 
     validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
     syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
 }
 
 func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
+    params.SetupTestConfigCleanup(t)
     helpers.ClearCache()
 
     validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
@@ -4,7 +4,6 @@ go_library(
     name = "go_default_library",
     srcs = [
         "availability_blobs.go",
-        "availability_columns.go",
         "blob_cache.go",
         "data_column_cache.go",
         "iface.go",
@@ -13,7 +12,6 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
     visibility = ["//visibility:public"],
     deps = [
-        "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/verification:go_default_library",
         "//config/fieldparams:go_default_library",
@@ -23,7 +21,6 @@ go_library(
         "//runtime/logging:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
-        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
     ],
@@ -33,7 +30,6 @@ go_test(
     name = "go_default_test",
     srcs = [
         "availability_blobs_test.go",
-        "availability_columns_test.go",
         "blob_cache_test.go",
         "data_column_cache_test.go",
     ],
@@ -49,7 +45,6 @@ go_test(
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",
-        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
     ],
 )
@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
 // Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
 // for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
 // by the given block are guaranteed to be persisted for the remainder of the retention period.
-func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
+func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
     if len(sidecars) == 0 {
         return nil
     }
 
-    blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
-    if err != nil {
-        return errors.Wrap(err, "blob sidecars from sidecars")
-    }
-
-    if len(blobSidecars) > 1 {
-        firstRoot := blobSidecars[0].BlockRoot()
-        for _, sidecar := range blobSidecars[1:] {
+    if len(sidecars) > 1 {
+        firstRoot := sidecars[0].BlockRoot()
+        for _, sidecar := range sidecars[1:] {
             if sidecar.BlockRoot() != firstRoot {
                 return errMixedRoots
             }
         }
     }
-    if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
+    if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
         return nil
     }
-    key := keyFromSidecar(blobSidecars[0])
+    key := keyFromSidecar(sidecars[0])
     entry := s.cache.ensure(key)
-    for _, blobSidecar := range blobSidecars {
+    for _, blobSidecar := range sidecars {
         if err := entry.stash(&blobSidecar); err != nil {
             return err
         }
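The Persist/IsDataAvailable pair is deliberately lazy: Persist only stashes sidecars in the in-memory cache keyed by block root, and verification plus the disk write happen later, when IsDataAvailable runs for the block. A sketch of the intended call order, using simplified stand-in types rather than the real das interfaces:

    package example

    import (
        "context"
        "fmt"
    )

    type sidecar struct{ blockRoot [32]byte }
    type roBlock struct{ root [32]byte }

    // store mirrors the AvailabilityStore shape: stash first, verify on import.
    type store interface {
        Persist(currentSlot uint64, sidecars ...sidecar) error
        IsDataAvailable(ctx context.Context, currentSlot uint64, b roBlock) error
    }

    // importBlock shows the order the blockchain package relies on: sidecars are
    // persisted as they arrive, and the availability check runs once per block.
    func importBlock(ctx context.Context, s store, slot uint64, b roBlock, scs []sidecar) error {
        if err := s.Persist(slot, scs...); err != nil {
            return fmt.Errorf("persist: %w", err)
        }
        return s.IsDataAvailable(ctx, slot, b)
    }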
@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {
 
     blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
 
-    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
-
     mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
     as := NewLazilyPersistentStore(store, mbv)
 
     // Only one commitment persisted, should return error with other indices
-    require.NoError(t, as.Persist(1, scs[2]))
+    require.NoError(t, as.Persist(1, blobSidecars[2]))
     err := as.IsDataAvailable(ctx, 1, blk)
     require.ErrorIs(t, err, errMissingSidecar)
 
     // All but one persisted, return missing idx
-    require.NoError(t, as.Persist(1, scs[0]))
+    require.NoError(t, as.Persist(1, blobSidecars[0]))
     err = as.IsDataAvailable(ctx, 1, blk)
     require.ErrorIs(t, err, errMissingSidecar)
 
     // All persisted, return nil
-    require.NoError(t, as.Persist(1, scs...))
+    require.NoError(t, as.Persist(1, blobSidecars...))
 
     require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
 }
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
     blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
     as := NewLazilyPersistentStore(store, mbv)
 
-    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
-
     // Only one commitment persisted, should return error with other indices
-    require.NoError(t, as.Persist(1, scs[0]))
+    require.NoError(t, as.Persist(1, blobSidecars[0]))
     err := as.IsDataAvailable(ctx, 1, blk)
     require.NotNil(t, err)
     require.ErrorIs(t, err, errCommitmentMismatch)
@@ -161,29 +157,25 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
 func TestLazyPersistOnceCommitted(t *testing.T) {
     _, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
 
-    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
-
     as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
     // stashes as expected
-    require.NoError(t, as.Persist(1, scs...))
+    require.NoError(t, as.Persist(1, blobSidecars...))
     // ignores duplicates
-    require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
+    require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
 
     // ignores index out of bound
     blobSidecars[0].Index = 6
-    require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
+    require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
 
     _, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
 
-    more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
-
     // ignores sidecars before the retention period
     slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
     require.NoError(t, err)
-    require.NoError(t, as.Persist(32+slotOOB, more[0]))
+    require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
 
     // doesn't ignore new sidecars with a different block root
-    require.NoError(t, as.Persist(1, more...))
+    require.NoError(t, as.Persist(1, moreBlobSidecars...))
 }
 
 type mockBlobBatchVerifier struct {
beacon-chain/das/availability_columns.go (file deleted)
@@ -1,213 +0,0 @@
-package das
-
-import (
-    "context"
-
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
-    "github.com/OffchainLabs/prysm/v6/config/params"
-    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
-    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
-    "github.com/OffchainLabs/prysm/v6/runtime/version"
-    "github.com/OffchainLabs/prysm/v6/time/slots"
-    "github.com/ethereum/go-ethereum/p2p/enode"
-    errors "github.com/pkg/errors"
-)
-
-// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
-// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
-// block, at which time they will undergo full verification and be saved to the disk.
-type LazilyPersistentStoreColumn struct {
-    store                  *filesystem.DataColumnStorage
-    nodeID                 enode.ID
-    cache                  *dataColumnCache
-    newDataColumnsVerifier verification.NewDataColumnsVerifier
-    custodyGroupCount      uint64
-}
-
-var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
-
-// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
-// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
-// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
-// they are all available, the interface takes a slice of data column sidecars.
-type DataColumnsVerifier interface {
-    VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
-}
-
-// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
-// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
-func NewLazilyPersistentStoreColumn(
-    store *filesystem.DataColumnStorage,
-    nodeID enode.ID,
-    newDataColumnsVerifier verification.NewDataColumnsVerifier,
-    custodyGroupCount uint64,
-) *LazilyPersistentStoreColumn {
-    return &LazilyPersistentStoreColumn{
-        store:                  store,
-        nodeID:                 nodeID,
-        cache:                  newDataColumnCache(),
-        newDataColumnsVerifier: newDataColumnsVerifier,
-        custodyGroupCount:      custodyGroupCount,
-    }
-}
-
-// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
-// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
-// by the given block are guaranteed to be persisted for the remainder of the retention period.
-func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
-    if len(sidecars) == 0 {
-        return nil
-    }
-
-    dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
-    if err != nil {
-        return errors.Wrap(err, "blob sidecars from sidecars")
-    }
-
-    // It is safe to retrieve the first sidecar.
-    firstSidecar := dataColumnSidecars[0]
-
-    if len(sidecars) > 1 {
-        firstRoot := firstSidecar.BlockRoot()
-        for _, sidecar := range dataColumnSidecars[1:] {
-            if sidecar.BlockRoot() != firstRoot {
-                return errMixedRoots
-            }
-        }
-    }
-
-    firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
-    if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
-        return nil
-    }
-
-    key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
-    entry := s.cache.ensure(key)
-
-    for _, sidecar := range dataColumnSidecars {
-        if err := entry.stash(&sidecar); err != nil {
-            return errors.Wrap(err, "stash DataColumnSidecar")
-        }
-    }
-
-    return nil
-}
-
-// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
-// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
-func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
-    blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
-    if err != nil {
-        return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
-    }
-
-    // Return early for blocks that do not have any commitments.
-    if blockCommitments.count() == 0 {
-        return nil
-    }
-
-    // Get the root of the block.
-    blockRoot := block.Root()
-
-    // Build the cache key for the block.
-    key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
-
-    // Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
-    entry := s.cache.ensure(key)
-
-    // Delete the cache entry for the block at the end.
-    defer s.cache.delete(key)
-
-    // Set the disk summary for the block in the cache entry.
-    entry.setDiskSummary(s.store.Summary(blockRoot))
-
-    // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
-    // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
-    // ignore their response and decrease their peer score.
-    roDataColumns, err := entry.filter(blockRoot, blockCommitments)
-    if err != nil {
-        return errors.Wrap(err, "entry filter")
-    }
-
-    // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
-    verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
-
-    if err := verifier.ValidFields(); err != nil {
-        return errors.Wrap(err, "valid fields")
-    }
-
-    if err := verifier.SidecarInclusionProven(); err != nil {
-        return errors.Wrap(err, "sidecar inclusion proven")
-    }
-
-    if err := verifier.SidecarKzgProofVerified(); err != nil {
-        return errors.Wrap(err, "sidecar KZG proof verified")
-    }
-
-    verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
-    if err != nil {
-        return errors.Wrap(err, "verified RO data columns - should never happen")
-    }
-
-    if err := s.store.Save(verifiedRoDataColumns); err != nil {
-        return errors.Wrap(err, "save data column sidecars")
-    }
-
-    return nil
-}
-
-// fullCommitmentsToCheck returns the commitments to check for a given block.
-func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
-    samplesPerSlot := params.BeaconConfig().SamplesPerSlot
-
-    // Return early for blocks that are pre-Fulu.
-    if block.Version() < version.Fulu {
-        return &safeCommitmentsArray{}, nil
-    }
-
-    // Compute the block epoch.
-    blockSlot := block.Block().Slot()
-    blockEpoch := slots.ToEpoch(blockSlot)
-
-    // Compute the current epoch.
-    currentEpoch := slots.ToEpoch(currentSlot)
-
-    // Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
-    if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
-        return &safeCommitmentsArray{}, nil
-    }
-
-    // Retrieve the KZG commitments for the block.
-    kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
-    if err != nil {
-        return nil, errors.Wrap(err, "blob KZG commitments")
-    }
-
-    // Return early if there are no commitments in the block.
-    if len(kzgCommitments) == 0 {
-        return &safeCommitmentsArray{}, nil
-    }
-
-    // Retrieve peer info.
-    samplingSize := max(s.custodyGroupCount, samplesPerSlot)
-    peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
-    if err != nil {
-        return nil, errors.Wrap(err, "peer info")
-    }
-
-    // Create a safe commitments array for the custody columns.
-    commitmentsArray := &safeCommitmentsArray{}
-    commitmentsArraySize := uint64(len(commitmentsArray))
-
-    for column := range peerInfo.CustodyColumns {
-        if column >= commitmentsArraySize {
-            return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
-        }
-
-        commitmentsArray[column] = kzgCommitments
-    }
-
-    return commitmentsArray, nil
-}
beacon-chain/das/availability_columns_test.go (file deleted)
@@ -1,313 +0,0 @@
-package das
-
-import (
-    "context"
-    "testing"
-
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
-    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
-    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
-    "github.com/OffchainLabs/prysm/v6/config/params"
-    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
-    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
-    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-    "github.com/OffchainLabs/prysm/v6/testing/require"
-    "github.com/OffchainLabs/prysm/v6/testing/util"
-    "github.com/OffchainLabs/prysm/v6/time/slots"
-    "github.com/ethereum/go-ethereum/p2p/enode"
-)
-
-var commitments = [][]byte{
-    bytesutil.PadTo([]byte("a"), 48),
-    bytesutil.PadTo([]byte("b"), 48),
-    bytesutil.PadTo([]byte("c"), 48),
-    bytesutil.PadTo([]byte("d"), 48),
-}
-
-func TestPersist(t *testing.T) {
-    t.Run("no sidecars", func(t *testing.T) {
-        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
-        err := lazilyPersistentStoreColumns.Persist(0)
-        require.NoError(t, err)
-        require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
-    })
-
-    t.Run("mixed roots", func(t *testing.T) {
-        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-
-        dataColumnParamsByBlockRoot := []util.DataColumnParam{
-            {Slot: 1, Index: 1},
-            {Slot: 2, Index: 2},
-        }
-
-        roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
-        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
-
-        err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
-        require.ErrorIs(t, err, errMixedRoots)
-        require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
-    })
-
-    t.Run("outside DA period", func(t *testing.T) {
-        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-
-        dataColumnParamsByBlockRoot := []util.DataColumnParam{
-            {Slot: 1, Index: 1},
-        }
-
-        roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
-        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
-
-        err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
-        require.NoError(t, err)
-        require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
-    })
-
-    t.Run("nominal", func(t *testing.T) {
-        const slot = 42
-        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-
-        dataColumnParamsByBlockRoot := []util.DataColumnParam{
-            {Slot: slot, Index: 1},
-            {Slot: slot, Index: 5},
-        }
-
-        roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
-        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
-
-        err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
-        require.NoError(t, err)
-        require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
-
-        key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
-        entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
-        require.Equal(t, true, ok)
-
-        // A call to Persist does NOT save the sidecars to disk.
-        require.Equal(t, uint64(0), entry.diskSummary.Count())
-
-        require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
-        require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
-
-        for i, roDataColumn := range entry.scs {
-            if map[int]bool{1: true, 5: true}[i] {
-                continue
-            }
-
-            require.IsNil(t, roDataColumn)
-        }
-    })
-}
-
-func TestIsDataAvailable(t *testing.T) {
-    newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
-        return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
-    }
-
-    ctx := t.Context()
-
-    t.Run("without commitments", func(t *testing.T) {
-        signedBeaconBlockFulu := util.NewBeaconBlockFulu()
-        signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
-
-        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
-
-        err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
-        require.NoError(t, err)
-    })
-
-    t.Run("with commitments", func(t *testing.T) {
-        signedBeaconBlockFulu := util.NewBeaconBlockFulu()
-        signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
-        signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
-        block := signedRoBlock.Block()
-        slot := block.Slot()
-        proposerIndex := block.ProposerIndex()
-        parentRoot := block.ParentRoot()
-        stateRoot := block.StateRoot()
-        bodyRoot, err := block.Body().HashTreeRoot()
-        require.NoError(t, err)
-
-        root := signedRoBlock.Root()
-
-        dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
-        lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
-
-        indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
-        dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
-        for _, index := range indices {
-            dataColumnParams := util.DataColumnParam{
-                Index:          index,
-                KzgCommitments: commitments,
-
-                Slot:          slot,
-                ProposerIndex: proposerIndex,
-                ParentRoot:    parentRoot[:],
-                StateRoot:     stateRoot[:],
-                BodyRoot:      bodyRoot[:],
-            }
-
-            dataColumnsParams = append(dataColumnsParams, dataColumnParams)
-        }
-
-        _, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
-
-        key := cacheKey{root: root}
-        entry := lazilyPersistentStoreColumns.cache.ensure(key)
-        defer lazilyPersistentStoreColumns.cache.delete(key)
-
-        for _, verifiedRoDataColumn := range verifiedRoDataColumns {
-            err := entry.stash(&verifiedRoDataColumn.RODataColumn)
-            require.NoError(t, err)
-        }
-
-        err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
-        require.NoError(t, err)
-
-        actual, err := dataColumnStorage.Get(root, indices[:])
-        require.NoError(t, err)
-
-        summary := dataColumnStorage.Summary(root)
-        require.Equal(t, uint64(len(indices)), summary.Count())
-        require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
-    })
-}
-
-func TestFullCommitmentsToCheck(t *testing.T) {
-    windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
-    require.NoError(t, err)
-
-    testCases := []struct {
-        name        string
-        commitments [][]byte
-        block       func(*testing.T) blocks.ROBlock
-        slot        primitives.Slot
-    }{
-        {
-            name: "Pre-Fulu block",
-            block: func(t *testing.T) blocks.ROBlock {
-                return newSignedRoBlock(t, util.NewBeaconBlockElectra())
-            },
-        },
-        {
-            name: "Commitments outside data availability window",
-            block: func(t *testing.T) blocks.ROBlock {
-                beaconBlockElectra := util.NewBeaconBlockElectra()
-
-                // Block is from slot 0, "current slot" is window size +1 (so outside the window)
-                beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
-
-                return newSignedRoBlock(t, beaconBlockElectra)
-            },
-            slot: windowSlots + 1,
-        },
-        {
-            name: "Commitments within data availability window",
-            block: func(t *testing.T) blocks.ROBlock {
-                signedBeaconBlockFulu := util.NewBeaconBlockFulu()
-                signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
-                signedBeaconBlockFulu.Block.Slot = 100
-
-                return newSignedRoBlock(t, signedBeaconBlockFulu)
-            },
-            commitments: commitments,
-            slot:        100,
-        },
-    }
-
-    for _, tc := range testCases {
-        t.Run(tc.name, func(t *testing.T) {
-            numberOfColumns := params.BeaconConfig().NumberOfColumns
-
-            b := tc.block(t)
-            s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
-
-            commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
-            require.NoError(t, err)
-
-            for _, commitments := range commitmentsArray {
-                require.DeepEqual(t, tc.commitments, commitments)
-            }
-        })
-    }
-}
-
-func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
-    roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
-
-    roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
-    for _, roDataColumn := range roDataColumns {
-        roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
-    }
-
-    return roSidecars, roDataColumns
-}
-
-func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
-    sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
-    require.NoError(t, err)
-
-    rb, err := blocks.NewROBlock(sb)
-    require.NoError(t, err)
-
-    return rb
-}
-
-type mockDataColumnsVerifier struct {
-    t                  *testing.T
-    dataColumnSidecars []blocks.RODataColumn
-    validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
-}
-
-var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
-
-func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
-    require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
-
-    verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
-    for _, dataColumnSidecar := range m.dataColumnSidecars {
-        verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
-        verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
-    }
-
-    return verifiedDataColumnSidecars, nil
-}
-
-func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
-
-func (m *mockDataColumnsVerifier) ValidFields() error {
-    m.validCalled = true
-    return nil
-}
-
-func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
-    return nil
-}
-
-func (m *mockDataColumnsVerifier) NotFromFutureSlot() error  { return nil }
-func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
-func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
-
-func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
-    return nil
-}
-
-func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
-    return nil
-}
-
-func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error       { return nil }
-func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
-
-func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
-    m.SidecarInclusionProvenCalled = true
-    return nil
-}
-
-func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
-    m.SidecarKzgProofVerifiedCalled = true
-    return nil
-}
-
-func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
@@ -15,5 +15,5 @@ import (
 // durably persisted before returning a non-error value.
 type AvailabilityStore interface {
     IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
-    Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
+    Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
 }
@@ -5,13 +5,12 @@ import (
     "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
-    errors "github.com/pkg/errors"
 )
 
 // MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
 type MockAvailabilityStore struct {
     VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
-    PersistBlobsCallback       func(current primitives.Slot, sc ...blocks.ROBlob) error
+    PersistBlobsCallback       func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
 }
 
 var _ AvailabilityStore = &MockAvailabilityStore{}
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
 }
 
 // Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
-func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
-    blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
-    if err != nil {
-        return errors.Wrap(err, "blob sidecars from sidecars")
-    }
+func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
     if m.PersistBlobsCallback != nil {
-        return m.PersistBlobsCallback(current, blobSidecars...)
+        return m.PersistBlobsCallback(current, blobSidecar...)
     }
     return nil
 }
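Hypothetical test usage, not part of the commit: with the conversion step gone, the mock's callback receives ROBlob values directly, so a test can count persisted sidecars without touching ROSidecar at all.

    package example

    import (
        "github.com/OffchainLabs/prysm/v6/beacon-chain/das"
        "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
        "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    )

    // newCountingStore returns a mock that records how many blob sidecars were persisted.
    func newCountingStore(counter *int) *das.MockAvailabilityStore {
        return &das.MockAvailabilityStore{
            PersistBlobsCallback: func(current primitives.Slot, sidecars ...blocks.ROBlob) error {
                *counter += len(sidecars)
                return nil
            },
        }
    }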
@@ -100,6 +100,14 @@ type (
     }
 )
 
+// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
+type DataColumnStorageReader interface {
+    Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
+    Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
+}
+
+var _ DataColumnStorageReader = &DataColumnStorage{}
+
 // WithDataColumnBasePath is a required option that sets the base path of data column storage.
 func WithDataColumnBasePath(base string) DataColumnStorageOption {
     return func(b *DataColumnStorage) error {
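Hypothetical consumer, not added by this commit: the new read-only interface lets callers ask which column indices are already on disk (via Summary, whose Stored method appears in the next hunk) and fetch the verified ones (via Get), which is the split the sync package performs before asking peers for the rest.

    package example

    import (
        "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
        fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
        "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    )

    // splitStored returns the verified columns already stored for root and the
    // indices that still need to be requested from peers.
    func splitStored(
        reader filesystem.DataColumnStorageReader,
        root [fieldparams.RootLength]byte,
        wanted []uint64,
    ) ([]blocks.VerifiedRODataColumn, []uint64, error) {
        stored := reader.Summary(root).Stored()
        have := make([]uint64, 0, len(wanted))
        missing := make([]uint64, 0, len(wanted))
        for _, idx := range wanted {
            if stored[idx] {
                have = append(have, idx)
            } else {
                missing = append(missing, idx)
            }
        }
        columns, err := reader.Get(root, have)
        if err != nil {
            return nil, nil, err
        }
        return columns, missing, nil
    }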
@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
     return stored
 }
 
-// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
-// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
-type DataColumnStorageSummarizer interface {
-    Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
-}
-
 type dataColumnStorageSummaryCache struct {
     mu              sync.RWMutex
     dataColumnCount float64
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
     cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
 }
 
-var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
-
 func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
     return &dataColumnStorageSummaryCache{
         cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
     fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
     return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
 }
-
-func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
-    c := newDataColumnStorageSummaryCache()
-    for root, indices := range set {
-        if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
-            t.Fatal(err)
-        }
-    }
-
-    return c
-}
@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
     CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
     DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
 
+    // Genesis operations.
+    LoadGenesis(ctx context.Context, stateBytes []byte) error
+    SaveGenesisData(ctx context.Context, state state.BeaconState) error
+    EnsureEmbeddedGenesis(ctx context.Context) error
+
+    // Support for checkpoint sync and backfill.
+    SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
+    SaveOrigin(ctx context.Context, serState, serBlock []byte) error
+    SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
+    BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
+
     // Custody operations.
     UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
     UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
     HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
     HeadBlockRoot() ([32]byte, error)
     SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
-
-    // Genesis operations.
-    LoadGenesis(ctx context.Context, stateBytes []byte) error
-    SaveGenesisData(ctx context.Context, state state.BeaconState) error
-    EnsureEmbeddedGenesis(ctx context.Context) error
-
-    // Support for checkpoint sync and backfill.
-    SaveOrigin(ctx context.Context, serState, serBlock []byte) error
-    SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
-    BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
 }
 
 // SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
@@ -845,6 +845,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
         ClockWaiter:         b.clockWaiter,
         InitialSyncComplete: complete,
         BlobStorage:         b.BlobStorage,
+        DataColumnStorage:   b.DataColumnStorage,
     }, opts...)
     return b.services.RegisterService(is)
 }
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
     },
     check: func(scorer *scorers.GossipScorer) {
         assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
-        assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
+        assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
         _, _, topicMap, err := scorer.GossipData("peer1")
         assert.NoError(t, err)
         assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -169,7 +169,7 @@ var (
     RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
 
     // DataColumnSidecarsByRoot v1 Message
-    RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
+    RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
 }
 
 // Maps all registered protocol prefixes.
@@ -8,6 +8,7 @@ go_library(
         "broadcast_bls_changes.go",
         "context.go",
         "custody.go",
+        "data_column_sidecars.go",
         "data_columns_reconstruct.go",
         "deadlines.go",
         "decode_pubsub.go",
@@ -167,6 +168,7 @@ go_test(
         "broadcast_bls_changes_test.go",
         "context_test.go",
         "custody_test.go",
+        "data_column_sidecars_test.go",
        "data_columns_reconstruct_test.go",
        "decode_pubsub_test.go",
        "error_test.go",
@@ -281,6 +283,7 @@ go_test(
         "@com_github_golang_snappy//:go_default_library",
         "@com_github_libp2p_go_libp2p//:go_default_library",
         "@com_github_libp2p_go_libp2p//core:go_default_library",
+        "@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
         "@com_github_libp2p_go_libp2p//core/network:go_default_library",
         "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
         "@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
@@ -91,9 +91,7 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
         return err
     }
 
-    sc := blocks.NewSidecarFromBlobSidecar(rb)
-
-    if err := bs.store.Persist(bs.current, sc); err != nil {
+    if err := bs.store.Persist(bs.current, rb); err != nil {
         return err
     }
 
878
beacon-chain/sync/data_column_sidecars.go
Normal file
878
beacon-chain/sync/data_column_sidecars.go
Normal file
@@ -0,0 +1,878 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"math"
|
||||||
|
"slices"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
|
prysmP2P "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||||
|
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||||
|
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||||
|
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||||
|
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||||
|
goPeer "github.com/libp2p/go-libp2p/core/peer"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DataColumnSidecarsParams stores the common parameters needed to
|
||||||
|
// fetch data column sidecars from peers.
|
||||||
|
type DataColumnSidecarsParams struct {
|
||||||
|
Ctx context.Context // Context
|
||||||
|
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
|
||||||
|
P2P prysmP2P.P2P // P2P network interface
|
||||||
|
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
|
||||||
|
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
|
||||||
|
Storage filesystem.DataColumnStorageReader // Data columns storage
|
||||||
|
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check to conformity of incoming data column sidecars
|
||||||
|
}

// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
// blocks and requested data column indices. It employs a multi-step strategy:
//
// 1. Direct retrieval: If all requested columns are available in storage, they are
//    retrieved directly without reconstruction.
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
//    stored columns exist (at least the minimum required for reconstruction), the function
//    reconstructs all columns and extracts the requested indices.
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
//    from connected peers that are expected to custody the required data.
//
// The function returns a map of block roots to their corresponding verified read-only data
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
func FetchDataColumnSidecars(
	params DataColumnSidecarsParams,
	roBlocks []blocks.ROBlock,
	indicesMap map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
	if len(roBlocks) == 0 || len(indicesMap) == 0 {
		return nil, nil
	}

	indices := sortedSliceFromMap(indicesMap)
	slotsWithCommitments := make(map[primitives.Slot]bool)
	indicesByRootToQuery := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
	indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
	result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)

	for _, roBlock := range roBlocks {
		// Filter out blocks without commitments.
		block := roBlock.Block()
		commitments, err := block.Body().BlobKzgCommitments()
		if err != nil {
			return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
		}
		if len(commitments) == 0 {
			continue
		}

		slotsWithCommitments[block.Slot()] = true
		root := roBlock.Root()

		// Step 1: Get the requested sidecars for this root if available in storage.
		requestedColumns, err := tryGetDirectColumns(params.Storage, root, indices)
		if err != nil {
			return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
		}
		if requestedColumns != nil {
			result[root] = requestedColumns
			continue
		}

		// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage.
		requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
		if err != nil {
			return nil, errors.Wrapf(err, "try get reconstructed columns for root %#x", root)
		}
		if requestedColumns != nil {
			result[root] = requestedColumns
			continue
		}

		// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
		// and those that are already stored.
		indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)

		if len(indicesToQueryMap) > 0 {
			indicesByRootToQuery[root] = indicesToQueryMap
		}
		if len(indicesStoredMap) > 0 {
			indicesByRootStored[root] = indicesStoredMap
		}
	}

	// Early return if no sidecars need to be queried from peers.
	if len(indicesByRootToQuery) == 0 {
		return result, nil
	}

	// Step 3b: Request missing sidecars from peers.
	start, count := time.Now(), computeTotalCount(indicesByRootToQuery)
	fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, indicesByRootToQuery)
	if err != nil {
		return nil, errors.Wrap(err, "request from peers")
	}

	log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")

	for root, verifiedSidecars := range fromPeersResult {
		result[root] = append(result[root], verifiedSidecars...)
	}

	// Step 3c: Load the stored sidecars.
	for root, indicesStored := range indicesByRootStored {
		requestedColumns, err := tryGetDirectColumns(params.Storage, root, sortedSliceFromMap(indicesStored))
		if err != nil {
			return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
		}

		result[root] = append(result[root], requestedColumns...)
	}

	return result, nil
}
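
// Illustrative caller sketch for FetchDataColumnSidecars, only to make the three-step
// strategy above concrete; `params` and `roBlocks` are assumed to be prepared by the caller:
//
//	requested := map[uint64]bool{31: true, 81: true, 106: true}
//	columnsByRoot, err := FetchDataColumnSidecars(params, roBlocks, requested)
//	if err != nil {
//		return errors.Wrap(err, "fetch data column sidecars")
//	}
//	// Blocks without commitments are simply absent from the result map.
//	for root, columns := range columnsByRoot {
//		log.WithFields(logrus.Fields{"root": root, "count": len(columns)}).Debug("Data columns available")
//	}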

// tryGetDirectColumns attempts to retrieve all requested columns directly from storage
// if they are all available. Returns the columns if successful, and nil if at least one
// requested sidecar is not available in the storage.
func tryGetDirectColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
	// Check if all requested indices are present in storage.
	storedIndices := storage.Summary(blockRoot).Stored()
	allRequestedPresent := true
	for _, requestedIndex := range indices {
		if !storedIndices[requestedIndex] {
			allRequestedPresent = false
			break
		}
	}

	if !allRequestedPresent {
		return nil, nil
	}

	// All requested data is present, retrieve it directly from the database.
	requestedColumns, err := storage.Get(blockRoot, indices)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
	}

	return requestedColumns, nil
}

// tryGetReconstructedColumns attempts to retrieve columns using reconstruction
// if sufficient columns are available. Returns the columns if successful, nil and nil if
// insufficient columns are stored, or nil and an error if an error occurs.
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
	// Check if we have enough columns for reconstruction.
	summary := storage.Summary(blockRoot)
	if summary.Count() < peerdas.MinimumColumnCountToReconstruct() {
		return nil, nil
	}

	// Retrieve all stored columns for reconstruction.
	allStoredColumns, err := storage.Get(blockRoot, nil)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
	}

	// Attempt reconstruction.
	reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
	}

	// Health check: ensure we have the expected number of columns.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	if uint64(len(reconstructedColumns)) != numberOfColumns {
		return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
	}

	// Extract only the requested indices from reconstructed data using direct indexing.
	requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
	for _, requestedIndex := range indices {
		if requestedIndex >= numberOfColumns {
			return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
		}
		requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
	}

	return requestedColumns, nil
}
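
// Sketch of how the two storage fast paths compose for a single root (hypothetical
// variables; the exact threshold returned by peerdas.MinimumColumnCountToReconstruct
// is not asserted here):
//
//	cols, err := tryGetDirectColumns(storage, root, indices) // hit only if every requested index is stored
//	if err == nil && cols == nil {
//		// Not all requested columns are stored, but enough columns of any kind may be.
//		cols, err = tryGetReconstructedColumns(storage, root, indices)
//	}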

// categorizeIndices separates indices into those that need to be queried from peers
// and those that are already stored.
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
	indicesToQuery := make(map[uint64]bool, len(indices))
	indicesStored := make(map[uint64]bool, len(indices))

	allStoredIndices := storage.Summary(blockRoot).Stored()
	for _, index := range indices {
		if allStoredIndices[index] {
			indicesStored[index] = true
			continue
		}
		indicesToQuery[index] = true
	}

	return indicesToQuery, indicesStored
}
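
// Worked example (matching the unit test for this helper): with columns 12 and 14 stored
// for a root and a request for {12, 13, 14}, the split is
//
//	toQuery, stored := categorizeIndices(storage, root, []uint64{12, 13, 14})
//	// toQuery == map[uint64]bool{13: true}
//	// stored  == map[uint64]bool{12: true, 14: true}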

// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
// It explores the connected peers to find those that are expected to custody the requested columns
// and returns only when all requested columns are either retrieved or have been tried to be retrieved
// from all possible peers.
// Returns a map of block roots to their verified read-only data column sidecars.
// Returns an error if at least one requested column could not be retrieved.
// WARNING: This function alters `missingIndicesByRoot`. The caller should NOT use it after running this function.
func tryRequestingColumnsFromPeers(
	p DataColumnSidecarsParams,
	roBlocks []blocks.ROBlock,
	slotsWithCommitments map[primitives.Slot]bool,
	missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
	// Create a new random source for peer selection.
	randomSource := rand.NewGenerator()

	// Compute slots by block root.
	slotByRoot := computeSlotByBlockRoot(roBlocks)

	// Determine all sidecars each peer is expected to custody.
	connectedPeersSlice := p.P2P.Peers().Connected()
	connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
	for _, peer := range connectedPeersSlice {
		connectedPeers[peer] = true
	}

	indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
	if err != nil {
		return nil, errors.Wrap(err, "explore peers")
	}

	verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
	for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
		// Select peers to query the missing sidecars from.
		indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
		if err != nil {
			return nil, errors.Wrap(err, "select peers")
		}

		// Remove selected peers from the maps.
		for peer := range indicesByRootByPeerToQuery {
			delete(connectedPeers, peer)
		}

		// Fetch the sidecars from the chosen peers.
		roDataColumnsByPeer := fetchDataColumnSidecarsFromPeers(p, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)

		// Verify the received data column sidecars.
		verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(p.P2P, p.NewVerifier, roDataColumnsByPeer)
		if err != nil {
			return nil, errors.Wrap(err, "verify data columns sidecars by peer")
		}

		// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
		newMissingIndicesByRoot, localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
		missingIndicesByRoot = newMissingIndicesByRoot
		for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
			verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
		}

		// Compute indices by root by peer with the updated missing indices and connected peers.
		indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
		if err != nil {
			return nil, errors.Wrap(err, "explore peers")
		}
	}

	if len(missingIndicesByRoot) > 0 {
		return nil, errors.New("not all requested data column sidecars were retrieved from peers")
	}

	return verifiedColumnsByRoot, nil
}

// selectPeers selects peers to query the sidecars from.
// It begins by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth,
// and assigns to it all its available sidecars. Then, it randomly selects another peer, until
// all sidecars in `missingIndicesByRoot` are covered.
func selectPeers(
	p DataColumnSidecarsParams,
	randomSource *rand.Rand,
	count int,
	origIndicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
	const randomPeerTimeout = 30 * time.Second

	// Select peers to query the missing sidecars from.
	indicesByRootByPeer := copyIndicesByRootByPeer(origIndicesByRootByPeer)
	internalIndicesByRootByPeer := copyIndicesByRootByPeer(indicesByRootByPeer)
	indicesByRootByPeerToQuery := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
	for len(internalIndicesByRootByPeer) > 0 {
		// Randomly select a peer with enough bandwidth.
		peer, err := func() (goPeer.ID, error) {
			ctx, cancel := context.WithTimeout(p.Ctx, randomPeerTimeout)
			defer cancel()

			peer, err := randomPeer(ctx, randomSource, p.RateLimiter, count, internalIndicesByRootByPeer)
			if err != nil {
				return "", errors.Wrap(err, "select random peer")
			}

			return peer, err
		}()
		if err != nil {
			return nil, err
		}

		// Query all the sidecars that peer can offer us.
		newIndicesByRoot, ok := internalIndicesByRootByPeer[peer]
		if !ok {
			return nil, errors.Errorf("peer %s not found in internal indices by root by peer map", peer)
		}

		indicesByRootByPeerToQuery[peer] = newIndicesByRoot

		// Remove this peer from the maps to avoid re-selection.
		delete(indicesByRootByPeer, peer)
		delete(internalIndicesByRootByPeer, peer)

		// Delete the corresponding sidecars from other peers in the internal map
		// to avoid re-selection during this iteration.
		for peer, indicesByRoot := range internalIndicesByRootByPeer {
			for root, indices := range indicesByRoot {
				newIndices := newIndicesByRoot[root]
				for index := range newIndices {
					delete(indices, index)
				}
				if len(indices) == 0 {
					delete(indicesByRoot, root)
				}
			}
			if len(indicesByRoot) == 0 {
				delete(internalIndicesByRootByPeer, peer)
			}
		}
	}

	return indicesByRootByPeerToQuery, nil
}
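
// Worked example of the greedy selection above (hypothetical two-peer input):
//
//	indicesByRootByPeer := map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
//		"peer1": {{1}: {12: true, 13: true}, {3}: {14: true, 15: true}},
//		"peer2": {{1}: {13: true, 14: true}, {3}: {14: true, 16: true}},
//	}
//
// If "peer1" is drawn first it is assigned everything it can serve and "peer2" only keeps
// the leftovers ({1}: {14}, {3}: {16}); if "peer2" is drawn first the roles are reversed.
// Either way, every missing sidecar is assigned to exactly one peer.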

// updateResults updates the missing indices and verified sidecars maps based on the newly verified sidecars.
func updateResults(
	verifiedSidecars []blocks.VerifiedRODataColumn,
	origMissingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte]map[uint64]bool, map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn) {
	// Copy the original map to avoid modifying it directly.
	missingIndicesByRoot := copyIndicesByRoot(origMissingIndicesByRoot)
	verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
	for _, verifiedSidecar := range verifiedSidecars {
		blockRoot := verifiedSidecar.BlockRoot()
		index := verifiedSidecar.Index

		// Add to the result map grouped by block root.
		verifiedSidecarsByRoot[blockRoot] = append(verifiedSidecarsByRoot[blockRoot], verifiedSidecar)

		if indices, ok := missingIndicesByRoot[blockRoot]; ok {
			delete(indices, index)
			if len(indices) == 0 {
				delete(missingIndicesByRoot, blockRoot)
			}
		}
	}

	return missingIndicesByRoot, verifiedSidecarsByRoot
}
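
// Worked example: if a root is missing {12, 13} and verified sidecars 12 and 13 for that
// root come back, the root disappears from the missing map entirely; if only {13, 14} of a
// missing {13, 14, 15} set come back, the root stays with {15}:
//
//	newMissing, verifiedByRoot := updateResults(verifiedSidecars, missing)
//	// newMissing only keeps the indices that no verified sidecar covered.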

// fetchDataColumnSidecarsFromPeers retrieves data column sidecars from peers.
func fetchDataColumnSidecarsFromPeers(
	params DataColumnSidecarsParams,
	slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
	slotsWithCommitments map[primitives.Slot]bool,
	indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) map[goPeer.ID][]blocks.RODataColumn {
	var (
		wg  sync.WaitGroup
		mut sync.Mutex
	)

	roDataColumnsByPeer := make(map[goPeer.ID][]blocks.RODataColumn)
	wg.Add(len(indicesByRootByPeer))
	for peerID, indicesByRoot := range indicesByRootByPeer {
		go func(peerID goPeer.ID, indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) {
			defer wg.Done()

			requestedCount := 0
			for _, indices := range indicesByRoot {
				requestedCount += len(indices)
			}

			log := log.WithFields(logrus.Fields{
				"peerID":              peerID,
				"agent":               agentString(peerID, params.P2P.Host()),
				"blockCount":          len(indicesByRoot),
				"totalRequestedCount": requestedCount,
			})

			roDataColumns, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, peerID, indicesByRoot)
			if err != nil {
				log.WithError(err).Warning("Failed to send data column sidecars request")
				return
			}

			mut.Lock()
			defer mut.Unlock()
			roDataColumnsByPeer[peerID] = roDataColumns
		}(peerID, indicesByRoot)
	}

	wg.Wait()

	return roDataColumnsByPeer
}

func sendDataColumnSidecarsRequest(
	params DataColumnSidecarsParams,
	slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
	slotsWithCommitments map[primitives.Slot]bool,
	peerID goPeer.ID,
	indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) ([]blocks.RODataColumn, error) {
	const batchSize = 32

	rootCount := int64(len(indicesByRoot))
	requestedSidecarsCount := 0
	for _, indices := range indicesByRoot {
		requestedSidecarsCount += len(indices)
	}

	log := log.WithFields(logrus.Fields{
		"peerID":            peerID,
		"agent":             agentString(peerID, params.P2P.Host()),
		"requestedSidecars": requestedSidecarsCount,
	})

	// Try to build by-range requests first.
	byRangeRequests, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
	if err != nil {
		return nil, errors.Wrap(err, "craft by range request")
	}

	// If we have valid by-range requests, send them.
	if len(byRangeRequests) > 0 {
		count := 0
		for _, indices := range indicesByRoot {
			count += len(indices)
		}

		start := time.Now()
		roDataColumns := make([]blocks.RODataColumn, 0, count)
		for _, request := range byRangeRequests {
			if params.RateLimiter != nil {
				params.RateLimiter.Add(peerID.String(), rootCount)
			}

			localRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peerID, request)
			if err != nil {
				return nil, errors.Wrapf(err, "send data column sidecars by range request to peer %s", peerID)
			}

			roDataColumns = append(roDataColumns, localRoDataColumns...)
		}

		log.WithFields(logrus.Fields{
			"respondedSidecars": len(roDataColumns),
			"requests":          len(byRangeRequests),
			"type":              "byRange",
			"duration":          time.Since(start),
		}).Debug("Received data column sidecars")

		return roDataColumns, nil
	}

	// Build identifiers for the by root request.
	byRootRequest := buildByRootRequest(indicesByRoot)

	// Send the by root request.
	start := time.Now()
	if params.RateLimiter != nil {
		params.RateLimiter.Add(peerID.String(), rootCount)
	}
	roDataColumns, err := SendDataColumnSidecarsByRootRequest(params, peerID, byRootRequest)
	if err != nil {
		return nil, errors.Wrapf(err, "send data column sidecars by root request to peer %s", peerID)
	}

	log.WithFields(logrus.Fields{
		"respondedSidecars": len(roDataColumns),
		"requests":          1,
		"type":              "byRoot",
		"duration":          time.Since(start),
	}).Debug("Received data column sidecars")

	return roDataColumns, nil
}

// buildByRangeRequests constructs by range requests from the given indices,
// only if the indices are the same for all blocks and if the blocks are contiguous.
// (Missing blocks or blocks without commitments do count as contiguous.)
// If one of these conditions is not met, it returns nil.
func buildByRangeRequests(
	slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
	slotsWithCommitments map[primitives.Slot]bool,
	indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
	batchSize uint64,
) ([]*ethpb.DataColumnSidecarsByRangeRequest, error) {
	if len(indicesByRoot) == 0 {
		return nil, nil
	}

	var reference map[uint64]bool
	slots := make([]primitives.Slot, 0, len(slotByRoot))
	for root, indices := range indicesByRoot {
		if reference == nil {
			reference = indices
		}

		if !compareIndices(reference, indices) {
			return nil, nil
		}

		slot, ok := slotByRoot[root]
		if !ok {
			return nil, errors.Errorf("slot not found for block root %#x", root)
		}

		slots = append(slots, slot)
	}

	slices.Sort(slots)

	for i := 1; i < len(slots); i++ {
		previous, current := slots[i-1], slots[i]
		if current == previous+1 {
			continue
		}

		for j := previous + 1; j < current; j++ {
			if slotsWithCommitments[j] {
				return nil, nil
			}
		}
	}

	columns := sortedSliceFromMap(reference)
	startSlot, endSlot := slots[0], slots[len(slots)-1]
	totalCount := uint64(endSlot - startSlot + 1)

	requests := make([]*ethpb.DataColumnSidecarsByRangeRequest, 0, totalCount/batchSize)
	for start := startSlot; start <= endSlot; start += primitives.Slot(batchSize) {
		end := min(start+primitives.Slot(batchSize)-1, endSlot)
		request := &ethpb.DataColumnSidecarsByRangeRequest{
			StartSlot: start,
			Count:     uint64(end - start + 1),
			Columns:   columns,
		}

		requests = append(requests, request)
	}

	return requests, nil
}
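
// Worked example (hypothetical input, mirroring the request built in the unit tests):
// roots at slots 1, 3, 4 and 7, all asking for columns {1, 2}, with no skipped slot
// carrying commitments, collapse into a single by-range request spanning the whole window:
//
//	requests, _ := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, 32)
//	// requests[0] == &ethpb.DataColumnSidecarsByRangeRequest{StartSlot: 1, Count: 7, Columns: []uint64{1, 2}}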

// buildByRootRequest constructs a by root request from the given indices.
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
	identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
	for root, indices := range indicesByRoot {
		identifier := &eth.DataColumnsByRootIdentifier{
			BlockRoot: root[:],
			Columns:   sortedSliceFromMap(indices),
		}
		identifiers = append(identifiers, identifier)
	}

	// Sort identifiers to have a deterministic output.
	slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
		if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
			return cmp
		}
		return slices.Compare(left.Columns, right.Columns)
	})

	return identifiers
}
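
// Example of the deterministic output (hypothetical roots): requesting columns {2, 1} for
// two roots yields identifiers with sorted columns, ordered by block root:
//
//	ids := buildByRootRequest(map[[fieldparams.RootLength]byte]map[uint64]bool{
//		{7}: {2: true, 1: true},
//		{4}: {2: true, 1: true},
//	})
//	// ids[0].BlockRoot[0] == 4, ids[1].BlockRoot[0] == 7, and both have Columns == []uint64{1, 2}.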

// verifyDataColumnSidecarsByPeer verifies the received data column sidecars.
// If at least one sidecar from a peer is invalid, the peer is downscored and
// all its sidecars are rejected. (Sidecars from other peers are still accepted.)
func verifyDataColumnSidecarsByPeer(
	p2p prysmP2P.P2P,
	newVerifier verification.NewDataColumnsVerifier,
	roDataColumnsByPeer map[goPeer.ID][]blocks.RODataColumn,
) ([]blocks.VerifiedRODataColumn, error) {
	// First optimistically verify all received data columns in a single batch.
	count := 0
	for _, columns := range roDataColumnsByPeer {
		count += len(columns)
	}

	roDataColumnSidecars := make([]blocks.RODataColumn, 0, count)
	for _, columns := range roDataColumnsByPeer {
		roDataColumnSidecars = append(roDataColumnSidecars, columns...)
	}

	verifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, roDataColumnSidecars)
	if err == nil {
		// This is the happy path where all sidecars are verified.
		return verifiedRoDataColumnSidecars, nil
	}

	// An error occurred during verification, which means that at least one sidecar is invalid.
	// Reverify peer by peer to identify the faulty peer(s), reject all their sidecars, and downscore them.
	verifiedRoDataColumnSidecars = make([]blocks.VerifiedRODataColumn, 0, count)
	for peer, columns := range roDataColumnsByPeer {
		peerVerifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, columns)
		if err != nil {
			// This peer has invalid sidecars.
			log := log.WithError(err).WithField("peerID", peer)
			newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peer)
			log.Warning("Peer returned invalid data column sidecars")
			log.WithFields(logrus.Fields{"reason": "invalidDataColumnSidecars", "newScore": newScore}).Debug("Downscore peer")
		}

		verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, peerVerifiedRoDataColumnSidecars...)
	}

	return verifiedRoDataColumnSidecars, nil
}

// verifyByRootDataColumnSidecars verifies the provided read-only data columns against the
// requirements for data column sidecars received via the by root request.
func verifyByRootDataColumnSidecars(newVerifier verification.NewDataColumnsVerifier, roDataColumns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	verifier := newVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)

	if err := verifier.ValidFields(); err != nil {
		return nil, errors.Wrap(err, "valid fields")
	}

	if err := verifier.SidecarInclusionProven(); err != nil {
		return nil, errors.Wrap(err, "sidecar inclusion proven")
	}

	if err := verifier.SidecarKzgProofVerified(); err != nil {
		return nil, errors.Wrap(err, "sidecar KZG proof verified")
	}

	verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
	if err != nil {
		return nil, errors.Wrap(err, "verified RO data columns - should never happen")
	}

	return verifiedRoDataColumns, nil
}

// computeIndicesByRootByPeer returns a peers->root->indices map only for
// the roots and indices given in `indicesByBlockRoot`. For a given root, it selects a peer
// only if the peer's head slot is at least the block's slot.
func computeIndicesByRootByPeer(
	p2p prysmP2P.P2P,
	slotByBlockRoot map[[fieldparams.RootLength]byte]primitives.Slot,
	indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
	peers map[goPeer.ID]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
	// First, compute custody columns for all peers.
	peersByIndex := make(map[uint64]map[goPeer.ID]bool)
	headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
	for peer := range peers {
		// Compute the custody columns for each peer.
		nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
		if err != nil {
			return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
		}

		custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)

		dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
		if err != nil {
			return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
		}

		for column := range dasInfo.CustodyColumns {
			if _, exists := peersByIndex[column]; !exists {
				peersByIndex[column] = make(map[goPeer.ID]bool)
			}
			peersByIndex[column][peer] = true
		}

		// Compute the head slot for each peer.
		peerChainState, err := p2p.Peers().ChainState(peer)
		if err != nil {
			return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
		}

		if peerChainState == nil {
			return nil, errors.Errorf("chain state is nil for peer %s", peer)
		}

		headSlotByPeer[peer] = peerChainState.HeadSlot
	}

	// For each block root and its indices, find suitable peers.
	indicesByRootByPeer := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
	for blockRoot, indices := range indicesByBlockRoot {
		blockSlot, ok := slotByBlockRoot[blockRoot]
		if !ok {
			return nil, errors.Errorf("slot not found for block root %#x", blockRoot)
		}

		for index := range indices {
			peers := peersByIndex[index]
			for peer := range peers {
				peerHeadSlot, ok := headSlotByPeer[peer]
				if !ok {
					return nil, errors.Errorf("head slot not found for peer %s", peer)
				}

				if peerHeadSlot < blockSlot {
					continue
				}

				// Build the peers->root->indices map.
				if _, exists := indicesByRootByPeer[peer]; !exists {
					indicesByRootByPeer[peer] = make(map[[fieldparams.RootLength]byte]map[uint64]bool)
				}
				if _, exists := indicesByRootByPeer[peer][blockRoot]; !exists {
					indicesByRootByPeer[peer][blockRoot] = make(map[uint64]bool)
				}
				indicesByRootByPeer[peer][blockRoot][index] = true
			}
		}
	}

	return indicesByRootByPeer, nil
}
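
// Usage sketch (hypothetical peers and roots): a peer ends up under a root only if it is
// expected to custody at least one of the missing columns for that root and its advertised
// head slot has reached the block's slot:
//
//	indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
//	// indicesByRootByPeer[peer][root] is the subset of missingIndicesByRoot[root] that this
//	// peer custodies; peers that have not reached the block yet are filtered out.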

// randomPeer selects a random peer. If no peer has enough bandwidth, it waits and retries.
// Returns the selected peer ID and any error.
func randomPeer(
	ctx context.Context,
	randomSource *rand.Rand,
	rateLimiter *leakybucket.Collector,
	count int,
	indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (goPeer.ID, error) {
	const waitPeriod = 5 * time.Second

	peerCount := len(indicesByRootByPeer)
	if peerCount == 0 {
		return "", errors.New("no peers available")
	}

	for ctx.Err() == nil {
		nonRateLimitedPeers := make([]goPeer.ID, 0, len(indicesByRootByPeer))
		for peer := range indicesByRootByPeer {
			remaining := int64(math.MaxInt64)
			if rateLimiter != nil {
				remaining = rateLimiter.Remaining(peer.String())
			}
			if remaining >= int64(count) {
				nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
			}
		}

		if len(nonRateLimitedPeers) == 0 {
			log.WithFields(logrus.Fields{
				"peerCount": peerCount,
				"delay":     waitPeriod,
			}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
			time.Sleep(waitPeriod)
			continue
		}

		randomIndex := randomSource.Intn(len(nonRateLimitedPeers))
		return nonRateLimitedPeers[randomIndex], nil
	}

	return "", ctx.Err()
}
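
// Bandwidth gate sketch (hypothetical numbers, assuming the collector's second argument is
// the bucket capacity): with leakybucket.NewCollector(1., 10, time.Second, false), a peer
// starts with Remaining == 10 and stays eligible while Remaining >= count; once every
// candidate falls below that threshold, randomPeer sleeps 5 seconds at a time until
// capacity refills or the context expires.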

// copyIndicesByRootByPeer creates a deep copy of the given nested map.
// Returns a new map with the same structure and contents.
func copyIndicesByRootByPeer(original map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool) map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool {
	copied := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
	for peer, indicesByRoot := range original {
		copied[peer] = copyIndicesByRoot(indicesByRoot)
	}

	return copied
}

// copyIndicesByRoot creates a deep copy of the given nested map.
// Returns a new map with the same structure and contents.
func copyIndicesByRoot(original map[[fieldparams.RootLength]byte]map[uint64]bool) map[[fieldparams.RootLength]byte]map[uint64]bool {
	copied := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
	for root, indexMap := range original {
		copied[root] = make(map[uint64]bool, len(indexMap))
		for index, value := range indexMap {
			copied[root][index] = value
		}
	}
	return copied
}

// compareIndices compares two map[uint64]bool and returns true if they are equal.
func compareIndices(left, right map[uint64]bool) bool {
	if len(left) != len(right) {
		return false
	}

	for key, leftValue := range left {
		rightValue, exists := right[key]
		if !exists || leftValue != rightValue {
			return false
		}
	}

	return true
}

// sortedSliceFromMap converts a map[uint64]bool to a sorted slice of keys.
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
	result := make([]uint64, 0, len(m))
	for k := range m {
		result = append(result, k)
	}

	slices.Sort(result)
	return result
}

// computeSlotByBlockRoot maps each block root to its corresponding slot.
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
	slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
	for _, roBlock := range roBlocks {
		slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
	}
	return slotByBlockRoot
}

// computeTotalCount calculates the total count of indices across all roots.
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
	totalCount := 0
	for _, indices := range input {
		totalCount += len(indices)
	}
	return totalCount
}

984 beacon-chain/sync/data_column_sidecars_test.go Normal file
@@ -0,0 +1,984 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||||
|
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||||
|
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||||
|
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||||
|
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||||
|
"github.com/libp2p/go-libp2p"
|
||||||
|
"github.com/libp2p/go-libp2p/core/crypto"
|
||||||
|
"github.com/libp2p/go-libp2p/core/network"
|
||||||
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFetchDataColumnSidecars(t *testing.T) {
|
||||||
|
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||||
|
// Slot 1: All needed sidecars are available in storage
|
||||||
|
// Slot 2: No commitment
|
||||||
|
// Slot 3: All sidecars are saved excepted the needed ones
|
||||||
|
// Slot 4: Some sidecars are in the storage, other have to be retrieved from peers.
|
||||||
|
|
||||||
|
params.SetupTestConfigCleanup(t)
|
||||||
|
cfg := params.BeaconConfig().Copy()
|
||||||
|
cfg.FuluForkEpoch = 0
|
||||||
|
params.OverrideBeaconConfig(cfg)
|
||||||
|
|
||||||
|
// Start the trusted setup.
|
||||||
|
err := kzg.Start()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||||
|
|
||||||
|
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
const blobCount = 3
|
||||||
|
indices := map[uint64]bool{31: true, 81: true, 106: true}
|
||||||
|
|
||||||
|
// Block 1
|
||||||
|
block1, _, verifiedSidecars1 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(1))
|
||||||
|
root1 := block1.Root()
|
||||||
|
|
||||||
|
toStore1 := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||||
|
for index := range indices {
|
||||||
|
sidecar := verifiedSidecars1[index]
|
||||||
|
toStore1 = append(toStore1, sidecar)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.Save(toStore1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Block 2
|
||||||
|
block2, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(2))
|
||||||
|
|
||||||
|
// Block 3
|
||||||
|
block3, _, verifiedSidecars3 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(3))
|
||||||
|
root3 := block3.Root()
|
||||||
|
|
||||||
|
toStore3 := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns-uint64(len(indices)))
|
||||||
|
for i := range numberOfColumns {
|
||||||
|
if !indices[i] {
|
||||||
|
sidecar := verifiedSidecars3[i]
|
||||||
|
toStore3 = append(toStore3, sidecar)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.Save(toStore3)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Block 4
|
||||||
|
block4, _, verifiedSidecars4 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(4))
|
||||||
|
root4 := block4.Root()
|
||||||
|
toStore4 := []blocks.VerifiedRODataColumn{verifiedSidecars4[106]}
|
||||||
|
|
||||||
|
err = storage.Save(toStore4)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
privateKeyBytes := [32]byte{1}
|
||||||
|
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Peers
|
||||||
|
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||||
|
|
||||||
|
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||||
|
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
|
||||||
|
p2p.Connect(other)
|
||||||
|
|
||||||
|
p2p.Peers().SetChainState(other.PeerID(), ðpb.StatusV2{
|
||||||
|
HeadSlot: 4,
|
||||||
|
})
|
||||||
|
|
||||||
|
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||||
|
StartSlot: 4,
|
||||||
|
Count: 1,
|
||||||
|
Columns: []uint64{31, 81},
|
||||||
|
}
|
||||||
|
|
||||||
|
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||||
|
|
||||||
|
gs := startup.NewClockSynchronizer()
|
||||||
|
err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||||
|
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||||
|
|
||||||
|
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||||
|
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||||
|
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||||
|
|
||||||
|
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[31].DataColumnSidecar)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
err = stream.CloseWrite()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
params := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
Tor: clock,
|
||||||
|
P2P: p2p,
|
||||||
|
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
|
||||||
|
CtxMap: ctxMap,
|
||||||
|
Storage: storage,
|
||||||
|
NewVerifier: newDataColumnsVerifier,
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||||
|
root1: {verifiedSidecars1[31], verifiedSidecars1[81], verifiedSidecars1[106]},
|
||||||
|
// no root2 (no commitments in this block)
|
||||||
|
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
|
||||||
|
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
|
||||||
|
}
|
||||||
|
|
||||||
|
blocks := []blocks.ROBlock{block1, block2, block3, block4}
|
||||||
|
actual, err := FetchDataColumnSidecars(params, blocks, indices)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, len(expected), len(actual))
|
||||||
|
for root := range expected {
|
||||||
|
require.Equal(t, len(expected[root]), len(actual[root]))
|
||||||
|
for i := range expected[root] {
|
||||||
|
require.DeepSSZEqual(t, expected[root][i], actual[root][i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCategorizeIndices(t *testing.T) {
|
||||||
|
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||||
|
|
||||||
|
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||||
|
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||||
|
{Slot: 1, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||||
|
})
|
||||||
|
|
||||||
|
err := storage.Save(verifiedRoSidecars)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
expectedToQuery := map[uint64]bool{13: true}
|
||||||
|
expectedStored := map[uint64]bool{12: true, 14: true}
|
||||||
|
|
||||||
|
actualToQuery, actualStored := categorizeIndices(storage, verifiedRoSidecars[0].BlockRoot(), []uint64{12, 13, 14})
|
||||||
|
|
||||||
|
require.Equal(t, len(expectedToQuery), len(actualToQuery))
|
||||||
|
require.Equal(t, len(expectedStored), len(actualStored))
|
||||||
|
|
||||||
|
for index := range expectedToQuery {
|
||||||
|
require.Equal(t, true, actualToQuery[index])
|
||||||
|
}
|
||||||
|
for index := range expectedStored {
|
||||||
|
require.Equal(t, true, actualStored[index])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectPeers(t *testing.T) {
|
||||||
|
const (
|
||||||
|
count = 3
|
||||||
|
seed = 46
|
||||||
|
)
|
||||||
|
|
||||||
|
params := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
|
||||||
|
}
|
||||||
|
|
||||||
|
randomSource := rand.NewGenerator()
|
||||||
|
|
||||||
|
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
"peer1": {
|
||||||
|
{1}: {12: true, 13: true},
|
||||||
|
{2}: {13: true, 14: true, 15: true},
|
||||||
|
{3}: {14: true, 15: true},
|
||||||
|
},
|
||||||
|
"peer2": {
|
||||||
|
{1}: {13: true, 14: true},
|
||||||
|
{2}: {13: true, 14: true, 15: true},
|
||||||
|
{3}: {14: true, 16: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
expected_1 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
"peer1": {
|
||||||
|
{1}: {12: true, 13: true},
|
||||||
|
{2}: {13: true, 14: true, 15: true},
|
||||||
|
{3}: {14: true, 15: true},
|
||||||
|
},
|
||||||
|
"peer2": {
|
||||||
|
{1}: {14: true},
|
||||||
|
{3}: {16: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
expected_2 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
"peer1": {
|
||||||
|
{1}: {12: true},
|
||||||
|
{3}: {15: true},
|
||||||
|
},
|
||||||
|
"peer2": {
|
||||||
|
{1}: {13: true, 14: true},
|
||||||
|
{2}: {13: true, 14: true, 15: true},
|
||||||
|
{3}: {14: true, 16: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := selectPeers(params, randomSource, count, indicesByRootByPeer)
|
||||||
|
|
||||||
|
expected := expected_1
|
||||||
|
if len(actual["peer1"]) == 2 {
|
||||||
|
expected = expected_2
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, len(expected), len(actual))
|
||||||
|
for peerID := range expected {
|
||||||
|
require.Equal(t, len(expected[peerID]), len(actual[peerID]))
|
||||||
|
for root := range expected[peerID] {
|
||||||
|
require.Equal(t, len(expected[peerID][root]), len(actual[peerID][root]))
|
||||||
|
for indices := range expected[peerID][root] {
|
||||||
|
require.Equal(t, expected[peerID][root][indices], actual[peerID][root][indices])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateResults(t *testing.T) {
|
||||||
|
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||||
|
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||||
|
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
|
||||||
|
{Slot: 2, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
|
||||||
|
{Slot: 2, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||||
|
})
|
||||||
|
|
||||||
|
missingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
verifiedSidecars[0].BlockRoot(): {12: true, 13: true},
|
||||||
|
verifiedSidecars[2].BlockRoot(): {13: true, 14: true, 15: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedMissingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
verifiedSidecars[2].BlockRoot(): {15: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedVerifiedSidecarsByRoot := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||||
|
verifiedSidecars[0].BlockRoot(): {verifiedSidecars[0], verifiedSidecars[1]},
|
||||||
|
verifiedSidecars[2].BlockRoot(): {verifiedSidecars[2], verifiedSidecars[3]},
|
||||||
|
}
|
||||||
|
|
||||||
|
actualMissingIndicesByRoot, actualVerifiedSidecarsByRoot := updateResults(verifiedSidecars, missingIndicesByRoot)
|
||||||
|
require.DeepEqual(t, expectedMissingIndicesByRoot, actualMissingIndicesByRoot)
|
||||||
|
require.DeepEqual(t, expectedVerifiedSidecarsByRoot, actualVerifiedSidecarsByRoot)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetchDataColumnSidecarsFromPeers(t *testing.T) {
|
||||||
|
const count = 4
|
||||||
|
|
||||||
|
params.SetupTestConfigCleanup(t)
|
||||||
|
cfg := params.BeaconConfig().Copy()
|
||||||
|
cfg.FuluForkEpoch = 0
|
||||||
|
params.OverrideBeaconConfig(cfg)
|
||||||
|
|
||||||
|
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||||
|
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||||
|
for range count {
|
||||||
|
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedResponseSidecarPb := ðpb.DataColumnSidecar{
|
||||||
|
Index: 2,
|
||||||
|
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||||
|
Header: ðpb.BeaconBlockHeader{
|
||||||
|
Slot: 1,
|
||||||
|
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||||
|
StateRoot: make([]byte, fieldparams.RootLength),
|
||||||
|
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||||
|
},
|
||||||
|
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||||
|
},
|
||||||
|
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedResponseSidecar, err := blocks.NewRODataColumn(expectedResponseSidecarPb)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
{1}: 1,
|
||||||
|
{3}: 3,
|
||||||
|
{4}: 4,
|
||||||
|
{7}: 7,
|
||||||
|
}
|
||||||
|
|
||||||
|
slotsWithCommitments := map[primitives.Slot]bool{
|
||||||
|
1: true,
|
||||||
|
3: true,
|
||||||
|
4: true,
|
||||||
|
7: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||||
|
StartSlot: 1,
|
||||||
|
Count: 7,
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
}
|
||||||
|
|
||||||
|
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||||
|
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||||
|
p2p.Connect(other)
|
||||||
|
|
||||||
|
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||||
|
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||||
|
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.DeepEqual(t, expectedRequest, receivedRequest)
|
||||||
|
|
||||||
|
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponseSidecarPb)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
err = stream.CloseWrite()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
other.PeerID(): {
|
||||||
|
{1}: {1: true, 2: true},
|
||||||
|
{3}: {1: true, 2: true},
|
||||||
|
{4}: {1: true, 2: true},
|
||||||
|
{7}: {1: true, 2: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
params := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
Tor: clock,
|
||||||
|
P2P: p2p,
|
||||||
|
CtxMap: ctxMap,
|
||||||
|
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedResponse := map[peer.ID][]blocks.RODataColumn{
|
||||||
|
other.PeerID(): {expectedResponseSidecar},
|
||||||
|
}
|
||||||
|
|
||||||
|
actualResponse := fetchDataColumnSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, indicesByRootByPeer)
|
||||||
|
require.Equal(t, len(expectedResponse), len(actualResponse))
|
||||||
|
|
||||||
|
for peerID := range expectedResponse {
|
||||||
|
require.DeepSSZEqual(t, expectedResponse[peerID], actualResponse[peerID])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSendDataColumnSidecarsRequest(t *testing.T) {
|
||||||
|
const count = 4
|
||||||
|
|
||||||
|
params.SetupTestConfigCleanup(t)
|
||||||
|
cfg := params.BeaconConfig().Copy()
|
||||||
|
cfg.FuluForkEpoch = 0
|
||||||
|
params.OverrideBeaconConfig(cfg)
|
||||||
|
|
||||||
|
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||||
|
for range count {
|
||||||
|
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedResponsePb := ðpb.DataColumnSidecar{
|
||||||
|
Index: 2,
|
||||||
|
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||||
|
Header: ðpb.BeaconBlockHeader{
|
||||||
|
Slot: 1,
|
||||||
|
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||||
|
StateRoot: make([]byte, fieldparams.RootLength),
|
||||||
|
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||||
|
},
|
||||||
|
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||||
|
},
|
||||||
|
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedResponse, err := blocks.NewRODataColumn(expectedResponsePb)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||||
|
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("contiguous", func(t *testing.T) {
|
||||||
|
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
{1}: {1: true, 2: true},
|
||||||
|
{3}: {1: true, 2: true},
|
||||||
|
{4}: {1: true, 2: true},
|
||||||
|
{7}: {1: true, 2: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
{1}: 1,
|
||||||
|
{3}: 3,
|
||||||
|
{4}: 4,
|
||||||
|
{7}: 7,
|
||||||
|
}
|
||||||
|
|
||||||
|
slotsWithCommitments := map[primitives.Slot]bool{
|
||||||
|
1: true,
|
||||||
|
3: true,
|
||||||
|
4: true,
|
||||||
|
7: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
|
||||||
|
StartSlot: 1,
|
||||||
|
Count: 7,
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
}
|
||||||
|
|
||||||
|
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||||
|
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||||
|
p2p.Connect(other)
|
||||||
|
|
||||||
|
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||||
|
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||||
|
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.DeepEqual(t, expectedRequest, receivedRequest)
|
||||||
|
|
||||||
|
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
err = stream.CloseWrite()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
params := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
Tor: clock,
|
||||||
|
P2P: p2p,
|
||||||
|
CtxMap: ctxMap,
|
||||||
|
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||||
|
}
|
||||||
|
|
||||||
|
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.DeepEqual(t, expectedResponse, actualResponse[0])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("non contiguous", func(t *testing.T) {
|
||||||
|
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
expectedResponse.BlockRoot(): {1: true, 2: true},
|
||||||
|
{4}: {1: true, 2: true},
|
||||||
|
{7}: {1: true, 2: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
expectedResponse.BlockRoot(): 1,
|
||||||
|
{4}: 4,
|
||||||
|
{7}: 7,
|
||||||
|
}
|
||||||
|
|
||||||
|
slotsWithCommitments := map[primitives.Slot]bool{
|
||||||
|
1: true,
|
||||||
|
3: true,
|
||||||
|
4: true,
|
||||||
|
7: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
roots := [...][fieldparams.RootLength]byte{expectedResponse.BlockRoot(), {4}, {7}}
|
||||||
|
|
||||||
|
expectedRequest := &p2ptypes.DataColumnsByRootIdentifiers{
|
||||||
|
{
|
||||||
|
BlockRoot: roots[1][:],
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
BlockRoot: roots[2][:],
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
BlockRoot: roots[0][:],
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||||
|
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||||
|
p2p.Connect(other)
|
||||||
|
|
||||||
|
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||||
|
receivedRequest := new(p2ptypes.DataColumnsByRootIdentifiers)
|
||||||
|
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.DeepSSZEqual(t, *expectedRequest, *receivedRequest)
|
||||||
|
|
||||||
|
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
err = stream.CloseWrite()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
params := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
Tor: clock,
|
||||||
|
P2P: p2p,
|
||||||
|
CtxMap: ctxMap,
|
||||||
|
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||||
|
}
|
||||||
|
|
||||||
|
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.DeepEqual(t, expectedResponse, actualResponse[0])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
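// Note on the two sub-tests above: judging only by the requests they expect, the
// "contiguous" case has sendDataColumnSidecarsRequest collapse the wanted roots into a
// single DataColumnSidecarsByRangeRequest covering slots 1 through 7 for columns 1 and 2,
// while the "non contiguous" case falls back to a by-root request built from
// DataColumnsByRootIdentifiers. The selection logic itself lives outside this test.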
|
||||||
|
func TestBuildByRangeRequests(t *testing.T) {
|
||||||
|
const nullBatchSize = 0
|
||||||
|
|
||||||
|
t.Run("empty", func(t *testing.T) {
|
||||||
|
actual, err := buildByRangeRequests(nil, nil, nil, nullBatchSize)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 0, len(actual))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("missing Root", func(t *testing.T) {
|
||||||
|
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
{1}: {1: true, 2: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := buildByRangeRequests(nil, nil, indicesByRoot, nullBatchSize)
|
||||||
|
require.NotNil(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("indices differ", func(t *testing.T) {
|
||||||
|
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
{1}: {1: true, 2: true},
|
||||||
|
{2}: {1: true, 2: true},
|
||||||
|
{3}: {2: true, 3: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
{1}: 1,
|
||||||
|
{2}: 2,
|
||||||
|
{3}: 3,
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := buildByRangeRequests(slotByRoot, nil, indicesByRoot, nullBatchSize)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 0, len(actual))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("slots non contiguous", func(t *testing.T) {
|
||||||
|
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
{1}: {1: true, 2: true},
|
||||||
|
{2}: {1: true, 2: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
{1}: 1,
|
||||||
|
{2}: 3,
|
||||||
|
}
|
||||||
|
|
||||||
|
slotsWithCommitments := map[primitives.Slot]bool{
|
||||||
|
1: true,
|
||||||
|
2: true,
|
||||||
|
3: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, nullBatchSize)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 0, len(actual))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("nominal", func(t *testing.T) {
|
||||||
|
const batchSize = 3
|
||||||
|
|
||||||
|
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
{1}: {1: true, 2: true},
|
||||||
|
{3}: {1: true, 2: true},
|
||||||
|
{4}: {1: true, 2: true},
|
||||||
|
{7}: {1: true, 2: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
{1}: 1,
|
||||||
|
{3}: 3,
|
||||||
|
{4}: 4,
|
||||||
|
{7}: 7,
|
||||||
|
}
|
||||||
|
|
||||||
|
slotsWithCommitments := map[primitives.Slot]bool{
|
||||||
|
1: true,
|
||||||
|
3: true,
|
||||||
|
4: true,
|
||||||
|
7: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := []*ethpb.DataColumnSidecarsByRangeRequest{
|
||||||
|
{
|
||||||
|
StartSlot: 1,
|
||||||
|
Count: 3,
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
StartSlot: 4,
|
||||||
|
Count: 3,
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
StartSlot: 7,
|
||||||
|
Count: 1,
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.DeepEqual(t, expected, actual)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
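// The "nominal" case above illustrates the batching rule exercised by buildByRangeRequests:
// with batchSize = 3 and wanted slots {1, 3, 4, 7}, no request spans more than batchSize
// slots, which yields the ranges [1..3], [4..6] and [7]. A rough sketch of that slicing,
// using hypothetical names (firstSlot, lastSlot, columns, requests) purely for illustration;
// the real implementation may differ, for example by skipping slots without commitments:
//
//	for start := firstSlot; start <= lastSlot; start += batchSize {
//		count := min(uint64(batchSize), uint64(lastSlot-start)+1)
//		requests = append(requests, &ethpb.DataColumnSidecarsByRangeRequest{
//			StartSlot: start,
//			Count:     count,
//			Columns:   columns,
//		})
//	}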
|
||||||
|
func TestBuildByRootRequest(t *testing.T) {
|
||||||
|
root1 := [fieldparams.RootLength]byte{1}
|
||||||
|
root2 := [fieldparams.RootLength]byte{2}
|
||||||
|
|
||||||
|
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
root1: {1: true, 2: true},
|
||||||
|
root2: {3: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := p2ptypes.DataColumnsByRootIdentifiers{
|
||||||
|
{
|
||||||
|
BlockRoot: root1[:],
|
||||||
|
Columns: []uint64{1, 2},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
BlockRoot: root2[:],
|
||||||
|
Columns: []uint64{3},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := buildByRootRequest(input)
|
||||||
|
require.DeepSSZEqual(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVerifyDataColumnSidecarsByPeer(t *testing.T) {
|
||||||
|
err := kzg.Start()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("nominal", func(t *testing.T) {
|
||||||
|
const (
|
||||||
|
start, stop = 0, 15
|
||||||
|
blobCount = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
p2p := testp2p.NewTestP2P(t)
|
||||||
|
|
||||||
|
// Setup test data and expectations
|
||||||
|
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||||
|
|
||||||
|
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
|
||||||
|
"peer1": roDataColumnSidecars[start:5],
|
||||||
|
"peer2": roDataColumnSidecars[5:9],
|
||||||
|
"peer3": roDataColumnSidecars[9:stop],
|
||||||
|
}
|
||||||
|
gs := startup.NewClockSynchronizer()
|
||||||
|
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||||
|
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||||
|
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, stop-start, len(actual))
|
||||||
|
|
||||||
|
for i := range actual {
|
||||||
|
actualSidecar := actual[i]
|
||||||
|
index := actualSidecar.Index
|
||||||
|
expectedSidecar := expected[index]
|
||||||
|
require.DeepEqual(t, expectedSidecar, actualSidecar)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("one rogue peer", func(t *testing.T) {
|
||||||
|
const (
|
||||||
|
start, middle, stop = 0, 5, 15
|
||||||
|
blobCount = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
p2p := testp2p.NewTestP2P(t)
|
||||||
|
|
||||||
|
// Setup test data and expectations
|
||||||
|
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||||
|
|
||||||
|
// Modify one sidecar to ensure proof verification fails.
|
||||||
|
if roDataColumnSidecars[middle].KzgProofs[0][0] == 0 {
|
||||||
|
roDataColumnSidecars[middle].KzgProofs[0][0]++
|
||||||
|
} else {
|
||||||
|
roDataColumnSidecars[middle].KzgProofs[0][0]--
|
||||||
|
}
|
||||||
|
|
||||||
|
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
|
||||||
|
"peer1": roDataColumnSidecars[start:middle],
|
||||||
|
"peer2": roDataColumnSidecars[5:middle],
|
||||||
|
"peer3": roDataColumnSidecars[middle:stop],
|
||||||
|
}
|
||||||
|
gs := startup.NewClockSynchronizer()
|
||||||
|
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||||
|
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||||
|
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, middle-start, len(actual))
|
||||||
|
|
||||||
|
for i := range actual {
|
||||||
|
actualSidecar := actual[i]
|
||||||
|
index := actualSidecar.Index
|
||||||
|
expectedSidecar := expected[index]
|
||||||
|
require.DeepEqual(t, expectedSidecar, actualSidecar)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
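// In the "one rogue peer" case above, verification is expected to work per peer: because one
// sidecar served by "peer3" carries a corrupted KZG proof, the whole batch received from that
// peer is discarded, so only the middle-start sidecars coming from "peer1" survive. No error
// is returned to the caller; the invalid data is simply dropped.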
|
||||||
|
func TestComputeIndicesByRootByPeer(t *testing.T) {
|
||||||
|
peerIdStrs := []string{
|
||||||
|
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq", // Custodies 89, 94, 97 & 122
|
||||||
|
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf", // Custodies 1, 11, 37 & 86
|
||||||
|
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8", // Custodies 2, 37, 38 & 68
|
||||||
|
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx", // Custodies 10, 29, 36 & 108
|
||||||
|
}
|
||||||
|
|
||||||
|
headSlotByPeer := map[string]primitives.Slot{
|
||||||
|
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq": 89,
|
||||||
|
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf": 10,
|
||||||
|
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8": 12,
|
||||||
|
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx": 9,
|
||||||
|
}
|
||||||
|
|
||||||
|
p2p := testp2p.NewTestP2P(t)
|
||||||
|
peers := p2p.Peers()
|
||||||
|
|
||||||
|
peerIDs := make([]peer.ID, 0, len(peerIdStrs))
|
||||||
|
for _, peerIdStr := range peerIdStrs {
|
||||||
|
peerID, err := peer.Decode(peerIdStr)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
peers.SetChainState(peerID, &ethpb.StatusV2{
|
||||||
|
HeadSlot: headSlotByPeer[peerIdStr],
|
||||||
|
})
|
||||||
|
|
||||||
|
peerIDs = append(peerIDs, peerID)
|
||||||
|
}
|
||||||
|
|
||||||
|
slotByBlockRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
[fieldparams.RootLength]byte{1}: 8,
|
||||||
|
[fieldparams.RootLength]byte{2}: 10,
|
||||||
|
[fieldparams.RootLength]byte{3}: 9,
|
||||||
|
[fieldparams.RootLength]byte{4}: 50,
|
||||||
|
}
|
||||||
|
|
||||||
|
indicesByBlockRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
[fieldparams.RootLength]byte{1}: {3: true, 4: true, 5: true},
|
||||||
|
[fieldparams.RootLength]byte{2}: {1: true, 10: true, 37: true, 80: true},
|
||||||
|
[fieldparams.RootLength]byte{3}: {10: true, 38: true, 39: true, 40: true},
|
||||||
|
[fieldparams.RootLength]byte{4}: {89: true, 108: true, 122: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
peerIDs[0]: {
|
||||||
|
[fieldparams.RootLength]byte{4}: {89: true, 122: true},
|
||||||
|
},
|
||||||
|
peerIDs[1]: {
|
||||||
|
[fieldparams.RootLength]byte{2}: {1: true, 37: true},
|
||||||
|
},
|
||||||
|
peerIDs[2]: {
|
||||||
|
[fieldparams.RootLength]byte{2}: {37: true},
|
||||||
|
[fieldparams.RootLength]byte{3}: {38: true},
|
||||||
|
},
|
||||||
|
peerIDs[3]: {
|
||||||
|
[fieldparams.RootLength]byte{3}: {10: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
peerIDsMap := make(map[peer.ID]bool, len(peerIDs))
|
||||||
|
for _, id := range peerIDs {
|
||||||
|
peerIDsMap[id] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := computeIndicesByRootByPeer(p2p, slotByBlockRoot, indicesByBlockRoot, peerIDsMap)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, len(expected), len(actual))
|
||||||
|
|
||||||
|
for peer, indicesByRoot := range expected {
|
||||||
|
require.Equal(t, len(indicesByRoot), len(actual[peer]))
|
||||||
|
for root, indices := range indicesByRoot {
|
||||||
|
require.Equal(t, len(indices), len(actual[peer][root]))
|
||||||
|
for index := range indices {
|
||||||
|
require.Equal(t, actual[peer][root][index], true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
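// The expectations above encode two filters that computeIndicesByRootByPeer is assumed to
// apply for each peer: a column index is only requested from a peer that custodies it (see
// the custody comments next to each peer ID), and a block is only requested from a peer whose
// advertised head slot is at or above the block's slot. For example, root {2} (slot 10) with
// wanted columns {1, 10, 37, 80} reduces to {1, 37} for the peer that custodies columns
// 1, 11, 37 and 86 and reports head slot 10.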
|
||||||
|
func TestRandomPeer(t *testing.T) {
|
||||||
|
// Fixed seed.
|
||||||
|
const seed = 42
|
||||||
|
randomSource := rand.NewGenerator()
|
||||||
|
|
||||||
|
t.Run("no peers", func(t *testing.T) {
|
||||||
|
pid, err := randomPeer(t.Context(), randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, nil)
|
||||||
|
require.NotNil(t, err)
|
||||||
|
require.Equal(t, peer.ID(""), pid)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("context cancelled", func(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(t.Context())
|
||||||
|
cancel()
|
||||||
|
|
||||||
|
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{peer.ID("peer1"): {}}
|
||||||
|
pid, err := randomPeer(ctx, randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, indicesByRootByPeer)
|
||||||
|
require.NotNil(t, err)
|
||||||
|
require.Equal(t, peer.ID(""), pid)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("nominal", func(t *testing.T) {
|
||||||
|
const count = 1
|
||||||
|
collector := leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */)
|
||||||
|
peer1, peer2, peer3 := peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3")
|
||||||
|
|
||||||
|
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
peer1: {},
|
||||||
|
peer2: {},
|
||||||
|
peer3: {},
|
||||||
|
}
|
||||||
|
|
||||||
|
pid, err := randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, true, map[peer.ID]bool{peer1: true, peer2: true, peer3: true}[pid])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopyIndicesByRootByPeer(t *testing.T) {
|
||||||
|
original := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
peer.ID("peer1"): {
|
||||||
|
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||||
|
[fieldparams.RootLength]byte{2}: {2: true},
|
||||||
|
},
|
||||||
|
peer.ID("peer2"): {
|
||||||
|
[fieldparams.RootLength]byte{1}: {1: true},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
copied := copyIndicesByRootByPeer(original)
|
||||||
|
|
||||||
|
require.Equal(t, len(original), len(copied))
|
||||||
|
for peer, indicesByRoot := range original {
|
||||||
|
require.Equal(t, len(indicesByRoot), len(copied[peer]))
|
||||||
|
for root, indices := range indicesByRoot {
|
||||||
|
require.Equal(t, len(indices), len(copied[peer][root]))
|
||||||
|
for index := range indices {
|
||||||
|
require.Equal(t, copied[peer][root][index], true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCompareIndices(t *testing.T) {
|
||||||
|
left := map[uint64]bool{3: true, 5: true, 7: true}
|
||||||
|
right := map[uint64]bool{5: true}
|
||||||
|
require.Equal(t, false, compareIndices(left, right))
|
||||||
|
|
||||||
|
left = map[uint64]bool{3: true, 5: true, 7: true}
|
||||||
|
right = map[uint64]bool{3: true, 6: true, 7: true}
|
||||||
|
require.Equal(t, false, compareIndices(left, right))
|
||||||
|
|
||||||
|
left = map[uint64]bool{3: true, 5: true, 7: true}
|
||||||
|
right = map[uint64]bool{5: true, 7: true, 3: true}
|
||||||
|
require.Equal(t, true, compareIndices(left, right))
|
||||||
|
}
|
||||||
|
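// The three assertions above pin down compareIndices as a set-equality check on the map keys:
// a strict subset fails, overlapping-but-different sets fail, and identical sets compare equal
// regardless of the order in which the keys were written.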
|
||||||
|
func TestSortedSliceFromMap(t *testing.T) {
|
||||||
|
input := map[uint64]bool{54: true, 23: true, 35: true}
|
||||||
|
expected := []uint64{23, 35, 54}
|
||||||
|
actual := sortedSliceFromMap(input)
|
||||||
|
require.DeepEqual(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestComputeSlotByBlockRoot(t *testing.T) {
|
||||||
|
const (
|
||||||
|
count = 3
|
||||||
|
multiplier = 10
|
||||||
|
)
|
||||||
|
|
||||||
|
roBlocks := make([]blocks.ROBlock, 0, count)
|
||||||
|
for i := range count {
|
||||||
|
signedBlock := util.NewBeaconBlock()
|
||||||
|
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
|
||||||
|
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
roBlocks = append(roBlocks, roBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||||
|
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
|
||||||
|
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
|
||||||
|
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := computeSlotByBlockRoot(roBlocks)
|
||||||
|
|
||||||
|
require.Equal(t, len(expected), len(actual))
|
||||||
|
for k, v := range expected {
|
||||||
|
require.Equal(t, v, actual[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestComputeTotalCount(t *testing.T) {
|
||||||
|
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||||
|
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||||
|
[fieldparams.RootLength]byte{2}: {2: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
const expected = 3
|
||||||
|
actual := computeTotalCount(input)
|
||||||
|
require.Equal(t, expected, actual)
|
||||||
|
}
|
||||||
@@ -20,6 +20,7 @@ go_library(
|
|||||||
"//beacon-chain/blockchain:go_default_library",
|
"//beacon-chain/blockchain:go_default_library",
|
||||||
"//beacon-chain/core/feed/block:go_default_library",
|
"//beacon-chain/core/feed/block:go_default_library",
|
||||||
"//beacon-chain/core/feed/state:go_default_library",
|
"//beacon-chain/core/feed/state:go_default_library",
|
||||||
|
"//beacon-chain/core/peerdas:go_default_library",
|
||||||
"//beacon-chain/core/transition:go_default_library",
|
"//beacon-chain/core/transition:go_default_library",
|
||||||
"//beacon-chain/das:go_default_library",
|
"//beacon-chain/das:go_default_library",
|
||||||
"//beacon-chain/db:go_default_library",
|
"//beacon-chain/db:go_default_library",
|
||||||
@@ -72,7 +73,9 @@ go_test(
|
|||||||
deps = [
|
deps = [
|
||||||
"//async/abool:go_default_library",
|
"//async/abool:go_default_library",
|
||||||
"//beacon-chain/blockchain:go_default_library",
|
"//beacon-chain/blockchain:go_default_library",
|
||||||
|
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||||
"//beacon-chain/blockchain/testing:go_default_library",
|
"//beacon-chain/blockchain/testing:go_default_library",
|
||||||
|
"//beacon-chain/core/peerdas:go_default_library",
|
||||||
"//beacon-chain/das:go_default_library",
|
"//beacon-chain/das:go_default_library",
|
||||||
"//beacon-chain/db:go_default_library",
|
"//beacon-chain/db:go_default_library",
|
||||||
"//beacon-chain/db/filesystem:go_default_library",
|
"//beacon-chain/db/filesystem:go_default_library",
|
||||||
@@ -89,6 +92,7 @@ go_test(
|
|||||||
"//beacon-chain/verification:go_default_library",
|
"//beacon-chain/verification:go_default_library",
|
||||||
"//cmd/beacon-chain/flags:go_default_library",
|
"//cmd/beacon-chain/flags:go_default_library",
|
||||||
"//config/features:go_default_library",
|
"//config/features:go_default_library",
|
||||||
|
"//config/fieldparams:go_default_library",
|
||||||
"//config/params:go_default_library",
|
"//config/params:go_default_library",
|
||||||
"//consensus-types/blocks:go_default_library",
|
"//consensus-types/blocks:go_default_library",
|
||||||
"//consensus-types/interfaces:go_default_library",
|
"//consensus-types/interfaces:go_default_library",
|
||||||
|
|||||||
@@ -3,11 +3,13 @@ package initialsync
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||||
@@ -15,6 +17,7 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||||
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
@@ -34,7 +37,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
||||||
// maxPendingRequests limits how many concurrent fetch request one can initiate.
|
// maxPendingRequests limits how many concurrent fetch request one can initiate.
|
||||||
maxPendingRequests = 64
|
maxPendingRequests = 64
|
||||||
// peersPercentagePerRequest caps percentage of peers to be used in a request.
|
// peersPercentagePerRequest caps percentage of peers to be used in a request.
|
||||||
@@ -78,6 +80,8 @@ type blocksFetcherConfig struct {
|
|||||||
peerFilterCapacityWeight float64
|
peerFilterCapacityWeight float64
|
||||||
mode syncMode
|
mode syncMode
|
||||||
bs filesystem.BlobStorageSummarizer
|
bs filesystem.BlobStorageSummarizer
|
||||||
|
dcs filesystem.DataColumnStorageReader
|
||||||
|
cv verification.NewDataColumnsVerifier
|
||||||
}
|
}
|
||||||
|
|
||||||
// blocksFetcher is a service to fetch chain data from peers.
|
// blocksFetcher is a service to fetch chain data from peers.
|
||||||
@@ -94,6 +98,8 @@ type blocksFetcher struct {
|
|||||||
p2p p2p.P2P
|
p2p p2p.P2P
|
||||||
db db.ReadOnlyDatabase
|
db db.ReadOnlyDatabase
|
||||||
bs filesystem.BlobStorageSummarizer
|
bs filesystem.BlobStorageSummarizer
|
||||||
|
dcs filesystem.DataColumnStorageReader
|
||||||
|
cv verification.NewDataColumnsVerifier
|
||||||
blocksPerPeriod uint64
|
blocksPerPeriod uint64
|
||||||
rateLimiter *leakybucket.Collector
|
rateLimiter *leakybucket.Collector
|
||||||
peerLocks map[peer.ID]*peerLock
|
peerLocks map[peer.ID]*peerLock
|
||||||
@@ -124,7 +130,7 @@ type fetchRequestResponse struct {
|
|||||||
blobsFrom peer.ID
|
blobsFrom peer.ID
|
||||||
start primitives.Slot
|
start primitives.Slot
|
||||||
count uint64
|
count uint64
|
||||||
bwb []blocks.BlockWithROBlobs
|
bwb []blocks.BlockWithROSidecars
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -162,6 +168,8 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
|||||||
p2p: cfg.p2p,
|
p2p: cfg.p2p,
|
||||||
db: cfg.db,
|
db: cfg.db,
|
||||||
bs: cfg.bs,
|
bs: cfg.bs,
|
||||||
|
dcs: cfg.dcs,
|
||||||
|
cv: cfg.cv,
|
||||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||||
rateLimiter: rateLimiter,
|
rateLimiter: rateLimiter,
|
||||||
peerLocks: make(map[peer.ID]*peerLock),
|
peerLocks: make(map[peer.ID]*peerLock),
|
||||||
@@ -298,7 +306,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
|||||||
response := &fetchRequestResponse{
|
response := &fetchRequestResponse{
|
||||||
start: start,
|
start: start,
|
||||||
count: count,
|
count: count,
|
||||||
bwb: []blocks.BlockWithROBlobs{},
|
bwb: []blocks.BlockWithROSidecars{},
|
||||||
err: nil,
|
err: nil,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -317,30 +325,114 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
|||||||
if f.mode == modeStopOnFinalizedEpoch {
|
if f.mode == modeStopOnFinalizedEpoch {
|
||||||
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
|
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
|
||||||
if start > highestFinalizedSlot {
|
if start > highestFinalizedSlot {
|
||||||
response.err = fmt.Errorf("%w, slot: %d, highest finalized slot: %d",
|
response.err = fmt.Errorf(
|
||||||
errSlotIsTooHigh, start, highestFinalizedSlot)
|
"%w, slot: %d, highest finalized slot: %d",
|
||||||
|
errSlotIsTooHigh, start, highestFinalizedSlot,
|
||||||
|
)
|
||||||
|
|
||||||
return response
|
return response
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||||
if response.err == nil {
|
if response.err == nil {
|
||||||
pid, bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.blocksFrom, peers)
|
pid, err := f.fetchSidecars(ctx, response.blocksFrom, peers, response.bwb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Failed to fetch sidecars")
|
||||||
response.err = err
|
response.err = err
|
||||||
}
|
}
|
||||||
response.bwb = bwb
|
|
||||||
response.blobsFrom = pid
|
response.blobsFrom = pid
|
||||||
}
|
}
|
||||||
|
|
||||||
return response
|
return response
|
||||||
}
|
}
|
||||||
|
|
||||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer.
|
// fetchSidecars fetches the sidecars corresponding to the blocks in `bwScs`.
|
||||||
|
// It mutates the `Blobs` and `Columns` fields of `bwScs` with the fetched sidecars.
|
||||||
|
// `pid` is the initial peer to request blobs from (usually the peer from which the block originated),
|
||||||
|
// `peers` is a list of peers to fall back to for the blob requests if `pid` fails.
|
||||||
|
// `bwScs` must be sorted by slot.
|
||||||
|
// It returns the peer ID from which blobs were fetched (if any).
|
||||||
|
func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []peer.ID, bwScs []blocks.BlockWithROSidecars) (peer.ID, error) {
|
||||||
|
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||||
|
|
||||||
|
if len(bwScs) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
firstFuluIndex, err := findFirstFuluIndex(bwScs)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "find first Fulu index")
|
||||||
|
}
|
||||||
|
|
||||||
|
preFulu := bwScs[:firstFuluIndex]
|
||||||
|
postFulu := bwScs[firstFuluIndex:]
|
||||||
|
|
||||||
|
var blobsPid peer.ID
|
||||||
|
|
||||||
|
if len(preFulu) > 0 {
|
||||||
|
// Fetch blob sidecars.
|
||||||
|
blobsPid, err = f.fetchBlobsFromPeer(ctx, preFulu, pid, peers)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "fetch blobs from peer")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(postFulu) == 0 {
|
||||||
|
return blobsPid, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute the columns to request.
|
||||||
|
custodyGroupCount, err := f.p2p.CustodyGroupCount()
|
||||||
|
if err != nil {
|
||||||
|
return blobsPid, errors.Wrap(err, "custody group count")
|
||||||
|
}
|
||||||
|
|
||||||
|
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||||
|
info, _, err := peerdas.Info(f.p2p.NodeID(), samplingSize)
|
||||||
|
if err != nil {
|
||||||
|
return blobsPid, errors.Wrap(err, "custody info")
|
||||||
|
}
|
||||||
|
|
||||||
|
params := prysmsync.DataColumnSidecarsParams{
|
||||||
|
Ctx: ctx,
|
||||||
|
Tor: f.clock,
|
||||||
|
P2P: f.p2p,
|
||||||
|
RateLimiter: f.rateLimiter,
|
||||||
|
CtxMap: f.ctxMap,
|
||||||
|
Storage: f.dcs,
|
||||||
|
NewVerifier: f.cv,
|
||||||
|
}
|
||||||
|
|
||||||
|
roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
|
||||||
|
for _, block := range postFulu {
|
||||||
|
roBlocks = append(roBlocks, block.Block)
|
||||||
|
}
|
||||||
|
|
||||||
|
verifiedRoDataColumnsByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "fetch data column sidecars")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Populate the response.
|
||||||
|
for i := range bwScs {
|
||||||
|
bwSc := &bwScs[i]
|
||||||
|
root := bwSc.Block.Root()
|
||||||
|
if columns, ok := verifiedRoDataColumnsByRoot[root]; ok {
|
||||||
|
bwSc.Columns = columns
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return blobsPid, nil
|
||||||
|
}
|
||||||
|
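// A minimal sketch of how fetchSidecars is meant to be driven from handleRequest above,
// assuming a fetcher `f`, a slot-sorted `bwScs` and candidate `peers` (names follow the
// surrounding diff): pre-Fulu entries get their Blobs populated through fetchBlobsFromPeer,
// post-Fulu entries get their Columns populated through prysmsync.FetchDataColumnSidecars,
// and the returned peer ID identifies only the peer that served blob sidecars, if any.
//
//	pid, err := f.fetchSidecars(ctx, blocksFrom, peers, bwScs)
//	if err != nil {
//		return errors.Wrap(err, "fetch sidecars")
//	}
//	response.blobsFrom = pid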
|
||||||
|
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer, sorted by slot.
|
||||||
func (f *blocksFetcher) fetchBlocksFromPeer(
|
func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
start primitives.Slot, count uint64,
|
start primitives.Slot, count uint64,
|
||||||
peers []peer.ID,
|
peers []peer.ID,
|
||||||
) ([]blocks.BlockWithROBlobs, peer.ID, error) {
|
) ([]blocks.BlockWithROSidecars, peer.ID, error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
|
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
@@ -355,8 +447,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
|||||||
// peers are dialed first.
|
// peers are dialed first.
|
||||||
peers = append(bestPeers, peers...)
|
peers = append(bestPeers, peers...)
|
||||||
peers = dedupPeers(peers)
|
peers = dedupPeers(peers)
|
||||||
for i := 0; i < len(peers); i++ {
|
for _, p := range peers {
|
||||||
p := peers[i]
|
|
||||||
blocks, err := f.requestBlocks(ctx, req, p)
|
blocks, err := f.requestBlocks(ctx, req, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
|
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
|
||||||
@@ -380,14 +471,14 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
|||||||
return nil, "", errNoPeersAvailable
|
return nil, "", errNoPeersAvailable
|
||||||
}
|
}
|
||||||
|
|
||||||
func sortedBlockWithVerifiedBlobSlice(bs []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROBlobs, error) {
|
func sortedBlockWithVerifiedBlobSlice(blks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROSidecars, error) {
|
||||||
rb := make([]blocks.BlockWithROBlobs, len(bs))
|
rb := make([]blocks.BlockWithROSidecars, len(blks))
|
||||||
for i, b := range bs {
|
for i, b := range blks {
|
||||||
ro, err := blocks.NewROBlock(b)
|
ro, err := blocks.NewROBlock(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rb[i] = blocks.BlockWithROBlobs{Block: ro}
|
rb[i] = blocks.BlockWithROSidecars{Block: ro}
|
||||||
}
|
}
|
||||||
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
|
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
|
||||||
return rb, nil
|
return rb, nil
|
||||||
@@ -403,7 +494,8 @@ type commitmentCountList []commitmentCount
|
|||||||
|
|
||||||
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
||||||
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
||||||
func countCommitments(bwb []blocks.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
|
// `bwb` must be sorted by slot.
|
||||||
|
func countCommitments(bwb []blocks.BlockWithROSidecars, retentionStart primitives.Slot) commitmentCountList {
|
||||||
if len(bwb) == 0 {
|
if len(bwb) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -485,7 +577,9 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
|
|||||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||||
|
|
||||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks.BlockWithROBlobs, error) {
|
// verifyAndPopulateBlobs mutates the input `bwb` argument by adding verified blobs.
|
||||||
|
// This function mutates the input `bwb` argument.
|
||||||
|
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||||
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
|
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
|
||||||
for i := range blobs {
|
for i := range blobs {
|
||||||
if blobs[i].Slot() < req.StartSlot {
|
if blobs[i].Slot() < req.StartSlot {
|
||||||
@@ -495,46 +589,53 @@ func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob
|
|||||||
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
|
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
|
||||||
}
|
}
|
||||||
for i := range bwb {
|
for i := range bwb {
|
||||||
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
err := populateBlock(&bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, errDidntPopulate) {
|
if errors.Is(err, errDidntPopulate) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return bwb, err
|
return err
|
||||||
}
|
}
|
||||||
bwb[i] = bwi
|
|
||||||
}
|
}
|
||||||
return bwb, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var errDidntPopulate = errors.New("skipping population of block")
|
var errDidntPopulate = errors.New("skipping population of block")
|
||||||
|
|
||||||
func populateBlock(bw blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks.BlockWithROBlobs, error) {
|
// populateBlock verifies and populates blobs for a block.
|
||||||
|
// This function mutates the input `bw` argument.
|
||||||
|
func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||||
blk := bw.Block
|
blk := bw.Block
|
||||||
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
||||||
return bw, errDidntPopulate
|
return errDidntPopulate
|
||||||
}
|
}
|
||||||
|
|
||||||
commits, err := blk.Block().Body().BlobKzgCommitments()
|
commits, err := blk.Block().Body().BlobKzgCommitments()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return bw, errDidntPopulate
|
return errDidntPopulate
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(commits) == 0 {
|
if len(commits) == 0 {
|
||||||
return bw, errDidntPopulate
|
return errDidntPopulate
|
||||||
}
|
}
|
||||||
|
|
||||||
// Drop blobs on the floor if we already have them.
|
// Drop blobs on the floor if we already have them.
|
||||||
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
|
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
|
||||||
return bw, errDidntPopulate
|
return errDidntPopulate
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(commits) != len(blobs) {
|
if len(commits) != len(blobs) {
|
||||||
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
return missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||||
}
|
}
|
||||||
|
|
||||||
for ci := range commits {
|
for ci := range commits {
|
||||||
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
|
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
|
||||||
return bw, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.Blobs = blobs
|
bw.Blobs = blobs
|
||||||
return bw, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
||||||
@@ -547,29 +648,38 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
// fetchBlobsFromPeer fetches blocks from a single randomly selected peer.
|
// fetchBlobsFromPeer fetches blob sidecars from a single randomly selected peer.
|
||||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROBlobs, pid peer.ID, peers []peer.ID) (peer.ID, []blocks.BlockWithROBlobs, error) {
|
// This function mutates the input `bwb` argument.
|
||||||
|
// `pid` is the initial peer to request blobs from (usually the peer from which the block originated),
|
||||||
|
// `peers` is a list of peers to use for the request if `pid` fails.
|
||||||
|
// `bwb` must be sorted by slot.
|
||||||
|
// It returns the peer ID from which blobs were fetched.
|
||||||
|
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROSidecars, pid peer.ID, peers []peer.ID) (peer.ID, error) {
|
||||||
|
if len(bwb) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||||
return "", bwb, nil
|
return "", nil
|
||||||
}
|
}
|
||||||
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
|
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
// Construct request message based on observed interval of blocks in need of blobs.
|
// Construct request message based on observed interval of blocks in need of blobs.
|
||||||
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
|
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
|
||||||
if req == nil {
|
if req == nil {
|
||||||
return "", bwb, nil
|
return "", nil
|
||||||
}
|
}
|
||||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||||
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
||||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
peers = append([]peer.ID{pid}, peers...)
|
||||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
peers = f.hasSufficientBandwidth(peers, req.Count)
|
||||||
// We append the best peers to the front so that higher capacity
|
// We append the best peers to the front so that higher capacity
|
||||||
// peers are dialed first. If all of them fail, we fallback to the
|
// peers are dialed first. If all of them fail, we fallback to the
|
||||||
// initial peer we wanted to request blobs from.
|
// initial peer we wanted to request blobs from.
|
||||||
peers = append(bestPeers, pid)
|
peers = append(peers, pid)
|
||||||
for i := 0; i < len(peers); i++ {
|
for i := 0; i < len(peers); i++ {
|
||||||
p := peers[i]
|
p := peers[i]
|
||||||
blobs, err := f.requestBlobs(ctx, req, p)
|
blobs, err := f.requestBlobs(ctx, req, p)
|
||||||
@@ -578,14 +688,24 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||||
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
|
if err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs); err != nil {
|
||||||
if err != nil {
|
|
||||||
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return p, robs, err
|
return p, err
|
||||||
}
|
}
|
||||||
return "", nil, errNoPeersAvailable
|
return "", errNoPeersAvailable
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortedSliceFromMap returns a sorted slice of keys from a map.
|
||||||
|
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||||
|
result := make([]uint64, 0, len(m))
|
||||||
|
for k := range m {
|
||||||
|
result = append(result, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
slices.Sort(result)
|
||||||
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||||
@@ -642,6 +762,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
|
|||||||
}
|
}
|
||||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||||
l.Unlock()
|
l.Unlock()
|
||||||
|
|
||||||
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -699,13 +820,17 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
||||||
filteredPeers := []peer.ID{}
|
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||||
for _, p := range peers {
|
|
||||||
if uint64(f.rateLimiter.Remaining(p.String())) < count {
|
for _, peer := range peers {
|
||||||
|
remaining := uint64(0)
|
||||||
|
if remainingInt := f.rateLimiter.Remaining(peer.String()); remainingInt > 0 {
|
||||||
|
remaining = uint64(remainingInt)
|
||||||
|
}
|
||||||
|
if remaining < count {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
copiedP := p
|
filteredPeers = append(filteredPeers, peer)
|
||||||
filteredPeers = append(filteredPeers, copiedP)
|
|
||||||
}
|
}
|
||||||
return filteredPeers
|
return filteredPeers
|
||||||
}
|
}
|
||||||
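// Note on the Remaining clamp above: leakybucket.Collector.Remaining appears to return a
// signed count (hence the > 0 guard) that can go negative once a peer has exhausted its
// quota, and converting a negative value straight to uint64 would wrap around to a huge
// number and wrongly report spare bandwidth. The guarded conversion keeps such peers out
// of filteredPeers.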
@@ -745,3 +870,23 @@ func dedupPeers(peers []peer.ID) []peer.ID {
|
|||||||
}
|
}
|
||||||
return newPeerList
|
return newPeerList
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// findFirstFuluIndex returns the index of the first block with a version >= Fulu.
|
||||||
|
// It returns an error if the blocks are not sorted by version with respect to the Fulu fork.
|
||||||
|
func findFirstFuluIndex(bwScs []blocks.BlockWithROSidecars) (int, error) {
|
||||||
|
firstFuluIndex := len(bwScs)
|
||||||
|
|
||||||
|
for i, bwSc := range bwScs {
|
||||||
|
blockVersion := bwSc.Block.Version()
|
||||||
|
if blockVersion >= version.Fulu && firstFuluIndex > i {
|
||||||
|
firstFuluIndex = i
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if blockVersion < version.Fulu && firstFuluIndex <= i {
|
||||||
|
return 0, errors.New("blocks are not sorted by version")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return firstFuluIndex, nil
|
||||||
|
}
|
||||||
|
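// A minimal usage sketch for findFirstFuluIndex, assuming `bwScs` is already sorted by slot
// (and therefore by fork version), as the caller in this diff guarantees:
//
//	firstFuluIndex, err := findFirstFuluIndex(bwScs)
//	if err != nil {
//		return errors.Wrap(err, "find first Fulu index")
//	}
//	preFulu, postFulu := bwScs[:firstFuluIndex], bwScs[firstFuluIndex:]
//	// preFulu blocks still need blob sidecars; postFulu blocks need data column sidecars.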
|||||||
@@ -12,11 +12,12 @@ import (
|
|||||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||||
p2pm "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||||
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||||
@@ -266,7 +267,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
|||||||
|
|
||||||
beaconDB := dbtest.SetupDB(t)
|
beaconDB := dbtest.SetupDB(t)
|
||||||
|
|
||||||
p := p2pt.NewTestP2P(t)
|
p := p2ptest.NewTestP2P(t)
|
||||||
connectPeers(t, p, tt.peers, p.Peers())
|
connectPeers(t, p, tt.peers, p.Peers())
|
||||||
cache.RLock()
|
cache.RLock()
|
||||||
genesisRoot := cache.rootCache[0]
|
genesisRoot := cache.rootCache[0]
|
||||||
@@ -307,9 +308,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
|||||||
fetcher.stop()
|
fetcher.stop()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) {
|
processFetchedBlocks := func() ([]blocks.BlockWithROSidecars, error) {
|
||||||
defer cancel()
|
defer cancel()
|
||||||
var unionRespBlocks []blocks.BlockWithROBlobs
|
var unionRespBlocks []blocks.BlockWithROSidecars
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@@ -398,6 +399,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) {
|
|||||||
fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit))
|
fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBlocksFetcher_handleRequest(t *testing.T) {
|
func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||||
blockBatchLimit := flags.Get().BlockBatchLimit
|
blockBatchLimit := flags.Get().BlockBatchLimit
|
||||||
chainConfig := struct {
|
chainConfig := struct {
|
||||||
@@ -455,7 +457,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var bwb []blocks.BlockWithROBlobs
|
var bwb []blocks.BlockWithROSidecars
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
t.Error(ctx.Err())
|
t.Error(ctx.Err())
|
||||||
@@ -531,9 +533,9 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||||
p1 := p2pt.NewTestP2P(t)
|
p1 := p2ptest.NewTestP2P(t)
|
||||||
p2 := p2pt.NewTestP2P(t)
|
p2 := p2ptest.NewTestP2P(t)
|
||||||
p3 := p2pt.NewTestP2P(t)
|
p3 := p2ptest.NewTestP2P(t)
|
||||||
p1.Connect(p2)
|
p1.Connect(p2)
|
||||||
p1.Connect(p3)
|
p1.Connect(p3)
|
||||||
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||||
@@ -543,7 +545,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
|||||||
Count: 64,
|
Count: 64,
|
||||||
}
|
}
|
||||||
|
|
||||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
topic := p2p.RPCBlocksByRangeTopicV1
|
||||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||||
streamHandlerFn := func(stream network.Stream) {
|
streamHandlerFn := func(stream network.Stream) {
|
||||||
assert.NoError(t, stream.Close())
|
assert.NoError(t, stream.Close())
|
||||||
@@ -602,15 +604,15 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||||
p1 := p2pt.NewTestP2P(t)
|
p1 := p2ptest.NewTestP2P(t)
|
||||||
p2 := p2pt.NewTestP2P(t)
|
p2 := p2ptest.NewTestP2P(t)
|
||||||
p1.Connect(p2)
|
p1.Connect(p2)
|
||||||
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||||
req := &ethpb.BeaconBlocksByRangeRequest{
|
req := &ethpb.BeaconBlocksByRangeRequest{
|
||||||
Count: 64,
|
Count: 64,
|
||||||
}
|
}
|
||||||
|
|
||||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
topic := p2p.RPCBlocksByRangeTopicV1
|
||||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||||
streamHandlerFn := func(stream network.Stream) {
|
streamHandlerFn := func(stream network.Stream) {
|
||||||
assert.NoError(t, stream.Close())
|
assert.NoError(t, stream.Close())
|
||||||
@@ -638,7 +640,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
|
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
|
||||||
p1 := p2pt.NewTestP2P(t)
|
p1 := p2ptest.NewTestP2P(t)
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
req *ethpb.BeaconBlocksByRangeRequest
|
req *ethpb.BeaconBlocksByRangeRequest
|
||||||
@@ -883,7 +885,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
topic := p2p.RPCBlocksByRangeTopicV1
|
||||||
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
|
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(t.Context())
|
ctx, cancel := context.WithCancel(t.Context())
|
||||||
@@ -893,7 +895,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
p2 := p2pt.NewTestP2P(t)
|
p2 := p2ptest.NewTestP2P(t)
|
||||||
p1.Connect(p2)
|
p1.Connect(p2)
|
||||||
|
|
||||||
p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
|
p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
|
||||||
@@ -993,7 +995,7 @@ func TestBlobRangeForBlocks(t *testing.T) {
|
|||||||
func TestBlobRequest(t *testing.T) {
|
func TestBlobRequest(t *testing.T) {
|
||||||
var nilReq *ethpb.BlobSidecarsByRangeRequest
|
var nilReq *ethpb.BlobSidecarsByRangeRequest
|
||||||
// no blocks
|
// no blocks
|
||||||
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
|
req := countCommitments([]blocks.BlockWithROSidecars{}, 0).blobRange(nil).Request()
|
||||||
require.Equal(t, nilReq, req)
|
require.Equal(t, nilReq, req)
|
||||||
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||||
@@ -1026,22 +1028,16 @@ func TestBlobRequest(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCountCommitments(t *testing.T) {
|
func TestCountCommitments(t *testing.T) {
|
||||||
// no blocks
|
|
||||||
// blocks before retention start filtered
|
|
||||||
// blocks without commitments filtered
|
|
||||||
// pre-deneb filtered
|
|
||||||
// variety of commitment counts are accurate, from 1 to max
|
|
||||||
type testcase struct {
|
type testcase struct {
|
||||||
name string
|
name string
|
||||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
|
bwb func(t *testing.T, c testcase) []blocks.BlockWithROSidecars
|
||||||
numBlocks int
|
retStart primitives.Slot
|
||||||
retStart primitives.Slot
|
resCount int
|
||||||
resCount int
|
|
||||||
}
|
}
|
||||||
cases := []testcase{
|
cases := []testcase{
|
||||||
{
|
{
|
||||||
name: "nil blocks is safe",
|
name: "nil blocks is safe",
|
||||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
|
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROSidecars {
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
retStart: 0,
|
retStart: 0,
|
||||||
@@ -1179,7 +1175,7 @@ func TestCommitmentCountList(t *testing.T) {
 	}
 }
 
-func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
+func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROSidecars, []blocks.ROBlob) {
 	blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
 	sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
 	for i := range blks {
@@ -1190,7 +1186,7 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
 	return bwb, blobs
 }
 
-func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
+func testReqFromResp(bwb []blocks.BlockWithROSidecars) *ethpb.BlobSidecarsByRangeRequest {
 	return &ethpb.BlobSidecarsByRangeRequest{
 		StartSlot: bwb[0].Block.Block().Slot(),
 		Count:     uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
@@ -1207,7 +1203,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
 	}
 	require.Equal(t, len(blobs), len(expectedCommits))
 
-	bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
+	err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
 	require.NoError(t, err)
 	for _, bw := range bwb {
 		commits, err := bw.Block.Block().Body().BlobKzgCommitments()
@@ -1228,7 +1224,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
 	})
 	t.Run("missing blobs", func(t *testing.T) {
 		bwb, blobs := testSequenceBlockWithBlob(t, 10)
-		_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
+		err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
 		require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
 	})
 	t.Run("no blobs for last block", func(t *testing.T) {
@@ -1240,7 +1236,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
 		blobs = blobs[0 : len(blobs)-len(cmts)]
 		lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
 		bwb[lastIdx].Block = lastBlk
-		_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
+		err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
 		require.NoError(t, err)
 	})
 	t.Run("blobs not copied if all locally available", func(t *testing.T) {
@@ -1254,7 +1250,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
 			r7: {0, 1, 2, 3, 4, 5},
 		}
 		bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
-		bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
+		err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
 		require.NoError(t, err)
 		require.Equal(t, 6, len(bwb[i1].Blobs))
 		require.Equal(t, 0, len(bwb[i7].Blobs))
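Aside, not part of the commit: the hunks above track a signature change in verifyAndPopulateBlobs. Judging by the updated call sites, the helper now fills bwb[i].Blobs in place and returns only an error, where it previously also returned the populated slice. A minimal sketch of the new call shape under that assumption, using the names from the tests above:

	// Hypothetical usage: bwb already holds the blocks, blobs holds the fetched sidecars.
	// After a successful call, bwb[i].Blobs is populated for every block whose commitments
	// were matched, except blocks whose blobs the summarizer reports as already on disk.
	if err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss); err != nil {
		t.Fatalf("populate blobs: %v", err)
	}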
@@ -1302,3 +1298,203 @@ func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
 	}
 	assert.Equal(t, 2, len(receivedPeers))
 }
+
+func TestSortedSliceFromMap(t *testing.T) {
+	m := map[uint64]bool{1: true, 3: true, 2: true, 4: true}
+	expected := []uint64{1, 2, 3, 4}
+
+	actual := sortedSliceFromMap(m)
+	require.DeepSSZEqual(t, expected, actual)
+}
+
+func TestFetchSidecars(t *testing.T) {
+	ctx := t.Context()
+	t.Run("No blocks", func(t *testing.T) {
+		fetcher := new(blocksFetcher)
+
+		pid, err := fetcher.fetchSidecars(ctx, "", nil, []blocks.BlockWithROSidecars{})
+		assert.NoError(t, err)
+		assert.Equal(t, peer.ID(""), pid)
+	})
+
+	t.Run("Nominal", func(t *testing.T) {
+		beaconConfig := params.BeaconConfig()
+		numberOfColumns := beaconConfig.NumberOfColumns
+		samplesPerSlot := beaconConfig.SamplesPerSlot
+
+		// Define "now" to be one epoch after genesis time + retention period.
+		genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
+		secondsPerSlot := beaconConfig.SecondsPerSlot
+		slotsPerEpoch := beaconConfig.SlotsPerEpoch
+		secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
+		retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
+		nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
+		now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
+
+		genesisValidatorRoot := [fieldparams.RootLength]byte{}
+		nower := func() time.Time { return now }
+		clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
+
+		// Define a Deneb block with blobs out of retention period.
+		denebBlock := util.NewBeaconBlockDeneb()
+		denebBlock.Block.Slot = 0 // Genesis slot, out of retention period.
+		signedDenebBlock, err := blocks.NewSignedBeaconBlock(denebBlock)
+		require.NoError(t, err)
+		roDebebBlock, err := blocks.NewROBlock(signedDenebBlock)
+		require.NoError(t, err)
+
+		// Define a Fulu block with blobs in the retention period.
+		fuluBlock := util.NewBeaconBlockFulu()
+		fuluBlock.Block.Slot = slotsPerEpoch // Within retention period.
+		fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
+		signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
+		require.NoError(t, err)
+		roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
+		require.NoError(t, err)
+
+		bodyRoot, err := fuluBlock.Block.Body.HashTreeRoot()
+		require.NoError(t, err)
+
+		// Create and save data column sidecars for this fulu block in the database.
+		params := make([]util.DataColumnParam, 0, numberOfColumns)
+		for i := range numberOfColumns {
+			param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRoot[:]}
+			params = append(params, param)
+		}
+		_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
+
+		// Create a data columns storage.
+		dir := t.TempDir()
+		dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
+		require.NoError(t, err)
+
+		// Save the data column sidecars to the storage.
+		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
+		require.NoError(t, err)
+
+		// Create a blocks fetcher.
+		fetcher := &blocksFetcher{
+			clock: clock,
+			p2p:   p2ptest.NewTestP2P(t),
+			dcs:   dataColumnStorage,
+		}
+
+		// Fetch sidecars.
+		blocksWithSidecars := []blocks.BlockWithROSidecars{
+			{Block: roDebebBlock},
+			{Block: roFuluBlock},
+		}
+		pid, err := fetcher.fetchSidecars(ctx, "", nil, blocksWithSidecars)
+		require.NoError(t, err)
+		require.Equal(t, peer.ID(""), pid)
+
+		// Verify that block with sidecars were modified correctly.
+		require.Equal(t, 0, len(blocksWithSidecars[0].Blobs))
+		require.Equal(t, 0, len(blocksWithSidecars[0].Columns))
+		require.Equal(t, 0, len(blocksWithSidecars[1].Blobs))
+
+		// We don't check the content of the columns here. The extensive test is done
+		// in TestFetchDataColumnsSidecars.
+		require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[1].Columns)))
+	})
+}
+
+func TestFirstFuluIndex(t *testing.T) {
+	bellatrix := util.NewBeaconBlockBellatrix()
+	signedBellatrix, err := blocks.NewSignedBeaconBlock(bellatrix)
+	require.NoError(t, err)
+	roBellatrix, err := blocks.NewROBlock(signedBellatrix)
+	require.NoError(t, err)
+
+	capella := util.NewBeaconBlockCapella()
+	signedCapella, err := blocks.NewSignedBeaconBlock(capella)
+	require.NoError(t, err)
+	roCapella, err := blocks.NewROBlock(signedCapella)
+	require.NoError(t, err)
+
+	deneb := util.NewBeaconBlockDeneb()
+	signedDeneb, err := blocks.NewSignedBeaconBlock(deneb)
+	require.NoError(t, err)
+	roDeneb, err := blocks.NewROBlock(signedDeneb)
+	require.NoError(t, err)
+
+	fulu := util.NewBeaconBlockFulu()
+	signedFulu, err := blocks.NewSignedBeaconBlock(fulu)
+	require.NoError(t, err)
+	roFulu, err := blocks.NewROBlock(signedFulu)
+	require.NoError(t, err)
+
+	tests := []struct {
+		name          string
+		setupBlocks   func(t *testing.T) []blocks.BlockWithROSidecars
+		expectedIndex int
+		expectError   bool
+	}{
+		{
+			name: "all blocks are pre-Fulu",
+			setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
+				return []blocks.BlockWithROSidecars{
+					{Block: roBellatrix},
+					{Block: roCapella},
+					{Block: roDeneb},
+				}
+			},
+			expectedIndex: 3, // Should be the length of the slice
+			expectError:   false,
+		},
+		{
+			name: "all blocks are Fulu or later",
+			setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
+				return []blocks.BlockWithROSidecars{
+					{Block: roFulu},
+					{Block: roFulu},
+				}
+			},
+			expectedIndex: 0,
+			expectError:   false,
+		},
+		{
+			name: "mixed blocks correctly sorted",
+			setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
+
+				return []blocks.BlockWithROSidecars{
+					{Block: roBellatrix},
+					{Block: roCapella},
+					{Block: roDeneb},
+					{Block: roFulu},
+					{Block: roFulu},
+				}
+			},
+			expectedIndex: 3, // Index where Fulu blocks start
+			expectError:   false,
+		},
+		{
+			name: "mixed blocks incorrectly sorted",
+			setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
+				return []blocks.BlockWithROSidecars{
+					{Block: roBellatrix},
+					{Block: roCapella},
+					{Block: roFulu},
+					{Block: roDeneb},
+					{Block: roFulu},
+				}
+			},
+			expectedIndex: 0,
+			expectError:   true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			blocks := tt.setupBlocks(t)
+			index, err := findFirstFuluIndex(blocks)
+
+			if tt.expectError {
+				require.NotNil(t, err)
+				return
+			}
+
+			require.NoError(t, err)
+			require.Equal(t, tt.expectedIndex, index)
+		})
+	}
+}
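Aside, not part of the commit: TestFirstFuluIndex above exercises a findFirstFuluIndex helper that splits a slot-sorted batch at the first post-Fulu block and rejects batches where a pre-Fulu block follows a Fulu one. A minimal sketch consistent with those test expectations; the PR's actual implementation may differ, and the error text here is invented:

	// findFirstFuluIndex returns the index of the first block whose fork version is Fulu
	// or later, or len(bwb) when no such block exists. It errors when a pre-Fulu block
	// appears after a Fulu block, since the batch then cannot be split into two
	// contiguous halves.
	func findFirstFuluIndex(bwb []blocks.BlockWithROSidecars) (int, error) {
		first := len(bwb)
		for i, b := range bwb {
			isFulu := b.Block.Version() >= version.Fulu
			if isFulu && first == len(bwb) {
				first = i // First Fulu (or later) block seen so far.
			}
			if !isFulu && first != len(bwb) {
				return 0, fmt.Errorf("pre-Fulu block at index %d follows a Fulu block at index %d", i, first)
			}
		}
		return first, nil
	}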
@@ -24,7 +24,7 @@ import (
 type forkData struct {
 	blocksFrom peer.ID
 	blobsFrom  peer.ID
-	bwb        []blocks.BlockWithROBlobs
+	bwb        []blocks.BlockWithROSidecars
 }
 
 // nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.
@@ -275,16 +275,18 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
 		"slot": block.Block().Slot(),
 		"root": fmt.Sprintf("%#x", parentRoot),
 	}).Debug("Block with unknown parent root has been found")
-	altBlocks, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
+	bwb, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
 	if err != nil {
 		return nil, errors.Wrap(err, "invalid blocks received in findForkWithPeer")
 	}
 
 	// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
 	// the blocks.
-	bpid, bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
+	bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
 	if err != nil {
-		return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
+		return nil, errors.Wrap(err, "fetch sidecars")
 	}
 
 	// The caller will use the BlocksWith VerifiedBlobs in bwb as the starting point for
 	// round-robin syncing the alternate chain.
 	return &forkData{blocksFrom: pid, blobsFrom: bpid, bwb: bwb}, nil
@@ -303,10 +305,9 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
 	if err != nil {
 		return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
 	}
-	var bpid peer.ID
-	bpid, bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
+	bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
 	if err != nil {
-		return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
+		return nil, errors.Wrap(err, "fetch sidecars")
 	}
 	return &forkData{
 		blocksFrom: pid,
@@ -350,9 +351,12 @@ func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch p
 		cp := f.chain.FinalizedCheckpt()
 		headEpoch = cp.Epoch
 		targetEpoch, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
-	} else {
-		headEpoch = slots.ToEpoch(f.chain.HeadSlot())
-		targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
+		return headEpoch, targetEpoch, peers
 	}
 
+	headEpoch = slots.ToEpoch(f.chain.HeadSlot())
+	targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
+
 	return headEpoch, targetEpoch, peers
 }
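Aside, not part of the commit: both call sites above drop the extra return value that fetchBlobsFromPeer used to produce. The new fetchSidecars appears to populate the sidecar fields of the passed slice in place (blobs for pre-Fulu blocks, data columns for Fulu and later) and to return only the peer that ultimately served the data, which is why bwb can be handed straight to forkData afterwards. A purely hypothetical per-block dispatch skeleton, to illustrate the split it has to make:

	// Hypothetical sketch only; the real fetchSidecars in this PR batches requests
	// and applies retention-window checks (see TestFetchSidecars above).
	for i := range bwb {
		switch {
		case bwb[i].Block.Version() >= version.Fulu:
			// Fulu and later: obtain data column sidecars (locally stored or from peers).
		case bwb[i].Block.Version() >= version.Deneb:
			// Deneb through Electra: obtain blob sidecars, as before this change.
		default:
			// Pre-Deneb blocks carry no sidecars.
		}
	}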
@@ -72,6 +72,8 @@ type blocksQueueConfig struct {
 	db   db.ReadOnlyDatabase
 	mode syncMode
 	bs   filesystem.BlobStorageSummarizer
+	dcs  filesystem.DataColumnStorageReader
+	cv   verification.NewDataColumnsVerifier
 }
 
 // blocksQueue is a priority queue that serves as a intermediary between block fetchers (producers)
@@ -96,7 +98,7 @@ type blocksQueue struct {
 type blocksQueueFetchedData struct {
 	blocksFrom peer.ID
 	blobsFrom  peer.ID
-	bwb        []blocks.BlockWithROBlobs
+	bwb        []blocks.BlockWithROSidecars
 }
 
 // newBlocksQueue creates initialized priority queue.
@@ -115,6 +117,8 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
 			db:    cfg.db,
 			clock: cfg.clock,
 			bs:    cfg.bs,
+			dcs:   cfg.dcs,
+			cv:    cfg.cv,
 		})
 	}
 	highestExpectedSlot := cfg.highestExpectedSlot
@@ -263,7 +263,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
 				highestExpectedSlot: tt.highestExpectedSlot,
 			})
 			assert.NoError(t, queue.start())
-			processBlock := func(b blocks.BlockWithROBlobs) error {
+			processBlock := func(b blocks.BlockWithROSidecars) error {
 				block := b.Block
 				if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) {
 					return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
@@ -275,7 +275,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
 				return mc.ReceiveBlock(ctx, block, root, nil)
 			}
 
-			var blocks []blocks.BlockWithROBlobs
+			var blocks []blocks.BlockWithROSidecars
 			for data := range queue.fetchedData {
 				for _, b := range data.bwb {
 					if err := processBlock(b); err != nil {
@@ -538,7 +538,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
 		require.NoError(t, err)
 		response := &fetchRequestResponse{
 			blocksFrom: "abc",
-			bwb: []blocks.BlockWithROBlobs{
+			bwb: []blocks.BlockWithROSidecars{
 				{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}},
 				{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}},
 			},
@@ -640,7 +640,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
 		queue.smm.machines[256].fetched.blocksFrom = pidDataParsed
 		rwsb, err := blocks.NewROBlock(wsb)
 		require.NoError(t, err)
-		queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROBlobs{
+		queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROSidecars{
 			{Block: rwsb},
 		}
 
@@ -674,7 +674,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
 		queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
 		rwsb, err := blocks.NewROBlock(wsb)
 		require.NoError(t, err)
-		queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
+		queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
 			{Block: rwsb},
 		}
 
@@ -705,7 +705,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
 		queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
 		rwsb, err := blocks.NewROBlock(wsb)
 		require.NoError(t, err)
-		queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
+		queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
 			{Block: rwsb},
 		}
 
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/hex"
 	"fmt"
+	"sort"
 	"time"
 
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -13,6 +14,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
+	"github.com/OffchainLabs/prysm/v6/runtime/version"
 	"github.com/OffchainLabs/prysm/v6/time/slots"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/paulbellamy/ratecounter"
@@ -78,6 +80,8 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
 		highestExpectedSlot: highestSlot,
 		mode:                mode,
 		bs:                  s.cfg.BlobStorage,
+		dcs:                 s.cfg.DataColumnStorage,
+		cv:                  s.newDataColumnsVerifier,
 	}
 	queue := newBlocksQueue(ctx, cfg)
 	if err := queue.start(); err != nil {
@@ -157,31 +161,82 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
 		log.WithError(err).Debug("Batch did not contain a valid sequence of unprocessed blocks")
 		return 0, err
 	}
 
 	if len(bwb) == 0 {
 		return 0, nil
 	}
-	bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
-	avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
-	batchFields := logrus.Fields{
-		"firstSlot":        data.bwb[0].Block.Block().Slot(),
-		"firstUnprocessed": bwb[0].Block.Block().Slot(),
+
+	// Separate blocks with blobs from blocks with data columns.
+	fistDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
+		return bwb[i].Block.Version() >= version.Fulu
+	})
+
+	blocksWithBlobs := bwb[:fistDataColumnIndex]
+	blocksWithDataColumns := bwb[fistDataColumnIndex:]
+
+	blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
+	lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)
+
+	log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
+	logBlobs, logDataColumns := log, log
+
+	if len(blocksWithBlobs) > 0 {
+		logBlobs = logBlobs.WithField("firstUnprocessed", blocksWithBlobs[0].Block.Block().Slot())
 	}
-	for i, b := range bwb {
-		sidecars := blocks.NewSidecarsFromBlobSidecars(b.Blobs)
-		if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
-			log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")
+
+	for i, b := range blocksWithBlobs {
+		if err := lazilyPersistentStoreBlobs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
+			logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warning("Batch failure due to BlobSidecar issues")
 			return uint64(i), err
 		}
-		if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
+
+		if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreBlobs); err != nil {
 			if errors.Is(err, errParentDoesNotExist) {
-				log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
+				logBlobs.WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
 					WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
 			} else {
-				log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
+				logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warn("Block processing failure")
 			}
+
 			return uint64(i), err
 		}
 	}
+
+	if len(blocksWithDataColumns) == 0 {
+		return uint64(len(bwb)), nil
+	}
+
+	// Save data column sidecars.
+	count := 0
+	for _, b := range blocksWithDataColumns {
+		count += len(b.Columns)
+	}
+
+	sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
+	for _, blockWithDataColumns := range blocksWithDataColumns {
+		sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
+	}
+
+	if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
+		return 0, errors.Wrap(err, "save data column sidecars")
+	}
+
+	for i, b := range blocksWithDataColumns {
+		logDataColumns := logDataColumns.WithFields(syncFields(b.Block))
+
+		if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, nil); err != nil {
+			switch {
+			case errors.Is(err, errParentDoesNotExist):
+				logDataColumns.
+					WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
+					Debug("Could not process batch blocks due to missing parent")
+				return uint64(i), err
+			default:
+				logDataColumns.WithError(err).Warning("Block processing failure")
+				return uint64(i), err
+			}
+		}
+	}
 	return uint64(len(bwb)), nil
 }
 
@@ -193,12 +248,18 @@ func syncFields(b blocks.ROBlock) logrus.Fields {
 }
 
 // highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers.
-// Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us.
+// It returns `0` if no peers are connected.
+// Note this can be lower than our finalized epoch if our connected peers are all behind us.
 func (s *Service) highestFinalizedEpoch() primitives.Epoch {
 	highest := primitives.Epoch(0)
 	for _, pid := range s.cfg.P2P.Peers().Connected() {
 		peerChainState, err := s.cfg.P2P.Peers().ChainState(pid)
-		if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest {
+		if err != nil || peerChainState == nil {
+			continue
+		}
+
+		if peerChainState.FinalizedEpoch > highest {
 			highest = peerChainState.FinalizedEpoch
 		}
 	}
@@ -250,7 +311,7 @@ func (s *Service) logBatchSyncStatus(firstBlk blocks.ROBlock, nBlocks int) {
 func (s *Service) processBlock(
 	ctx context.Context,
 	genesis time.Time,
-	bwb blocks.BlockWithROBlobs,
+	bwb blocks.BlockWithROSidecars,
 	blockReceiver blockReceiverFn,
 	avs das.AvailabilityStore,
 ) error {
@@ -269,7 +330,7 @@ func (s *Service) processBlock(
 
 type processedChecker func(context.Context, blocks.ROBlock) bool
 
-func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) {
+func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROSidecars, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROSidecars, error) {
 	// use a pointer to avoid confusing the zero-value with the case where the first element is processed.
 	var processed *int
 	for i := range bwb {
@@ -299,43 +360,100 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSl
 	return bwb[nonProcessedIdx:], nil
 }
 
-func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) (uint64, error) {
-	if len(bwb) == 0 {
+func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn) (uint64, error) {
+	bwbCount := uint64(len(bwb))
+	if bwbCount == 0 {
 		return 0, errors.New("0 blocks provided into method")
 	}
 
 	headSlot := s.cfg.Chain.HeadSlot()
-	var err error
-	bwb, err = validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
+	bwb, err := validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
 	if err != nil {
 		return 0, err
 	}
-	if len(bwb) == 0 {
-		return 0, nil
-	}
 
-	first := bwb[0].Block
-	if !s.cfg.Chain.HasBlock(ctx, first.Block().ParentRoot()) {
+	firstBlock := bwb[0].Block
+	if !s.cfg.Chain.HasBlock(ctx, firstBlock.Block().ParentRoot()) {
 		return 0, fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
-			errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
+			errParentDoesNotExist, firstBlock.Block().ParentRoot(), firstBlock.Block().Slot())
 	}
 
-	bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
-	avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
-	s.logBatchSyncStatus(first, len(bwb))
-	for _, bb := range bwb {
-		if len(bb.Blobs) == 0 {
+	firstFuluIndex, err := findFirstFuluIndex(bwb)
+	if err != nil {
+		return 0, errors.Wrap(err, "finding first Fulu index")
+	}
+
+	blocksWithBlobs := bwb[:firstFuluIndex]
+	blocksWithDataColumns := bwb[firstFuluIndex:]
+
+	if err := s.processBlocksWithBlobs(ctx, blocksWithBlobs, bFunc, firstBlock); err != nil {
+		return 0, errors.Wrap(err, "processing blocks with blobs")
+	}
+
+	if err := s.processBlocksWithDataColumns(ctx, blocksWithDataColumns, bFunc, firstBlock); err != nil {
+		return 0, errors.Wrap(err, "processing blocks with data columns")
+	}
+
+	return bwbCount, nil
+}
+
+func (s *Service) processBlocksWithBlobs(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
+	bwbCount := len(bwbs)
+	if bwbCount == 0 {
+		return nil
+	}
+
+	batchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
+	persistentStore := das.NewLazilyPersistentStore(s.cfg.BlobStorage, batchVerifier)
+	s.logBatchSyncStatus(firstBlock, bwbCount)
+
+	for _, bwb := range bwbs {
+		if len(bwb.Blobs) == 0 {
 			continue
 		}
-		sidecars := blocks.NewSidecarsFromBlobSidecars(bb.Blobs)
-		if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
-			return 0, err
+
+		if err := persistentStore.Persist(s.clock.CurrentSlot(), bwb.Blobs...); err != nil {
+			return errors.Wrap(err, "persisting blobs")
 		}
 	}
 
-	robs := blocks.BlockWithROBlobsSlice(bwb).ROBlocks()
-	return uint64(len(bwb)), bFunc(ctx, robs, avs)
+	robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
+	if err := bFunc(ctx, robs, persistentStore); err != nil {
+		return errors.Wrap(err, "processing blocks with blobs")
+	}
+
+	return nil
+}
+
+func (s *Service) processBlocksWithDataColumns(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
+	bwbCount := len(bwbs)
+	if bwbCount == 0 {
+		return nil
+	}
+
+	s.logBatchSyncStatus(firstBlock, bwbCount)
+
+	// Save data column sidecars.
+	count := 0
+	for _, bwb := range bwbs {
+		count += len(bwb.Columns)
+	}
+
+	sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
+	for _, blockWithDataColumns := range bwbs {
+		sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
+	}
+
+	if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
		return errors.Wrap(err, "save data column sidecars")
+	}
+
+	robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
+	if err := bFunc(ctx, robs, nil); err != nil {
+		return errors.Wrap(err, "process post-Fulu blocks")
+	}
+
+	return nil
 }
 
 func isPunishableError(err error) bool {
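Aside, not part of the commit: processFetchedDataRegSync above splits a batch at the first post-Fulu block with sort.Search, which is only valid because the batch is sorted by slot and fork versions never decrease along it. A small self-contained Go sketch of the same technique on plain integers, runnable on its own:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		// Stand-ins for fork versions of a slot-ordered batch (non-decreasing).
		versions := []int{3, 3, 4, 4, 5, 5, 5} // here 5 plays the role of Fulu
		const fulu = 5

		// sort.Search returns the smallest index whose predicate is true,
		// assuming the predicate is false..false,true..true across the slice.
		first := sort.Search(len(versions), func(i int) bool { return versions[i] >= fulu })

		preFulu, postFulu := versions[:first], versions[first:]
		fmt.Println(preFulu, postFulu) // [3 3 4 4] [5 5 5]
	}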
@@ -8,9 +8,11 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||||
@@ -373,7 +375,7 @@ func TestService_processBlock(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
rowsb, err := blocks.NewROBlock(wsb)
|
rowsb, err := blocks.NewROBlock(wsb)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||||
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
||||||
return nil
|
return nil
|
||||||
@@ -385,7 +387,7 @@ func TestService_processBlock(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
rowsb, err = blocks.NewROBlock(wsb)
|
rowsb, err = blocks.NewROBlock(wsb)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||||
return nil
|
return nil
|
||||||
}, nil)
|
}, nil)
|
||||||
@@ -396,7 +398,7 @@ func TestService_processBlock(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
rowsb, err = blocks.NewROBlock(wsb)
|
rowsb, err = blocks.NewROBlock(wsb)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
|
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
|
||||||
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
|
||||||
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
|
||||||
return nil
|
return nil
|
||||||
@@ -432,7 +434,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
|||||||
s.genesisTime = genesis
|
s.genesisTime = genesis
|
||||||
|
|
||||||
t.Run("process non-linear batch", func(t *testing.T) {
|
t.Run("process non-linear batch", func(t *testing.T) {
|
||||||
var batch []blocks.BlockWithROBlobs
|
var batch []blocks.BlockWithROSidecars
|
||||||
currBlockRoot := genesisBlkRoot
|
currBlockRoot := genesisBlkRoot
|
||||||
for i := primitives.Slot(1); i < 10; i++ {
|
for i := primitives.Slot(1); i < 10; i++ {
|
||||||
parentRoot := currBlockRoot
|
parentRoot := currBlockRoot
|
||||||
@@ -446,11 +448,11 @@ func TestService_processBlockBatch(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
rowsb, err := blocks.NewROBlock(wsb)
|
rowsb, err := blocks.NewROBlock(wsb)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
|
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
|
||||||
currBlockRoot = blk1Root
|
currBlockRoot = blk1Root
|
||||||
}
|
}
|
||||||
|
|
||||||
var batch2 []blocks.BlockWithROBlobs
|
var batch2 []blocks.BlockWithROSidecars
|
||||||
for i := primitives.Slot(10); i < 20; i++ {
|
for i := primitives.Slot(10); i < 20; i++ {
|
||||||
parentRoot := currBlockRoot
|
parentRoot := currBlockRoot
|
||||||
blk1 := util.NewBeaconBlock()
|
blk1 := util.NewBeaconBlock()
|
||||||
@@ -463,7 +465,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
rowsb, err := blocks.NewROBlock(wsb)
|
rowsb, err := blocks.NewROBlock(wsb)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb})
|
batch2 = append(batch2, blocks.BlockWithROSidecars{Block: rowsb})
|
||||||
currBlockRoot = blk1Root
|
currBlockRoot = blk1Root
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -485,7 +487,7 @@ func TestService_processBlockBatch(t *testing.T) {
|
|||||||
assert.ErrorContains(t, "block is already processed", err)
|
assert.ErrorContains(t, "block is already processed", err)
|
||||||
require.Equal(t, uint64(0), count)
|
require.Equal(t, uint64(0), count)
|
||||||
|
|
||||||
var badBatch2 []blocks.BlockWithROBlobs
|
var badBatch2 []blocks.BlockWithROSidecars
|
||||||
for i, b := range batch2 {
|
for i, b := range batch2 {
|
||||||
// create a non-linear batch
|
// create a non-linear batch
|
||||||
if i%3 == 0 && i != 0 {
|
if i%3 == 0 && i != 0 {
|
||||||
@@ -685,7 +687,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
util.SaveBlock(t, t.Context(), beaconDB, genesisBlk)
|
util.SaveBlock(t, t.Context(), beaconDB, genesisBlk)
|
||||||
|
|
||||||
var batch []blocks.BlockWithROBlobs
|
var batch []blocks.BlockWithROSidecars
|
||||||
currBlockRoot := genesisBlkRoot
|
currBlockRoot := genesisBlkRoot
|
||||||
for i := primitives.Slot(1); i < 10; i++ {
|
for i := primitives.Slot(1); i < 10; i++ {
|
||||||
parentRoot := currBlockRoot
|
parentRoot := currBlockRoot
|
||||||
@@ -699,7 +701,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
rowsb, err := blocks.NewROBlock(wsb)
|
rowsb, err := blocks.NewROBlock(wsb)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
|
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
|
||||||
currBlockRoot = blk1Root
|
currBlockRoot = blk1Root
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -712,3 +714,155 @@ func TestService_ValidUnprocessed(t *testing.T) {
|
|||||||
// Ensure that the unprocessed batch is returned correctly.
|
// Ensure that the unprocessed batch is returned correctly.
|
||||||
assert.Equal(t, len(retBlocks), len(batch)-2)
|
assert.Equal(t, len(retBlocks), len(batch)-2)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestService_PropcessFetchedDataRegSync(t *testing.T) {
|
||||||
|
ctx := t.Context()
|
||||||
|
|
||||||
|
// Create a data columns storage.
|
||||||
|
dir := t.TempDir()
|
||||||
|
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create Fulu blocks.
|
||||||
|
fuluBlock1 := util.NewBeaconBlockFulu()
|
||||||
|
signedFuluBlock1, err := blocks.NewSignedBeaconBlock(fuluBlock1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roFuluBlock1, err := blocks.NewROBlock(signedFuluBlock1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
block1Root := roFuluBlock1.Root()
|
||||||
|
|
||||||
|
fuluBlock2 := util.NewBeaconBlockFulu()
|
||||||
|
fuluBlock2.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||||
|
fuluBlock2.Block.Slot = 1
|
||||||
|
fuluBlock2.Block.ParentRoot = block1Root[:]
|
||||||
|
signedFuluBlock2, err := blocks.NewSignedBeaconBlock(fuluBlock2)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
roFuluBlock2, err := blocks.NewROBlock(signedFuluBlock2)
|
||||||
|
require.NoError(t, err)
|
||||||
|
block2Root := roFuluBlock2.Root()
|
||||||
|
parentRoot2 := roFuluBlock2.Block().ParentRoot()
|
||||||
|
bodyRoot2, err := roFuluBlock2.Block().Body().HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create a mock chain service.
|
||||||
|
const validatorCount = uint64(64)
|
||||||
|
state, _ := util.DeterministicGenesisState(t, validatorCount)
|
||||||
|
chain := &mock.ChainService{
|
||||||
|
FinalizedCheckPoint: ð.Checkpoint{},
|
||||||
|
DB: dbtest.SetupDB(t),
|
||||||
|
State: state,
|
||||||
|
Root: block1Root[:],
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new service instance.
|
||||||
|
service := &Service{
|
||||||
|
cfg: &Config{
|
||||||
|
Chain: chain,
|
||||||
|
DataColumnStorage: dataColumnStorage,
|
||||||
|
},
|
||||||
|
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the parent block in the database.
|
||||||
|
err = chain.DB.SaveBlock(ctx, roFuluBlock1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create data column sidecars.
|
||||||
|
const count = uint64(3)
|
||||||
|
params := make([]util.DataColumnParam, 0, count)
|
||||||
|
for i := range count {
|
||||||
|
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot2[:], ParentRoot: parentRoot2[:], Slot: roFuluBlock2.Block().Slot()}
|
||||||
|
params = append(params, param)
|
||||||
|
}
|
||||||
|
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||||
|
|
||||||
|
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||||
|
{Block: roFuluBlock2, Columns: verifiedRoDataColumnSidecars},
|
||||||
|
}
|
||||||
|
|
||||||
|
data := &blocksQueueFetchedData{
|
||||||
|
bwb: blocksWithSidecars,
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := service.processFetchedDataRegSync(ctx, data)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, uint64(1), actual)
|
||||||
|
|
||||||
|
// Check block and data column sidecars were saved correctly.
|
||||||
|
require.Equal(t, true, chain.DB.HasBlock(ctx, block2Root))
|
||||||
|
|
||||||
|
summary := dataColumnStorage.Summary(block2Root)
|
||||||
|
for i := range count {
|
||||||
|
require.Equal(t, true, summary.HasIndex(i))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestService_processBlocksWithDataColumns(t *testing.T) {
|
||||||
|
ctx := t.Context()
|
||||||
|
|
||||||
|
t.Run("no blocks", func(t *testing.T) {
|
||||||
|
fuluBlock := util.NewBeaconBlockFulu()
|
||||||
|
|
||||||
|
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
service := new(Service)
|
||||||
|
err = service.processBlocksWithDataColumns(ctx, nil, nil, roFuluBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("nominal", func(t *testing.T) {
|
||||||
|
fuluBlock := util.NewBeaconBlockFulu()
|
||||||
|
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||||
|
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
bodyRoot, err := roFuluBlock.Block().Body().HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create data column sidecars.
|
||||||
|
const count = uint64(3)
|
||||||
|
params := make([]util.DataColumnParam, 0, count)
|
||||||
|
for i := range count {
|
||||||
|
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot[:]}
|
||||||
|
params = append(params, param)
|
||||||
|
}
|
||||||
|
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||||
|
|
||||||
|
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||||
|
{Block: roFuluBlock, Columns: verifiedRoDataColumnSidecars},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a data columns storage.
|
||||||
|
dir := t.TempDir()
|
||||||
|
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create a service.
|
||||||
|
service := &Service{
|
||||||
|
cfg: &Config{
|
||||||
|
P2P: p2pt.NewTestP2P(t),
|
||||||
|
DataColumnStorage: dataColumnStorage,
|
||||||
|
},
|
||||||
|
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
|
receiverFunc := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
|
||||||
|
require.Equal(t, 1, len(blks))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = service.processBlocksWithDataColumns(ctx, blocksWithSidecars, receiverFunc, roFuluBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify that the data columns were saved correctly.
|
||||||
|
summary := dataColumnStorage.Summary(roFuluBlock.Root())
|
||||||
|
for i := range count {
|
||||||
|
require.Equal(t, true, summary.HasIndex(i))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
@@ -53,22 +54,24 @@ type Config struct {
|
|||||||
ClockWaiter startup.ClockWaiter
|
ClockWaiter startup.ClockWaiter
|
||||||
InitialSyncComplete chan struct{}
|
InitialSyncComplete chan struct{}
|
||||||
BlobStorage *filesystem.BlobStorage
|
BlobStorage *filesystem.BlobStorage
|
||||||
|
DataColumnStorage *filesystem.DataColumnStorage
|
||||||
}
|
}
|
||||||
|
|
||||||
// Service service.
|
// Service service.
|
||||||
type Service struct {
|
type Service struct {
|
||||||
cfg *Config
|
cfg *Config
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
synced *abool.AtomicBool
|
synced *abool.AtomicBool
|
||||||
chainStarted *abool.AtomicBool
|
chainStarted *abool.AtomicBool
|
||||||
counter *ratecounter.RateCounter
|
counter *ratecounter.RateCounter
|
||||||
genesisChan chan time.Time
|
genesisChan chan time.Time
|
||||||
clock *startup.Clock
|
clock *startup.Clock
|
||||||
verifierWaiter *verification.InitializerWaiter
|
verifierWaiter *verification.InitializerWaiter
|
||||||
newBlobVerifier verification.NewBlobVerifier
|
newBlobVerifier verification.NewBlobVerifier
|
||||||
ctxMap sync.ContextByteVersions
|
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||||
genesisTime time.Time
|
ctxMap sync.ContextByteVersions
|
||||||
|
genesisTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// Option is a functional option for the initial-sync Service.
|
// Option is a functional option for the initial-sync Service.
|
||||||
@@ -149,6 +152,7 @@ func (s *Service) Start() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
||||||
|
s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
|
||||||
|
|
||||||
gt := clock.GenesisTime()
|
gt := clock.GenesisTime()
|
||||||
if gt.IsZero() {
|
if gt.IsZero() {
|
||||||
@@ -175,19 +179,22 @@ func (s *Service) Start() {
|
|||||||
}
|
}
|
||||||
s.chainStarted.Set()
|
s.chainStarted.Set()
|
||||||
log.Info("Starting initial chain sync...")
|
log.Info("Starting initial chain sync...")
|
||||||
|
|
||||||
// Are we already in sync, or close to it?
|
// Are we already in sync, or close to it?
|
||||||
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
|
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
|
||||||
log.Info("Already synced to the current chain head")
|
log.Info("Already synced to the current chain head")
|
||||||
s.markSynced()
|
s.markSynced()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
peers, err := s.waitForMinimumPeers()
|
peers, err := s.waitForMinimumPeers()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Error("Error waiting for minimum number of peers")
|
log.WithError(err).Error("Error waiting for minimum number of peers")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := s.fetchOriginBlobs(peers); err != nil {
|
|
||||||
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
|
if err := s.fetchOriginSidecars(peers); err != nil {
|
||||||
|
log.WithError(err).Error("Error fetching origin sidecars")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := s.roundRobinSync(); err != nil {
|
if err := s.roundRobinSync(); err != nil {
|
||||||
@@ -200,6 +207,48 @@ func (s *Service) Start() {
|
|||||||
s.markSynced()
|
s.markSynced()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fetchOriginSidecars fetches origin sidecars
|
||||||
|
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
|
||||||
|
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||||
|
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
block, err := s.cfg.DB.Block(s.ctx, blockRoot)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "block")
|
||||||
|
}
|
||||||
|
|
||||||
|
currentSlot, blockSlot := s.clock.CurrentSlot(), block.Block().Slot()
|
||||||
|
currentEpoch, blockEpoch := slots.ToEpoch(currentSlot), slots.ToEpoch(blockSlot)
|
||||||
|
|
||||||
|
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
roBlock, err := blocks.NewROBlockWithRoot(block, blockRoot)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "new ro block with root")
|
||||||
|
}
|
||||||
|
|
||||||
|
blockVersion := roBlock.Version()
|
||||||
|
|
||||||
|
if blockVersion >= version.Fulu {
|
||||||
|
if err := s.fetchOriginColumns(peers, roBlock); err != nil {
|
||||||
|
return errors.Wrap(err, "fetch origin columns")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if blockVersion >= version.Deneb {
|
||||||
|
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
|
||||||
|
return errors.Wrap(err, "fetch origin blobs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Stop initial sync.
|
// Stop initial sync.
|
||||||
func (s *Service) Stop() error {
|
func (s *Service) Stop() error {
|
||||||
s.cancel()
|
s.cancel()
|
||||||
@@ -304,23 +353,9 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
|
|||||||
return req, nil
|
return req, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
|
||||||
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
r := rob.Root()
|
||||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
blk, err := s.cfg.DB.Block(s.ctx, r)
|
|
||||||
if err != nil {
|
|
||||||
log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
rob, err := blocks.NewROBlockWithRoot(blk, r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
|
req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -335,16 +370,17 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(blobSidecars) != len(req) {
|
if len(blobSidecars) != len(req) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
|
||||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||||
current := s.clock.CurrentSlot()
|
current := s.clock.CurrentSlot()
|
||||||
sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
if err := avs.Persist(current, blobSidecars...); err != nil {
|
||||||
if err := avs.Persist(current, sidecars...); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
|
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
|
||||||
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
|
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
|
||||||
continue
|
continue
|
||||||
@@ -355,6 +391,67 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
|||||||
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
|
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
|
||||||
|
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||||
|
|
||||||
|
// Return early if the origin block has no blob commitments.
|
||||||
|
commitments, err := roBlock.Block().Body().BlobKzgCommitments()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "fetch blob commitments")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(commitments) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute the columns to request.
|
||||||
|
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "custody group count")
|
||||||
|
}
|
||||||
|
|
||||||
|
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||||
|
info, _, err := peerdas.Info(s.cfg.P2P.NodeID(), samplingSize)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "fetch peer info")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch origin data column sidecars.
|
||||||
|
root := roBlock.Root()
|
||||||
|
|
||||||
|
params := sync.DataColumnSidecarsParams{
|
||||||
|
Ctx: s.ctx,
|
||||||
|
Tor: s.clock,
|
||||||
|
P2P: s.cfg.P2P,
|
||||||
|
CtxMap: s.ctxMap,
|
||||||
|
Storage: s.cfg.DataColumnStorage,
|
||||||
|
NewVerifier: s.newDataColumnsVerifier,
|
||||||
|
}
|
||||||
|
|
||||||
|
verifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "fetch data column sidecars")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save origin data columns to disk.
|
||||||
|
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
|
||||||
|
return errors.Wrap(err, "save data column sidecars")
|
||||||
|
}
|
||||||
|
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
|
||||||
|
"blobCount": len(commitments),
|
||||||
|
"columnCount": len(verifiedRoDataColumnsSidecars),
|
||||||
|
}).Info("Successfully downloaded data columns for checkpoint sync block")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
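A minimal usage sketch tying the custody computation above to local storage: it lists the custody columns that are not yet on disk for a given block root. The `Summary` and `HasIndex` accessors appear in the tests later in this change; the concrete storage type and the map[uint64]bool shape of the custody set are assumptions drawn from this diff, and the helper itself is illustrative only.

func missingCustodyColumns(storage *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, custodyColumns map[uint64]bool) []uint64 {
	// Columns the node should custody but does not yet have stored for this block.
	summary := storage.Summary(root)
	missing := make([]uint64, 0, len(custodyColumns))
	for index := range custodyColumns {
		if !summary.HasIndex(index) {
			missing = append(missing, index)
		}
	}
	return missing
}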
func shufflePeers(pids []peer.ID) {
|
func shufflePeers(pids []peer.ID) {
|
||||||
rg := rand.NewGenerator()
|
rg := rand.NewGenerator()
|
||||||
rg.Shuffle(len(pids), func(i, j int) {
|
rg.Shuffle(len(pids), func(i, j int) {
|
||||||
@@ -367,3 +464,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
|
|||||||
return ini.NewBlobVerifier(b, reqs)
|
return ini.NewBlobVerifier(b, reqs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
|
||||||
|
return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
|
||||||
|
return ini.NewDataColumnsVerifier(roDataColumns, reqs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,14 +7,17 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||||
@@ -138,7 +141,7 @@ func TestService_InitStartStop(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
p := p2pt.NewTestP2P(t)
|
p := p2ptest.NewTestP2P(t)
|
||||||
connectPeers(t, p, []*peerData{}, p.Peers())
|
connectPeers(t, p, []*peerData{}, p.Peers())
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
@@ -328,7 +331,7 @@ func TestService_markSynced(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestService_Resync(t *testing.T) {
|
func TestService_Resync(t *testing.T) {
|
||||||
p := p2pt.NewTestP2P(t)
|
p := p2ptest.NewTestP2P(t)
|
||||||
connectPeers(t, p, []*peerData{
|
connectPeers(t, p, []*peerData{
|
||||||
{blocks: makeSequence(1, 160), finalizedEpoch: 5, headSlot: 160},
|
{blocks: makeSequence(1, 160), finalizedEpoch: 5, headSlot: 160},
|
||||||
}, p.Peers())
|
}, p.Peers())
|
||||||
@@ -511,5 +514,152 @@ func TestOriginOutsideRetention(t *testing.T) {
|
|||||||
require.NoError(t, concreteDB.SaveOriginCheckpointBlockRoot(ctx, blk.Root()))
|
require.NoError(t, concreteDB.SaveOriginCheckpointBlockRoot(ctx, blk.Root()))
|
||||||
// This would break due to missing service dependencies, but will return nil fast due to being outside retention.
|
// This would break due to missing service dependencies, but will return nil fast due to being outside retention.
|
||||||
require.Equal(t, false, params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(clock.CurrentSlot())))
|
require.Equal(t, false, params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(clock.CurrentSlot())))
|
||||||
require.NoError(t, s.fetchOriginBlobs([]peer.ID{}))
|
require.NoError(t, s.fetchOriginSidecars([]peer.ID{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFetchOriginSidecars(t *testing.T) {
|
||||||
|
ctx := t.Context()
|
||||||
|
|
||||||
|
beaconConfig := params.BeaconConfig()
|
||||||
|
genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
|
||||||
|
secondsPerSlot := beaconConfig.SecondsPerSlot
|
||||||
|
slotsPerEpoch := beaconConfig.SlotsPerEpoch
|
||||||
|
secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
|
||||||
|
retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
|
||||||
|
|
||||||
|
genesisValidatorRoot := [fieldparams.RootLength]byte{}
|
||||||
|
|
||||||
|
t.Run("out of retention period", func(t *testing.T) {
|
||||||
|
// Create an origin block.
|
||||||
|
block := util.NewBeaconBlockFulu()
|
||||||
|
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roBlock, err := blocks.NewROBlock(signedBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Save the block.
|
||||||
|
db := dbtest.SetupDB(t)
|
||||||
|
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = db.SaveBlock(ctx, roBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Define "now" to be one epoch after genesis time + retention period.
|
||||||
|
nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
|
||||||
|
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||||
|
nower := func() time.Time { return now }
|
||||||
|
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||||
|
|
||||||
|
service := &Service{
|
||||||
|
cfg: &Config{
|
||||||
|
DB: db,
|
||||||
|
},
|
||||||
|
clock: clock,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = service.fetchOriginSidecars(nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no commitments", func(t *testing.T) {
|
||||||
|
// Create an origin block.
|
||||||
|
block := util.NewBeaconBlockFulu()
|
||||||
|
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||||
|
require.NoError(t, err)
|
||||||
|
roBlock, err := blocks.NewROBlock(signedBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Save the block.
|
||||||
|
db := dbtest.SetupDB(t)
|
||||||
|
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = db.SaveBlock(ctx, roBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Define "now" to be after genesis time + retention period.
|
||||||
|
nowWrtGenesisSecs := retentionEpochs.Mul(secondsPerEpoch)
|
||||||
|
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||||
|
nower := func() time.Time { return now }
|
||||||
|
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||||
|
|
||||||
|
service := &Service{
|
||||||
|
cfg: &Config{
|
||||||
|
DB: db,
|
||||||
|
P2P: p2ptest.NewTestP2P(t),
|
||||||
|
},
|
||||||
|
clock: clock,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = service.fetchOriginSidecars(nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("nominal", func(t *testing.T) {
|
||||||
|
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||||
|
|
||||||
|
// Start the trusted setup.
|
||||||
|
err := kzg.Start()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create block and sidecars.
|
||||||
|
const blobCount = 1
|
||||||
|
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||||
|
|
||||||
|
// Save the block.
|
||||||
|
db := dbtest.SetupDB(t)
|
||||||
|
err = db.SaveOriginCheckpointBlockRoot(ctx, roBlock.Root())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = db.SaveBlock(ctx, roBlock)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create a data columns storage.
|
||||||
|
dir := t.TempDir()
|
||||||
|
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Compute the columns to request.
|
||||||
|
p2p := p2ptest.NewTestP2P(t)
|
||||||
|
custodyGroupCount, err := p2p.CustodyGroupCount()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||||
|
info, _, err := peerdas.Info(p2p.NodeID(), samplingSize)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Save all sidecars except what we need.
|
||||||
|
toSave := make([]blocks.VerifiedRODataColumn, 0, uint64(len(verifiedRoSidecars))-samplingSize)
|
||||||
|
for _, sidecar := range verifiedRoSidecars {
|
||||||
|
if !info.CustodyColumns[sidecar.Index] {
|
||||||
|
toSave = append(toSave, sidecar)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = dataColumnStorage.Save(toSave)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Define "now" to be after genesis time + retention period.
|
||||||
|
nowWrtGenesisSecs := retentionEpochs.Mul(secondsPerEpoch)
|
||||||
|
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||||
|
nower := func() time.Time { return now }
|
||||||
|
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||||
|
|
||||||
|
service := &Service{
|
||||||
|
cfg: &Config{
|
||||||
|
DB: db,
|
||||||
|
P2P: p2p,
|
||||||
|
DataColumnStorage: dataColumnStorage,
|
||||||
|
},
|
||||||
|
clock: clock,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = service.fetchOriginSidecars(nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Check that needed sidecars are saved.
|
||||||
|
summary := dataColumnStorage.Summary(roBlock.Root())
|
||||||
|
for index := range info.CustodyColumns {
|
||||||
|
require.Equal(t, true, summary.HasIndex(index))
|
||||||
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v6/async"
|
"github.com/OffchainLabs/prysm/v6/async"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||||
@@ -175,8 +176,9 @@ func (s *Service) getBlocksInQueue(slot primitives.Slot) []interfaces.ReadOnlySi
|
|||||||
func (s *Service) removeBlockFromQueue(b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
func (s *Service) removeBlockFromQueue(b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
||||||
s.pendingQueueLock.Lock()
|
s.pendingQueueLock.Lock()
|
||||||
defer s.pendingQueueLock.Unlock()
|
defer s.pendingQueueLock.Unlock()
|
||||||
|
|
||||||
if err := s.deleteBlockFromPendingQueue(b.Block().Slot(), b, blkRoot); err != nil {
|
if err := s.deleteBlockFromPendingQueue(b.Block().Slot(), b, blkRoot); err != nil {
|
||||||
return err
|
return errors.Wrap(err, "delete block from pending queue")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -196,41 +198,82 @@ func (s *Service) hasPeer() bool {
|
|||||||
var errNoPeersForPending = errors.New("no suitable peers to process pending block queue, delaying")
|
var errNoPeersForPending = errors.New("no suitable peers to process pending block queue, delaying")
|
||||||
|
|
||||||
// processAndBroadcastBlock validates, processes, and broadcasts a block.
|
// processAndBroadcastBlock validates, processes, and broadcasts a block.
|
||||||
// part of the function is to request missing blobs from peers if the block contains kzg commitments.
|
// Part of the function is to request missing sidecars from peers if the block contains kzg commitments.
|
||||||
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
|
func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte) error {
|
||||||
|
if err := s.processBlock(ctx, b, blkRoot); err != nil {
|
||||||
|
return errors.Wrap(err, "process block")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.receiveAndBroadCastBlock(ctx, b, blkRoot, b.Block().Slot()); err != nil {
|
||||||
|
return errors.Wrap(err, "receive and broadcast block")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) processBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte) error {
|
||||||
|
blockSlot := b.Block().Slot()
|
||||||
|
|
||||||
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
|
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
|
||||||
if !errors.Is(ErrOptimisticParent, err) {
|
if !errors.Is(ErrOptimisticParent, err) {
|
||||||
log.WithError(err).WithField("slot", b.Block().Slot()).Debug("Could not validate block")
|
log.WithError(err).WithField("slot", blockSlot).Debug("Could not validate block")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
|
blockEpoch, denebForkEpoch, fuluForkEpoch := slots.ToEpoch(blockSlot), params.BeaconConfig().DenebForkEpoch, params.BeaconConfig().FuluForkEpoch
|
||||||
|
|
||||||
|
roBlock, err := blocks.NewROBlockWithRoot(b, blkRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errors.Wrap(err, "new ro block with root")
|
||||||
}
|
|
||||||
if len(request) > 0 {
|
|
||||||
peers := s.getBestPeers()
|
|
||||||
peerCount := len(peers)
|
|
||||||
if peerCount == 0 {
|
|
||||||
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
|
||||||
}
|
|
||||||
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if blockEpoch >= fuluForkEpoch {
|
||||||
|
if err := s.requestAndSaveMissingDataColumnSidecars([]blocks.ROBlock{roBlock}); err != nil {
|
||||||
|
return errors.Wrap(err, "request and save missing data column sidecars")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if blockEpoch >= denebForkEpoch {
|
||||||
|
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "pending blobs request for block")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(request) > 0 {
|
||||||
|
peers := s.getBestPeers()
|
||||||
|
peerCount := len(peers)
|
||||||
|
|
||||||
|
if peerCount == 0 {
|
||||||
|
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
||||||
|
return errors.Wrap(err, "send and save blob sidecars")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
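For readers skimming the hunk above, the sidecar dispatch in processBlock reduces to an epoch gate. The sketch below restates it with the fork-epoch fields used in this diff; it is illustrative only, with error handling elided.

blockEpoch := slots.ToEpoch(b.Block().Slot())
cfg := params.BeaconConfig()
switch {
case blockEpoch >= cfg.FuluForkEpoch:
	// PeerDAS block: request and save any missing data column sidecars.
case blockEpoch >= cfg.DenebForkEpoch:
	// Deneb (or later, pre-Fulu) block: request and save any missing blob sidecars.
default:
	// Pre-Deneb block: no sidecars to fetch.
}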
|
func (s *Service) receiveAndBroadCastBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, blkRoot [fieldparams.RootLength]byte, blockSlot primitives.Slot) error {
|
||||||
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot, nil); err != nil {
|
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot, nil); err != nil {
|
||||||
return err
|
return errors.Wrap(err, "receive block")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.setSeenBlockIndexSlot(b.Block().Slot(), b.Block().ProposerIndex())
|
s.setSeenBlockIndexSlot(blockSlot, b.Block().ProposerIndex())
|
||||||
|
|
||||||
pb, err := b.Proto()
|
pb, err := b.Proto()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Debug("Could not get protobuf block")
|
log.WithError(err).Debug("Could not get protobuf block")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.cfg.p2p.Broadcast(ctx, pb); err != nil {
|
if err := s.cfg.p2p.Broadcast(ctx, pb); err != nil {
|
||||||
log.WithError(err).Debug("Could not broadcast block")
|
log.WithError(err).Debug("Could not broadcast block")
|
||||||
return err
|
return err
|
||||||
@@ -286,58 +329,113 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
|||||||
ctx, span := prysmTrace.StartSpan(ctx, "sendBatchRootRequest")
|
ctx, span := prysmTrace.StartSpan(ctx, "sendBatchRootRequest")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
roots = dedupRoots(roots)
|
// Exit early if there are no roots to request.
|
||||||
s.pendingQueueLock.RLock()
|
|
||||||
for i := len(roots) - 1; i >= 0; i-- {
|
|
||||||
r := roots[i]
|
|
||||||
if s.seenPendingBlocks[r] || s.cfg.chain.BlockBeingSynced(r) {
|
|
||||||
roots = append(roots[:i], roots[i+1:]...)
|
|
||||||
} else {
|
|
||||||
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.pendingQueueLock.RUnlock()
|
|
||||||
|
|
||||||
if len(roots) == 0 {
|
if len(roots) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
bestPeers := s.getBestPeers()
|
|
||||||
if len(bestPeers) == 0 {
|
// Filter out roots that are already seen in pending blocks or being synced.
|
||||||
|
roots = s.filterOutPendingAndSynced(roots)
|
||||||
|
|
||||||
|
// Nothing to do, exit early.
|
||||||
|
if len(roots) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Randomly choose a peer to query from our best peers. If that peer cannot return
|
|
||||||
// all the requested blocks, we randomly select another peer.
|
// Fetch best peers to request blocks from.
|
||||||
pid := bestPeers[randGen.Int()%len(bestPeers)]
|
bestPeers := s.getBestPeers()
|
||||||
for i := 0; i < numOfTries; i++ {
|
|
||||||
|
// No suitable peer, exit early.
|
||||||
|
if len(bestPeers) == 0 {
|
||||||
|
log.WithField("roots", fmt.Sprintf("%#x", roots)).Debug("Send batch root request: No suitable peers")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Randomly choose a peer to query from our best peers.
|
||||||
|
// If that peer cannot return all the requested blocks,
|
||||||
|
// we randomly select another peer.
|
||||||
|
randomIndex := randGen.Int() % len(bestPeers)
|
||||||
|
pid := bestPeers[randomIndex]
|
||||||
|
|
||||||
|
for range numOfTries {
|
||||||
req := p2ptypes.BeaconBlockByRootsReq(roots)
|
req := p2ptypes.BeaconBlockByRootsReq(roots)
|
||||||
currentEpoch := slots.ToEpoch(s.cfg.clock.CurrentSlot())
|
|
||||||
|
// Get the current epoch.
|
||||||
|
currentSlot := s.cfg.clock.CurrentSlot()
|
||||||
|
currentEpoch := slots.ToEpoch(currentSlot)
|
||||||
|
|
||||||
|
// Trim the request to the maximum number of blocks we can request if needed.
|
||||||
maxReqBlock := params.MaxRequestBlock(currentEpoch)
|
maxReqBlock := params.MaxRequestBlock(currentEpoch)
|
||||||
if uint64(len(roots)) > maxReqBlock {
|
rootCount := uint64(len(roots))
|
||||||
|
if rootCount > maxReqBlock {
|
||||||
req = roots[:maxReqBlock]
|
req = roots[:maxReqBlock]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Send the request to the peer.
|
||||||
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
||||||
tracing.AnnotateError(span, err)
|
tracing.AnnotateError(span, err)
|
||||||
log.WithError(err).Debug("Could not send recent block request")
|
log.WithError(err).Debug("Could not send recent block request")
|
||||||
}
|
}
|
||||||
newRoots := make([][32]byte, 0, len(roots))
|
|
||||||
s.pendingQueueLock.RLock()
|
// Filter out roots that are already seen in pending blocks.
|
||||||
for _, rt := range roots {
|
newRoots := make([][32]byte, 0, rootCount)
|
||||||
if !s.seenPendingBlocks[rt] {
|
func() {
|
||||||
newRoots = append(newRoots, rt)
|
s.pendingQueueLock.RLock()
|
||||||
|
defer s.pendingQueueLock.RUnlock()
|
||||||
|
|
||||||
|
for _, rt := range roots {
|
||||||
|
if !s.seenPendingBlocks[rt] {
|
||||||
|
newRoots = append(newRoots, rt)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}()
|
||||||
s.pendingQueueLock.RUnlock()
|
|
||||||
|
// Exit early if all roots have been seen.
|
||||||
|
// This is the happy path.
|
||||||
if len(newRoots) == 0 {
|
if len(newRoots) == 0 {
|
||||||
break
|
return nil
|
||||||
}
|
}
|
||||||
// Choosing a new peer with the leftover set of
|
|
||||||
// roots to request.
|
// There are still some roots that have not been seen.
|
||||||
|
// Choose a new peer with the leftover set of roots to request.
|
||||||
roots = newRoots
|
roots = newRoots
|
||||||
pid = bestPeers[randGen.Int()%len(bestPeers)]
|
|
||||||
|
// Choose a new peer to query.
|
||||||
|
randomIndex = randGen.Int() % len(bestPeers)
|
||||||
|
pid = bestPeers[randomIndex]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Some roots are still missing after all allowed tries.
|
||||||
|
// This is the unhappy path.
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"roots": fmt.Sprintf("%#x", roots),
|
||||||
|
"tries": numOfTries,
|
||||||
|
}).Debug("Send batch root request: Some roots are still missing after all allowed tries")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// filterOutPendingAndSynced filters out roots that are already seen in pending blocks or being synced.
|
||||||
|
func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte) [][fieldparams.RootLength]byte {
|
||||||
|
// Remove duplicates (if any) from the list of roots.
|
||||||
|
roots = dedupRoots(roots)
|
||||||
|
|
||||||
|
// Filter out, in place, the roots that are already seen in pending blocks or being synced.
|
||||||
|
s.pendingQueueLock.RLock()
|
||||||
|
defer s.pendingQueueLock.RUnlock()
|
||||||
|
|
||||||
|
for i := len(roots) - 1; i >= 0; i-- {
|
||||||
|
r := roots[i]
|
||||||
|
if s.seenPendingBlocks[r] || s.cfg.chain.BlockBeingSynced(r) {
|
||||||
|
roots = append(roots[:i], roots[i+1:]...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
||||||
|
}
|
||||||
|
return roots
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Service) sortedPendingSlots() []primitives.Slot {
|
func (s *Service) sortedPendingSlots() []primitives.Slot {
|
||||||
s.pendingQueueLock.RLock()
|
s.pendingQueueLock.RLock()
|
||||||
defer s.pendingQueueLock.RUnlock()
|
defer s.pendingQueueLock.RUnlock()
|
||||||
|
|||||||
@@ -4,11 +4,13 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||||
|
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||||
@@ -20,15 +22,19 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// sendBeaconBlocksRequest sends a recent beacon blocks request to a peer to get
|
// sendBeaconBlocksRequest sends the given beacon blocks by root `requests` to
|
||||||
// those corresponding blocks from that peer.
|
// the peer with the given `id`. For each received block, it inserts the block into the
|
||||||
|
// pending queue. Then, for each received block, it checks if all corresponding sidecars
|
||||||
|
// are stored, and, if not, sends the corresponding sidecar requests and stores the received sidecars.
|
||||||
|
// Only blob sidecars will be requested from the peer with the given `id`.
|
||||||
|
// For other types of sidecars, the request will be sent to the best peers.
|
||||||
func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
|
func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.BeaconBlockByRootsReq, id peer.ID) error {
|
||||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
requestedRoots := make(map[[32]byte]struct{})
|
requestedRoots := make(map[[fieldparams.RootLength]byte]bool)
|
||||||
for _, root := range *requests {
|
for _, root := range *requests {
|
||||||
requestedRoots[root] = struct{}{}
|
requestedRoots[root] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
blks, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, id, requests, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
|
blks, err := SendBeaconBlocksByRootRequest(ctx, s.cfg.clock, s.cfg.p2p, id, requests, func(blk interfaces.ReadOnlySignedBeaconBlock) error {
|
||||||
@@ -36,39 +42,124 @@ func (s *Service) sendBeaconBlocksRequest(ctx context.Context, requests *types.B
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, ok := requestedRoots[blkRoot]; !ok {
|
|
||||||
|
if ok := requestedRoots[blkRoot]; !ok {
|
||||||
return fmt.Errorf("received unexpected block with root %x", blkRoot)
|
return fmt.Errorf("received unexpected block with root %x", blkRoot)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.pendingQueueLock.Lock()
|
s.pendingQueueLock.Lock()
|
||||||
defer s.pendingQueueLock.Unlock()
|
defer s.pendingQueueLock.Unlock()
|
||||||
|
|
||||||
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
|
if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blkRoot); err != nil {
|
||||||
return err
|
return errors.Wrapf(err, "insert block to pending queue for block with root %x", blkRoot)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// The following part deals with sidecars.
|
||||||
|
postFuluBlocks := make([]blocks.ROBlock, 0, len(blks))
|
||||||
for _, blk := range blks {
|
for _, blk := range blks {
|
||||||
// Skip blocks before deneb because they have no blob.
|
blockVersion := blk.Version()
|
||||||
if blk.Version() < version.Deneb {
|
|
||||||
|
if blockVersion >= version.Fulu {
|
||||||
|
roBlock, err := blocks.NewROBlock(blk)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "new ro block")
|
||||||
|
}
|
||||||
|
|
||||||
|
postFuluBlocks = append(postFuluBlocks, roBlock)
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
blkRoot, err := blk.Block().HashTreeRoot()
|
|
||||||
if err != nil {
|
if blockVersion >= version.Deneb {
|
||||||
return err
|
if err := s.requestAndSaveMissingBlobSidecars(blk, id); err != nil {
|
||||||
}
|
return errors.Wrap(err, "request and save missing blob sidecars")
|
||||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, blk)
|
}
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(request) == 0 {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := s.sendAndSaveBlobSidecars(ctx, request, id, blk); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := s.requestAndSaveMissingDataColumnSidecars(postFuluBlocks); err != nil {
|
||||||
|
return errors.Wrap(err, "request and save missing data columns")
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// requestAndSaveMissingDataColumnSidecars checks if data column sidecars are missing for the given blocks.
|
||||||
|
// If so, requests them and saves them to the storage.
|
||||||
|
func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock) error {
|
||||||
|
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||||
|
|
||||||
|
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "fetch custody group count from peer")
|
||||||
|
}
|
||||||
|
|
||||||
|
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||||
|
info, _, err := peerdas.Info(s.cfg.p2p.NodeID(), samplingSize)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "custody info")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch missing data column sidecars.
|
||||||
|
params := DataColumnSidecarsParams{
|
||||||
|
Ctx: s.ctx,
|
||||||
|
Tor: s.cfg.clock,
|
||||||
|
P2P: s.cfg.p2p,
|
||||||
|
CtxMap: s.ctxMap,
|
||||||
|
Storage: s.cfg.dataColumnStorage,
|
||||||
|
NewVerifier: s.newColumnsVerifier,
|
||||||
|
}
|
||||||
|
|
||||||
|
sidecarsByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "fetch data column sidecars")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the sidecars to the storage.
|
||||||
|
count := 0
|
||||||
|
for _, sidecars := range sidecarsByRoot {
|
||||||
|
count += len(sidecars)
|
||||||
|
}
|
||||||
|
|
||||||
|
sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
|
||||||
|
for _, sidecars := range sidecarsByRoot {
|
||||||
|
sidecarsToSave = append(sidecarsToSave, sidecars...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.cfg.dataColumnStorage.Save(sidecarsToSave); err != nil {
|
||||||
|
return errors.Wrap(err, "save")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
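A hedged caller-side sketch for the helper above, mirroring how sendBeaconBlocksRequest batches post-Fulu blocks before a single missing-column request; `received` is a placeholder for blocks already obtained from a peer.

postFuluBlocks := make([]blocks.ROBlock, 0, len(received))
for _, blk := range received {
	// Only Fulu and later blocks carry data column sidecars.
	if blk.Version() < version.Fulu {
		continue
	}
	roBlock, err := blocks.NewROBlock(blk)
	if err != nil {
		return errors.Wrap(err, "new ro block")
	}
	postFuluBlocks = append(postFuluBlocks, roBlock)
}
if err := s.requestAndSaveMissingDataColumnSidecars(postFuluBlocks); err != nil {
	return errors.Wrap(err, "request and save missing data column sidecars")
}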
|
func (s *Service) requestAndSaveMissingBlobSidecars(block interfaces.ReadOnlySignedBeaconBlock, peerID peer.ID) error {
|
||||||
|
blockRoot, err := block.Block().HashTreeRoot()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "hash tree root")
|
||||||
|
}
|
||||||
|
|
||||||
|
request, err := s.pendingBlobsRequestForBlock(blockRoot, block)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "pending blobs request for block")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(request) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.sendAndSaveBlobSidecars(s.ctx, request, peerID, block); err != nil {
|
||||||
|
return errors.Wrap(err, "send and save blob sidecars")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// beaconBlocksRootRPCHandler looks up the request blocks from the database from the given block roots.
|
// beaconBlocksRootRPCHandler looks up the request blocks from the database from the given block roots.
|
||||||
func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||||
|
|||||||
@@ -36,12 +36,12 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
|
|||||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||||
|
|
||||||
// Check if the message type is the one expected.
|
// Check if the message type is the one expected.
|
||||||
ref, ok := msg.(*types.DataColumnsByRootIdentifiers)
|
ref, ok := msg.(types.DataColumnsByRootIdentifiers)
|
||||||
if !ok {
|
if !ok {
|
||||||
return notDataColumnsByRootIdentifiersError
|
return notDataColumnsByRootIdentifiersError
|
||||||
}
|
}
|
||||||
|
|
||||||
requestedColumnIdents := *ref
|
requestedColumnIdents := ref
|
||||||
remotePeer := stream.Conn().RemotePeer()
|
remotePeer := stream.Conn().RemotePeer()
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
|
|||||||
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
stream, err := localP2P.BHost.NewStream(t.Context(), remoteP2P.BHost.ID(), protocolID)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
msg := &types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
|
msg := types.DataColumnsByRootIdentifiers{{Columns: []uint64{1, 2, 3}}}
|
||||||
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
require.Equal(t, true, localP2P.Peers().Scorers().BadResponsesScorer().Score(remoteP2P.PeerID()) >= 0)
|
||||||
|
|
||||||
err = service.dataColumnSidecarByRootRPCHandler(t.Context(), msg, stream)
|
err = service.dataColumnSidecarByRootRPCHandler(t.Context(), msg, stream)
|
||||||
@@ -169,7 +169,7 @@ func TestDataColumnSidecarsByRootRPCHandler(t *testing.T) {
|
|||||||
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
|
stream, err := localP2P.BHost.NewStream(ctx, remoteP2P.BHost.ID(), protocolID)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
msg := &types.DataColumnsByRootIdentifiers{
|
msg := types.DataColumnsByRootIdentifiers{
|
||||||
{
|
{
|
||||||
BlockRoot: root0[:],
|
BlockRoot: root0[:],
|
||||||
Columns: []uint64{1, 2, 3},
|
Columns: []uint64{1, 2, 3},
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ import (
|
|||||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||||
"github.com/libp2p/go-libp2p/core/network"
|
"github.com/libp2p/go-libp2p/core/network"
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
|
goPeer "github.com/libp2p/go-libp2p/core/peer"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
@@ -404,11 +405,8 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
|
|||||||
// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range
|
// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range
|
||||||
// and returns the fetched data column sidecars.
|
// and returns the fetched data column sidecars.
|
||||||
func SendDataColumnSidecarsByRangeRequest(
|
func SendDataColumnSidecarsByRangeRequest(
|
||||||
ctx context.Context,
|
p DataColumnSidecarsParams,
|
||||||
tor blockchain.TemporalOracle,
|
|
||||||
p2pApi p2p.P2P,
|
|
||||||
pid peer.ID,
|
pid peer.ID,
|
||||||
ctxMap ContextByteVersions,
|
|
||||||
request *ethpb.DataColumnSidecarsByRangeRequest,
|
request *ethpb.DataColumnSidecarsByRangeRequest,
|
||||||
) ([]blocks.RODataColumn, error) {
|
) ([]blocks.RODataColumn, error) {
|
||||||
// Return early if nothing to request.
|
// Return early if nothing to request.
|
||||||
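For reference, a hedged sketch of the call shape after this refactor: the positional arguments are replaced by a DataColumnSidecarsParams struct. The same construction appears in the tests further below; the variable names here are placeholders.

p := DataColumnSidecarsParams{
	Ctx:    ctx,    // request context
	Tor:    clock,  // temporal oracle used to derive the current epoch
	P2P:    p2pAPI, // sender used to open the stream
	CtxMap: ctxMap, // ContextByteVersions mapping
}
roDataColumns, err := SendDataColumnSidecarsByRangeRequest(p, peerID, request)
if err != nil {
	return errors.Wrap(err, "send data column sidecars by range request")
}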
@@ -428,7 +426,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Build the topic.
|
// Build the topic.
|
||||||
currentSlot := tor.CurrentSlot()
|
currentSlot := p.Tor.CurrentSlot()
|
||||||
currentEpoch := slots.ToEpoch(currentSlot)
|
currentEpoch := slots.ToEpoch(currentSlot)
|
||||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, currentEpoch)
|
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, currentEpoch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -453,7 +451,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
|||||||
})
|
})
|
||||||
|
|
||||||
// Send the request.
|
// Send the request.
|
||||||
stream, err := p2pApi.Send(ctx, request, topic, pid)
|
stream, err := p.P2P.Send(p.Ctx, request, topic, pid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "p2p send")
|
return nil, errors.Wrap(err, "p2p send")
|
||||||
}
|
}
|
||||||
@@ -463,7 +461,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
|||||||
roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
|
roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
|
||||||
for range totalCount {
|
for range totalCount {
|
||||||
// Avoid reading extra chunks if the context is done.
|
// Avoid reading extra chunks if the context is done.
|
||||||
if err := ctx.Err(); err != nil {
|
if err := p.Ctx.Err(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -473,7 +471,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
|||||||
}
|
}
|
||||||
|
|
||||||
roDataColumn, err := readChunkedDataColumnSidecar(
|
roDataColumn, err := readChunkedDataColumnSidecar(
|
||||||
stream, p2pApi, ctxMap,
|
stream, p.P2P, p.CtxMap,
|
||||||
validatorSlotWithinBounds,
|
validatorSlotWithinBounds,
|
||||||
isSidecarIndexRequested(request),
|
isSidecarIndexRequested(request),
|
||||||
)
|
)
|
||||||
@@ -492,7 +490,7 @@ func SendDataColumnSidecarsByRangeRequest(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// All requested sidecars were delivered by the peer. Expecting EOF.
|
// All requested sidecars were delivered by the peer. Expecting EOF.
|
||||||
if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
|
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
|
||||||
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
|
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -539,22 +537,10 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
|
|||||||
|
|
||||||
// SendDataColumnSidecarsByRootRequest sends a request for data column sidecars by root
|
// SendDataColumnSidecarsByRootRequest sends a request for data column sidecars by root
|
||||||
// and returns the fetched data column sidecars.
|
// and returns the fetched data column sidecars.
|
||||||
func SendDataColumnSidecarsByRootRequest(
|
func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer.ID, identifiers p2ptypes.DataColumnsByRootIdentifiers) ([]blocks.RODataColumn, error) {
|
||||||
ctx context.Context,
|
|
||||||
tor blockchain.TemporalOracle,
|
|
||||||
p2pApi p2p.P2P,
|
|
||||||
pid peer.ID,
|
|
||||||
ctxMap ContextByteVersions,
|
|
||||||
request p2ptypes.DataColumnsByRootIdentifiers,
|
|
||||||
) ([]blocks.RODataColumn, error) {
|
|
||||||
// Return early if the request is nil.
|
|
||||||
if request == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute how many sidecars are requested.
|
// Compute how many sidecars are requested.
|
||||||
count := uint64(0)
|
count := uint64(0)
|
||||||
for _, identifier := range request {
|
for _, identifier := range identifiers {
|
||||||
count += uint64(len(identifier.Columns))
|
count += uint64(len(identifier.Columns))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -570,13 +556,15 @@ func SendDataColumnSidecarsByRootRequest(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get the topic for the request.
|
// Get the topic for the request.
|
||||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
|
currentSlot := p.Tor.CurrentSlot()
|
||||||
|
currentEpoch := slots.ToEpoch(currentSlot)
|
||||||
|
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, currentEpoch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "topic from message")
|
return nil, errors.Wrap(err, "topic from message")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send the request to the peer.
|
// Send the request to the peer.
|
||||||
stream, err := p2pApi.Send(ctx, request, topic, pid)
|
stream, err := p.P2P.Send(p.Ctx, identifiers, topic, peer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "p2p api send")
|
return nil, errors.Wrap(err, "p2p api send")
|
||||||
}
|
}
|
||||||
@@ -587,7 +575,7 @@ func SendDataColumnSidecarsByRootRequest(
|
|||||||
|
|
||||||
// Read the data column sidecars from the stream.
|
// Read the data column sidecars from the stream.
|
||||||
for range count {
|
for range count {
|
||||||
roDataColumn, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap, isSidecarIndexRootRequested(request))
|
roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, isSidecarIndexRootRequested(identifiers))
|
||||||
if errors.Is(err, io.EOF) {
|
if errors.Is(err, io.EOF) {
|
||||||
return roDataColumns, nil
|
return roDataColumns, nil
|
||||||
}
|
}
|
||||||
@@ -603,7 +591,7 @@ func SendDataColumnSidecarsByRootRequest(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// All requested sidecars were delivered by the peer. Expecting EOF.
|
// All requested sidecars were delivered by the peer. Expecting EOF.
|
||||||
if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
|
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
|
||||||
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
|
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -629,11 +617,11 @@ func isSidecarIndexRootRequested(request p2ptypes.DataColumnsByRootIdentifiers)
|
|||||||
indices, ok := columnsIndexFromRoot[root]
|
indices, ok := columnsIndexFromRoot[root]
|
||||||
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return errors.Errorf("root #%x returned by peer but not requested", root)
|
return errors.Errorf("root %#x returned by peer but not requested", root)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !indices[index] {
|
if !indices[index] {
|
||||||
return errors.Errorf("index %d for root #%x returned by peer but not requested", index, root)
|
return errors.Errorf("index %d for root %#x returned by peer but not requested", index, root)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -915,7 +915,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range nilTestCases {
|
for _, tc := range nilTestCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
|
actual, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", tc.request)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.IsNil(t, actual)
|
require.IsNil(t, actual)
|
||||||
})
|
})
|
||||||
@@ -928,7 +928,7 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
|
|||||||
params.OverrideBeaconConfig(beaconConfig)
|
params.OverrideBeaconConfig(beaconConfig)
|
||||||
|
|
||||||
request := ðpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
|
request := ðpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
|
||||||
_, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
|
_, err := SendDataColumnSidecarsByRangeRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
|
||||||
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
|
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -1040,7 +1040,14 @@ func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, requestSent)
|
parameters := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
Tor: clock,
|
||||||
|
P2P: p1,
|
||||||
|
CtxMap: ctxMap,
|
||||||
|
}
|
||||||
|
|
||||||
|
actual, err := SendDataColumnSidecarsByRangeRequest(parameters, p2.PeerID(), requestSent)
|
||||||
if tc.expectedError != nil {
|
if tc.expectedError != nil {
|
||||||
require.ErrorContains(t, tc.expectedError.Error(), err)
|
require.ErrorContains(t, tc.expectedError.Error(), err)
|
||||||
if util.WaitTimeout(&wg, time.Second) {
|
if util.WaitTimeout(&wg, time.Second) {
|
||||||
@@ -1208,7 +1215,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range nilTestCases {
|
for _, tc := range nilTestCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
|
actual, err := SendDataColumnSidecarsByRootRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", tc.request)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.IsNil(t, actual)
|
require.IsNil(t, actual)
|
||||||
})
|
})
|
||||||
@@ -1225,7 +1232,7 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
|
|||||||
{Columns: []uint64{4, 5, 6}},
|
{Columns: []uint64{4, 5, 6}},
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
|
_, err := SendDataColumnSidecarsByRootRequest(DataColumnSidecarsParams{Ctx: t.Context()}, "", request)
|
||||||
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
|
require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -1346,7 +1353,13 @@ func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), clock, p1, p2.PeerID(), ctxMap, sentRequest)
|
parameters := DataColumnSidecarsParams{
|
||||||
|
Ctx: t.Context(),
|
||||||
|
Tor: clock,
|
||||||
|
P2P: p1,
|
||||||
|
CtxMap: ctxMap,
|
||||||
|
}
|
||||||
|
actual, err := SendDataColumnSidecarsByRootRequest(parameters, p2.PeerID(), sentRequest)
|
||||||
if tc.expectedError != nil {
|
if tc.expectedError != nil {
|
||||||
require.ErrorContains(t, tc.expectedError.Error(), err)
|
require.ErrorContains(t, tc.expectedError.Error(), err)
|
||||||
if util.WaitTimeout(&wg, time.Second) {
|
if util.WaitTimeout(&wg, time.Second) {
|
||||||
|
|||||||
@@ -38,7 +38,10 @@ func (s *Service) maintainPeerStatuses() {
|
|||||||
go func(id peer.ID) {
|
go func(id peer.ID) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
log := log.WithField("peer", id)
|
log := log.WithFields(logrus.Fields{
|
||||||
|
"peer": id,
|
||||||
|
"agent": agentString(id, s.cfg.p2p.Host()),
|
||||||
|
})
|
||||||
|
|
||||||
// If our peer status has not been updated correctly we disconnect over here
|
// If our peer status has not been updated correctly we disconnect over here
|
||||||
// and set the connection state over here instead.
|
// and set the connection state over here instead.
|
||||||
|
|||||||
@@ -17,9 +17,12 @@ var (
|
|||||||
|
|
||||||
// BlobAlignsWithBlock verifies if the blob aligns with the block.
|
// BlobAlignsWithBlock verifies if the blob aligns with the block.
|
||||||
func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
|
func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
|
||||||
if block.Version() < version.Deneb {
|
blockVersion := block.Version()
|
||||||
|
|
||||||
|
if blockVersion < version.Deneb || blockVersion >= version.Fulu {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blob.Slot())
|
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blob.Slot())
|
||||||
if blob.Index >= uint64(maxBlobsPerBlock) {
|
if blob.Index >= uint64(maxBlobsPerBlock) {
|
||||||
return errors.Wrapf(ErrIncorrectBlobIndex, "index %d exceeds MAX_BLOBS_PER_BLOCK %d", blob.Index, maxBlobsPerBlock)
|
return errors.Wrapf(ErrIncorrectBlobIndex, "index %d exceeds MAX_BLOBS_PER_BLOCK %d", blob.Index, maxBlobsPerBlock)
|
||||||
|
|||||||
@@ -47,6 +47,15 @@ var (
|
|||||||
RequireSidecarKzgProofVerified,
|
RequireSidecarKzgProofVerified,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ByRootRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
|
||||||
|
// via a by-root request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
|
||||||
|
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
|
||||||
|
ByRootRequestDataColumnSidecarRequirements = []Requirement{
|
||||||
|
RequireValidFields,
|
||||||
|
RequireSidecarInclusionProven,
|
||||||
|
RequireSidecarKzgProofVerified,
|
||||||
|
}
|
||||||
|
|
||||||
// SpectestDataColumnSidecarRequirements is used by the forkchoice spectests when verifying data columns used in the on_block tests.
|
// SpectestDataColumnSidecarRequirements is used by the forkchoice spectests when verifying data columns used in the on_block tests.
|
||||||
SpectestDataColumnSidecarRequirements = requirementList(GossipDataColumnSidecarRequirements).excluding(
|
SpectestDataColumnSidecarRequirements = requirementList(GossipDataColumnSidecarRequirements).excluding(
|
||||||
RequireSidecarParentSeen, RequireSidecarParentValid)
|
RequireSidecarParentSeen, RequireSidecarParentValid)
|
||||||
|
|||||||
changelog/manu-peerdas-sync.md (new file, 2 lines)
@@ -0,0 +1,2 @@
|
|||||||
|
### Added
|
||||||
|
- Data column syncing for Fusaka.
|
||||||
@@ -42,7 +42,7 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
|
|||||||
cfg := &GlobalFlags{}
|
cfg := &GlobalFlags{}
|
||||||
|
|
||||||
if ctx.Bool(SubscribeToAllSubnets.Name) {
|
if ctx.Bool(SubscribeToAllSubnets.Name) {
|
||||||
log.Warn("Subscribing to All Attestation Subnets")
|
log.Warning("Subscribing to all attestation subnets")
|
||||||
cfg.SubscribeToAllSubnets = true
|
cfg.SubscribeToAllSubnets = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -59,6 +59,8 @@ var appFlags = []cli.Flag{
|
|||||||
flags.BlockBatchLimitBurstFactor,
|
flags.BlockBatchLimitBurstFactor,
|
||||||
flags.BlobBatchLimit,
|
flags.BlobBatchLimit,
|
||||||
flags.BlobBatchLimitBurstFactor,
|
flags.BlobBatchLimitBurstFactor,
|
||||||
|
flags.DataColumnBatchLimit,
|
||||||
|
flags.DataColumnBatchLimitBurstFactor,
|
||||||
flags.InteropMockEth1DataVotesFlag,
|
flags.InteropMockEth1DataVotesFlag,
|
||||||
flags.SlotsPerArchivedPoint,
|
flags.SlotsPerArchivedPoint,
|
||||||
flags.DisableDebugRPCEndpoints,
|
flags.DisableDebugRPCEndpoints,
|
||||||
@@ -84,6 +86,7 @@ var appFlags = []cli.Flag{
 	flags.BeaconDBPruning,
 	flags.PrunerRetentionEpochs,
 	flags.EnableBuilderSSZ,
+	flags.SubscribeAllDataSubnets,
 	cmd.MinimalConfigFlag,
 	cmd.E2EConfigFlag,
 	cmd.RPCMaxPageSizeFlag,
@@ -98,12 +98,15 @@ var appHelpFlagGroups = []flagGroup{
 			cmd.StaticPeers,
 			flags.BlobBatchLimit,
 			flags.BlobBatchLimitBurstFactor,
+			flags.DataColumnBatchLimit,
+			flags.DataColumnBatchLimitBurstFactor,
 			flags.BlockBatchLimit,
 			flags.BlockBatchLimitBurstFactor,
 			flags.MaxConcurrentDials,
 			flags.MinPeersPerSubnet,
 			flags.MinSyncPeers,
 			flags.SubscribeToAllSubnets,
+			flags.SubscribeAllDataSubnets,
 		},
 	},
 	{ // Flags relevant to storing data on disk and configuring the beacon chain database.
@@ -13,7 +13,6 @@ go_library(
         "roblob.go",
         "roblock.go",
         "rodatacolumn.go",
-        "rosidecar.go",
         "setters.go",
         "types.go",
     ],
@@ -54,7 +53,6 @@ go_test(
        "roblob_test.go",
        "roblock_test.go",
        "rodatacolumn_test.go",
-       "rosidecar_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -74,6 +72,5 @@ go_test(
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_prysmaticlabs_gohashtree//:go_default_library",
-       "@com_github_stretchr_testify//require:go_default_library",
    ],
)
@@ -1,6 +1,8 @@
 package blocks
 
 import (
+	"fmt"
+
 	consensus_types "github.com/OffchainLabs/prysm/v6/consensus-types"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
 	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
@@ -398,7 +400,7 @@ func (b *BeaconBlock) Proto() (proto.Message, error) { // nolint:gocognit
 			Body: body,
 		}, nil
 	default:
-		return nil, errors.New("unsupported beacon block version")
+		return nil, fmt.Errorf("unsupported beacon block version: %s", version.String(b.version))
 	}
 }
 
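Replacing the static errors.New with fmt.Errorf puts the rejected fork version into the error text, which is the detail a caller needs when an unsupported block turns up. A tiny illustration, with a stand-in stringer instead of the real version.String:

package example

import "fmt"

// versionString is a stand-in for the real version.String helper.
func versionString(v int) string {
	names := map[int]string{4: "deneb", 5: "electra", 7: "fulu"}
	if n, ok := names[v]; ok {
		return n
	}
	return fmt.Sprintf("unknown(%d)", v)
}

// protoError mirrors the new default branch: the message now says which version
// was rejected instead of a bare "unsupported beacon block version".
func protoError(v int) error {
	return fmt.Errorf("unsupported beacon block version: %s", versionString(v))
}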
@@ -96,16 +96,17 @@ func (s ROBlockSlice) Len() int {
 	return len(s)
 }
 
-// BlockWithROBlobs is a wrapper that collects the block and blob values together.
+// BlockWithROSidecars is a wrapper that collects the block and blob values together.
 // This is helpful because these values are collated from separate RPC requests.
-type BlockWithROBlobs struct {
+type BlockWithROSidecars struct {
 	Block ROBlock
 	Blobs []ROBlob
+	Columns []VerifiedRODataColumn
 }
 
 // BlockWithROBlobsSlice gives convenient access to getting a slice of just the ROBlocks,
 // and defines sorting helpers.
-type BlockWithROBlobsSlice []BlockWithROBlobs
+type BlockWithROBlobsSlice []BlockWithROSidecars
 
 func (s BlockWithROBlobsSlice) ROBlocks() []ROBlock {
 	r := make([]ROBlock, len(s))
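With the rename, the wrapper now carries both sidecar kinds next to the block, since blobs (pre-Fulu) and data columns (Fulu onward) arrive over different RPC requests than the block itself. A rough sketch of how such a wrapper is collated, with placeholder types and a hypothetical helper that are not part of this diff:

package example

// Placeholder types standing in for ROBlock, ROBlob and VerifiedRODataColumn.
type (
	block         struct{ root [32]byte }
	blobSidecar   struct{ index uint64 }
	columnSidecar struct{ index uint64 }
)

// blockWithSidecars mirrors BlockWithROSidecars: one block plus whichever sidecar
// kind applies to its fork, collated from separate requests.
type blockWithSidecars struct {
	Block   block
	Blobs   []blobSidecar
	Columns []columnSidecar
}

// collate is a hypothetical helper showing the collation step: the block comes from
// one request, its sidecars from another, and they are zipped together afterwards.
func collate(b block, blobs []blobSidecar, cols []columnSidecar) blockWithSidecars {
	return blockWithSidecars{Block: b, Blobs: blobs, Columns: cols}
}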
@@ -66,16 +66,16 @@ func (dc *RODataColumn) Slot() primitives.Slot {
 	return dc.SignedBlockHeader.Header.Slot
 }
 
-// ParentRoot returns the parent root of the data column sidecar.
-func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
-	return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
-}
-
 // ProposerIndex returns the proposer index of the data column sidecar.
 func (dc *RODataColumn) ProposerIndex() primitives.ValidatorIndex {
 	return dc.SignedBlockHeader.Header.ProposerIndex
 }
 
+// ParentRoot returns the parent root of the data column sidecar.
+func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
+	return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
+}
+
 // VerifiedRODataColumn represents an RODataColumn that has undergone full verification (eg block sig, inclusion proof, commitment check).
 type VerifiedRODataColumn struct {
 	RODataColumn
@@ -1,96 +0,0 @@
-package blocks
-
-import (
-	"github.com/pkg/errors"
-)
-
-// ROSidecar represents a read-only sidecar with its block root.
-type ROSidecar struct {
-	blob       *ROBlob
-	dataColumn *RODataColumn
-}
-
-var (
-	errBlobNeeded       = errors.New("blob sidecar needed")
-	errDataColumnNeeded = errors.New("data column sidecar needed")
-)
-
-// NewSidecarFromBlobSidecar creates a new read-only (generic) sidecar from a read-only blob sidecar.
-func NewSidecarFromBlobSidecar(blob ROBlob) ROSidecar {
-	return ROSidecar{blob: &blob}
-}
-
-// NewSidecarFromDataColumnSidecar creates a new read-only (generic) sidecar from a read-only data column sidecar.
-func NewSidecarFromDataColumnSidecar(dataColumn RODataColumn) ROSidecar {
-	return ROSidecar{dataColumn: &dataColumn}
-}
-
-// NewSidecarsFromBlobSidecars creates a new slice of read-only (generic) sidecars from a slice of read-only blobs sidecars.
-func NewSidecarsFromBlobSidecars(blobSidecars []ROBlob) []ROSidecar {
-	sidecars := make([]ROSidecar, 0, len(blobSidecars))
-	for _, blob := range blobSidecars {
-		blobSidecar := ROSidecar{blob: &blob} // #nosec G601
-		sidecars = append(sidecars, blobSidecar)
-	}
-
-	return sidecars
-}
-
-// NewSidecarsFromDataColumnSidecars creates a new slice of read-only (generic) sidecars from a slice of read-only data column sidecars.
-func NewSidecarsFromDataColumnSidecars(dataColumnSidecars []RODataColumn) []ROSidecar {
-	sidecars := make([]ROSidecar, 0, len(dataColumnSidecars))
-	for _, dataColumn := range dataColumnSidecars {
-		dataColumnSidecar := ROSidecar{dataColumn: &dataColumn} // #nosec G601
-		sidecars = append(sidecars, dataColumnSidecar)
-	}
-
-	return sidecars
-}
-
-// Blob returns the blob sidecar.
-func (sc *ROSidecar) Blob() (ROBlob, error) {
-	if sc.blob == nil {
-		return ROBlob{}, errBlobNeeded
-	}
-
-	return *sc.blob, nil
-}
-
-// DataColumn returns the data column sidecar.
-func (sc *ROSidecar) DataColumn() (RODataColumn, error) {
-	if sc.dataColumn == nil {
-		return RODataColumn{}, errDataColumnNeeded
-	}
-
-	return *sc.dataColumn, nil
-}
-
-// BlobSidecarsFromSidecars creates a new slice of read-only blobs sidecars from a slice of read-only (generic) sidecars.
-func BlobSidecarsFromSidecars(sidecars []ROSidecar) ([]ROBlob, error) {
-	blobSidecars := make([]ROBlob, 0, len(sidecars))
-	for _, sidecar := range sidecars {
-		blob, err := sidecar.Blob()
-		if err != nil {
-			return nil, errors.Wrap(err, "blob")
-		}
-
-		blobSidecars = append(blobSidecars, blob)
-	}
-
-	return blobSidecars, nil
-}
-
-// DataColumnSidecarsFromSidecars creates a new slice of read-only data column sidecars from a slice of read-only (generic) sidecars.
-func DataColumnSidecarsFromSidecars(sidecars []ROSidecar) ([]RODataColumn, error) {
-	dataColumnSidecars := make([]RODataColumn, 0, len(sidecars))
-	for _, sidecar := range sidecars {
-		dataColumn, err := sidecar.DataColumn()
-		if err != nil {
-			return nil, errors.Wrap(err, "data column")
-		}
-
-		dataColumnSidecars = append(dataColumnSidecars, dataColumn)
-	}
-
-	return dataColumnSidecars, nil
-}
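The deleted file implemented a small tagged-union wrapper: exactly one of blob or data column was set, and asking for the other kind returned a sentinel error. With BlockWithROSidecars now holding explicit Blobs and Columns slices, that indirection is no longer needed. For reference, the accessor pattern it used, reduced to a toy example with stand-in types:

package example

import "errors"

var errBlobNeeded = errors.New("blob sidecar needed")

// oneOf sketches the tagged-union style the removed ROSidecar used: exactly one
// pointer is set, and asking for the other kind returns a sentinel error.
type oneOf struct {
	blob   *string // stand-ins for *ROBlob / *RODataColumn
	column *string
}

func (o *oneOf) Blob() (string, error) {
	if o.blob == nil {
		return "", errBlobNeeded
	}
	return *o.blob, nil
}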
@@ -1,109 +0,0 @@
-package blocks
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestNewSidecarFromBlobSidecar(t *testing.T) {
-	blob := ROBlob{}
-	sidecar := NewSidecarFromBlobSidecar(blob)
-
-	// Check that the blob is set
-	retrievedBlob, err := sidecar.Blob()
-	require.NoError(t, err)
-	require.Equal(t, blob, retrievedBlob)
-
-	// Check that data column is not set
-	_, err = sidecar.DataColumn()
-	require.ErrorIs(t, err, errDataColumnNeeded)
-}
-
-func TestNewSidecarFromDataColumnSidecar(t *testing.T) {
-	dataColumn := RODataColumn{}
-	sidecar := NewSidecarFromDataColumnSidecar(dataColumn)
-
-	// Check that the data column is set
-	retrievedDataColumn, err := sidecar.DataColumn()
-	require.NoError(t, err)
-	require.Equal(t, dataColumn, retrievedDataColumn)
-
-	// Check that blob is not set
-	_, err = sidecar.Blob()
-	require.ErrorIs(t, err, errBlobNeeded)
-}
-
-func TestNewSidecarsFromBlobSidecars(t *testing.T) {
-	blobSidecars := []ROBlob{{}, {}}
-	sidecars := NewSidecarsFromBlobSidecars(blobSidecars)
-
-	require.Equal(t, len(blobSidecars), len(sidecars))
-
-	for i, sidecar := range sidecars {
-		retrievedBlob, err := sidecar.Blob()
-		require.NoError(t, err)
-		require.Equal(t, blobSidecars[i], retrievedBlob)
-	}
-}
-
-func TestNewSidecarsFromDataColumnSidecars(t *testing.T) {
-	dataColumnSidecars := []RODataColumn{{}, {}}
-	sidecars := NewSidecarsFromDataColumnSidecars(dataColumnSidecars)
-
-	require.Equal(t, len(dataColumnSidecars), len(sidecars))
-
-	for i, sidecar := range sidecars {
-		retrievedDataColumn, err := sidecar.DataColumn()
-		require.NoError(t, err)
-		require.Equal(t, dataColumnSidecars[i], retrievedDataColumn)
-	}
-}
-
-func TestBlobSidecarsFromSidecars(t *testing.T) {
-	// Create sidecars with blobs
-	blobSidecars := []ROBlob{{}, {}}
-	sidecars := NewSidecarsFromBlobSidecars(blobSidecars)
-
-	// Convert back to blob sidecars
-	retrievedBlobSidecars, err := BlobSidecarsFromSidecars(sidecars)
-	require.NoError(t, err)
-	require.Equal(t, len(blobSidecars), len(retrievedBlobSidecars))
-
-	for i, blob := range retrievedBlobSidecars {
-		require.Equal(t, blobSidecars[i], blob)
-	}
-
-	// Test with a mix of sidecar types
-	mixedSidecars := []ROSidecar{
-		NewSidecarFromBlobSidecar(ROBlob{}),
-		NewSidecarFromDataColumnSidecar(RODataColumn{}),
-	}
-
-	_, err = BlobSidecarsFromSidecars(mixedSidecars)
-	require.Error(t, err)
-}
-
-func TestDataColumnSidecarsFromSidecars(t *testing.T) {
-	// Create sidecars with data columns
-	dataColumnSidecars := []RODataColumn{{}, {}}
-	sidecars := NewSidecarsFromDataColumnSidecars(dataColumnSidecars)
-
-	// Convert back to data column sidecars
-	retrievedDataColumnSidecars, err := DataColumnSidecarsFromSidecars(sidecars)
-	require.NoError(t, err)
-	require.Equal(t, len(dataColumnSidecars), len(retrievedDataColumnSidecars))
-
-	for i, dataColumn := range retrievedDataColumnSidecars {
-		require.Equal(t, dataColumnSidecars[i], dataColumn)
-	}
-
-	// Test with a mix of sidecar types
-	mixedSidecars := []ROSidecar{
-		NewSidecarFromDataColumnSidecar(RODataColumn{}),
-		NewSidecarFromBlobSidecar(ROBlob{}),
-	}
-
-	_, err = DataColumnSidecarsFromSidecars(mixedSidecars)
-	require.Error(t, err)
-}
@@ -54,6 +54,12 @@ func WithParentRoot(root [fieldparams.RootLength]byte) FuluBlockGeneratorOption
 	}
 }
 
+func WithSlot(slot primitives.Slot) FuluBlockGeneratorOption {
+	return func(g *fuluBlockGenerator) {
+		g.slot = slot
+	}
+}
+
 func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...FuluBlockGeneratorOption) (blocks.ROBlock, []blocks.RODataColumn, []blocks.VerifiedRODataColumn) {
 	generator := &fuluBlockGenerator{blobCount: blobCount}
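WithSlot follows the functional-options pattern already used by WithParentRoot: each option is a closure that mutates the generator before the test block is built. A minimal self-contained sketch of the pattern, with invented generator fields for illustration:

package example

type testBlockGenerator struct {
	slot      uint64
	blobCount int
}

// option mirrors FuluBlockGeneratorOption: a closure that tweaks the generator.
type option func(*testBlockGenerator)

// withSlot has the same shape as the new WithSlot helper in the hunk above.
func withSlot(slot uint64) option {
	return func(g *testBlockGenerator) { g.slot = slot }
}

// newGenerator applies options in order over sensible defaults.
func newGenerator(blobCount int, opts ...option) *testBlockGenerator {
	g := &testBlockGenerator{blobCount: blobCount}
	for _, opt := range opts {
		opt(g)
	}
	return g
}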