mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 06:18:05 -05:00

Compare commits
3 Commits
peerdas-pe ... state-diff
| Author | SHA1 | Date |
|---|---|---|
|  | 3aa7998e32 |  |
|  | f71de7ed1f |  |
|  | cf76ea4852 |  |
@@ -12,6 +12,47 @@ import (
	"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
)

// ConvertToAltair converts a Phase 0 beacon state to an Altair beacon state.
func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
	epoch := time.CurrentEpoch(state)

	numValidators := state.NumValidators()
	s := &ethpb.BeaconStateAltair{
		GenesisTime:           state.GenesisTime(),
		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
		Slot:                  state.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: state.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().AltairForkVersion,
			Epoch:           epoch,
		},
		LatestBlockHeader:           state.LatestBlockHeader(),
		BlockRoots:                  state.BlockRoots(),
		StateRoots:                  state.StateRoots(),
		HistoricalRoots:             state.HistoricalRoots(),
		Eth1Data:                    state.Eth1Data(),
		Eth1DataVotes:               state.Eth1DataVotes(),
		Eth1DepositIndex:            state.Eth1DepositIndex(),
		Validators:                  state.Validators(),
		Balances:                    state.Balances(),
		RandaoMixes:                 state.RandaoMixes(),
		Slashings:                   state.Slashings(),
		PreviousEpochParticipation:  make([]byte, numValidators),
		CurrentEpochParticipation:   make([]byte, numValidators),
		JustificationBits:           state.JustificationBits(),
		PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint:  state.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint:         state.FinalizedCheckpoint(),
		InactivityScores:            make([]uint64, numValidators),
	}

	newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
	if err != nil {
		return nil, err
	}
	return newState, nil
}

// UpgradeToAltair upgrades the input state and returns the Altair-version state.
//
// Spec code:
@@ -64,39 +105,7 @@ import (
//	post.next_sync_committee = get_next_sync_committee(post)
//	return post
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
	epoch := time.CurrentEpoch(state)

	numValidators := state.NumValidators()
	s := &ethpb.BeaconStateAltair{
		GenesisTime:           state.GenesisTime(),
		GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
		Slot:                  state.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: state.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().AltairForkVersion,
			Epoch:           epoch,
		},
		LatestBlockHeader:           state.LatestBlockHeader(),
		BlockRoots:                  state.BlockRoots(),
		StateRoots:                  state.StateRoots(),
		HistoricalRoots:             state.HistoricalRoots(),
		Eth1Data:                    state.Eth1Data(),
		Eth1DataVotes:               state.Eth1DataVotes(),
		Eth1DepositIndex:            state.Eth1DepositIndex(),
		Validators:                  state.Validators(),
		Balances:                    state.Balances(),
		RandaoMixes:                 state.RandaoMixes(),
		Slashings:                   state.Slashings(),
		PreviousEpochParticipation:  make([]byte, numValidators),
		CurrentEpochParticipation:   make([]byte, numValidators),
		JustificationBits:           state.JustificationBits(),
		PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint:  state.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint:         state.FinalizedCheckpoint(),
		InactivityScores:            make([]uint64, numValidators),
	}

	newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
	newState, err := ConvertToAltair(state)
	if err != nil {
		return nil, err
	}
@@ -15,6 +15,129 @@ import (
	"github.com/pkg/errors"
)

// ConvertToElectra converts a Deneb beacon state to an Electra beacon state.
func ConvertToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
	currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	nextSyncCommittee, err := beaconState.NextSyncCommittee()
	if err != nil {
		return nil, err
	}
	prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
	if err != nil {
		return nil, err
	}
	currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
	if err != nil {
		return nil, err
	}
	inactivityScores, err := beaconState.InactivityScores()
	if err != nil {
		return nil, err
	}
	payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
	if err != nil {
		return nil, err
	}
	txRoot, err := payloadHeader.TransactionsRoot()
	if err != nil {
		return nil, err
	}
	wdRoot, err := payloadHeader.WithdrawalsRoot()
	if err != nil {
		return nil, err
	}
	wi, err := beaconState.NextWithdrawalIndex()
	if err != nil {
		return nil, err
	}
	vi, err := beaconState.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, err
	}
	summaries, err := beaconState.HistoricalSummaries()
	if err != nil {
		return nil, err
	}
	excessBlobGas, err := payloadHeader.ExcessBlobGas()
	if err != nil {
		return nil, err
	}
	blobGasUsed, err := payloadHeader.BlobGasUsed()
	if err != nil {
		return nil, err
	}

	s := &ethpb.BeaconStateElectra{
		GenesisTime:           beaconState.GenesisTime(),
		GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
		Slot:                  beaconState.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: beaconState.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().ElectraForkVersion,
			Epoch:           time.CurrentEpoch(beaconState),
		},
		LatestBlockHeader:           beaconState.LatestBlockHeader(),
		BlockRoots:                  beaconState.BlockRoots(),
		StateRoots:                  beaconState.StateRoots(),
		HistoricalRoots:             beaconState.HistoricalRoots(),
		Eth1Data:                    beaconState.Eth1Data(),
		Eth1DataVotes:               beaconState.Eth1DataVotes(),
		Eth1DepositIndex:            beaconState.Eth1DepositIndex(),
		Validators:                  beaconState.Validators(),
		Balances:                    beaconState.Balances(),
		RandaoMixes:                 beaconState.RandaoMixes(),
		Slashings:                   beaconState.Slashings(),
		PreviousEpochParticipation:  prevEpochParticipation,
		CurrentEpochParticipation:   currentEpochParticipation,
		JustificationBits:           beaconState.JustificationBits(),
		PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint:  beaconState.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint:         beaconState.FinalizedCheckpoint(),
		InactivityScores:            inactivityScores,
		CurrentSyncCommittee:        currentSyncCommittee,
		NextSyncCommittee:           nextSyncCommittee,
		LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
			ParentHash:       payloadHeader.ParentHash(),
			FeeRecipient:     payloadHeader.FeeRecipient(),
			StateRoot:        payloadHeader.StateRoot(),
			ReceiptsRoot:     payloadHeader.ReceiptsRoot(),
			LogsBloom:        payloadHeader.LogsBloom(),
			PrevRandao:       payloadHeader.PrevRandao(),
			BlockNumber:      payloadHeader.BlockNumber(),
			GasLimit:         payloadHeader.GasLimit(),
			GasUsed:          payloadHeader.GasUsed(),
			Timestamp:        payloadHeader.Timestamp(),
			ExtraData:        payloadHeader.ExtraData(),
			BaseFeePerGas:    payloadHeader.BaseFeePerGas(),
			BlockHash:        payloadHeader.BlockHash(),
			TransactionsRoot: txRoot,
			WithdrawalsRoot:  wdRoot,
			ExcessBlobGas:    excessBlobGas,
			BlobGasUsed:      blobGasUsed,
		},
		NextWithdrawalIndex:          wi,
		NextWithdrawalValidatorIndex: vi,
		HistoricalSummaries:          summaries,

		DepositRequestsStartIndex:  params.BeaconConfig().UnsetDepositRequestsStartIndex,
		DepositBalanceToConsume:    0,
		EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
		PendingDeposits:            make([]*ethpb.PendingDeposit, 0),
		PendingPartialWithdrawals:  make([]*ethpb.PendingPartialWithdrawal, 0),
		PendingConsolidations:      make([]*ethpb.PendingConsolidation, 0),
	}

	// need to cast the beaconState to use in helper functions
	post, err := state_native.InitializeFromProtoUnsafeElectra(s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
	}
	return post, nil
}

// UpgradeToElectra upgrades a generic input state and returns the Electra-version state.
//
// nolint:dupword
@@ -7,6 +7,7 @@ go_library(
    visibility = [
        "//beacon-chain:__subpackages__",
        "//cmd/prysmctl/testnet:__pkg__",
        "//consensus-types/hdiff:__subpackages__",
        "//testing/spectest:__subpackages__",
        "//validator/client:__pkg__",
    ],
@@ -7,7 +7,6 @@ go_library(
        "info.go",
        "metrics.go",
        "p2p_interface.go",
        "peer_sampling.go",
        "reconstruction.go",
        "validator.go",
        "verification.go",
@@ -45,7 +44,6 @@ go_test(
        "das_core_test.go",
        "info_test.go",
        "p2p_interface_test.go",
        "peer_sampling_test.go",
        "reconstruction_test.go",
        "utils_test.go",
        "validator_test.go",
@@ -1,56 +0,0 @@
package peerdas

import (
	"math/big"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
	// Retrieve the columns count.
	columnsCount := params.BeaconConfig().NumberOfColumns

	// If half of the columns are missing, we are able to reconstruct the data.
	// If half of the columns + 1 are missing, we are not able to reconstruct the data.
	// This is the smallest worst case.
	worstCaseMissing := columnsCount/2 + 1

	// Compute the false positive threshold.
	falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

	var sampleCount uint64

	// Finally, compute the extended sample count.
	for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
		if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
			break
		}
	}

	return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
	denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
	denominator := new(big.Float).SetInt(denominatorInt)

	rBig := big.NewFloat(0)

	for i := uint64(0); i < k+1; i++ {
		a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
		b := new(big.Int).Binomial(int64(M-n), int64(N-i))
		numeratorInt := new(big.Int).Mul(a, b)
		numerator := new(big.Float).SetInt(numeratorInt)
		item := new(big.Float).Quo(numerator, denominator)
		rBig.Add(rBig, item)
	}

	r, _ := rBig.Float64()

	return r
}
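For intuition, here is a hedged usage sketch of the removed function as it behaved before this deletion. The input values and the expected result come from the test table in peer_sampling_test.go below; NumberOfColumns = 128 is an assumption consistent with the HypergeomCDF(5, 128, 65, 16) call in those tests.

package main

import (
	"fmt"

	// Pre-deletion import path, as it appears in this diff.
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
)

func main() {
	// With 128 columns, worstCaseMissing = 128/2 + 1 = 65. Sampling 16 columns
	// with 0 allowed failures fixes the false-positive threshold; tolerating
	// 2 failed samples at the same threshold requires querying 24 columns
	// (row allowedMissings=2 in the test table below).
	fmt.Println(peerdas.ExtendedSampleCount(16, 2)) // 24
}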
@@ -1,60 +0,0 @@
package peerdas_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestExtendedSampleCount(t *testing.T) {
	const samplesPerSlot = 16

	testCases := []struct {
		name                string
		allowedMissings     uint64
		extendedSampleCount uint64
	}{
		{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
		{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
		{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
		{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
		{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
		{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
		{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
		{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
		{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
		{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
		{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
		{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
		{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
		{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
		{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
		{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
		{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
		{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
		{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
		{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
		{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
			require.Equal(t, tc.extendedSampleCount, result)
		})
	}
}

func TestHypergeomCDF(t *testing.T) {
	// Hypergeometric CDF (https://en.wikipedia.org/wiki/Hypergeometric_distribution)
	// with population M=128, n=65 successes in the population, N=16 draws,
	// evaluated at k=5 successes in the sample.
	const (
		expected = 0.0796665913283742
		margin   = 0.000001
	)

	actual := peerdas.HypergeomCDF(5, 128, 65, 16)
	require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}
@@ -25,6 +25,9 @@ go_library(
        "migration_state_validators.go",
        "schema.go",
        "state.go",
        "state_diff.go",
        "state_diff_cache.go",
        "state_diff_helpers.go",
        "state_summary.go",
        "state_summary_cache.go",
        "utils.go",
@@ -44,6 +47,7 @@ go_library(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/hdiff:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -51,6 +55,7 @@ go_library(
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//math:go_default_library",
        "//monitoring/progress:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
@@ -94,6 +99,7 @@ go_test(
        "migration_archived_index_test.go",
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
        "state_diff_test.go",
        "state_summary_test.go",
        "state_test.go",
        "utils_test.go",
@@ -116,6 +122,7 @@ go_test(
        "//consensus-types/light-client:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -91,6 +91,7 @@ type Store struct {
	blockCache          *ristretto.Cache[string, interfaces.ReadOnlySignedBeaconBlock]
	validatorEntryCache *ristretto.Cache[[]byte, *ethpb.Validator]
	stateSummaryCache   *stateSummaryCache
	stateDiffCache      *stateDiffCache
	ctx                 context.Context
}

@@ -112,6 +113,7 @@ var Buckets = [][]byte{
	lightClientUpdatesBucket,
	lightClientBootstrapBucket,
	lightClientSyncCommitteeBucket,
	stateDiffBucket,
	// Indices buckets.
	blockSlotIndicesBucket,
	stateSlotIndicesBucket,
@@ -182,6 +184,7 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
		blockCache:          blockCache,
		validatorEntryCache: validatorCache,
		stateSummaryCache:   newStateSummaryCache(),
		stateDiffCache:      nil,
		ctx:                 ctx,
	}
	for _, o := range opts {
@@ -200,6 +203,14 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
		return nil, err
	}

	if features.Get().EnableStateDiff {
		sdCache, err := newStateDiffCache(kv)
		if err != nil {
			return nil, err
		}
		kv.stateDiffCache = sdCache
	}

	return kv, nil
}

@@ -16,6 +16,7 @@ var (
	stateValidatorsBucket = []byte("state-validators")
	feeRecipientBucket    = []byte("fee-recipient")
	registrationBucket    = []byte("registration")
	stateDiffBucket       = []byte("state-diff")

	// Light Client Updates Bucket
	lightClientUpdatesBucket = []byte("light-client-updates")
beacon-chain/db/kv/state_diff.go (new file, 232 lines)
@@ -0,0 +1,232 @@
package kv

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/hdiff"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

const (
	stateSuffix     = "_s"
	validatorSuffix = "_v"
	balancesSuffix  = "_b"
)

/*
We use a level-based approach to save state diffs. The levels are 0-6, where each level corresponds to an exponent of 2 (exponents[lvl]).
The data at level 0 is saved every 2**exponents[0] slots and always contains a full state snapshot that is used as a base for the deltas saved at the other levels.
*/

// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
	_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
	defer span.End()

	if st == nil {
		return errors.New("state is nil")
	}

	slot := st.Slot()
	offset := s.getOffset()
	if uint64(slot) < offset {
		return ErrSlotBeforeOffset
	}

	// Find the level at which to save the state.
	lvl := computeLevel(offset, slot)
	if lvl == -1 {
		return nil
	}

	// Save a full snapshot if the level is 0.
	if lvl == 0 {
		return s.saveFullSnapshot(st)
	}

	// Get the anchor state to compute the diff from.
	anchorState, err := s.getAnchorState(offset, lvl, slot)
	if err != nil {
		return err
	}

	err = s.saveHdiff(lvl, anchorState, st)
	if err != nil {
		return err
	}

	return nil
}

// stateByDiff retrieves the full state for a given slot.
func (s *Store) stateByDiff(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
	offset := s.getOffset()
	if uint64(slot) < offset {
		return nil, ErrSlotBeforeOffset
	}

	snapshot, diffChain, err := s.getBaseAndDiffChain(offset, slot)
	if err != nil {
		return nil, err
	}

	for _, diff := range diffChain {
		snapshot, err = hdiff.ApplyDiff(ctx, snapshot, diff)
		if err != nil {
			return nil, err
		}
	}

	return snapshot, nil
}

// saveHdiff computes the diff between the anchor state and the current state and saves it to the database.
func (s *Store) saveHdiff(lvl int, anchor, st state.ReadOnlyBeaconState) error {
	slot := uint64(st.Slot())
	key := makeKey(lvl, slot)

	diff, err := hdiff.Diff(anchor, st)
	if err != nil {
		return err
	}

	err = s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}
		buf := append(key, stateSuffix...)
		if err := bucket.Put(buf, diff.StateDiff); err != nil {
			return err
		}
		buf = append(key, validatorSuffix...)
		if err := bucket.Put(buf, diff.ValidatorDiffs); err != nil {
			return err
		}
		buf = append(key, balancesSuffix...)
		if err := bucket.Put(buf, diff.BalancesDiff); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Save the full state to the cache (if not the last level).
	if lvl != len(params.StateHierarchyExponents())-1 {
		err = s.stateDiffCache.setAnchor(lvl, st)
		if err != nil {
			return err
		}
	}

	return nil
}

// saveFullSnapshot saves the full level-0 state snapshot to the database.
func (s *Store) saveFullSnapshot(st state.ReadOnlyBeaconState) error {
	slot := uint64(st.Slot())
	key := makeKey(0, slot)
	stateBytes, err := st.MarshalSSZ()
	if err != nil {
		return err
	}
	// Prepend the version key to the value.
	enc, err := addKey(st.Version(), stateBytes)
	if err != nil {
		return err
	}

	err = s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}

		if err := bucket.Put(key, enc); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}
	// Save the full state to the cache, and invalidate the other levels.
	s.stateDiffCache.clearAnchors()
	err = s.stateDiffCache.setAnchor(0, st)
	if err != nil {
		return err
	}

	return nil
}

func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
	key := makeKey(lvl, slot)
	var stateDiff []byte
	var validatorDiff []byte
	var balancesDiff []byte

	err := s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}
		buf := append(key, stateSuffix...)
		stateDiff = bucket.Get(buf)
		if stateDiff == nil {
			return errors.New("state diff not found")
		}
		buf = append(key, validatorSuffix...)
		validatorDiff = bucket.Get(buf)
		if validatorDiff == nil {
			return errors.New("validator diff not found")
		}
		buf = append(key, balancesSuffix...)
		balancesDiff = bucket.Get(buf)
		if balancesDiff == nil {
			return errors.New("balances diff not found")
		}
		return nil
	})

	if err != nil {
		return hdiff.HdiffBytes{}, err
	}

	return hdiff.HdiffBytes{
		StateDiff:      stateDiff,
		ValidatorDiffs: validatorDiff,
		BalancesDiff:   balancesDiff,
	}, nil
}

func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
	key := makeKey(0, slot)
	var enc []byte

	err := s.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bolt.ErrBucketNotFound
		}
		enc = bucket.Get(key)
		if enc == nil {
			return errors.New("state not found")
		}
		return nil
	})

	if err != nil {
		return nil, err
	}

	return s.decodeStateSnapshot(enc)
}
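To make the level scheme concrete, here is a minimal standalone sketch of the boundary arithmetic described in the comment above. The exponent values [21, 18, 16, 13, 11, 9, 5] are an assumption inferred from TestStateDiff_ComputeLevel later in this diff; the real values come from params.StateHierarchyExponents().

package main

import "fmt"

// Exponents per level, coarsest first; inferred from the tests in this diff.
var exponents = []uint64{21, 18, 16, 13, 11, 9, 5}

// computeLevel mirrors the helper in this diff: a slot's level is the first
// (coarsest) power-of-two boundary that divides its distance from the offset.
func computeLevel(offset, slot uint64) int {
	rel := slot - offset
	for i, exp := range exponents {
		if rel%(1<<exp) == 0 {
			return i
		}
	}
	return -1 // not on any boundary: this slot is not stored
}

func main() {
	fmt.Println(computeLevel(0, 1<<21))          // 0: full snapshot
	fmt.Println(computeLevel(0, 1<<18))          // 1: diff against the level-0 snapshot
	fmt.Println(computeLevel(0, (1<<11)+(1<<9))) // 5: diff against the level-4 anchor
	fmt.Println(computeLevel(0, (1<<5)+1))       // -1: skipped
}

Reading a state back then walks the other way: load the level-0 snapshot, then apply each finer-level diff in order, which is exactly what stateByDiff and getBaseAndDiffChain do above.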
beacon-chain/db/kv/state_diff_cache.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package kv

import (
	"encoding/binary"
	"errors"
	"sync"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"go.etcd.io/bbolt"
)

type stateDiffCache struct {
	sync.RWMutex
	anchors []state.ReadOnlyBeaconState
	offset  uint64
}

func newStateDiffCache(s *Store) (*stateDiffCache, error) {
	var offset uint64

	err := s.db.View(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bbolt.ErrBucketNotFound
		}

		offsetBytes := bucket.Get([]byte("offset"))
		if offsetBytes == nil {
			return errors.New("state diff cache: offset not found")
		}
		offset = binary.LittleEndian.Uint64(offsetBytes)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return &stateDiffCache{
		anchors: make([]state.ReadOnlyBeaconState, len(params.StateHierarchyExponents())),
		offset:  offset,
	}, nil
}

func (c *stateDiffCache) getAnchor(level int) state.ReadOnlyBeaconState {
	c.RLock()
	defer c.RUnlock()
	return c.anchors[level]
}

func (c *stateDiffCache) setAnchor(level int, anchor state.ReadOnlyBeaconState) error {
	c.Lock()
	defer c.Unlock()
	if level >= len(c.anchors) || level < 0 {
		return errors.New("state diff cache: anchor level out of range")
	}
	c.anchors[level] = anchor
	return nil
}

func (c *stateDiffCache) getOffset() uint64 {
	c.RLock()
	defer c.RUnlock()
	return c.offset
}

func (c *stateDiffCache) setOffset(offset uint64) {
	c.Lock()
	defer c.Unlock()
	c.offset = offset
}

func (c *stateDiffCache) clearAnchors() {
	c.Lock()
	defer c.Unlock()
	c.anchors = make([]state.ReadOnlyBeaconState, len(params.StateHierarchyExponents()))
}
beacon-chain/db/kv/state_diff_helpers.go (new file, 234 lines)
@@ -0,0 +1,234 @@
package kv

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/hdiff"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/math"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"go.etcd.io/bbolt"
)

var (
	offsetKey           = []byte("offset")
	ErrSlotBeforeOffset = errors.New("slot is before root offset")
)

func makeKey(level int, slot uint64) []byte {
	buf := make([]byte, 16)
	buf[0] = byte(level)
	binary.LittleEndian.PutUint64(buf[1:], slot)
	return buf
}

func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (anchor state.ReadOnlyBeaconState, err error) {
	if lvl <= 0 || lvl >= len(params.StateHierarchyExponents()) {
		return nil, errors.New("invalid value for level")
	}

	relSlot := uint64(slot) - offset
	prevExp := params.StateHierarchyExponents()[lvl-1]
	span := math.PowerOf2(prevExp)
	anchorSlot := primitives.Slot((relSlot / span * span) + offset)

	// anchorLvl can be [0, lvl-1].
	anchorLvl := computeLevel(offset, anchorSlot)
	if anchorLvl == -1 {
		return nil, errors.New("could not compute anchor level")
	}

	// Check whether we have the anchor in the cache.
	anchor = s.stateDiffCache.getAnchor(anchorLvl)
	if anchor != nil {
		return anchor, nil
	}

	// If not, load it from the database.
	anchor, err = s.stateByDiff(context.Background(), anchorSlot)
	if err != nil {
		return nil, err
	}

	// Save it in the cache.
	err = s.stateDiffCache.setAnchor(anchorLvl, anchor)
	if err != nil {
		return nil, err
	}
	return anchor, nil
}

// computeLevel computes the level in the diff tree. Returns -1 in case the slot should not be in the tree.
func computeLevel(offset uint64, slot primitives.Slot) int {
	rel := uint64(slot) - offset
	for i, exp := range params.StateHierarchyExponents() {
		span := math.PowerOf2(exp)
		if rel%span == 0 {
			return i
		}
	}
	// If rel isn't on any of the boundaries, we should skip saving it.
	return -1
}

func (s *Store) setOffset(slot primitives.Slot) error {
	err := s.db.Update(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bbolt.ErrBucketNotFound
		}

		offsetBytes := bucket.Get(offsetKey)
		if offsetBytes != nil {
			return fmt.Errorf("offset already set to %d", binary.LittleEndian.Uint64(offsetBytes))
		}

		offsetBytes = make([]byte, 8)
		binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Save the offset in the cache.
	s.stateDiffCache.setOffset(uint64(slot))
	return nil
}

func (s *Store) getOffset() uint64 {
	return s.stateDiffCache.getOffset()
}

func keyForSnapshot(v int) []byte {
	switch v {
	case version.Fulu:
		return fuluKey
	case version.Electra:
		return ElectraKey
	case version.Deneb:
		return denebKey
	case version.Capella:
		return capellaKey
	case version.Bellatrix:
		return bellatrixKey
	case version.Altair:
		return altairKey
	default:
		// Phase0
		return []byte{}
	}
}

func addKey(v int, bytes []byte) ([]byte, error) {
	key := keyForSnapshot(v)
	enc := make([]byte, len(key)+len(bytes))
	copy(enc, key)
	copy(enc[len(key):], bytes)
	return enc, nil
}

func (s *Store) decodeStateSnapshot(enc []byte) (state.BeaconState, error) {
	switch {
	case hasFuluKey(enc):
		var fuluState ethpb.BeaconStateFulu
		if err := fuluState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeFulu(&fuluState)
	case HasElectraKey(enc):
		var electraState ethpb.BeaconStateElectra
		if err := electraState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeElectra(&electraState)
	case hasDenebKey(enc):
		var denebState ethpb.BeaconStateDeneb
		if err := denebState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeDeneb(&denebState)
	case hasCapellaKey(enc):
		var capellaState ethpb.BeaconStateCapella
		if err := capellaState.UnmarshalSSZ(enc[len(capellaKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeCapella(&capellaState)
	case hasBellatrixKey(enc):
		var bellatrixState ethpb.BeaconStateBellatrix
		if err := bellatrixState.UnmarshalSSZ(enc[len(bellatrixKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeBellatrix(&bellatrixState)
	case hasAltairKey(enc):
		var altairState ethpb.BeaconStateAltair
		if err := altairState.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafeAltair(&altairState)
	default:
		var phase0State ethpb.BeaconState
		if err := phase0State.UnmarshalSSZ(enc); err != nil {
			return nil, err
		}
		return state_native.InitializeFromProtoUnsafePhase0(&phase0State)
	}
}

func (s *Store) getBaseAndDiffChain(offset uint64, slot primitives.Slot) (state.BeaconState, []hdiff.HdiffBytes, error) {
	rel := uint64(slot) - offset
	lvl := computeLevel(offset, slot)
	if lvl == -1 {
		return nil, nil, errors.New("slot not in tree")
	}

	exponents := params.StateHierarchyExponents()

	baseSpan := math.PowerOf2(exponents[0])
	baseAnchorSlot := (rel / baseSpan * baseSpan) + offset

	var diffChainIndices []uint64
	for i := 1; i <= lvl; i++ {
		span := math.PowerOf2(exponents[i])
		diffSlot := rel / span * span
		if diffSlot == baseAnchorSlot {
			continue
		}
		diffChainIndices = appendUnique(diffChainIndices, diffSlot+offset)
	}

	baseSnapshot, err := s.getFullSnapshot(baseAnchorSlot)
	if err != nil {
		return nil, nil, err
	}

	diffChain := make([]hdiff.HdiffBytes, 0, len(diffChainIndices))
	for _, diffSlot := range diffChainIndices {
		diff, err := s.getDiff(computeLevel(offset, primitives.Slot(diffSlot)), diffSlot)
		if err != nil {
			return nil, nil, err
		}
		diffChain = append(diffChain, diff)
	}

	return baseSnapshot, diffChain, nil
}

func appendUnique(s []uint64, v uint64) []uint64 {
	for _, x := range s {
		if x == v {
			return s
		}
	}
	return append(s, v)
}
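For reference, a hedged sketch of the composite bucket keys that makeKey and the suffix constants above produce. The 16-byte buffer, the level byte, and the little-endian slot come straight from the code; the example level and slot values are illustrative only.

package main

import (
	"encoding/binary"
	"fmt"
)

// makeKey mirrors the helper above: one level byte, then the slot in
// little-endian, inside a 16-byte buffer.
func makeKey(level int, slot uint64) []byte {
	buf := make([]byte, 16)
	buf[0] = byte(level)
	binary.LittleEndian.PutUint64(buf[1:], slot)
	return buf
}

func main() {
	// A full snapshot (level 0) is stored under the bare key; a diff is split
	// across three entries distinguished by suffix: "_s" (state), "_v"
	// (validators), "_b" (balances).
	fmt.Printf("snapshot key:   %x\n", makeKey(0, 0))
	fmt.Printf("state diff:     %x\n", append(makeKey(2, 1<<16), "_s"...))
	fmt.Printf("validator diff: %x\n", append(makeKey(2, 1<<16), "_v"...))
	fmt.Printf("balances diff:  %x\n", append(makeKey(2, 1<<16), "_b"...))
}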
beacon-chain/db/kv/state_diff_test.go (new file, 577 lines)
@@ -0,0 +1,577 @@
package kv

import (
	"context"
	"encoding/binary"
	"fmt"
	"math/rand"
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/math"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"go.etcd.io/bbolt"
)

func TestStateDiff_LoadOrInitOffset(t *testing.T) {
	db := setupDB(t)
	err := setOffsetInDB(db, 10)
	require.NoError(t, err)
	offset := db.getOffset()
	require.Equal(t, uint64(10), offset)

	err = db.setOffset(10)
	require.ErrorContains(t, "offset already set", err)
	offset = db.getOffset()
	require.Equal(t, uint64(10), offset)
}

func TestStateDiff_ComputeLevel(t *testing.T) {
	db := setupDB(t)

	err := setOffsetInDB(db, 0)
	require.NoError(t, err)

	offset := db.getOffset()

	// 2 ** 21
	lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
	require.Equal(t, 0, lvl)

	// 2 ** 21 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)*3))
	require.Equal(t, 0, lvl)

	// 2 ** 18
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(18)))
	require.Equal(t, 1, lvl)

	// 2 ** 18 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(18)*3))
	require.Equal(t, 1, lvl)

	// 2 ** 16
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(16)))
	require.Equal(t, 2, lvl)

	// 2 ** 16 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(16)*3))
	require.Equal(t, 2, lvl)

	// 2 ** 13
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(13)))
	require.Equal(t, 3, lvl)

	// 2 ** 13 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(13)*3))
	require.Equal(t, 3, lvl)

	// 2 ** 11
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(11)))
	require.Equal(t, 4, lvl)

	// 2 ** 11 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(11)*3))
	require.Equal(t, 4, lvl)

	// 2 ** 9
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(9)))
	require.Equal(t, 5, lvl)

	// 2 ** 9 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(9)*3))
	require.Equal(t, 5, lvl)

	// 2 ** 5
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)))
	require.Equal(t, 6, lvl)

	// 2 ** 5 * 3
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)*3))
	require.Equal(t, 6, lvl)

	// 2 ** 7
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(7)))
	require.Equal(t, 6, lvl)

	// 2 ** 5 + 1
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+1))
	require.Equal(t, -1, lvl)

	// 2 ** 5 + 16
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+16))
	require.Equal(t, -1, lvl)

	// 2 ** 5 + 32
	lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(5)+32))
	require.Equal(t, 6, lvl)
}

func TestStateDiff_SaveFullSnapshot(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			// Create state with slot 0
			st, enc := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			err = db.db.View(func(tx *bbolt.Tx) error {
				bucket := tx.Bucket(stateDiffBucket)
				if bucket == nil {
					return bbolt.ErrBucketNotFound
				}
				s := bucket.Get(makeKey(0, uint64(0)))
				if s == nil {
					return bbolt.ErrIncompatibleValue
				}
				require.DeepSSZEqual(t, enc, s)
				return nil
			})
			require.NoError(t, err)
		})
	}
}

func TestStateDiff_SaveAndReadFullSnapshot(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), 0)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_SaveDiff(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			// Create state with slot 2**21
			slot := primitives.Slot(math.PowerOf2(21))
			st, enc := createState(t, slot, v)

			err := setOffsetInDB(db, uint64(slot))
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			err = db.db.View(func(tx *bbolt.Tx) error {
				bucket := tx.Bucket(stateDiffBucket)
				if bucket == nil {
					return bbolt.ErrBucketNotFound
				}
				s := bucket.Get(makeKey(0, uint64(slot)))
				if s == nil {
					return bbolt.ErrIncompatibleValue
				}
				require.DeepSSZEqual(t, enc, s)
				return nil
			})
			require.NoError(t, err)

			// Create state with slot 2**18 (+2**21)
			slot = primitives.Slot(math.PowerOf2(18) + math.PowerOf2(21))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			key := makeKey(1, uint64(slot))
			err = db.db.View(func(tx *bbolt.Tx) error {
				bucket := tx.Bucket(stateDiffBucket)
				if bucket == nil {
					return bbolt.ErrBucketNotFound
				}
				buf := append(key, "_s"...)
				s := bucket.Get(buf)
				if s == nil {
					return bbolt.ErrIncompatibleValue
				}
				buf = append(key, "_v"...)
				v := bucket.Get(buf)
				if v == nil {
					return bbolt.ErrIncompatibleValue
				}
				buf = append(key, "_b"...)
				b := bucket.Get(buf)
				if b == nil {
					return bbolt.ErrIncompatibleValue
				}
				return nil
			})
			require.NoError(t, err)
		})
	}
}

func TestStateDiff_SaveAndReadDiff(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			slot := primitives.Slot(math.PowerOf2(5))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_SaveAndReadDiff_MultipleLevels(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			slot := primitives.Slot(math.PowerOf2(11))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)

			slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(9))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err = db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err = st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err = readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)

			slot = primitives.Slot(math.PowerOf2(11) + math.PowerOf2(9) + math.PowerOf2(5))
			st, _ = createState(t, slot, v)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err = db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err = st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err = readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_SaveAndReadDiffForkTransition(t *testing.T) {
	// test for every version
	for v := 0; v < 5; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			db := setupDB(t)

			st, _ := createState(t, 0, v)

			err := setOffsetInDB(db, 0)
			require.NoError(t, err)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			slot := primitives.Slot(math.PowerOf2(5))
			st, _ = createState(t, slot, v+1)

			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)

			readSt, err := db.stateByDiff(context.Background(), slot)
			require.NoError(t, err)
			require.NotNil(t, readSt)

			stSSZ, err := st.MarshalSSZ()
			require.NoError(t, err)
			readStSSZ, err := readSt.MarshalSSZ()
			require.NoError(t, err)
			require.DeepSSZEqual(t, stSSZ, readStSSZ)
		})
	}
}

func TestStateDiff_OffsetCache(t *testing.T) {
	// test for slot numbers 0 and 1 for every version
	for slotNum := 0; slotNum < 2; slotNum++ {
		// test for every version
		for v := 0; v < 6; v++ {
			t.Run(fmt.Sprintf("slotNum=%d,%s", slotNum, version.String(v)), func(t *testing.T) {
				db := setupDB(t)

				slot := primitives.Slot(slotNum)
				err := setOffsetInDB(db, uint64(slot))
				require.NoError(t, err)
				st, _ := createState(t, slot, v)
				err = db.saveStateByDiff(context.Background(), st)
				require.NoError(t, err)

				offset := db.stateDiffCache.getOffset()
				require.Equal(t, uint64(slotNum), offset)

				slot2 := primitives.Slot(uint64(slotNum) + math.PowerOf2(params.StateHierarchyExponents()[0]))
				st2, _ := createState(t, slot2, v)
				err = db.saveStateByDiff(context.Background(), st2)
				require.NoError(t, err)

				offset = db.stateDiffCache.getOffset()
				require.Equal(t, uint64(slot), offset)
			})
		}
	}
}

func TestStateDiff_AnchorCache(t *testing.T) {
	// test for every version
	for v := 0; v < 6; v++ {
		t.Run(version.String(v), func(t *testing.T) {
			exponents := params.StateHierarchyExponents()
			localCache := make([]state.ReadOnlyBeaconState, len(exponents)-1)
			db := setupDB(t)
			err := setOffsetInDB(db, 0) // lvl 0
			require.NoError(t, err)

			// at first the cache should be empty
			for i := 0; i < len(params.StateHierarchyExponents()); i++ {
				anchor := db.stateDiffCache.getAnchor(i)
				require.IsNil(t, anchor)
			}

			// add level 0
			slot := primitives.Slot(0) // offset 0 is already set
			st, _ := createState(t, slot, v)
			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)
			localCache[0] = st

			// level 0 should be the same
			require.DeepEqual(t, localCache[0], db.stateDiffCache.getAnchor(0))

			// rest of the cache should be nil
			for i := 1; i < len(exponents)-1; i++ {
				require.IsNil(t, db.stateDiffCache.getAnchor(i))
			}

			// skip the last level as it does not get cached
			for i := len(exponents) - 2; i > 0; i-- {
				slot = primitives.Slot(math.PowerOf2(exponents[i]))
				st, _ := createState(t, slot, v)
				err = db.saveStateByDiff(context.Background(), st)
				require.NoError(t, err)
				localCache[i] = st

				// anchor cache must match local cache
				for i := 0; i < len(exponents)-1; i++ {
					if localCache[i] == nil {
						require.IsNil(t, db.stateDiffCache.getAnchor(i))
						continue
					}
					localSSZ, err := localCache[i].MarshalSSZ()
					require.NoError(t, err)
					anchorSSZ, err := db.stateDiffCache.getAnchor(i).MarshalSSZ()
					require.NoError(t, err)
					require.DeepSSZEqual(t, localSSZ, anchorSSZ)
				}
			}

			// moving to a new tree should invalidate the cache except for level 0
			twoTo21 := math.PowerOf2(21)
			slot = primitives.Slot(twoTo21)
			st, _ = createState(t, slot, v)
			err = db.saveStateByDiff(context.Background(), st)
			require.NoError(t, err)
			localCache = make([]state.ReadOnlyBeaconState, len(exponents)-1)
			localCache[0] = st

			// level 0 should be the same
			require.DeepEqual(t, localCache[0], db.stateDiffCache.getAnchor(0))

			// rest of the cache should be nil
			for i := 1; i < len(exponents)-1; i++ {
				require.IsNil(t, db.stateDiffCache.getAnchor(i))
			}
		})
	}
}

func createState(t *testing.T, slot primitives.Slot, v int) (state.ReadOnlyBeaconState, []byte) {
	p := params.BeaconConfig()
	var st state.BeaconState
	var err error
	switch v {
	case version.Altair:
		st, err = util.NewBeaconStateAltair()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.GenesisForkVersion,
			CurrentVersion:  p.AltairForkVersion,
			Epoch:           p.AltairForkEpoch,
		})
		require.NoError(t, err)
	case version.Bellatrix:
		st, err = util.NewBeaconStateBellatrix()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.AltairForkVersion,
			CurrentVersion:  p.BellatrixForkVersion,
			Epoch:           p.BellatrixForkEpoch,
		})
		require.NoError(t, err)
	case version.Capella:
		st, err = util.NewBeaconStateCapella()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.BellatrixForkVersion,
			CurrentVersion:  p.CapellaForkVersion,
			Epoch:           p.CapellaForkEpoch,
		})
		require.NoError(t, err)
	case version.Deneb:
		st, err = util.NewBeaconStateDeneb()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.CapellaForkVersion,
			CurrentVersion:  p.DenebForkVersion,
			Epoch:           p.DenebForkEpoch,
		})
		require.NoError(t, err)
	case version.Electra:
		st, err = util.NewBeaconStateElectra()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.DenebForkVersion,
			CurrentVersion:  p.ElectraForkVersion,
			Epoch:           p.ElectraForkEpoch,
		})
		require.NoError(t, err)
	default:
		st, err = util.NewBeaconState()
		require.NoError(t, err)
		err = st.SetFork(&ethpb.Fork{
			PreviousVersion: p.GenesisForkVersion,
			CurrentVersion:  p.GenesisForkVersion,
			Epoch:           0,
		})
		require.NoError(t, err)
	}

	err = st.SetSlot(slot)
	require.NoError(t, err)
	slashings := make([]uint64, 8192)
	slashings[0] = uint64(rand.Intn(10))
	err = st.SetSlashings(slashings)
	require.NoError(t, err)
	stssz, err := st.MarshalSSZ()
	require.NoError(t, err)
	enc, err := addKey(v, stssz)
	require.NoError(t, err)
	return st, enc
}

func setOffsetInDB(s *Store, offset uint64) error {
	err := s.db.Update(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(stateDiffBucket)
		if bucket == nil {
			return bbolt.ErrBucketNotFound
		}

		offsetBytes := bucket.Get(offsetKey)
		if offsetBytes != nil {
			return fmt.Errorf("offset already set to %d", binary.LittleEndian.Uint64(offsetBytes))
		}

		offsetBytes = make([]byte, 8)
		binary.LittleEndian.PutUint64(offsetBytes, offset)
		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return err
	}

	sdCache, err := newStateDiffCache(s)
	if err != nil {
		return err
	}
	s.stateDiffCache = sdCache
	return nil
}
@@ -213,7 +213,7 @@ func (s BlobSidecarsByRootReq) Len() int {
// ====================================
// DataColumnsByRootIdentifiers section
// ====================================
var _ ssz.Marshaler = DataColumnsByRootIdentifiers{}
var _ ssz.Marshaler = (*DataColumnsByRootIdentifiers)(nil)
var _ ssz.Unmarshaler = (*DataColumnsByRootIdentifiers)(nil)

// DataColumnsByRootIdentifiers is used to specify a list of data column targets (root+index) in a DataColumnSidecarsByRoot RPC request.
@@ -275,33 +275,33 @@ func (d *DataColumnsByRootIdentifiers) UnmarshalSSZ(buf []byte) error {
	return nil
}

func (d DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
func (d *DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
	var err error
	count := len(d)
	count := len(*d)
	maxSize := params.BeaconConfig().MaxRequestBlocksDeneb
	if uint64(count) > maxSize {
		return nil, errors.Errorf("data column identifiers list exceeds max size: %d > %d", count, maxSize)
	}

	if len(d) == 0 {
	if len(*d) == 0 {
		return []byte{}, nil
	}
	sizes := make([]uint32, count)
	valTotal := uint32(0)
	for i, elem := range d {
	for i, elem := range *d {
		if elem == nil {
			return nil, errors.New("nil item in DataColumnsByRootIdentifiers list")
		}
		sizes[i] = uint32(elem.SizeSSZ())
		valTotal += sizes[i]
	}
	offSize := uint32(4 * len(d))
	offSize := uint32(4 * len(*d))
	out := make([]byte, offSize, offSize+valTotal)
	for i := range sizes {
		binary.LittleEndian.PutUint32(out[i*4:i*4+4], offSize)
		offSize += sizes[i]
	}
	for _, elem := range d {
	for _, elem := range *d {
		out, err = elem.MarshalSSZTo(out)
		if err != nil {
			return nil, err
@@ -312,7 +312,7 @@ func (d DataColumnsByRootIdentifiers) MarshalSSZ() ([]byte, error) {
	}
}

// MarshalSSZTo implements ssz.Marshaler. It appends the serialized DataColumnSidecarsByRootReq value to the provided byte slice.
func (d DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
func (d *DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
	obj, err := d.MarshalSSZ()
	if err != nil {
		return nil, err
@@ -321,11 +321,11 @@ func (d DataColumnsByRootIdentifiers) MarshalSSZTo(dst []byte) ([]byte, error) {
	}
}

// SizeSSZ implements ssz.Marshaler. It returns the size of the serialized representation.
func (d DataColumnsByRootIdentifiers) SizeSSZ() int {
func (d *DataColumnsByRootIdentifiers) SizeSSZ() int {
	size := 0
	for i := 0; i < len(d); i++ {
	for i := 0; i < len(*d); i++ {
		size += 4
		size += (d)[i].SizeSSZ()
		size += (*d)[i].SizeSSZ()
	}
	return size
}
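A brief aside on the layout MarshalSSZ produces above: for a list of variable-size elements, SSZ writes one 4-byte little-endian offset per element (relative to the start of the buffer), followed by the concatenated element payloads. A hedged, self-contained sketch of that scheme, using plain byte slices in place of the real identifier type:

package main

import (
	"encoding/binary"
	"fmt"
)

// marshalVariableList mimics the offset scheme used above: a table of
// 4-byte offsets, then the payloads in order.
func marshalVariableList(elems [][]byte) []byte {
	offSize := uint32(4 * len(elems))
	out := make([]byte, offSize)
	off := offSize
	for i, e := range elems {
		binary.LittleEndian.PutUint32(out[i*4:i*4+4], off)
		off += uint32(len(e))
	}
	for _, e := range elems {
		out = append(out, e...)
	}
	return out
}

func main() {
	enc := marshalVariableList([][]byte{{0xaa}, {0xbb, 0xcc}})
	fmt.Printf("%x\n", enc) // 08000000 09000000 aa bbcc
}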
@@ -10,13 +10,11 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -38,9 +36,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
@@ -49,7 +45,6 @@ go_test(
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",

@@ -3,15 +3,12 @@ package lookup
import (
	"context"
	"fmt"
	"math"
	"strconv"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -52,7 +49,6 @@ type BeaconDbBlocker struct {
	ChainInfoFetcher   blockchain.ChainInfoFetcher
	GenesisTimeFetcher blockchain.TimeFetcher
	BlobStorage        *filesystem.BlobStorage
	DataColumnStorage  *filesystem.DataColumnStorage
}

// Block returns the beacon block for a given identifier. The identifier can be one of:

@@ -216,190 +212,64 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) (

	root := bytesutil.ToBytes32(rootSlice)

	roSignedBlock, err := p.BeaconDB.Block(ctx, root)
	b, err := p.BeaconDB.Block(ctx, root)
	if err != nil {
		return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve block %#x from db", rootSlice), Reason: core.Internal}
	}

	if roSignedBlock == nil {
	if b == nil {
		return nil, &core.RpcError{Err: fmt.Errorf("block %#x not found in db", rootSlice), Reason: core.NotFound}
	}

	// If block is not in the retention window, return 200 w/ empty list
	if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(roSignedBlock.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
	// if block is not in the retention window, return 200 w/ empty list
	if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
		return make([]*blocks.VerifiedROBlob, 0), nil
	}

	roBlock := roSignedBlock.Block()

	commitments, err := roBlock.Body().BlobKzgCommitments()
	commitments, err := b.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to retrieve kzg commitments from block %#x", rootSlice), Reason: core.Internal}
	}

	// If there are no commitments return 200 w/ empty list
	// if there are no commitments return 200 w/ empty list
	if len(commitments) == 0 {
		return make([]*blocks.VerifiedROBlob, 0), nil
	}

	// Compute the first Fulu slot.
	fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
	fuluForkSlot := primitives.Slot(math.MaxUint64)
	if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
		fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
		if err != nil {
			return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate peerDAS start slot"), Reason: core.Internal}
		}
	}
	sum := p.BlobStorage.Summary(root)

	if roBlock.Slot() >= fuluForkSlot {
		roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, root)
		if err != nil {
			return nil, &core.RpcError{Err: errors.Wrapf(err, "failed to create roBlock with root %#x", root), Reason: core.Internal}
		}

		return p.blobsFromStoredDataColumns(roBlock, indices)
	}

	return p.blobsFromStoredBlobs(commitments, root, indices)
}
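
For orientation, a hypothetical caller of the Blobs API above might look like the sketch below. The package import path and the `fetchBlobs` helper are assumptions for illustration; constructing the blocker (BeaconDB, BlobStorage, fetchers) is shown in the tests further down in this diff.

package example // illustrative only

import (
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup" // path assumed
)

// fetchBlobs sketches two typical calls: all blobs of the head block,
// then a single index of the block at slot 123.
func fetchBlobs(ctx context.Context, blocker *lookup.BeaconDbBlocker) error {
    allBlobs, rpcErr := blocker.Blobs(ctx, "head", nil) // nil indices = every stored blob
    if rpcErr != nil {
        return rpcErr.Err
    }
    oneBlob, rpcErr := blocker.Blobs(ctx, "123", []int{2}) // only index 2
    if rpcErr != nil {
        return rpcErr.Err
    }
    fmt.Printf("retrieved %d then %d blob sidecars\n", len(allBlobs), len(oneBlob))
    return nil
}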

// blobsFromStoredBlobs retrieves blob sidecars corresponding to `indices` and `root` from the store.
// This function expects blob sidecars to be stored (i.e. no data column sidecars).
func (p *BeaconDbBlocker) blobsFromStoredBlobs(commitments [][]byte, root [fieldparams.RootLength]byte, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
	summary := p.BlobStorage.Summary(root)
	maxBlobCount := summary.MaxBlobsForEpoch()

	for _, index := range indices {
		if uint64(index) >= maxBlobCount {
			return nil, &core.RpcError{
				Err:    fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", index, maxBlobCount),
				Reason: core.BadRequest,
			}
		}

		if !summary.HasIndex(uint64(index)) {
			return nil, &core.RpcError{
				Err:    fmt.Errorf("requested index %d not found", index),
				Reason: core.NotFound,
			}
		}
	}

	// If no indices are provided, use all indices that are available in the summary.
	if len(indices) == 0 {
		for index := range commitments {
			if summary.HasIndex(uint64(index)) {
				indices = append(indices, index)
		for i := range commitments {
			if sum.HasIndex(uint64(i)) {
				indices = append(indices, i)
			}
		}
	} else {
		for _, ix := range indices {
			if uint64(ix) >= sum.MaxBlobsForEpoch() {
				return nil, &core.RpcError{
					Err:    fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", ix, sum.MaxBlobsForEpoch()),
					Reason: core.BadRequest,
				}
			}
			if !sum.HasIndex(uint64(ix)) {
				return nil, &core.RpcError{
					Err:    fmt.Errorf("requested index %d not found", ix),
					Reason: core.NotFound,
				}
			}
		}
	}

	// Retrieve blob sidecars from the store.
	blobs := make([]*blocks.VerifiedROBlob, 0, len(indices))
	for _, index := range indices {
		blobSidecar, err := p.BlobStorage.Get(root, uint64(index))
	blobs := make([]*blocks.VerifiedROBlob, len(indices))
	for i, index := range indices {
		vblob, err := p.BlobStorage.Get(root, uint64(index))
		if err != nil {
			return nil, &core.RpcError{
				Err:    fmt.Errorf("could not retrieve blob for block root %#x at index %d", root, index),
				Err:    fmt.Errorf("could not retrieve blob for block root %#x at index %d", rootSlice, index),
				Reason: core.Internal,
			}
		}

		blobs = append(blobs, &blobSidecar)
		blobs[i] = &vblob
	}

	return blobs, nil
}
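
A toy illustration of the index-defaulting rule above, assuming a store that only holds blobs 0 and 2 for a block with four commitments:

package main

import "fmt"

func main() {
    // has plays the role of summary.HasIndex in the function above.
    has := map[uint64]bool{0: true, 2: true}
    commitments := make([][]byte, 4)

    var indices []int // the caller passed nothing
    for i := range commitments {
        if has[uint64(i)] {
            indices = append(indices, i)
        }
    }
    fmt.Println(indices) // [0 2]
}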

// blobsFromStoredDataColumns retrieves data column sidecars from the store,
// reconstructs the whole matrix if needed, converts the matrix to blobs,
// and then returns converted blobs corresponding to `indices` and `root`.
// This function expects data column sidecars to be stored (i.e. no blob sidecars).
// If not enough data column sidecars are available to convert blobs from them
// (either directly or after reconstruction), an error is returned.
func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
	root := block.Root()

	// Use all indices if none are provided.
	if len(indices) == 0 {
		commitments, err := block.Block().Body().BlobKzgCommitments()
		if err != nil {
			return nil, &core.RpcError{
				Err:    errors.Wrap(err, "could not retrieve blob commitments"),
				Reason: core.Internal,
			}
		}

		for index := range commitments {
			indices = append(indices, index)
		}
	}

	// Count how many columns we have in the store.
	summary := p.DataColumnStorage.Summary(root)
	stored := summary.Stored()
	count := uint64(len(stored))

	if count < peerdas.MinimumColumnsCountToReconstruct() {
		// There is no way to reconstruct the data columns.
		return nil, &core.RpcError{
			Err:    errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if the flag is already set", flags.SubscribeAllDataSubnets.Name),
			Reason: core.NotFound,
		}
	}

	// Retrieve the needed data columns from the database.
	verifiedRoDataColumnSidecars, err := p.neededDataColumnSidecars(root, stored)
	if err != nil {
		return nil, &core.RpcError{
			Err:    errors.Wrap(err, "needed data column sidecars"),
			Reason: core.Internal,
		}
	}

	// Reconstruct blob sidecars from data column sidecars.
	verifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(block, verifiedRoDataColumnSidecars, indices)
	if err != nil {
		return nil, &core.RpcError{
			Err:    errors.Wrap(err, "blobs from data columns"),
			Reason: core.Internal,
		}
	}

	return verifiedRoBlobSidecars, nil
}

// neededDataColumnSidecars retrieves all data column sidecars corresponding to (non-extended) blobs if available,
// else retrieves all data column sidecars from the store.
func (p *BeaconDbBlocker) neededDataColumnSidecars(root [fieldparams.RootLength]byte, stored map[uint64]bool) ([]blocks.VerifiedRODataColumn, error) {
	// Check if we have all the non-extended data columns.
	cellsPerBlob := fieldparams.CellsPerBlob
	blobIndices := make([]uint64, 0, cellsPerBlob)
	hasAllBlobColumns := true
	for i := range uint64(cellsPerBlob) {
		if !stored[i] {
			hasAllBlobColumns = false
			break
		}
		blobIndices = append(blobIndices, i)
	}

	if hasAllBlobColumns {
		// Retrieve only the non-extended data columns.
		verifiedRoSidecars, err := p.DataColumnStorage.Get(root, blobIndices)
		if err != nil {
			return nil, errors.Wrap(err, "data columns storage get")
		}

		return verifiedRoSidecars, nil
	}

	// Retrieve all the data columns.
	verifiedRoSidecars, err := p.DataColumnStorage.Get(root, nil)
	if err != nil {
		return nil, errors.Wrap(err, "data columns storage get")
	}

	return verifiedRoSidecars, nil
}
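
To make the retrieval policy above concrete, here is a rough sketch. The constants are illustrative assumptions (128 total columns, a 64-column non-extended part, reconstruction possible from any 64 columns), not values taken from this diff:

package main

import "fmt"

func main() {
    const (
        cellsPerBlob   = 64 // assumed width of the non-extended part
        reconstructMin = 64 // assumed minimum column count for reconstruction
    )

    stored := map[uint64]bool{} // columns present on disk
    for i := uint64(0); i < 64; i++ {
        stored[i] = true // pretend we hold exactly columns 0..63
    }

    hasAllBlobColumns := true
    for i := uint64(0); i < cellsPerBlob; i++ {
        if !stored[i] {
            hasAllBlobColumns = false
            break
        }
    }

    switch {
    case hasAllBlobColumns:
        fmt.Println("cheap path: read blobs off columns 0..63, no erasure decoding")
    case uint64(len(stored)) >= reconstructMin:
        fmt.Println("fetch everything stored and reconstruct the full matrix")
    default:
        fmt.Println("not enough columns; the RPC returns NotFound")
    }
}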

@@ -8,15 +8,12 @@ import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/core"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"

@@ -161,335 +158,172 @@ func TestGetBlob(t *testing.T) {
}

func TestGetBlob(t *testing.T) {
	const (
		slot           = 123
		blobCount      = 4
		denebForkEpoch = 1
		fuluForkEpoch  = 2
	)

	setupDeneb := func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.DenebForkEpoch = denebForkEpoch
		params.OverrideBeaconConfig(cfg)
	}

	setupFulu := func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		cfg := params.BeaconConfig().Copy()
		cfg.DenebForkEpoch = denebForkEpoch
		cfg.FuluForkEpoch = fuluForkEpoch
		params.OverrideBeaconConfig(cfg)
	}

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.DenebForkEpoch = 1
	params.OverrideBeaconConfig(cfg)
	ctx := t.Context()
	db := testDB.SetupDB(t)

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	// Create and save Deneb block and blob sidecars.
	_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)

	denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
	denebBlockRoot := denebBlock.Root()

	verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
	for i := range verifiedStoredSidecars {
		err := blobStorage.Save(verifiedStoredSidecars[i])
		require.NoError(t, err)
	denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
	require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
	_, bs := filesystem.NewEphemeralBlobStorageAndFs(t)
	testSidecars := verification.FakeVerifySliceForTest(t, blobs)
	for i := range testSidecars {
		require.NoError(t, bs.Save(testSidecars[i]))
	}

	err = db.SaveBlock(t.Context(), denebBlock)
	require.NoError(t, err)

	// Create an Electra block and blob sidecars (an Electra block doubles as a Fulu block),
	// save the block, and convert the blob sidecars to data column sidecars.
	fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
	fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
	fuluBlockRoot := fuluBlock.Root()

	cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
	for _, blob := range fuluBlobSidecars {
		var kzgBlob kzg.Blob
		copy(kzgBlob[:], blob.Blob)
		cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
		require.NoError(t, err)
		cellsAndProofsList = append(cellsAndProofsList, cellsAndProofs)
	}

	dataColumnSidecarPb, err := peerdas.DataColumnSidecars(fuluBlock, cellsAndProofsList)
	require.NoError(t, err)

	verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
	for _, sidecarPb := range dataColumnSidecarPb {
		roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecarPb, fuluBlockRoot)
		require.NoError(t, err)

		verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
	}

	err = db.SaveBlock(t.Context(), fuluBlock)
	require.NoError(t, err)

	blockRoot := blobs[0].BlockRoot()
	t.Run("genesis", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{}
		_, rpcErr := blocker.Blobs(ctx, "genesis", nil)
		require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
		require.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
		assert.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
		assert.StringContains(t, "blobs are not supported for Phase 0 fork", rpcErr.Err.Error())
	})

	t.Run("head", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{Root: denebBlockRoot[:]},
			ChainInfoFetcher: &mockChain.ChainService{Root: blockRoot[:]},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
		}

		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "head", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(retrievedVerifiedSidecars))

		for i := range blobCount {
			expected := verifiedStoredSidecars[i]

			actual := retrievedVerifiedSidecars[i].BlobSidecar
			require.NotNil(t, actual)

			require.Equal(t, expected.Index, actual.Index)
			require.DeepEqual(t, expected.Blob, actual.Blob)
			require.DeepEqual(t, expected.KzgCommitment, actual.KzgCommitment)
			require.DeepEqual(t, expected.KzgProof, actual.KzgProof)
			BlobStorage: bs,
		}
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "head", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
		sidecar := verifiedBlobs[0].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(0), sidecar.Index)
		assert.DeepEqual(t, blobs[0].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[0].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[0].KzgProof, sidecar.KzgProof)
		sidecar = verifiedBlobs[1].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(1), sidecar.Index)
		assert.DeepEqual(t, blobs[1].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[1].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[1].KzgProof, sidecar.KzgProof)
		sidecar = verifiedBlobs[2].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(2), sidecar.Index)
		assert.DeepEqual(t, blobs[2].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[2].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[2].KzgProof, sidecar.KzgProof)
		sidecar = verifiedBlobs[3].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(3), sidecar.Index)
		assert.DeepEqual(t, blobs[3].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[3].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[3].KzgProof, sidecar.KzgProof)
	})

	t.Run("finalized", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}

		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "finalized", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedSidecars))
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "finalized", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
	})

	t.Run("justified", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}

		verifiedSidecars, rpcErr := blocker.Blobs(ctx, "justified", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedSidecars))
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "justified", nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
	})

	t.Run("root", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(denebBlockRoot[:]), nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedBlobs))
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(blockRoot[:]), nil)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
	})

	t.Run("slot", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, blobCount, len(verifiedBlobs))
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 4, len(verifiedBlobs))
	})

	t.Run("one blob only", func(t *testing.T) {
		const index = 2

		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}

		retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", []int{index})
		require.IsNil(t, rpcErr)
		require.Equal(t, 1, len(retrievedVerifiedSidecars))

		expected := verifiedStoredSidecars[index]
		actual := retrievedVerifiedSidecars[0].BlobSidecar
		require.NotNil(t, actual)

		require.Equal(t, uint64(index), actual.Index)
		require.DeepEqual(t, expected.Blob, actual.Blob)
		require.DeepEqual(t, expected.KzgCommitment, actual.KzgCommitment)
		require.DeepEqual(t, expected.KzgProof, actual.KzgProof)
		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []int{2})
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 1, len(verifiedBlobs))
		sidecar := verifiedBlobs[0].BlobSidecar
		require.NotNil(t, sidecar)
		assert.Equal(t, uint64(2), sidecar.Index)
		assert.DeepEqual(t, blobs[2].Blob, sidecar.Blob)
		assert.DeepEqual(t, blobs[2].KzgCommitment, sidecar.KzgCommitment)
		assert.DeepEqual(t, blobs[2].KzgProof, sidecar.KzgProof)
	})

	t.Run("no blobs returns an empty array", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: filesystem.NewEphemeralBlobStorage(t),
		}

		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", nil)
		require.IsNil(t, rpcErr)
		assert.Equal(t, rpcErr == nil, true)
		require.Equal(t, 0, len(verifiedBlobs))
	})

	t.Run("no blob at index", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}

		noBlobIndex := len(storedBlobSidecars) + 1
		noBlobIndex := len(blobs) + 1
		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
		require.NotNil(t, rpcErr)
		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
		assert.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
	})

	t.Run("index too big", func(t *testing.T) {
		setupDeneb(t)

		blocker := &BeaconDbBlocker{
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:    db,
			BlobStorage: blobStorage,
			BlobStorage: bs,
		}
		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
		require.NotNil(t, rpcErr)
		require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
	})

	t.Run("not enough stored data column sidecars", func(t *testing.T) {
		setupFulu(t)

		_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
		require.NoError(t, err)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:          db,
			BlobStorage:       blobStorage,
			DataColumnStorage: dataColumnStorage,
		}

		_, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
		require.NotNil(t, rpcErr)
		require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
	})

	t.Run("reconstruction needed", func(t *testing.T) {
		setupFulu(t)

		_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnsCountToReconstruct()+1])
		require.NoError(t, err)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:          db,
			BlobStorage:       blobStorage,
			DataColumnStorage: dataColumnStorage,
		}

		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))

		for i, retrievedVerifiedRoBlob := range retrievedVerifiedRoBlobs {
			retrievedBlobSidecarPb := retrievedVerifiedRoBlob.BlobSidecar
			initialBlobSidecarPb := fuluBlobSidecars[i].BlobSidecar
			require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
		}
	})

	t.Run("no reconstruction needed", func(t *testing.T) {
		setupFulu(t)

		_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
		err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
		require.NoError(t, err)

		blocker := &BeaconDbBlocker{
			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
				Genesis: time.Now(),
			},
			BeaconDB:          db,
			BlobStorage:       blobStorage,
			DataColumnStorage: dataColumnStorage,
		}

		retrievedVerifiedRoBlobs, rpcErr := blocker.Blobs(ctx, hexutil.Encode(fuluBlockRoot[:]), nil)
		require.IsNil(t, rpcErr)
		require.Equal(t, len(fuluBlobSidecars), len(retrievedVerifiedRoBlobs))

		for i, retrievedVerifiedRoBlob := range retrievedVerifiedRoBlobs {
			retrievedBlobSidecarPb := retrievedVerifiedRoBlob.BlobSidecar
			initialBlobSidecarPb := fuluBlobSidecars[i].BlobSidecar
			require.DeepSSZEqual(t, initialBlobSidecarPb, retrievedBlobSidecarPb)
		}
		assert.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
	})
}

@@ -264,6 +264,8 @@ type WriteOnlyEth1Data interface {
	AppendEth1DataVotes(val *ethpb.Eth1Data) error
	SetEth1DepositIndex(val uint64) error
	ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (primitives.Epoch, error)
	SetExitBalanceToConsume(val primitives.Gwei) error
	SetEarliestExitEpoch(val primitives.Epoch) error
}

// WriteOnlyValidators defines a struct which only has write access to validators methods.
@@ -331,6 +333,7 @@ type WriteOnlyWithdrawals interface {
	DequeuePendingPartialWithdrawals(num uint64) error
	SetNextWithdrawalIndex(i uint64) error
	SetNextWithdrawalValidatorIndex(i primitives.ValidatorIndex) error
	SetPendingPartialWithdrawals(val []*ethpb.PendingPartialWithdrawal) error
}

type WriteOnlyConsolidations interface {

@@ -75,3 +75,33 @@ func (b *BeaconState) ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (prim

	return b.earliestExitEpoch, nil
}

// SetExitBalanceToConsume sets the exit balance to consume. This method mutates the state.
func (b *BeaconState) SetExitBalanceToConsume(exitBalanceToConsume primitives.Gwei) error {
	if b.version < version.Electra {
		return errNotSupported("SetExitBalanceToConsume", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	b.exitBalanceToConsume = exitBalanceToConsume
	b.markFieldAsDirty(types.ExitBalanceToConsume)

	return nil
}

// SetEarliestExitEpoch sets the earliest exit epoch. This method mutates the state.
func (b *BeaconState) SetEarliestExitEpoch(earliestExitEpoch primitives.Epoch) error {
	if b.version < version.Electra {
		return errNotSupported("SetEarliestExitEpoch", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	b.earliestExitEpoch = earliestExitEpoch
	b.markFieldAsDirty(types.EarliestExitEpoch)

	return nil
}
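
A hypothetical use (not from this diff) of the two new setters when replaying exit-churn fields onto a post-Electra state; `applyChurnFields` and its values are made up for illustration.

// applyChurnFields sketches how a state-diff replayer might call the setters.
func applyChurnFields(st state.BeaconState) error {
    if err := st.SetExitBalanceToConsume(primitives.Gwei(1_000_000_000)); err != nil {
        return err // pre-Electra states reject this setter with errNotSupported
    }
    return st.SetEarliestExitEpoch(primitives.Epoch(4242))
}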

@@ -100,3 +100,22 @@ func (b *BeaconState) DequeuePendingPartialWithdrawals(n uint64) error {

	return nil
}

// SetPendingPartialWithdrawals sets the pending partial withdrawals. This method mutates the state.
func (b *BeaconState) SetPendingPartialWithdrawals(pendingPartialWithdrawals []*eth.PendingPartialWithdrawal) error {
	if b.version < version.Electra {
		return errNotSupported("SetPendingPartialWithdrawals", b.version)
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	if pendingPartialWithdrawals == nil {
		return errors.New("cannot set nil pending partial withdrawals")
	}

	b.pendingPartialWithdrawals = pendingPartialWithdrawals
	b.markFieldAsDirty(types.PendingPartialWithdrawals)

	return nil
}

@@ -7,7 +7,6 @@ go_library(
        "block_batcher.go",
        "broadcast_bls_changes.go",
        "context.go",
        "data_columns_sampling.go",
        "deadlines.go",
        "decode_pubsub.go",
        "doc.go",
@@ -79,7 +78,6 @@ go_library(
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/core/transition/interop:go_default_library",
@@ -164,7 +162,6 @@ go_test(
        "block_batcher_test.go",
        "broadcast_bls_changes_test.go",
        "context_test.go",
        "data_columns_sampling_test.go",
        "decode_pubsub_test.go",
        "error_test.go",
        "fork_watcher_test.go",
@@ -210,16 +207,13 @@ go_test(
    deps = [
        "//async/abool:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -271,8 +265,6 @@ go_test(
        "//testing/util:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_d4l3k_messagediff//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
@@ -280,7 +272,6 @@ go_test(
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_libp2p_go_libp2p//:go_default_library",
        "@com_github_libp2p_go_libp2p//core:go_default_library",
        "@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
        "@com_github_libp2p_go_libp2p//core/network:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_libp2p_go_libp2p//core/protocol:go_default_library",

@@ -1,627 +0,0 @@
package sync

import (
	"context"
	"fmt"
	"slices"
	"sync"
	"time"

	"github.com/OffchainLabs/prysm/v6/async"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/crypto/rand"
	eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const PeerRefreshInterval = 1 * time.Minute

type roundSummary struct {
	RequestedColumns []uint64
	MissingColumns   map[uint64]bool
}

// DataColumnSampler defines the interface for sampling data columns from peers for requested block root and samples count.
type DataColumnSampler interface {
	// Run starts the data column sampling service.
	Run(ctx context.Context)
}

var _ DataColumnSampler = (*dataColumnSampler1D)(nil)

// dataColumnSampler1D implements the DataColumnSampler interface for PeerDAS 1D.
type dataColumnSampler1D struct {
	sync.RWMutex

	p2p           p2p.P2P
	clock         *startup.Clock
	ctxMap        ContextByteVersions
	stateNotifier statefeed.Notifier

	// nonCustodyGroups is a set of groups that are not custodied by the node.
	nonCustodyGroups map[uint64]bool

	// groupsByPeer maps a peer to the groups it custodies.
	groupsByPeer map[peer.ID]map[uint64]bool

	// peersByCustodyGroup maps a group to the peers responsible for its custody.
	peersByCustodyGroup map[uint64]map[peer.ID]bool

	// columnVerifier verifies a column according to the specified requirements.
	columnVerifier verification.NewDataColumnsVerifier

	// custodyInfo contains the custody information of the node.
	custodyInfo *peerdas.CustodyInfo
}

// newDataColumnSampler1D creates a new 1D data column sampler.
func newDataColumnSampler1D(
	p2p p2p.P2P,
	clock *startup.Clock,
	ctxMap ContextByteVersions,
	stateNotifier statefeed.Notifier,
	colVerifier verification.NewDataColumnsVerifier,
	custodyInfo *peerdas.CustodyInfo,
) *dataColumnSampler1D {
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
	peersByCustodyGroup := make(map[uint64]map[peer.ID]bool, numberOfCustodyGroups)

	for i := range numberOfCustodyGroups {
		peersByCustodyGroup[i] = make(map[peer.ID]bool)
	}

	return &dataColumnSampler1D{
		p2p:                 p2p,
		clock:               clock,
		ctxMap:              ctxMap,
		stateNotifier:       stateNotifier,
		groupsByPeer:        make(map[peer.ID]map[uint64]bool),
		peersByCustodyGroup: peersByCustodyGroup,
		columnVerifier:      colVerifier,
		custodyInfo:         custodyInfo,
	}
}

// Run implements DataColumnSampler.
func (d *dataColumnSampler1D) Run(ctx context.Context) {
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups

	// Get the node ID.
	nodeID := d.p2p.NodeID()

	// Check whether sampling needs to run at all; if not, return directly.
	// TODO: Rework this part to take into account dynamic custody group count with peer sampling.
	custodyGroupCount := d.custodyInfo.ActualGroupCount()

	// Retrieve our local node info.
	localNodeInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		log.WithError(err).Error("Failed to retrieve local node info")
		return
	}

	// TODO: custody group count != data column group count
	if custodyGroupCount >= peerdas.MinimumColumnsCountToReconstruct() {
		log.WithFields(logrus.Fields{
			"custodyGroupCount": custodyGroupCount,
			"totalGroups":       numberOfCustodyGroups,
		}).Debug("The node custodies at least half of the groups, no need to sample")
		return
	}

	// Initialize non custody groups.
	d.nonCustodyGroups = make(map[uint64]bool)
	for i := range numberOfCustodyGroups {
		if !localNodeInfo.CustodyGroups[i] {
			d.nonCustodyGroups[i] = true
		}
	}

	// Initialize peer info first.
	d.refreshPeerInfo()

	// Periodically refresh peer info to keep the peer <-> column mapping up to date.
	async.RunEvery(ctx, PeerRefreshInterval, d.refreshPeerInfo)

	// Start the sampling loop.
	d.samplingRoutine(ctx)
}

func (d *dataColumnSampler1D) samplingRoutine(ctx context.Context) {
	stateCh := make(chan *feed.Event, 1)
	stateSub := d.stateNotifier.StateFeed().Subscribe(stateCh)
	defer stateSub.Unsubscribe()

	for {
		select {
		case evt := <-stateCh:
			d.handleStateNotification(ctx, evt)
		case err := <-stateSub.Err():
			log.WithError(err).Error("DataColumnSampler1D subscription to state feed failed")
		case <-ctx.Done():
			log.Debug("Context canceled, exiting data column sampling loop.")
			return
		}
	}
}

// Refresh peer information.
func (d *dataColumnSampler1D) refreshPeerInfo() {
	d.Lock()
	defer d.Unlock()

	activePeers := d.p2p.Peers().Active()
	d.prunePeerInfo(activePeers)

	for _, pid := range activePeers {
		// Retrieve the custody group count of the peer.
		retrievedCustodyGroupCount := d.p2p.CustodyGroupCountFromPeer(pid)

		// Look up the custody groups we have stored for this peer.
		storedGroups, ok := d.groupsByPeer[pid]
		storedGroupsCount := uint64(len(storedGroups))

		if ok && storedGroupsCount == retrievedCustodyGroupCount {
			// No change for this peer.
			continue
		}

		nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
		if err != nil {
			log.WithError(err).WithField("peerID", pid).Error("Failed to convert peer ID to node ID")
			continue
		}

		// Retrieve the peer info.
		peerInfo, _, err := peerdas.Info(nodeID, retrievedCustodyGroupCount)
		if err != nil {
			log.WithError(err).WithField("peerID", pid.String()).Error("Failed to determine peer info")
			continue
		}

		d.groupsByPeer[pid] = peerInfo.CustodyGroups
		for group := range peerInfo.CustodyGroups {
			d.peersByCustodyGroup[group][pid] = true
		}
	}

	groupsWithoutPeers := make([]uint64, 0)
	for group, peers := range d.peersByCustodyGroup {
		if len(peers) == 0 {
			groupsWithoutPeers = append(groupsWithoutPeers, group)
		}
	}

	if len(groupsWithoutPeers) > 0 {
		slices.Sort[[]uint64](groupsWithoutPeers)
		log.WithField("groups", groupsWithoutPeers).Warn("Some groups have no peers responsible for custody")
	}
}

// prunePeerInfo prunes inactive peers from peersByCustodyGroup and groupsByPeer.
// It must only be called from refreshPeerInfo, with the lock held.
func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) {
	active := make(map[peer.ID]bool)
	for _, pid := range activePeers {
		active[pid] = true
	}

	for pid := range d.groupsByPeer {
		if !active[pid] {
			d.prunePeer(pid)
		}
	}
}

// prunePeer removes a peer from the stored peer info maps; it must be called with the lock held.
func (d *dataColumnSampler1D) prunePeer(pid peer.ID) {
	delete(d.groupsByPeer, pid)
	for _, peers := range d.peersByCustodyGroup {
		delete(peers, pid)
	}
}

func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event *feed.Event) {
	if event.Type != statefeed.BlockProcessed {
		return
	}

	data, ok := event.Data.(*statefeed.BlockProcessedData)
	if !ok {
		log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
		return
	}

	if !data.Verified {
		// We only process blocks that have been verified.
		log.Error("Data is not verified")
		return
	}

	if data.SignedBlock.Version() < version.Fulu {
		log.Debug("Pre Fulu block, skipping data column sampling")
		return
	}

	// Determine if we need to sample data columns for this block.
	beaconConfig := params.BeaconConfig()
	samplesPerSlot := beaconConfig.SamplesPerSlot
	halfOfCustodyGroups := beaconConfig.NumberOfCustodyGroups / 2
	nonCustodyGroupsCount := uint64(len(d.nonCustodyGroups))

	if nonCustodyGroupsCount <= halfOfCustodyGroups {
		// Nothing to sample.
		return
	}

	// Get the commitments for this block.
	commitments, err := data.SignedBlock.Block().Body().BlobKzgCommitments()
	if err != nil {
		log.WithError(err).Error("Failed to get blob KZG commitments")
		return
	}

	// Skip if there are no commitments.
	if len(commitments) == 0 {
		log.Debug("No commitments in block, skipping data column sampling")
		return
	}

	// Randomize columns for sample selection.
	randomizedColumns, err := randomizeColumns(d.nonCustodyGroups)
	if err != nil {
		log.WithError(err).Error("Failed to randomize columns")
		return
	}

	samplesCount := min(samplesPerSlot, nonCustodyGroupsCount-halfOfCustodyGroups)

	// TODO: Use the first output of `incrementalDAS` as input of the fork choice rule.
	_, _, err = d.incrementalDAS(ctx, data, randomizedColumns, samplesCount)
	if err != nil {
		log.WithError(err).Error("Failed to run incremental DAS")
	}
}

// incrementalDAS samples data columns from active peers using incremental DAS.
// https://ethresear.ch/t/lossydas-lossy-incremental-and-diagonal-sampling-for-data-availability/18963#incrementaldas-dynamically-increase-the-sample-size-10
// According to https://github.com/ethereum/consensus-specs/issues/3825, we're going to select query samples exclusively from the non custody columns.
func (d *dataColumnSampler1D) incrementalDAS(
	ctx context.Context,
	blockProcessedData *statefeed.BlockProcessedData,
	columns []uint64,
	sampleCount uint64,
) (bool, []roundSummary, error) {
	allowedFailures := uint64(0)
	firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
	roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary.
	blockRoot := blockProcessedData.BlockRoot
	columnsCount := uint64(len(columns))

	start := time.Now()

	for round := 1; ; /* No exit condition */ round++ {
		if extendedSampleCount > columnsCount {
			// We already tried to sample all possible columns; this is the unhappy path.
			log.WithFields(logrus.Fields{
				"root":  fmt.Sprintf("%#x", blockRoot),
				"round": round - 1,
			}).Warning("Some columns are still missing after trying to sample all possible columns")
			return false, roundSummaries, nil
		}

		// Get the columns to sample for this round.
		columnsToSample := columns[firstColumnToSample:extendedSampleCount]
		columnsToSampleCount := extendedSampleCount - firstColumnToSample

		log.WithFields(logrus.Fields{
			"root":    fmt.Sprintf("%#x", blockRoot),
			"columns": columnsToSample,
			"round":   round,
		}).Debug("Start data columns sampling")

		// Sample data columns from peers in parallel.
		retrievedSamples, err := d.sampleDataColumns(ctx, blockProcessedData, columnsToSample)
		if err != nil {
			return false, nil, errors.Wrap(err, "sample data columns")
		}

		missingSamples := make(map[uint64]bool)
		for _, column := range columnsToSample {
			if !retrievedSamples[column] {
				missingSamples[column] = true
			}
		}

		roundSummaries = append(roundSummaries, roundSummary{
			RequestedColumns: columnsToSample,
			MissingColumns:   missingSamples,
		})

		retrievedSampleCount := uint64(len(retrievedSamples))
		if retrievedSampleCount == columnsToSampleCount {
			// All columns were correctly sampled; this is the happy path.
			log.WithFields(logrus.Fields{
				"root":         fmt.Sprintf("%#x", blockRoot),
				"neededRounds": round,
				"duration":     time.Since(start),
			}).Debug("All columns were successfully sampled")
			return true, roundSummaries, nil
		}

		if retrievedSampleCount > columnsToSampleCount {
			// This should never happen.
			return false, nil, errors.New("retrieved more columns than requested")
		}

		// There are still some missing columns; extend the sample size.
		allowedFailures += columnsToSampleCount - retrievedSampleCount
		oldExtendedSampleCount := extendedSampleCount
		firstColumnToSample = extendedSampleCount
		extendedSampleCount = peerdas.ExtendedSampleCount(sampleCount, allowedFailures)

		log.WithFields(logrus.Fields{
			"root":                fmt.Sprintf("%#x", blockRoot),
			"round":               round,
			"missingColumnsCount": allowedFailures,
			"currentSampleIndex":  oldExtendedSampleCount,
			"nextSampleIndex":     extendedSampleCount,
		}).Debug("Some columns are still missing after sampling this round.")
	}
}
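
A rough, self-contained sketch (not from this diff) of the window-sliding bookkeeping the loop above performs. `extendedSampleCount` here is a stand-in stub; the real function in the peerdas package solves a hypergeometric bound rather than this linear growth.

package main

import "fmt"

func main() {
    // Stub standing in for peerdas.ExtendedSampleCount.
    extendedSampleCount := func(sampleCount, allowedFailures uint64) uint64 {
        return sampleCount + 2*allowedFailures
    }

    sampleCount, allowedFailures := uint64(8), uint64(0)
    first, end := uint64(0), extendedSampleCount(sampleCount, 0) // round 1 samples columns [0, 8)
    for round := 1; round <= 3; round++ {
        missed := uint64(2) // pretend two samples failed this round
        allowedFailures += missed
        first = end // a column is never re-requested
        end = extendedSampleCount(sampleCount, allowedFailures)
        fmt.Printf("round %d done: next window [%d, %d)\n", round, first, end)
    }
}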

func (d *dataColumnSampler1D) sampleDataColumns(
	ctx context.Context,
	blockProcessedData *statefeed.BlockProcessedData,
	columns []uint64,
) (map[uint64]bool, error) {
	// Distribute samples to peers.
	peerToColumns, err := d.distributeSamplesToPeer(columns)
	if err != nil {
		return nil, errors.Wrap(err, "distribute samples to peer")
	}

	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)

	res := make(map[uint64]bool)

	sampleFromPeer := func(pid peer.ID, cols map[uint64]bool) {
		defer wg.Done()
		retrieved := d.sampleDataColumnsFromPeer(ctx, pid, blockProcessedData, cols)

		mu.Lock()
		for col := range retrieved {
			res[col] = true
		}
		mu.Unlock()
	}

	// Sample from peers in parallel.
	for pid, cols := range peerToColumns {
		wg.Add(1)
		go sampleFromPeer(pid, cols)
	}

	wg.Wait()
	return res, nil
}

// distributeSamplesToPeer distributes samples to peers based on the columns they are responsible for.
// Currently it randomizes peer selection for each column and does not balance load across peers. It could be improved if needed.
func (d *dataColumnSampler1D) distributeSamplesToPeer(columns []uint64) (map[peer.ID]map[uint64]bool, error) {
	dist := make(map[peer.ID]map[uint64]bool)

	for _, column := range columns {
		custodyGroup, err := peerdas.ComputeCustodyGroupForColumn(column)
		if err != nil {
			return nil, errors.Wrap(err, "compute custody group for column")
		}

		peers := d.peersByCustodyGroup[custodyGroup]
		if len(peers) == 0 {
			log.WithField("column", column).Warning("No peers responsible for custody of column")
			continue
		}

		pid, err := selectRandomPeer(peers)
		if err != nil {
			return nil, errors.Wrap(err, "select random peer")
		}

		if _, ok := dist[pid]; !ok {
			dist[pid] = make(map[uint64]bool)
		}

		dist[pid][column] = true
	}

	return dist, nil
}
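
For intuition (illustrative IDs and columns only), the distribution step turns a flat column list into a per-peer work map, so columns destined for the same peer are batched into a single RPC request:

package main

import "fmt"

func main() {
    // dist maps a peer ID to the set of columns it will be asked for,
    // mirroring the shape built by distributeSamplesToPeer above.
    dist := map[string]map[uint64]bool{}
    assign := func(pid string, col uint64) {
        if dist[pid] == nil {
            dist[pid] = map[uint64]bool{}
        }
        dist[pid][col] = true
    }

    assign("peerA", 3)
    assign("peerA", 7) // peerA was drawn for two different custody groups
    assign("peerB", 12)

    fmt.Printf("%d requests cover %d columns\n", len(dist), 3) // 2 requests cover 3 columns
}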

func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
	ctx context.Context,
	pid peer.ID,
	blockProcessedData *statefeed.BlockProcessedData,
	requestedColumns map[uint64]bool,
) map[uint64]bool {
	retrievedColumns := make(map[uint64]bool)

	cols := make([]uint64, 0, len(requestedColumns))
	for col := range requestedColumns {
		cols = append(cols, col)
	}
	req := &eth.DataColumnsByRootIdentifier{
		BlockRoot: blockProcessedData.BlockRoot[:],
		Columns:   cols,
	}

	// Send the request to the peer.
	roDataColumns, err := SendDataColumnSidecarsByRootRequest(ctx, d.clock, d.p2p, pid, d.ctxMap, types.DataColumnsByRootIdentifiers{req})
	if err != nil {
		log.WithError(err).Error("Failed to send data column sidecar by root")
		return nil
	}

	// TODO: Once peer sampling is used, we should verify all sampled data columns in a single batch instead of looping over columns.
	for _, roDataColumn := range roDataColumns {
		if verifyColumn(roDataColumn, blockProcessedData, pid, requestedColumns, d.columnVerifier) {
			retrievedColumns[roDataColumn.Index] = true
		}
	}

	if len(retrievedColumns) == len(requestedColumns) {
		log.WithFields(logrus.Fields{
			"peerID":           pid,
			"root":             fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
			"requestedColumns": sliceFromMap(requestedColumns, true /*sorted*/),
		}).Debug("Sampled columns from peer successfully")
	} else {
		log.WithFields(logrus.Fields{
			"peerID":           pid,
			"root":             fmt.Sprintf("%#x", blockProcessedData.BlockRoot),
			"requestedColumns": sliceFromMap(requestedColumns, true /*sorted*/),
			"retrievedColumns": sliceFromMap(retrievedColumns, true /*sorted*/),
		}).Debug("Sampled columns from peer with some errors")
	}

	return retrievedColumns
}

// randomizeColumns returns a slice containing randomly ordered columns belonging to the input `custodyGroups`.
func randomizeColumns(custodyGroups map[uint64]bool) ([]uint64, error) {
	// Compute the number of columns per group.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups

	// Compute the number of columns.
	groupCount := uint64(len(custodyGroups))
	expectedColumnCount := groupCount * columnsPerGroup

	// Compute the columns.
	columns := make([]uint64, 0, expectedColumnCount)
	for group := range custodyGroups {
		columnsGroup, err := peerdas.ComputeColumnsForCustodyGroup(group)
		if err != nil {
			return nil, errors.Wrap(err, "compute columns for custody group")
		}

		columns = append(columns, columnsGroup...)
	}

	actualColumnCount := len(columns)

	// Safety check.
	if uint64(actualColumnCount) != expectedColumnCount {
		return nil, errors.New("invalid number of columns, should never happen")
	}

	// Shuffle the columns.
	rand.NewGenerator().Shuffle(actualColumnCount, func(i, j int) {
		columns[i], columns[j] = columns[j], columns[i]
	})

	return columns, nil
}

// sliceFromMap returns the keys of a map, sorted if requested.
func sliceFromMap(m map[uint64]bool, sorted ...bool) []uint64 {
	result := make([]uint64, 0, len(m))
	for k := range m {
		result = append(result, k)
	}

	if len(sorted) > 0 && sorted[0] {
		slices.Sort(result)
	}

	return result
}

// selectRandomPeer returns a random peer from the given set of peers.
func selectRandomPeer(peers map[peer.ID]bool) (peer.ID, error) {
	peersCount := uint64(len(peers))
	if peersCount == 0 {
		return peer.ID(""), errors.New("no peers to select from")
	}
	pick := rand.NewGenerator().Uint64() % peersCount

	for peer := range peers {
		if pick == 0 {
			return peer, nil
		}

		pick--
	}

	// This should never be reached.
	return peer.ID(""), errors.New("failed to select random peer")
}

// verifyColumn verifies the retrieved column against the root, the index,
// the KZG inclusion proof and the KZG proof.
func verifyColumn(
	roDataColumn blocks.RODataColumn,
	blockProcessedData *statefeed.BlockProcessedData,
	pid peer.ID,
	requestedColumns map[uint64]bool,
	newDataColumnsVerifier verification.NewDataColumnsVerifier,
) bool {
	retrievedColumn := roDataColumn.Index

	// Filter out columns that were not requested.
	if !requestedColumns[retrievedColumn] {
		columnsToSampleList := sliceFromMap(requestedColumns, true /*sorted*/)

		log.WithFields(logrus.Fields{
			"peerID":           pid,
			"requestedColumns": columnsToSampleList,
			"retrievedColumn":  retrievedColumn,
		}).Debug("Retrieved column was not requested")

		return false
	}

	roBlock, err := blocks.NewROBlock(blockProcessedData.SignedBlock)
	if err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to create ROBlock")
		return false
	}

	roDataColumns := []blocks.RODataColumn{roDataColumn}

	if err := peerdas.DataColumnsAlignWithBlock(roBlock, roDataColumns); err != nil {
		return false
	}

	// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
	verifier := newDataColumnsVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)

	if err := verifier.ValidFields(); err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to verify data column")
		return false
	}

	if err := verifier.SidecarInclusionProven(); err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to prove inclusion")
		return false
	}

	if err := verifier.SidecarKzgProofVerified(); err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to verify KZG proof")
		return false
	}

	_, err = verifier.VerifiedRODataColumns()
	if err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to upgrade RODataColumns to VerifiedRODataColumns - should never happen")
		return false
	}

	return true
}
@@ -1,554 +0,0 @@
package sync

import (
    "bytes"
    "context"
    "crypto/sha256"
    "encoding/binary"
    "fmt"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
    GoKZG "github.com/crate-crypto/go-kzg-4844"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/network"
)

func TestRandomizeColumns(t *testing.T) {
    const count uint64 = 128

    // Generate groups.
    groups := make(map[uint64]bool, count)
    for i := uint64(0); i < count; i++ {
        groups[i] = true
    }

    // Randomize columns.
    randomizedColumns, err := randomizeColumns(groups)
    require.NoError(t, err)

    // Convert back to a map.
    randomizedColumnsMap := make(map[uint64]bool, count)
    for _, column := range randomizedColumns {
        randomizedColumnsMap[column] = true
    }

    // Check duplicates and missing columns.
    require.Equal(t, len(groups), len(randomizedColumnsMap))

    // Check the values.
    for column := range randomizedColumnsMap {
        require.Equal(t, true, column < count)
    }
}
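
// Note: TestRandomizeColumns asserts that the shuffle is a permutation (no
// duplicate and no missing column); it does not attempt to assert uniformity.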

// createAndConnectPeer creates a peer whose private key is derived from the given `offset`.
// The peer is added and connected to `p2pService`.
// If a `RPCDataColumnSidecarsByRootTopicV1` request is made with column index `i`,
// then the peer responds with `dataColumnSidecars[i]` unless `i` is in `columnsNotToRespond`.
// (If `i >= len(dataColumnSidecars)`, this function panics.)
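// Deterministic private keys yield deterministic peer IDs, which in turn yield
// the reproducible custody-column assignments asserted throughout these tests.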
func createAndConnectPeer(
    t *testing.T,
    p2pService *p2ptest.TestP2P,
    chainService *mock.ChainService,
    dataColumnSidecars []*ethpb.DataColumnSidecar,
    custodyGroupCount uint64,
    columnsNotToRespond map[uint64]bool,
    offset int,
) *p2ptest.TestP2P {
    // Create the private key, depending on the offset.
    privateKeyBytes := make([]byte, 32)
    for i := 0; i < 32; i++ {
        privateKeyBytes[i] = byte(offset + i)
    }

    privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
    require.NoError(t, err)

    // Create the peer.
    peer := p2ptest.NewTestP2P(t, libp2p.Identity(privateKey))

    peer.SetStreamHandler(p2p.RPCDataColumnSidecarsByRootTopicV1+"/ssz_snappy", func(stream network.Stream) {
        // Decode the request.
        req := new(p2pTypes.DataColumnsByRootIdentifiers)
        err := peer.Encoding().DecodeWithMaxLength(stream, req)
        require.NoError(t, err)

        for _, identifier := range *req {
            for _, column := range identifier.Columns {
                // Filter out the columns not to respond.
                if columnsNotToRespond[column] {
                    continue
                }

                // Create the response.
                resp := dataColumnSidecars[column]

                // Send the response.
                err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), resp)
                require.NoError(t, err)
            }
        }

        // Close the stream.
        closeStream(stream, log)
    })

    // Create the record and set the custody count.
    enr := &enr.Record{}
    enr.Set(peerdas.Cgc(custodyGroupCount))

    // Add the peer and connect it.
    p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound)
    p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
    p2pService.Connect(peer)

    return peer
}

type dataSamplerTest struct {
    ctx                context.Context
    p2pSvc             *p2ptest.TestP2P
    peers              []*p2ptest.TestP2P
    ctxMap             map[[4]byte]int
    chainSvc           *mock.ChainService
    blockProcessedData *statefeed.BlockProcessedData
    blobs              []kzg.Blob
    kzgCommitments     [][]byte
    kzgProofs          [][]byte
    dataColumnSidecars []*ethpb.DataColumnSidecar
}

func setupDefaultDataColumnSamplerTest(t *testing.T) (*dataSamplerTest, *dataColumnSampler1D) {
    const (
        blobCount          uint64 = 3
        custodyRequirement uint64 = 4
    )

    test, sampler := setupDataColumnSamplerTest(t, blobCount)

    // Custody columns: [6, 38, 70, 102]
    p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 1)

    // Custody columns: [3, 35, 67, 99]
    p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 2)

    // Custody columns: [12, 44, 76, 108]
    p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 3)

    test.peers = []*p2ptest.TestP2P{p1, p2, p3}

    return test, sampler
}

func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTest, *dataColumnSampler1D) {
    require.NoError(t, kzg.Start())

    // Generate random blobs, commitments and inclusion proofs.
    blobs := make([]kzg.Blob, blobCount)
    kzgCommitments := make([][]byte, blobCount)
    kzgProofs := make([][]byte, blobCount)

    for i := uint64(0); i < blobCount; i++ {
        blob := getRandBlob(t, int64(i))

        kzgCommitment, kzgProof, err := generateCommitmentAndProof(&blob)
        require.NoError(t, err)

        blobs[i] = blob
        kzgCommitments[i] = kzgCommitment[:]
        kzgProofs[i] = kzgProof[:]
    }

    dbBlock := util.NewBeaconBlockDeneb()
    dbBlock.Block.Body.BlobKzgCommitments = kzgCommitments
    sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
    require.NoError(t, err)

    cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
    dataColumnSidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
    require.NoError(t, err)

    blockRoot, err := dataColumnSidecars[0].GetSignedBlockHeader().Header.HashTreeRoot()
    require.NoError(t, err)

    blockProcessedData := &statefeed.BlockProcessedData{
        BlockRoot:   blockRoot,
        SignedBlock: sBlock,
    }

    p2pSvc := p2ptest.NewTestP2P(t)
    chainSvc, clock := defaultMockChain(t)

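    // The 4-byte ctxMap key below holds fork digest context bytes; this
    // test-only digest {245, 165, 253, 66} maps to the Fulu fork version.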
    test := &dataSamplerTest{
        ctx:                context.Background(),
        p2pSvc:             p2pSvc,
        peers:              []*p2ptest.TestP2P{},
        ctxMap:             map[[4]byte]int{{245, 165, 253, 66}: version.Fulu},
        chainSvc:           chainSvc,
        blockProcessedData: blockProcessedData,
        blobs:              blobs,
        kzgCommitments:     kzgCommitments,
        kzgProofs:          kzgProofs,
        dataColumnSidecars: dataColumnSidecars,
    }
    clockSync := startup.NewClockSynchronizer()
    require.NoError(t, clockSync.SetClock(clock))
    iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil)
    ini, err := iniWaiter.WaitForInitializer(context.Background())
    require.NoError(t, err)
    sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil, newDataColumnsVerifierFromInitializer(ini), &peerdas.CustodyInfo{})

    return test, sampler
}

func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
    testCases := []struct {
        name               string
        numPeers           int
        custodyRequirement uint64
        expectedColumns    [][]uint64
        prunePeers         map[int]bool // Peers to prune.
    }{
        {
            name:               "custodyRequirement=4",
            numPeers:           3,
            custodyRequirement: 4,
            expectedColumns: [][]uint64{
                {6, 37, 48, 113},
                {35, 79, 92, 109},
                {31, 44, 58, 97},
            },
            prunePeers: map[int]bool{
                0: true,
            },
        },
        {
            name:               "custodyRequirement=8",
            numPeers:           3,
            custodyRequirement: 8,
            expectedColumns: [][]uint64{
                {1, 6, 37, 48, 51, 87, 112, 113},
                {24, 25, 35, 52, 79, 92, 109, 126},
                {31, 44, 58, 64, 91, 97, 116, 127},
            },
            prunePeers: map[int]bool{
                0: true,
            },
        },
    }

    params.SetupTestConfigCleanup(t)
    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            cfg := params.BeaconConfig()
            cfg.CustodyRequirement = tc.custodyRequirement
            params.OverrideBeaconConfig(cfg)
            test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
            for i := 0; i < tc.numPeers; i++ {
                p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
                test.peers = append(test.peers, p)
            }

            // confirm everything works
            sampler.refreshPeerInfo()
            require.Equal(t, params.BeaconConfig().NumberOfColumns, uint64(len(sampler.peersByCustodyGroup)))

            require.Equal(t, tc.numPeers, len(sampler.groupsByPeer))
            for i, peer := range test.peers {
                // confirm peer has the expected columns
                require.Equal(t, len(tc.expectedColumns[i]), len(sampler.groupsByPeer[peer.PeerID()]))
                for _, column := range tc.expectedColumns[i] {
                    require.Equal(t, true, sampler.groupsByPeer[peer.PeerID()][column])
                }

                // confirm column to peer mapping are correct
                for _, column := range tc.expectedColumns[i] {
                    require.Equal(t, true, sampler.peersByCustodyGroup[column][peer.PeerID()])
                }
            }

            // prune peers
            for peer := range tc.prunePeers {
                err := test.p2pSvc.Disconnect(test.peers[peer].PeerID())
                test.p2pSvc.Peers().SetConnectionState(test.peers[peer].PeerID(), peers.Disconnected)
                require.NoError(t, err)
            }
            sampler.refreshPeerInfo()

            require.Equal(t, tc.numPeers-len(tc.prunePeers), len(sampler.groupsByPeer))
            for i, peer := range test.peers {
                for _, column := range tc.expectedColumns[i] {
                    expected := true
                    if tc.prunePeers[i] {
                        expected = false
                    }
                    require.Equal(t, expected, sampler.peersByCustodyGroup[column][peer.PeerID()])
                }
            }
        })
    }
}

func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
    // TODO: Use `t.Run`.
    testCases := []struct {
        numPeers             int
        custodyRequirement   uint64
        columnsToDistribute  [][]uint64
        expectedDistribution []map[int][]uint64
    }{
        {
            numPeers:           3,
            custodyRequirement: 4,
            // peer custody maps
            // p0: {6, 37, 48, 113},
            // p1: {35, 79, 92, 109},
            // p2: {31, 44, 58, 97},
            columnsToDistribute: [][]uint64{
                {6, 35, 31},
                {6, 48, 79, 109, 31, 97},
                {6, 37, 113},
                {11},
            },
            expectedDistribution: []map[int][]uint64{
                {
                    0: {6},  // p0
                    1: {35}, // p1
                    2: {31}, // p2
                },
                {
                    0: {6, 48},   // p0
                    1: {79, 109}, // p1
                    2: {31, 97},  // p2
                },
                {
                    0: {6, 37, 113}, // p0
                },
                {},
            },
        },
        {
            numPeers:           3,
            custodyRequirement: 8,
            // peer custody maps
            // p0: {6, 37, 48, 113, 1, 112, 87, 51},
            // p1: {35, 79, 92, 109, 52, 126, 25, 24},
            // p2: {31, 44, 58, 97, 116, 91, 64, 127},
            columnsToDistribute: [][]uint64{
                {6, 48, 79, 25, 24, 97}, // all covered by peers
                {6, 35, 31, 32},         // `32` is not covered by any peer
            },
            expectedDistribution: []map[int][]uint64{
                {
                    0: {6, 48},      // p0
                    1: {79, 25, 24}, // p1
                    2: {97},         // p2
                },
                {
                    0: {6},  // p0
                    1: {35}, // p1
                    2: {31}, // p2
                },
            },
        },
    }
    params.SetupTestConfigCleanup(t)
    for _, tc := range testCases {
        cfg := params.BeaconConfig()
        cfg.CustodyRequirement = tc.custodyRequirement
        params.OverrideBeaconConfig(cfg)
        test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
        for i := 0; i < tc.numPeers; i++ {
            p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
            test.peers = append(test.peers, p)
        }
        sampler.refreshPeerInfo()

        for idx, columns := range tc.columnsToDistribute {
            result, err := sampler.distributeSamplesToPeer(columns)
            require.NoError(t, err)
            require.Equal(t, len(tc.expectedDistribution[idx]), len(result), fmt.Sprintf("%v - %v", tc.expectedDistribution[idx], result))

            for peerIdx, dist := range tc.expectedDistribution[idx] {
                for _, column := range dist {
                    peerID := test.peers[peerIdx].PeerID()
                    require.Equal(t, true, result[peerID][column])
                }
            }
        }
    }
}

func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) {
    test, sampler := setupDefaultDataColumnSamplerTest(t)
    sampler.refreshPeerInfo()

    t.Run("sample all columns", func(t *testing.T) {
        sampleColumns := []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97}
        retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
        require.NoError(t, err)
        require.Equal(t, 12, len(retrieved))
        for _, column := range sampleColumns {
            require.Equal(t, true, retrieved[column])
        }
    })

    t.Run("sample a subset of columns", func(t *testing.T) {
        sampleColumns := []uint64{35, 31, 79, 48, 113, 97}
        retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
        require.NoError(t, err)
        require.Equal(t, 6, len(retrieved))
        for _, column := range sampleColumns {
            require.Equal(t, true, retrieved[column])
        }
    })

    t.Run("sample a subset of columns with missing columns", func(t *testing.T) {
        sampleColumns := []uint64{35, 31, 100, 79}
        retrieved, err := sampler.sampleDataColumns(test.ctx, test.blockProcessedData, sampleColumns)
        require.NoError(t, err)
        require.Equal(t, 3, len(retrieved))
        require.DeepEqual(t, map[uint64]bool{35: true, 31: true, 79: true}, retrieved)
    })
}

func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    cfg.DataColumnSidecarSubnetCount = 32
    params.OverrideBeaconConfig(cfg)

    testCases := []struct {
        name                     string
        samplesCount             uint64
        possibleColumnsToRequest []uint64
        columnsNotToRespond      map[uint64]bool
        expectedSuccess          bool
        expectedRoundSummaries   []roundSummary
    }{
        {
            name:                     "All columns are correctly sampled in a single round",
            samplesCount:             5,
            possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
            columnsNotToRespond:      map[uint64]bool{},
            expectedSuccess:          true,
            expectedRoundSummaries: []roundSummary{
                {
                    RequestedColumns: []uint64{6, 35, 31, 37, 79},
                    MissingColumns:   map[uint64]bool{},
                },
            },
        },
        {
            name:                     "Two missing columns in the first round, ok in the second round",
            samplesCount:             5,
            possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
            columnsNotToRespond:      map[uint64]bool{6: true, 31: true},
            expectedSuccess:          true,
            expectedRoundSummaries: []roundSummary{
                {
                    RequestedColumns: []uint64{6, 35, 31, 37, 79},
                    MissingColumns:   map[uint64]bool{6: true, 31: true},
                },
                {
                    RequestedColumns: []uint64{44, 48, 92, 58, 113, 109},
                    MissingColumns:   map[uint64]bool{},
                },
            },
        },
        {
            name:                     "Two missing columns in the first round, one missing in the second round. Fail to sample.",
            samplesCount:             5,
            possibleColumnsToRequest: []uint64{6, 35, 31, 37, 79, 44, 48, 92, 58, 113, 109, 97},
            columnsNotToRespond:      map[uint64]bool{6: true, 31: true, 48: true},
            expectedSuccess:          false,
            expectedRoundSummaries: []roundSummary{
                {
                    RequestedColumns: []uint64{6, 35, 31, 37, 79},
                    MissingColumns:   map[uint64]bool{6: true, 31: true},
                },
                {
                    RequestedColumns: []uint64{44, 48, 92, 58, 113, 109},
                    MissingColumns:   map[uint64]bool{48: true},
                },
            },
        },
    }
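
    // Columns missing from one round are retried in the following round
    // against additional columns drawn from `possibleColumnsToRequest`, as
    // the expected round summaries above illustrate.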

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            test, sampler := setupDataColumnSamplerTest(t, 3)
            p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 1)
            p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 2)
            p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, params.BeaconConfig().CustodyRequirement, tc.columnsNotToRespond, 3)
            test.peers = []*p2ptest.TestP2P{p1, p2, p3}

            sampler.refreshPeerInfo()

            success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockProcessedData, tc.possibleColumnsToRequest, tc.samplesCount)
            require.NoError(t, err)
            require.Equal(t, tc.expectedSuccess, success)
            require.DeepEqual(t, tc.expectedRoundSummaries, summaries)
        })
    }
}

func deterministicRandomness(t *testing.T, seed int64) [32]byte {
    // Converts an int64 to a byte slice
    buf := new(bytes.Buffer)
    err := binary.Write(buf, binary.BigEndian, seed)
    require.NoError(t, err)
    bytes := buf.Bytes()

    return sha256.Sum256(bytes)
}

// Returns a serialized random field element in big-endian
func getRandFieldElement(t *testing.T, seed int64) [32]byte {
    bytes := deterministicRandomness(t, seed)
    var r fr.Element
    r.SetBytes(bytes[:])
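    // SetBytes interprets the input as a big-endian integer and reduces it
    // modulo the BLS12-381 scalar field order, so the element is canonical.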

    return GoKZG.SerializeScalar(r)
}

// Returns a random blob using the passed seed as entropy
func getRandBlob(t *testing.T, seed int64) kzg.Blob {
    var blob kzg.Blob
    for i := 0; i < len(blob); i += 32 {
        fieldElementBytes := getRandFieldElement(t, seed+int64(i))
        copy(blob[i:i+32], fieldElementBytes[:])
    }
    return blob
}

func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
    commitment, err := kzg.BlobToKZGCommitment(blob)
    if err != nil {
        return nil, nil, err
    }
    proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
    if err != nil {
        return nil, nil, err
    }
    return &commitment, &proof, err
}
@@ -12,7 +12,6 @@ import (
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing"
    "github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v6/time/slots"
@@ -99,15 +98,12 @@ func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg int
    log.Debug("Serving data column sidecar by root request")

    count := 0
    for _, ident := range requestedColumnIdents {
    for root, columns := range requestedColumnsByRoot {
        if err := ctx.Err(); err != nil {
            closeStream(stream, log)
            return errors.Wrap(err, "context error")
        }

        root := bytesutil.ToBytes32(ident.BlockRoot)
        columns := ident.Columns

        // Throttle request processing to no more than batchSize/sec.
        for range columns {
            if ticker != nil && count != 0 && count%batchSize == 0 {
@@ -4,14 +4,12 @@ import (
    "context"
    "fmt"
    "io"
    "slices"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
    p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -32,18 +30,16 @@ var errBlobUnmarshal = errors.New("Could not unmarshal chunk-encoded blob")
// Any error from the following declaration block should result in peer downscoring.
var (
    // ErrInvalidFetchedData is used to signal that an error occurred which should result in peer downscoring.
    ErrInvalidFetchedData                    = errors.New("invalid data returned from peer")
    errBlobIndexOutOfBounds                  = errors.Wrap(verification.ErrBlobInvalid, "blob index out of range")
    errMaxRequestBlobSidecarsExceeded        = errors.Wrap(verification.ErrBlobInvalid, "peer exceeded req blob chunk tx limit")
    errChunkResponseSlotNotAsc               = errors.Wrap(verification.ErrBlobInvalid, "blob slot not higher than previous block root")
    errChunkResponseIndexNotAsc              = errors.Wrap(verification.ErrBlobInvalid, "blob indices for a block must start at 0 and increase by 1")
    errUnrequested                           = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar in response that was not requested")
    errBlobResponseOutOfBounds               = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
    errChunkResponseBlockMismatch            = errors.Wrap(verification.ErrBlobInvalid, "blob block details do not match")
    errChunkResponseParentMismatch           = errors.Wrap(verification.ErrBlobInvalid, "parent root for response element doesn't match previous element root")
    errDataColumnChunkedReadFailure          = errors.New("failed to read stream of chunk-encoded data columns")
    errMaxRequestDataColumnSidecarsExceeded  = errors.New("count of requested data column sidecars exceeds MAX_REQUEST_DATA_COLUMN_SIDECARS")
    errMaxResponseDataColumnSidecarsExceeded = errors.New("peer returned more data column sidecars than requested")
    ErrInvalidFetchedData           = errors.New("invalid data returned from peer")
    errBlobIndexOutOfBounds         = errors.Wrap(verification.ErrBlobInvalid, "blob index out of range")
    errMaxRequestBlobSidecarsExceeded = errors.Wrap(verification.ErrBlobInvalid, "peer exceeded req blob chunk tx limit")
    errChunkResponseSlotNotAsc      = errors.Wrap(verification.ErrBlobInvalid, "blob slot not higher than previous block root")
    errChunkResponseIndexNotAsc     = errors.Wrap(verification.ErrBlobInvalid, "blob indices for a block must start at 0 and increase by 1")
    errUnrequested                  = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar in response that was not requested")
    errBlobResponseOutOfBounds      = errors.Wrap(verification.ErrBlobInvalid, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
    errChunkResponseBlockMismatch   = errors.Wrap(verification.ErrBlobInvalid, "blob block details do not match")
    errChunkResponseParentMismatch  = errors.Wrap(verification.ErrBlobInvalid, "parent root for response element doesn't match previous element root")
    errDataColumnChunkedReadFailure = errors.New("failed to read stream of chunk-encoded data columns")
)

// ------
@@ -401,245 +397,6 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
// Data column sidecars
// --------------------

// SendDataColumnSidecarsByRangeRequest sends a request for data column sidecars by range
// and returns the fetched data column sidecars.
func SendDataColumnSidecarsByRangeRequest(
    ctx context.Context,
    tor blockchain.TemporalOracle,
    p2pApi p2p.P2P,
    pid peer.ID,
    ctxMap ContextByteVersions,
    request *ethpb.DataColumnSidecarsByRangeRequest,
) ([]blocks.RODataColumn, error) {
    // Return early if there is nothing to request.
    if request == nil || request.Count == 0 || len(request.Columns) == 0 {
        return nil, nil
    }

    beaconConfig := params.BeaconConfig()
    numberOfColumns := beaconConfig.NumberOfColumns
    maxRequestDataColumnSidecars := beaconConfig.MaxRequestDataColumnSidecars

    // Check that we do not request too many sidecars.
    columnsCount := uint64(len(request.Columns))
    totalCount := request.Count * columnsCount
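    // For example, StartSlot=10, Count=2 and Columns=[1, 3] request at most
    // 2*2 = 4 sidecars: columns 1 and 3 for each of slots 10 and 11.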
    if totalCount > maxRequestDataColumnSidecars {
        return nil, errors.Wrapf(errMaxRequestDataColumnSidecarsExceeded, "requestedCount=%d, allowedCount=%d", totalCount, maxRequestDataColumnSidecars)
    }

    // Build the topic.
    currentSlot := tor.CurrentSlot()
    currentEpoch := slots.ToEpoch(currentSlot)
    topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, currentEpoch)
    if err != nil {
        return nil, errors.Wrap(err, "topic from message")
    }

    // Build the logs.
    var columnsLog interface{} = "all"
    if columnsCount < numberOfColumns {
        columns := request.Columns
        slices.Sort(columns)
        columnsLog = columns
    }

    log := log.WithFields(logrus.Fields{
        "peer":       pid,
        "topic":      topic,
        "startSlot":  request.StartSlot,
        "count":      request.Count,
        "columns":    columnsLog,
        "totalCount": totalCount,
    })

    // Send the request.
    stream, err := p2pApi.Send(ctx, request, topic, pid)
    if err != nil {
        return nil, errors.Wrap(err, "p2p send")
    }
    defer closeStream(stream, log)

    // Read the data column sidecars from the stream.
    roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
    for range totalCount {
        // Avoid reading extra chunks if the context is done.
        if err := ctx.Err(); err != nil {
            return nil, err
        }

        validatorSlotWithinBounds, err := isSidecarSlotWithinBounds(request)
        if err != nil {
            return nil, errors.Wrap(err, "is sidecar slot within bounds")
        }

        roDataColumn, err := readChunkedDataColumnSidecar(
            stream, p2pApi, ctxMap,
            validatorSlotWithinBounds,
            isSidecarIndexRequested(request),
        )
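        // An early EOF is not an error here: a peer may legitimately serve
        // fewer sidecars than requested, e.g. for slots without a block.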
        if errors.Is(err, io.EOF) {
            return roDataColumns, nil
        }
        if err != nil {
            return nil, errors.Wrap(err, "read chunked data column sidecar")
        }

        if roDataColumn == nil {
            return nil, errors.New("nil data column sidecar, should never happen")
        }

        roDataColumns = append(roDataColumns, *roDataColumn)
    }

    // All requested sidecars were delivered by the peer. Expecting EOF.
    if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
        return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
    }

    return roDataColumns, nil
}

// isSidecarSlotWithinBounds verifies that the slot of the data column sidecar is within the bounds of the request.
func isSidecarSlotWithinBounds(request *ethpb.DataColumnSidecarsByRangeRequest) (DataColumnResponseValidation, error) {
    // endSlot is exclusive (while request.StartSlot is inclusive).
    endSlot, err := request.StartSlot.SafeAdd(request.Count)
    if err != nil {
        return nil, errors.Wrap(err, "calculate end slot")
    }

    validator := func(sidecar blocks.RODataColumn) error {
        slot := sidecar.Slot()

        if !(request.StartSlot <= slot && slot < endSlot) {
            return errors.Errorf("data column sidecar slot %d out of range [%d, %d[", slot, request.StartSlot, endSlot)
        }

        return nil
    }

    return validator, nil
}

// isSidecarIndexRequested verifies that the index of the data column sidecar is found in the requested indices.
func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) DataColumnResponseValidation {
    requestedIndices := make(map[uint64]bool)
    for _, col := range request.Columns {
        requestedIndices[col] = true
    }

    return func(sidecar blocks.RODataColumn) error {
        columnIndex := sidecar.Index
        if !requestedIndices[columnIndex] {
            return errors.Errorf("data column sidecar index %d not found in requested indices", columnIndex)
        }

        return nil
    }
}

// SendDataColumnSidecarsByRootRequest sends a request for data column sidecars by root
// and returns the fetched data column sidecars.
func SendDataColumnSidecarsByRootRequest(
    ctx context.Context,
    tor blockchain.TemporalOracle,
    p2pApi p2p.P2P,
    pid peer.ID,
    ctxMap ContextByteVersions,
    request p2ptypes.DataColumnsByRootIdentifiers,
) ([]blocks.RODataColumn, error) {
    // Return early if the request is nil.
    if request == nil {
        return nil, nil
    }

    // Compute how many sidecars are requested.
    count := uint64(0)
    for _, identifier := range request {
        count += uint64(len(identifier.Columns))
    }

    // Return early if there is nothing to request.
    if count == 0 {
        return nil, nil
    }

    // Verify that the request count is within the maximum allowed.
    maxRequestDataColumnSidecars := params.BeaconConfig().MaxRequestDataColumnSidecars
    if count > maxRequestDataColumnSidecars {
        return nil, errors.Wrapf(errMaxRequestDataColumnSidecarsExceeded, "current: %d, max: %d", count, maxRequestDataColumnSidecars)
    }

    // Get the topic for the request.
    topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
    if err != nil {
        return nil, errors.Wrap(err, "topic from message")
    }

    // Send the request to the peer.
    stream, err := p2pApi.Send(ctx, request, topic, pid)
    if err != nil {
        return nil, errors.Wrap(err, "p2p api send")
    }
    defer closeStream(stream, log)

    // Read the data column sidecars from the stream.
    roDataColumns := make([]blocks.RODataColumn, 0, count)
    for range count {
        roDataColumn, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap, isSidecarIndexRootRequested(request))
        if errors.Is(err, io.EOF) {
            return roDataColumns, nil
        }
        if err != nil {
            return nil, errors.Wrap(err, "read chunked data column sidecar")
        }

        if roDataColumn == nil {
            return nil, errors.New("nil data column sidecar, should never happen")
        }

        roDataColumns = append(roDataColumns, *roDataColumn)
    }

    // All requested sidecars were delivered by the peer. Expecting EOF.
    if _, err := readChunkedDataColumnSidecar(stream, p2pApi, ctxMap); !errors.Is(err, io.EOF) {
        return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
    }

    return roDataColumns, nil
}

func isSidecarIndexRootRequested(request p2ptypes.DataColumnsByRootIdentifiers) DataColumnResponseValidation {
    columnsIndexFromRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)

    for _, sidecar := range request {
        blockRoot := bytesutil.ToBytes32(sidecar.BlockRoot)
        if columnsIndexFromRoot[blockRoot] == nil {
            columnsIndexFromRoot[blockRoot] = make(map[uint64]bool)
        }

        for _, column := range sidecar.Columns {
            columnsIndexFromRoot[blockRoot][column] = true
        }
    }

    return func(sidecar blocks.RODataColumn) error {
        root, index := sidecar.BlockRoot(), sidecar.Index
        indices, ok := columnsIndexFromRoot[root]

        if !ok {
            return errors.Errorf("root #%x returned by peer but not requested", root)
        }

        if !indices[index] {
            return errors.Errorf("index %d for root #%x returned by peer but not requested", index, root)
        }

        return nil
    }
}

// DataColumnResponseValidation represents a function that can validate aspects of a single unmarshaled data column sidecar
// that was received from a peer in response to an rpc request.
type DataColumnResponseValidation func(column blocks.RODataColumn) error
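
// Judging by the call sites above, readChunkedDataColumnSidecar accepts any
// number of these validators and rejects a sidecar as soon as one of them
// returns a non-nil error.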

@@ -11,9 +11,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -887,544 +885,6 @@ func TestErrInvalidFetchedDataDistinction(t *testing.T) {
    require.Equal(t, false, errors.Is(ErrInvalidFetchedData, verification.ErrBlobInvalid))
}

func TestSendDataColumnSidecarsByRangeRequest(t *testing.T) {
    nilTestCases := []struct {
        name    string
        request *ethpb.DataColumnSidecarsByRangeRequest
    }{
        {
            name:    "nil request",
            request: nil,
        },
        {
            name:    "count is 0",
            request: &ethpb.DataColumnSidecarsByRangeRequest{},
        },
        {
            name:    "columns is nil",
            request: &ethpb.DataColumnSidecarsByRangeRequest{Count: 1},
        },
    }

    for _, tc := range nilTestCases {
        t.Run(tc.name, func(t *testing.T) {
            actual, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
            require.NoError(t, err)
            require.IsNil(t, actual)
        })
    }

    t.Run("too many columns in request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        beaconConfig := params.BeaconConfig()
        beaconConfig.MaxRequestDataColumnSidecars = 0
        params.OverrideBeaconConfig(beaconConfig)

        request := &ethpb.DataColumnSidecarsByRangeRequest{Count: 1, Columns: []uint64{1, 2, 3}}
        _, err := SendDataColumnSidecarsByRangeRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
        require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
    })

    type slotIndex struct {
        Slot  primitives.Slot
        Index uint64
    }

    createSidecar := func(slotIndex slotIndex) *ethpb.DataColumnSidecar {
        const count = 4
        kzgCommitmentsInclusionProof := make([][]byte, 0, count)
        for range count {
            kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
        }

        return &ethpb.DataColumnSidecar{
            Index: slotIndex.Index,
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    Slot:       slotIndex.Slot,
                    ParentRoot: make([]byte, fieldparams.RootLength),
                    StateRoot:  make([]byte, fieldparams.RootLength),
                    BodyRoot:   make([]byte, fieldparams.RootLength),
                },
                Signature: make([]byte, fieldparams.BLSSignatureLength),
            },
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }
    }

    testCases := []struct {
        name          string
        slotIndices   []slotIndex
        expectedError error
    }{
        {
            name: "too many responses",
            slotIndices: []slotIndex{
                {Slot: 0, Index: 1},
                {Slot: 0, Index: 2},
                {Slot: 0, Index: 3},
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 0, Index: 3}, // Duplicate
            },
            expectedError: errMaxResponseDataColumnSidecarsExceeded,
        },
        {
            name: "perfect match",
            slotIndices: []slotIndex{
                {Slot: 0, Index: 1},
                {Slot: 0, Index: 2},
                {Slot: 0, Index: 3},
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
            },
        },
        {
            name: "fewer responses than the maximum possible",
            slotIndices: []slotIndex{
                {Slot: 0, Index: 1},
                {Slot: 0, Index: 2},
                {Slot: 0, Index: 3},
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
            clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

            p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
            p1.Connect(p2)

            expected := make([]*ethpb.DataColumnSidecar, 0, len(tc.slotIndices))
            for _, slotIndex := range tc.slotIndices {
                sidecar := createSidecar(slotIndex)
                expected = append(expected, sidecar)
            }

            requestSent := &ethpb.DataColumnSidecarsByRangeRequest{
                StartSlot: 0,
                Count:     2,
                Columns:   []uint64{1, 3, 2},
            }

            var wg sync.WaitGroup
            wg.Add(1)

            p2.SetStreamHandler(protocol, func(stream network.Stream) {
                wg.Done()

                requestReceived := new(ethpb.DataColumnSidecarsByRangeRequest)
                err := p2.Encoding().DecodeWithMaxLength(stream, requestReceived)
                assert.NoError(t, err)
                assert.DeepSSZEqual(t, requestSent, requestReceived)

                for _, sidecar := range expected {
                    err := WriteDataColumnSidecarChunk(stream, clock, p2.Encoding(), sidecar)
                    assert.NoError(t, err)
                }

                err = stream.CloseWrite()
                assert.NoError(t, err)
            })

            ctx := t.Context()
            ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

            actual, err := SendDataColumnSidecarsByRangeRequest(ctx, clock, p1, p2.PeerID(), ctxMap, requestSent)
            if tc.expectedError != nil {
                require.ErrorContains(t, tc.expectedError.Error(), err)
                if util.WaitTimeout(&wg, time.Second) {
                    t.Fatal("Did not receive stream within 1 sec")
                }

                return
            }

            require.Equal(t, len(expected), len(actual))
            for i := range expected {
                require.DeepSSZEqual(t, expected[i], actual[i].DataColumnSidecar)
            }
        })
    }
}

func TestIsSidecarSlotWithinBounds(t *testing.T) {
    request := &ethpb.DataColumnSidecarsByRangeRequest{
        StartSlot: 10,
        Count:     10,
    }

    validator, err := isSidecarSlotWithinBounds(request)
    require.NoError(t, err)

    testCases := []struct {
        name            string
        slot            primitives.Slot
        isErrorExpected bool
    }{
        {
            name:            "too soon",
            slot:            9,
            isErrorExpected: true,
        },
        {
            name:            "too late",
            slot:            20,
            isErrorExpected: true,
        },
        {
            name:            "within bounds",
            slot:            15,
            isErrorExpected: false,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const count = 4
            kzgCommitmentsInclusionProof := make([][]byte, 0, count)
            for range count {
                kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
            }

            sidecarPb := &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        Slot:       tc.slot,
                        ParentRoot: make([]byte, fieldparams.RootLength),
                        StateRoot:  make([]byte, fieldparams.RootLength),
                        BodyRoot:   make([]byte, fieldparams.RootLength),
                    },
                    Signature: make([]byte, fieldparams.BLSSignatureLength),
                },
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
            }

            sidecar, err := blocks.NewRODataColumn(sidecarPb)
            require.NoError(t, err)

            err = validator(sidecar)
            if tc.isErrorExpected {
                require.NotNil(t, err)
                return
            }

            require.NoError(t, err)
        })
    }
}

func TestIsSidecarIndexRequested(t *testing.T) {
    request := &ethpb.DataColumnSidecarsByRangeRequest{
        Columns: []uint64{2, 9, 4},
    }

    validator := isSidecarIndexRequested(request)

    testCases := []struct {
        name            string
        index           uint64
        isErrorExpected bool
    }{
        {
            name:            "not requested",
            index:           1,
            isErrorExpected: true,
        },
        {
            name:            "requested",
            index:           9,
            isErrorExpected: false,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const count = 4
            kzgCommitmentsInclusionProof := make([][]byte, 0, count)
            for range count {
                kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
            }

            sidecarPb := &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        Slot:       0,
                        ParentRoot: make([]byte, fieldparams.RootLength),
                        StateRoot:  make([]byte, fieldparams.RootLength),
                        BodyRoot:   make([]byte, fieldparams.RootLength),
                    },
                    Signature: make([]byte, fieldparams.BLSSignatureLength),
                },
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
                Index:                        tc.index,
            }

            sidecar, err := blocks.NewRODataColumn(sidecarPb)
            require.NoError(t, err)

            err = validator(sidecar)
            if tc.isErrorExpected {
                require.NotNil(t, err)
                return
            }

            require.NoError(t, err)
        })
    }
}

func TestSendDataColumnSidecarsByRootRequest(t *testing.T) {
    nilTestCases := []struct {
        name    string
        request p2ptypes.DataColumnsByRootIdentifiers
    }{
        {
            name:    "nil request",
            request: nil,
        },
        {
            name:    "count is 0",
            request: p2ptypes.DataColumnsByRootIdentifiers{{}, {}},
        },
    }

    for _, tc := range nilTestCases {
        t.Run(tc.name, func(t *testing.T) {
            actual, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, tc.request)
            require.NoError(t, err)
            require.IsNil(t, actual)
        })
    }

    t.Run("too many columns in request", func(t *testing.T) {
        params.SetupTestConfigCleanup(t)
        beaconConfig := params.BeaconConfig()
        beaconConfig.MaxRequestDataColumnSidecars = 4
        params.OverrideBeaconConfig(beaconConfig)

        request := p2ptypes.DataColumnsByRootIdentifiers{
            {Columns: []uint64{1, 2, 3}},
            {Columns: []uint64{4, 5, 6}},
        }

        _, err := SendDataColumnSidecarsByRootRequest(t.Context(), nil, nil, "aRandomPID", nil, request)
        require.ErrorContains(t, errMaxRequestDataColumnSidecarsExceeded.Error(), err)
    })

    type slotIndex struct {
        Slot  primitives.Slot
        Index uint64
    }

    createSidecar := func(rootIndex slotIndex) blocks.RODataColumn {
        const count = 4
        kzgCommitmentsInclusionProof := make([][]byte, 0, count)
        for range count {
            kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
        }

        sidecarPb := &ethpb.DataColumnSidecar{
            Index: rootIndex.Index,
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    Slot:       rootIndex.Slot,
                    ParentRoot: make([]byte, fieldparams.RootLength),
                    StateRoot:  make([]byte, fieldparams.RootLength),
                    BodyRoot:   make([]byte, fieldparams.RootLength),
                },
                Signature: make([]byte, fieldparams.BLSSignatureLength),
            },
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }

        roSidecar, err := blocks.NewRODataColumn(sidecarPb)
        require.NoError(t, err)

        return roSidecar
    }

    testCases := []struct {
        name          string
        slotIndices   []slotIndex
        expectedError error
    }{
        {
            name: "too many responses",
            slotIndices: []slotIndex{
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 2, Index: 1},
                {Slot: 2, Index: 2},
                {Slot: 2, Index: 3},
                {Slot: 1, Index: 3}, // Duplicate
            },
            expectedError: errMaxResponseDataColumnSidecarsExceeded,
        },
        {
            name: "perfect match",
            slotIndices: []slotIndex{
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 2, Index: 1},
                {Slot: 2, Index: 2},
                {Slot: 2, Index: 3},
            },
        },
        {
            name: "fewer responses than the maximum possible",
            slotIndices: []slotIndex{
                {Slot: 1, Index: 1},
                {Slot: 1, Index: 2},
                {Slot: 1, Index: 3},
                {Slot: 2, Index: 1},
                {Slot: 2, Index: 2},
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
            clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

            p1, p2 := p2ptest.NewTestP2P(t), p2ptest.NewTestP2P(t)
            p1.Connect(p2)

            expected := make([]blocks.RODataColumn, 0, len(tc.slotIndices))
            for _, slotIndex := range tc.slotIndices {
                roSidecar := createSidecar(slotIndex)
                expected = append(expected, roSidecar)
            }

            blockRoot1, blockRoot2 := expected[0].BlockRoot(), expected[3].BlockRoot()

            sentRequest := p2ptypes.DataColumnsByRootIdentifiers{
                {BlockRoot: blockRoot1[:], Columns: []uint64{1, 2, 3}},
                {BlockRoot: blockRoot2[:], Columns: []uint64{1, 2, 3}},
            }

            var wg sync.WaitGroup
            wg.Add(1)

            p2.SetStreamHandler(protocol, func(stream network.Stream) {
                wg.Done()

                requestReceived := new(p2ptypes.DataColumnsByRootIdentifiers)
                err := p2.Encoding().DecodeWithMaxLength(stream, requestReceived)
                assert.NoError(t, err)

                require.Equal(t, len(sentRequest), len(*requestReceived))
                for i := range sentRequest {
                    require.DeepSSZEqual(t, (sentRequest)[i], (*requestReceived)[i])
                }

                for _, sidecar := range expected {
                    err := WriteDataColumnSidecarChunk(stream, clock, p2.Encoding(), sidecar.DataColumnSidecar)
                    assert.NoError(t, err)
                }

                err = stream.CloseWrite()
                assert.NoError(t, err)
            })

            ctx := t.Context()
            ctxMap := ContextByteVersions{[4]byte{245, 165, 253, 66}: version.Fulu}

            actual, err := SendDataColumnSidecarsByRootRequest(ctx, clock, p1, p2.PeerID(), ctxMap, sentRequest)
            if tc.expectedError != nil {
                require.ErrorContains(t, tc.expectedError.Error(), err)
                if util.WaitTimeout(&wg, time.Second) {
                    t.Fatal("Did not receive stream within 1 sec")
                }

                return
            }

            require.Equal(t, len(expected), len(actual))
            for i := range expected {
                require.DeepSSZEqual(t, expected[i], actual[i])
            }
        })
    }
}

func TestIsSidecarIndexRootRequested(t *testing.T) {
    testCases := []struct {
        name            string
        root            [fieldparams.RootLength]byte
        index           uint64
        isErrorExpected bool
    }{
        {
            name:            "non requested root",
            root:            [fieldparams.RootLength]byte{2},
            isErrorExpected: true,
        },
        {
            name:            "non requested index",
            root:            [fieldparams.RootLength]byte{1},
            index:           3,
            isErrorExpected: true,
        },
        {
            name:            "nominal",
            root:            [fieldparams.RootLength]byte{1},
            index:           2,
            isErrorExpected: false,
        },
    }

    request := types.DataColumnsByRootIdentifiers{
        {BlockRoot: []byte{1}, Columns: []uint64{1, 2}},
    }

    validator := isSidecarIndexRootRequested(request)

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const count = 4
            kzgCommitmentsInclusionProof := make([][]byte, 0, count)
            for range count {
                kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
            }

            sidecarPb := &ethpb.DataColumnSidecar{
                SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                    Header: &ethpb.BeaconBlockHeader{
                        ParentRoot: make([]byte, fieldparams.RootLength),
                        StateRoot:  make([]byte, fieldparams.RootLength),
                        BodyRoot:   make([]byte, fieldparams.RootLength),
                    },
                    Signature: make([]byte, fieldparams.BLSSignatureLength),
                },
                KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
                Index:                        tc.index,
            }

            // There is a discrepancy between `tc.root` and the real root,
            // but we don't care about it here.
            sidecar, err := blocks.NewRODataColumnWithRoot(sidecarPb, tc.root)
            require.NoError(t, err)

            err = validator(sidecar)
            if tc.isErrorExpected {
                require.NotNil(t, err)
                return
            }

            require.NoError(t, err)
        })
    }
}

func TestReadChunkedDataColumnSidecar(t *testing.T) {
    t.Run("non nil status code", func(t *testing.T) {
        const reason = "a dummy reason"
@@ -38,15 +38,6 @@ var (
    RequireSidecarProposerExpected,
}

// ByRootRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
// via the by root request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
ByRootRequestDataColumnSidecarRequirements = []Requirement{
    RequireValidFields,
    RequireSidecarInclusionProven,
    RequireSidecarKzgProofVerified,
}

// ByRangeRequestDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received
// via the by range request must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
@@ -1,2 +0,0 @@
### Added
- Implement beacon API blob sidecar endpoint for Fulu.
@@ -1,2 +0,0 @@
### Fixed
- Non-deterministic output order of `dataColumnSidecarByRootRPCHandler`.
@@ -1,2 +0,0 @@
### Added
- PeerDAS: Implement peer sampling.
@@ -1,3 +0,0 @@
### Added
- Implement `SendDataColumnSidecarsByRangeRequest`.
- Implement `SendDataColumnSidecarsByRootRequest`.
3	changelog/potuz_hdiff_diff_type.md	Normal file
@@ -0,0 +1,3 @@
### Added

- Add native state diff type and marshalling functions
@@ -53,6 +53,7 @@ type Flags struct {
    EnableDutiesV2 bool // EnableDutiesV2 sets validator client to use the get Duties V2 endpoint
    EnableWeb      bool // EnableWeb enables the webui on the validator client
    SSZOnly        bool // SSZOnly forces the validator client to use SSZ for communication with the beacon node when REST mode is enabled (useful for debugging)
    EnableStateDiff bool // EnableStateDiff enables the experimental state diff feature for the beacon node.
    // Logging related toggles.
    DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected.
    EnableFullSSZDataLogging  bool // Enables logging for full ssz data on rejected gossip messages
@@ -277,6 +278,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
        logEnabled(enableExperimentalAttestationPool)
        cfg.EnableExperimentalAttestationPool = true
    }
    if ctx.IsSet(enableStateDiff.Name) {
        logEnabled(enableStateDiff)
        cfg.EnableStateDiff = true
    }
    if ctx.IsSet(forceHeadFlag.Name) {
        logEnabled(forceHeadFlag)
        cfg.ForceHead = ctx.String(forceHeadFlag.Name)
@@ -176,6 +176,10 @@ var (
        Name:  "enable-experimental-attestation-pool",
        Usage: "Enables an experimental attestation pool design.",
    }
    enableStateDiff = &cli.BoolFlag{
        Name:  "enable-state-diff",
        Usage: "Enables the experimental state diff feature.",
    }
    // forceHeadFlag is a flag to force the head of the beacon chain to a specific block.
    forceHeadFlag = &cli.StringFlag{
        Name: "sync-from",
@@ -267,6 +271,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
    DisableQUIC,
    EnableDiscoveryReboot,
    enableExperimentalAttestationPool,
    enableStateDiff,
    forceHeadFlag,
    blacklistRoots,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
@@ -14,6 +14,7 @@ go_library(
        "mainnet_config.go",
        "minimal_config.go",
        "network_config.go",
        "state_diff_config.go",
        "testnet_e2e_config.go",
        "testnet_holesky_config.go",
        "testnet_hoodi_config.go",
9	config/params/state_diff_config.go	Normal file
@@ -0,0 +1,9 @@
package params
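
// stateHierarchyExponents appears to define the layers of the state diff
// hierarchy, one layer per exponent e with a spacing of 2^e slots. This is an
// assumption based on the analogous Lighthouse hierarchy-exponents option;
// the change itself does not document the semantics.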
var (
    stateHierarchyExponents = []uint64{21, 18, 16, 13, 11, 9, 5}
)

func StateHierarchyExponents() []uint64 {
    return stateHierarchyExponents
}
|
||||
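One illustrative reading of these exponents: each hierarchy level spans a period of 2^exponent slots, and a slot anchors at the coarsest level whose period divides it. A minimal sketch under that assumption; the diffLevel helper is hypothetical and not part of the diff above:

package params

// diffLevel returns the index of the first (coarsest) hierarchy level
// whose period of 2^exponent slots has a boundary exactly at slot, and
// false when the slot is not a boundary of any level.
// Hypothetical helper for illustration only.
func diffLevel(slot uint64) (int, bool) {
	for i, exp := range StateHierarchyExponents() {
		if slot%(uint64(1)<<exp) == 0 {
			return i, true
		}
	}
	return 0, false
}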
@@ -40,6 +40,12 @@ func NewWrappedExecutionData(v proto.Message) (interfaces.ExecutionData, error)
	case *enginev1.ExecutionBundleElectra:
		// Note: there are no payload changes in Electra, so the Deneb wrapper is reused.
		return WrappedExecutionPayloadDeneb(pbStruct.Payload)
	case *enginev1.ExecutionPayloadHeader:
		return WrappedExecutionPayloadHeader(pbStruct)
	case *enginev1.ExecutionPayloadHeaderCapella:
		return WrappedExecutionPayloadHeaderCapella(pbStruct)
	case *enginev1.ExecutionPayloadHeaderDeneb:
		return WrappedExecutionPayloadHeaderDeneb(pbStruct)
	default:
		return nil, ErrUnsupportedVersion
	}
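As an illustration, the new cases allow wrapping a bare header message directly; a sketch, assuming NewWrappedExecutionData lives alongside the Wrapped* helpers in the consensus-types/blocks package (the zero-value header is a placeholder):

package example

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
)

func wrapHeaderSketch() {
	// A zero-value Deneb header is enough to exercise the new switch case.
	h := &enginev1.ExecutionPayloadHeaderDeneb{}
	ed, err := blocks.NewWrappedExecutionData(h)
	fmt.Println(ed, err)
}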
consensus-types/hdiff/BUILD.bazel (new file, 53 lines)
@@ -0,0 +1,53 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["state_diff.go"],
    importpath = "github.com/OffchainLabs/prysm/v6/consensus-types/hdiff",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/capella:go_default_library",
        "//beacon-chain/core/deneb:go_default_library",
        "//beacon-chain/core/electra:go_default_library",
        "//beacon-chain/core/execution:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/helpers:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "fuzz_test.go",
        "property_test.go",
        "security_test.go",
        "state_diff_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
consensus-types/hdiff/fuzz_test.go (new file, 491 lines)
@@ -0,0 +1,491 @@
package hdiff

import (
	"context"
	"encoding/binary"
	"strconv"
	"strings"
	"testing"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)
// FuzzNewHdiff tests parsing variations of realistic diffs
func FuzzNewHdiff(f *testing.F) {
	// Add seed corpus with various valid diffs from realistic scenarios
	sizes := []uint64{8, 16, 32}
	for _, size := range sizes {
		source, _ := util.DeterministicGenesisStateElectra(f, size)

		// Create various realistic target states
		scenarios := []string{"slot_change", "balance_change", "validator_change", "multiple_changes"}
		for _, scenario := range scenarios {
			target := source.Copy()

			switch scenario {
			case "slot_change":
				_ = target.SetSlot(source.Slot() + 1)
			case "balance_change":
				balances := target.Balances()
				if len(balances) > 0 {
					balances[0] += 1000000000
					_ = target.SetBalances(balances)
				}
			case "validator_change":
				validators := target.Validators()
				if len(validators) > 0 {
					validators[0].EffectiveBalance += 1000000000
					_ = target.SetValidators(validators)
				}
			case "multiple_changes":
				_ = target.SetSlot(source.Slot() + 5)
				balances := target.Balances()
				validators := target.Validators()
				if len(balances) > 0 && len(validators) > 0 {
					balances[0] += 2000000000
					validators[0].EffectiveBalance += 1000000000
					_ = target.SetBalances(balances)
					_ = target.SetValidators(validators)
				}
			}

			validDiff, err := Diff(source, target)
			if err == nil {
				f.Add(validDiff.StateDiff, validDiff.ValidatorDiffs, validDiff.BalancesDiff)
			}
		}
	}

	f.Fuzz(func(t *testing.T, stateDiff, validatorDiffs, balancesDiff []byte) {
		// Limit input sizes to reasonable bounds
		if len(stateDiff) > 5000 || len(validatorDiffs) > 5000 || len(balancesDiff) > 5000 {
			return
		}

		input := HdiffBytes{
			StateDiff:      stateDiff,
			ValidatorDiffs: validatorDiffs,
			BalancesDiff:   balancesDiff,
		}

		// Test parsing - should not panic even with corrupted but bounded data
		_, err := newHdiff(input)
		_ = err // Expected to fail with corrupted data
	})
}
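// Editorial sketch (not part of the original diff): the round-trip
// contract the fuzz targets in this file exercise, stated directly.
// Assumes the Diff/ApplyDiff signatures used by the seed corpus above.
func TestDiffRoundTripSketch(t *testing.T) {
	source, _ := util.DeterministicGenesisStateElectra(t, 8)
	target := source.Copy()
	if err := target.SetSlot(source.Slot() + 1); err != nil {
		t.Fatal(err)
	}

	d, err := Diff(source, target)
	if err != nil {
		t.Fatal(err)
	}

	got, err := ApplyDiff(context.Background(), source, d)
	if err != nil {
		t.Fatal(err)
	}
	if got.Slot() != target.Slot() {
		t.Fatalf("round trip mismatch: got slot %d, want %d", got.Slot(), target.Slot())
	}
}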
// FuzzNewStateDiff tests the newStateDiff function with random compressed input
func FuzzNewStateDiff(f *testing.F) {
	// Add seed corpus
	source, _ := util.DeterministicGenesisStateElectra(f, 16)
	target := source.Copy()
	_ = target.SetSlot(source.Slot() + 5)

	diff, err := diffToState(source, target)
	if err == nil {
		serialized := diff.serialize()
		f.Add(serialized)
	}

	// Add edge cases
	f.Add([]byte{})
	f.Add([]byte{0x01})
	f.Add([]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07})

	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("newStateDiff panicked: %v", r)
			}
		}()

		// Should never panic, only return error
		_, err := newStateDiff(data)
		_ = err
	})
}
// FuzzNewValidatorDiffs tests validator diff deserialization
func FuzzNewValidatorDiffs(f *testing.F) {
	// Add seed corpus
	source, _ := util.DeterministicGenesisStateElectra(f, 8)
	target := source.Copy()
	vals := target.Validators()
	if len(vals) > 0 {
		modifiedVal := &ethpb.Validator{
			PublicKey:                  vals[0].PublicKey,
			WithdrawalCredentials:      vals[0].WithdrawalCredentials,
			EffectiveBalance:           vals[0].EffectiveBalance + 1000,
			Slashed:                    !vals[0].Slashed,
			ActivationEligibilityEpoch: vals[0].ActivationEligibilityEpoch,
			ActivationEpoch:            vals[0].ActivationEpoch,
			ExitEpoch:                  vals[0].ExitEpoch,
			WithdrawableEpoch:          vals[0].WithdrawableEpoch,
		}
		vals[0] = modifiedVal
		_ = target.SetValidators(vals)

		// Create a simple diff for fuzzing - we'll just use raw bytes
		_, err := diffToVals(source, target)
		if err == nil {
			// Add some realistic validator diff bytes for the corpus
			f.Add([]byte{1, 0, 0, 0, 0, 0, 0, 0}) // Simple validator diff
		}
	}

	// Add edge cases
	f.Add([]byte{})
	f.Add([]byte{0x01, 0x02, 0x03, 0x04})

	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("newValidatorDiffs panicked: %v", r)
			}
		}()

		_, err := newValidatorDiffs(data)
		_ = err
	})
}
// FuzzNewBalancesDiff tests balance diff deserialization
func FuzzNewBalancesDiff(f *testing.F) {
	// Add seed corpus
	source, _ := util.DeterministicGenesisStateElectra(f, 8)
	target := source.Copy()
	balances := target.Balances()
	if len(balances) > 0 {
		balances[0] += 1000
		_ = target.SetBalances(balances)

		// Create a simple diff for fuzzing - we'll just use raw bytes
		_, err := diffToBalances(source, target)
		if err == nil {
			// Add some realistic balance diff bytes for the corpus
			f.Add([]byte{1, 0, 0, 0, 0, 0, 0, 0}) // Simple balance diff
		}
	}

	// Add edge cases
	f.Add([]byte{})
	f.Add([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})

	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("newBalancesDiff panicked: %v", r)
			}
		}()

		_, err := newBalancesDiff(data)
		_ = err
	})
}
// FuzzApplyDiff tests applying variations of valid diffs
func FuzzApplyDiff(f *testing.F) {
	// Test with realistic state variations, not random data
	ctx := context.Background()

	// Add seed corpus with various valid scenarios
	sizes := []uint64{8, 16, 32, 64}
	for _, size := range sizes {
		source, _ := util.DeterministicGenesisStateElectra(f, size)
		target := source.Copy()

		// Different types of realistic changes
		scenarios := []func(){
			func() { _ = target.SetSlot(source.Slot() + 1) }, // Slot change
			func() { // Balance change
				balances := target.Balances()
				if len(balances) > 0 {
					balances[0] += 1000000000 // 1 ETH
					_ = target.SetBalances(balances)
				}
			},
			func() { // Validator change
				validators := target.Validators()
				if len(validators) > 0 {
					validators[0].EffectiveBalance += 1000000000
					_ = target.SetValidators(validators)
				}
			},
		}

		for _, scenario := range scenarios {
			// Reset the shared target before each scenario so the closures
			// above mutate a fresh copy of the source state; diffing against
			// a separate, untouched copy would produce only empty seeds.
			target = source.Copy()
			scenario()

			validDiff, err := Diff(source, target)
			if err == nil {
				f.Add(validDiff.StateDiff, validDiff.ValidatorDiffs, validDiff.BalancesDiff)
			}
		}
	}

	f.Fuzz(func(t *testing.T, stateDiff, validatorDiffs, balancesDiff []byte) {
		// Only test with reasonable sized inputs
		if len(stateDiff) > 10000 || len(validatorDiffs) > 10000 || len(balancesDiff) > 10000 {
			return
		}

		// Create fresh source state for each test
		source, _ := util.DeterministicGenesisStateElectra(t, 8)

		diff := HdiffBytes{
			StateDiff:      stateDiff,
			ValidatorDiffs: validatorDiffs,
			BalancesDiff:   balancesDiff,
		}

		// Apply diff - errors are expected for fuzzed data
		_, err := ApplyDiff(ctx, source, diff)
		_ = err // Expected to fail with invalid data
	})
}
// FuzzReadPendingAttestation tests the pending attestation deserialization
func FuzzReadPendingAttestation(f *testing.F) {
	// Add edge cases - this function is particularly vulnerable
	f.Add([]byte{})
	f.Add([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}) // 8 bytes
	f.Add(make([]byte, 200))                                      // Larger than expected

	// Add a case with large reported length
	largeLength := make([]byte, 8)
	binary.LittleEndian.PutUint64(largeLength, 0xFFFFFFFF) // Large bits length
	f.Add(largeLength)

	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("readPendingAttestation panicked: %v", r)
			}
		}()

		// Make a copy since the function modifies the slice
		dataCopy := make([]byte, len(data))
		copy(dataCopy, data)

		_, err := readPendingAttestation(&dataCopy)
		_ = err
	})
}
// FuzzKmpIndex tests the KMP algorithm implementation
func FuzzKmpIndex(f *testing.F) {
	// Test with integer pointers to match the actual usage
	f.Add(0, "1,2,3", "1,2,3,4,5")
	f.Add(3, "1,2,3", "1,2,3,1,2,3")
	f.Add(0, "", "1,2,3")

	f.Fuzz(func(t *testing.T, lens int, patternStr string, textStr string) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("kmpIndex panicked: %v", r)
			}
		}()

		// Parse comma-separated strings into int slices
		var pattern, text []int
		if patternStr != "" {
			for _, s := range strings.Split(patternStr, ",") {
				if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
					pattern = append(pattern, val)
				}
			}
		}
		if textStr != "" {
			for _, s := range strings.Split(textStr, ",") {
				if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
					text = append(text, val)
				}
			}
		}

		// Convert to pointer slices as used in actual code
		patternPtrs := make([]*int, len(pattern))
		for i := range pattern {
			val := pattern[i]
			patternPtrs[i] = &val
		}

		textPtrs := make([]*int, len(text))
		for i := range text {
			val := text[i]
			textPtrs[i] = &val
		}

		integerEquals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		// Clamp lens to reasonable range to avoid infinite loops
		if lens < 0 {
			lens = 0
		}
		if lens > len(textPtrs) {
			lens = len(textPtrs)
		}

		result := kmpIndex(lens, textPtrs, integerEquals)

		// Basic sanity check
		if result < 0 || result > lens {
			t.Errorf("kmpIndex returned invalid result: %d for lens=%d", result, lens)
		}
	})
}
// FuzzComputeLPS tests the LPS computation for KMP
func FuzzComputeLPS(f *testing.F) {
	// Add seed cases
	f.Add("1,2,1")
	f.Add("1,1,1")
	f.Add("1,2,3,4")
	f.Add("")

	f.Fuzz(func(t *testing.T, patternStr string) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("computeLPS panicked: %v", r)
			}
		}()

		// Parse comma-separated string into int slice
		var pattern []int
		if patternStr != "" {
			for _, s := range strings.Split(patternStr, ",") {
				if val, err := strconv.Atoi(strings.TrimSpace(s)); err == nil {
					pattern = append(pattern, val)
				}
			}
		}

		// Convert to pointer slice
		patternPtrs := make([]*int, len(pattern))
		for i := range pattern {
			val := pattern[i]
			patternPtrs[i] = &val
		}

		integerEquals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		result := computeLPS(patternPtrs, integerEquals)

		// Verify result length matches input
		if len(result) != len(pattern) {
			t.Errorf("computeLPS returned wrong length: got %d, expected %d", len(result), len(pattern))
		}

		// Verify all LPS values are non-negative and within bounds
		for i, lps := range result {
			if lps < 0 || lps > i {
				t.Errorf("Invalid LPS value at index %d: %d", i, lps)
			}
		}
	})
}
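// Editorial sketch (not part of the original diff): the classic LPS
// ("longest proper prefix that is also a suffix") table that a KMP
// helper like computeLPS conventionally produces, shown over plain
// ints; the package's version works over pointer slices with a
// caller-supplied equality function.
func lpsSketch(pattern []int) []int {
	table := make([]int, len(pattern))
	k := 0
	for i := 1; i < len(pattern); i++ {
		for k > 0 && pattern[i] != pattern[k] {
			k = table[k-1] // fall back to the next-longest border
		}
		if pattern[i] == pattern[k] {
			k++
		}
		table[i] = k
	}
	return table
}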
// FuzzDiffToBalances tests balance diff computation
func FuzzDiffToBalances(f *testing.F) {
	f.Fuzz(func(t *testing.T, sourceData, targetData []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("diffToBalances panicked: %v", r)
			}
		}()

		// Convert byte data to balance arrays
		var sourceBalances, targetBalances []uint64

		// Parse source balances (8 bytes per uint64)
		for i := 0; i+7 < len(sourceData) && len(sourceBalances) < 100; i += 8 {
			balance := binary.LittleEndian.Uint64(sourceData[i : i+8])
			sourceBalances = append(sourceBalances, balance)
		}

		// Parse target balances
		for i := 0; i+7 < len(targetData) && len(targetBalances) < 100; i += 8 {
			balance := binary.LittleEndian.Uint64(targetData[i : i+8])
			targetBalances = append(targetBalances, balance)
		}

		// Create states with the provided balances
		source, _ := util.DeterministicGenesisStateElectra(t, 1)
		target, _ := util.DeterministicGenesisStateElectra(t, 1)

		if len(sourceBalances) > 0 {
			_ = source.SetBalances(sourceBalances)
		}
		if len(targetBalances) > 0 {
			_ = target.SetBalances(targetBalances)
		}

		result, err := diffToBalances(source, target)

		// If no error, verify result consistency
		if err == nil && len(result) > 0 {
			// Result length should match target length
			if len(result) != len(target.Balances()) {
				t.Errorf("diffToBalances result length mismatch: got %d, expected %d",
					len(result), len(target.Balances()))
			}
		}
	})
}
// FuzzValidatorsEqual tests validator comparison
func FuzzValidatorsEqual(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		defer func() {
			if r := recover(); r != nil {
				t.Errorf("validatorsEqual panicked: %v", r)
			}
		}()

		// Create two validators and fuzz their fields
		if len(data) < 16 {
			return
		}

		source, _ := util.DeterministicGenesisStateElectra(t, 2)
		validators := source.Validators()
		if len(validators) < 2 {
			return
		}

		val1 := validators[0]
		val2 := validators[1]

		// Modify validator fields based on fuzz data
		if len(data) > 0 && data[0]%2 == 0 {
			val2.EffectiveBalance = val1.EffectiveBalance + uint64(data[0])
		}
		if len(data) > 1 && data[1]%2 == 0 {
			val2.Slashed = !val1.Slashed
		}

		// Create ReadOnlyValidator wrappers if needed.
		// Since validatorsEqual expects the ReadOnlyValidator interface,
		// we skip this test for now as it requires a state wrapper implementation.
		_ = val1
		_ = val2
	})
}
consensus-types/hdiff/property_test.go (new file, 391 lines)
@@ -0,0 +1,391 @@
package hdiff

import (
	"context"
	"encoding/binary"
	"math"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)
// FuzzPropertyRoundTrip verifies that applying a diff reproduces the target state for realistic data
func FuzzPropertyRoundTrip(f *testing.F) {
	f.Fuzz(func(t *testing.T, slotDelta uint64, balanceData []byte, validatorData []byte) {
		// Limit to realistic ranges
		if slotDelta > 32 { // Max one epoch
			slotDelta = slotDelta % 32
		}

		// Convert byte data to realistic deltas and changes
		var balanceDeltas []int64
		var validatorChanges []bool

		// Parse balance deltas - limit to realistic amounts (8 bytes per int64)
		for i := 0; i+7 < len(balanceData) && len(balanceDeltas) < 20; i += 8 {
			delta := int64(binary.LittleEndian.Uint64(balanceData[i : i+8]))
			// Keep deltas realistic (max 10 ETH change)
			if delta > 10000000000 {
				delta = delta % 10000000000
			}
			if delta < -10000000000 {
				delta = -((-delta) % 10000000000)
			}
			balanceDeltas = append(balanceDeltas, delta)
		}

		// Parse validator changes (1 byte per bool) - limit to small number
		for i := 0; i < len(validatorData) && len(validatorChanges) < 10; i++ {
			validatorChanges = append(validatorChanges, validatorData[i]%2 == 0)
		}

		ctx := context.Background()

		// Create source state with reasonable size
		validatorCount := uint64(len(validatorChanges) + 8) // Minimum 8 validators
		if validatorCount > 64 {
			validatorCount = 64 // Cap at 64 for performance
		}
		source, _ := util.DeterministicGenesisStateElectra(t, validatorCount)

		// Create target state with modifications
		target := source.Copy()

		// Apply slot change
		_ = target.SetSlot(source.Slot() + primitives.Slot(slotDelta))

		// Apply realistic balance changes
		if len(balanceDeltas) > 0 {
			balances := target.Balances()
			for i, delta := range balanceDeltas {
				if i >= len(balances) {
					break
				}
				// Apply realistic balance changes with safe bounds
				if delta < 0 {
					if uint64(-delta) > balances[i] {
						balances[i] = 0 // Can't go below 0
					} else {
						balances[i] -= uint64(-delta)
					}
				} else {
					// Cap at reasonable maximum (1000 ETH)
					maxBalance := uint64(1000000000000) // 1000 ETH in Gwei
					if balances[i]+uint64(delta) > maxBalance {
						balances[i] = maxBalance
					} else {
						balances[i] += uint64(delta)
					}
				}
			}
			_ = target.SetBalances(balances)
		}

		// Apply realistic validator changes
		if len(validatorChanges) > 0 {
			validators := target.Validators()
			for i, shouldChange := range validatorChanges {
				if i >= len(validators) {
					break
				}
				if shouldChange {
					// Make realistic changes - small effective balance adjustments
					validators[i].EffectiveBalance += 1000000000 // 1 ETH
				}
			}
			_ = target.SetValidators(validators)
		}

		// Create diff
		diff, err := Diff(source, target)
		if err != nil {
			// If diff creation fails, that's acceptable for malformed inputs
			return
		}

		// Apply diff
		result, err := ApplyDiff(ctx, source, diff)
		if err != nil {
			// If diff application fails, that's acceptable
			return
		}

		// Verify round-trip property: source + diff = target
		require.Equal(t, target.Slot(), result.Slot())

		// Verify balance consistency
		targetBalances := target.Balances()
		resultBalances := result.Balances()
		require.Equal(t, len(targetBalances), len(resultBalances))
		for i := range targetBalances {
			require.Equal(t, targetBalances[i], resultBalances[i], "Balance mismatch at index %d", i)
		}

		// Verify validator consistency
		targetVals := target.Validators()
		resultVals := result.Validators()
		require.Equal(t, len(targetVals), len(resultVals))
		for i := range targetVals {
			require.Equal(t, targetVals[i].Slashed, resultVals[i].Slashed, "Validator slashing mismatch at index %d", i)
			require.Equal(t, targetVals[i].EffectiveBalance, resultVals[i].EffectiveBalance, "Validator balance mismatch at index %d", i)
		}
	})
}
// FuzzPropertyResourceBounds verifies operations complete quickly with realistic data
func FuzzPropertyResourceBounds(f *testing.F) {
	f.Fuzz(func(t *testing.T, validatorCount uint8, slotDelta uint8, changeCount uint8) {
		// Use realistic parameters
		validators := uint64(validatorCount%64 + 8) // 8-71 validators
		slots := uint64(slotDelta % 32)             // 0-31 slots
		changes := int(changeCount % 10)            // 0-9 changes

		// Create realistic states
		source, _ := util.DeterministicGenesisStateElectra(t, validators)
		target := source.Copy()

		// Apply realistic changes
		_ = target.SetSlot(source.Slot() + primitives.Slot(slots))

		if changes > 0 {
			validatorList := target.Validators()
			for i := 0; i < changes && i < len(validatorList); i++ {
				validatorList[i].EffectiveBalance += 1000000000 // 1 ETH
			}
			_ = target.SetValidators(validatorList)
		}

		// Operations should complete quickly
		start := time.Now()
		diff, err := Diff(source, target)
		duration := time.Since(start)

		if err == nil {
			// Should be fast
			require.Equal(t, true, duration < time.Second, "Diff creation too slow: %v", duration)

			// Apply should also be fast
			start = time.Now()
			_, err = ApplyDiff(context.Background(), source, diff)
			duration = time.Since(start)

			if err == nil {
				require.Equal(t, true, duration < time.Second, "Diff application too slow: %v", duration)
			}
		}
	})
}
// FuzzPropertyDiffEfficiency verifies that diffs are smaller than full states for typical cases
func FuzzPropertyDiffEfficiency(f *testing.F) {
	f.Fuzz(func(t *testing.T, slotDelta uint64, numChanges uint8) {
		if slotDelta > 100 {
			slotDelta = slotDelta % 100
		}
		if numChanges > 10 {
			numChanges = numChanges % 10
		}

		// Create states with small differences
		source, _ := util.DeterministicGenesisStateElectra(t, 64)
		target := source.Copy()

		_ = target.SetSlot(source.Slot() + primitives.Slot(slotDelta))

		// Make a few small changes
		if numChanges > 0 {
			validators := target.Validators()
			for i := uint8(0); i < numChanges && int(i) < len(validators); i++ {
				validators[i].EffectiveBalance += 1000
			}
			_ = target.SetValidators(validators)
		}

		// Create diff
		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		// For small changes, diff should be much smaller than full state
		sourceSSZ, err := source.MarshalSSZ()
		if err != nil {
			return
		}

		diffSize := len(diff.StateDiff) + len(diff.ValidatorDiffs) + len(diff.BalancesDiff)

		// Diff should be smaller than full state for small changes
		if numChanges <= 5 && slotDelta <= 10 {
			require.Equal(t, true, diffSize < len(sourceSSZ)/2,
				"Diff size %d should be less than half of state size %d", diffSize, len(sourceSSZ))
		}
	})
}
// FuzzPropertyBalanceConservation verifies that balance operations don't create or destroy value unexpectedly
func FuzzPropertyBalanceConservation(f *testing.F) {
	f.Fuzz(func(t *testing.T, balanceData []byte) {
		// Convert byte data to balance changes
		var balanceChanges []int64
		for i := 0; i+7 < len(balanceData) && len(balanceChanges) < 50; i += 8 {
			change := int64(binary.LittleEndian.Uint64(balanceData[i : i+8]))
			balanceChanges = append(balanceChanges, change)
		}

		source, _ := util.DeterministicGenesisStateElectra(t, uint64(len(balanceChanges)+10))
		originalBalances := source.Balances()

		// Calculate total before
		var totalBefore uint64
		for _, balance := range originalBalances {
			totalBefore += balance
		}

		// Apply balance changes via diff system
		target := source.Copy()
		targetBalances := target.Balances()

		var totalDelta int64
		for i, delta := range balanceChanges {
			if i >= len(targetBalances) {
				break
			}

			// Prevent underflow
			if delta < 0 && uint64(-delta) > targetBalances[i] {
				totalDelta -= int64(targetBalances[i]) // The whole balance is lost
				targetBalances[i] = 0
			} else if delta < 0 {
				targetBalances[i] -= uint64(-delta)
				totalDelta += delta
			} else {
				// Prevent overflow
				if uint64(delta) > math.MaxUint64-targetBalances[i] {
					gained := math.MaxUint64 - targetBalances[i]
					totalDelta += int64(gained)
					targetBalances[i] = math.MaxUint64
				} else {
					targetBalances[i] += uint64(delta)
					totalDelta += delta
				}
			}
		}
		_ = target.SetBalances(targetBalances)

		// Apply through diff system
		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		result, err := ApplyDiff(context.Background(), source, diff)
		if err != nil {
			return
		}

		// Calculate total after
		resultBalances := result.Balances()
		var totalAfter uint64
		for _, balance := range resultBalances {
			totalAfter += balance
		}

		// Verify conservation (accounting for intended changes)
		expectedTotal := totalBefore
		if totalDelta >= 0 {
			expectedTotal += uint64(totalDelta)
		} else {
			if uint64(-totalDelta) <= expectedTotal {
				expectedTotal -= uint64(-totalDelta)
			} else {
				expectedTotal = 0
			}
		}

		require.Equal(t, expectedTotal, totalAfter,
			"Balance conservation violated: before=%d, delta=%d, expected=%d, actual=%d",
			totalBefore, totalDelta, expectedTotal, totalAfter)
	})
}
// FuzzPropertyMonotonicSlot verifies the slot only increases
func FuzzPropertyMonotonicSlot(f *testing.F) {
	f.Fuzz(func(t *testing.T, slotDelta uint64) {
		source, _ := util.DeterministicGenesisStateElectra(t, 16)
		target := source.Copy()

		targetSlot := source.Slot() + primitives.Slot(slotDelta)
		if targetSlot < source.Slot() {
			return // Skip deltas that overflow the slot type
		}
		_ = target.SetSlot(targetSlot)

		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		result, err := ApplyDiff(context.Background(), source, diff)
		if err != nil {
			return
		}

		// Slot should never decrease
		require.Equal(t, true, result.Slot() >= source.Slot(),
			"Slot decreased from %d to %d", source.Slot(), result.Slot())

		// Slot should match target
		require.Equal(t, targetSlot, result.Slot())
	})
}
// FuzzPropertyValidatorIndices verifies validator indices remain consistent
func FuzzPropertyValidatorIndices(f *testing.F) {
	f.Fuzz(func(t *testing.T, changeData []byte) {
		// Convert byte data to boolean changes
		var changes []bool
		for i := 0; i < len(changeData) && len(changes) < 20; i++ {
			changes = append(changes, changeData[i]%2 == 0)
		}

		source, _ := util.DeterministicGenesisStateElectra(t, uint64(len(changes)+5))
		target := source.Copy()

		// Apply changes
		validators := target.Validators()
		for i, shouldChange := range changes {
			if i >= len(validators) {
				break
			}
			if shouldChange {
				validators[i].EffectiveBalance += 1000
			}
		}
		_ = target.SetValidators(validators)

		diff, err := Diff(source, target)
		if err != nil {
			return
		}

		result, err := ApplyDiff(context.Background(), source, diff)
		if err != nil {
			return
		}

		// Validator count should not decrease
		require.Equal(t, true, len(result.Validators()) >= len(source.Validators()),
			"Validator count decreased from %d to %d", len(source.Validators()), len(result.Validators()))

		// Public keys should be preserved for existing validators.
		// DeepEqual is used because public keys are byte slices.
		sourceVals := source.Validators()
		resultVals := result.Validators()
		for i := range sourceVals {
			if i < len(resultVals) {
				require.DeepEqual(t, sourceVals[i].PublicKey, resultVals[i].PublicKey,
					"Public key changed at validator index %d", i)
			}
		}
	})
}
consensus-types/hdiff/security_test.go (new file, 393 lines)
@@ -0,0 +1,393 @@
package hdiff

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
)
// TestIntegerOverflowProtection tests protection against balance overflow attacks
func TestIntegerOverflowProtection(t *testing.T) {
	source, _ := util.DeterministicGenesisStateElectra(t, 8)

	// Test balance overflow in diffToBalances - use realistic values
	t.Run("balance_diff_overflow", func(t *testing.T) {
		target := source.Copy()
		balances := target.Balances()

		// Set high but realistic balance values (32 ETH in Gwei = 32e9)
		balances[0] = 32000000000 // 32 ETH
		balances[1] = 64000000000 // 64 ETH
		_ = target.SetBalances(balances)

		// This should work fine with realistic values
		diffs, err := diffToBalances(source, target)
		require.NoError(t, err)

		// Verify the diffs are reasonable
		require.Equal(t, true, len(diffs) > 0, "Should have balance diffs")
	})

	// Test reasonable balance changes
	t.Run("realistic_balance_changes", func(t *testing.T) {
		// Create realistic balance changes (slashing, rewards)
		balancesDiff := []int64{1000000000, -500000000, 2000000000} // 1 ETH gain, 0.5 ETH loss, 2 ETH gain

		// Apply to state with normal balances
		testSource := source.Copy()
		normalBalances := []uint64{32000000000, 32000000000, 32000000000} // 32 ETH each
		_ = testSource.SetBalances(normalBalances)

		// This should work fine
		result, err := applyBalancesDiff(testSource, balancesDiff)
		require.NoError(t, err)

		resultBalances := result.Balances()
		require.Equal(t, uint64(33000000000), resultBalances[0]) // 33 ETH
		require.Equal(t, uint64(31500000000), resultBalances[1]) // 31.5 ETH
		require.Equal(t, uint64(34000000000), resultBalances[2]) // 34 ETH
	})
}
// TestReasonablePerformance tests that operations complete in reasonable time
func TestReasonablePerformance(t *testing.T) {
	t.Run("large_state_performance", func(t *testing.T) {
		// Test with a large but realistic validator set
		source, _ := util.DeterministicGenesisStateElectra(t, 1000) // 1000 validators
		target := source.Copy()

		// Make realistic changes
		_ = target.SetSlot(source.Slot() + 32) // One epoch
		validators := target.Validators()
		for i := 0; i < 100; i++ { // 10% of validators changed
			validators[i].EffectiveBalance += 1000000000 // 1 ETH change
		}
		_ = target.SetValidators(validators)

		// Should complete quickly
		start := time.Now()
		diff, err := Diff(source, target)
		duration := time.Since(start)

		require.NoError(t, err)
		require.Equal(t, true, duration < time.Second, "Diff creation took too long: %v", duration)
		require.Equal(t, true, len(diff.StateDiff) > 0, "Should have state diff")
	})

	t.Run("realistic_diff_application", func(t *testing.T) {
		// Test applying diffs to large states
		source, _ := util.DeterministicGenesisStateElectra(t, 500)
		target := source.Copy()
		_ = target.SetSlot(source.Slot() + 1)

		// Create and apply diff
		diff, err := Diff(source, target)
		require.NoError(t, err)

		start := time.Now()
		result, err := ApplyDiff(context.Background(), source, diff)
		duration := time.Since(start)

		require.NoError(t, err)
		require.Equal(t, target.Slot(), result.Slot())
		require.Equal(t, true, duration < time.Second, "Diff application took too long: %v", duration)
	})
}
// TestStateTransitionValidation tests realistic state transition scenarios
func TestStateTransitionValidation(t *testing.T) {
	t.Run("validator_slashing_scenario", func(t *testing.T) {
		source, _ := util.DeterministicGenesisStateElectra(t, 10)
		target := source.Copy()

		// Simulate validator slashing (realistic scenario)
		validators := target.Validators()
		validators[0].Slashed = true
		validators[0].EffectiveBalance = 0 // Slashed validator loses balance
		_ = target.SetValidators(validators)

		// This should work fine
		diff, err := Diff(source, target)
		require.NoError(t, err)

		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)
		require.Equal(t, true, result.Validators()[0].Slashed)
		require.Equal(t, uint64(0), result.Validators()[0].EffectiveBalance)
	})

	t.Run("epoch_transition_scenario", func(t *testing.T) {
		source, _ := util.DeterministicGenesisStateElectra(t, 64)
		target := source.Copy()

		// Simulate epoch transition with multiple changes
		_ = target.SetSlot(source.Slot() + 32) // One epoch

		// Some validators get rewards, others get penalties
		balances := target.Balances()
		for i := 0; i < len(balances); i++ {
			if i%2 == 0 {
				balances[i] += 100000000 // 0.1 ETH reward
			} else {
				if balances[i] > 50000000 {
					balances[i] -= 50000000 // 0.05 ETH penalty
				}
			}
		}
		_ = target.SetBalances(balances)

		// This should work smoothly
		diff, err := Diff(source, target)
		require.NoError(t, err)

		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)
		require.Equal(t, target.Slot(), result.Slot())
	})

	t.Run("consistent_state_root", func(t *testing.T) {
		// Test that diffs preserve state consistency
		source, _ := util.DeterministicGenesisStateElectra(t, 32)
		target := source.Copy()

		// Make minimal changes
		_ = target.SetSlot(source.Slot() + 1)

		// Diff and apply should be consistent
		diff, err := Diff(source, target)
		require.NoError(t, err)

		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)

		// Result should match target
		require.Equal(t, target.Slot(), result.Slot())
		require.Equal(t, len(target.Validators()), len(result.Validators()))
		require.Equal(t, len(target.Balances()), len(result.Balances()))
	})
}
// TestSerializationRoundTrip tests serialization consistency
func TestSerializationRoundTrip(t *testing.T) {
	t.Run("diff_serialization_consistency", func(t *testing.T) {
		// Test that serialization and deserialization are consistent
		source, _ := util.DeterministicGenesisStateElectra(t, 16)
		target := source.Copy()

		// Make changes
		_ = target.SetSlot(source.Slot() + 5)
		validators := target.Validators()
		validators[0].EffectiveBalance += 1000000000
		_ = target.SetValidators(validators)

		// Create diff
		diff1, err := Diff(source, target)
		require.NoError(t, err)

		// Deserialize and re-serialize
		hdiff, err := newHdiff(diff1)
		require.NoError(t, err)

		diff2 := hdiff.serialize()

		// Apply both diffs - should get same result
		result1, err := ApplyDiff(context.Background(), source, diff1)
		require.NoError(t, err)

		result2, err := ApplyDiff(context.Background(), source, diff2)
		require.NoError(t, err)

		require.Equal(t, result1.Slot(), result2.Slot())
		require.Equal(t, result1.Validators()[0].EffectiveBalance, result2.Validators()[0].EffectiveBalance)
	})

	t.Run("empty_diff_handling", func(t *testing.T) {
		// Test that empty diffs are handled correctly
		source, _ := util.DeterministicGenesisStateElectra(t, 8)
		target := source.Copy() // No changes

		// Should create minimal diff
		diff, err := Diff(source, target)
		require.NoError(t, err)

		// Apply should work and return equivalent state
		result, err := ApplyDiff(context.Background(), source, diff)
		require.NoError(t, err)

		require.Equal(t, source.Slot(), result.Slot())
		require.Equal(t, len(source.Validators()), len(result.Validators()))
	})

	t.Run("compression_efficiency", func(t *testing.T) {
		// Test that compression is working effectively
		source, _ := util.DeterministicGenesisStateElectra(t, 100)
		target := source.Copy()

		// Make small changes
		_ = target.SetSlot(source.Slot() + 1)
		validators := target.Validators()
		validators[0].EffectiveBalance += 1000000000
		_ = target.SetValidators(validators)

		// Create diff
		diff, err := Diff(source, target)
		require.NoError(t, err)

		// Get full state size
		fullStateSSZ, err := target.MarshalSSZ()
		require.NoError(t, err)

		// Diff should be much smaller than full state
		diffSize := len(diff.StateDiff) + len(diff.ValidatorDiffs) + len(diff.BalancesDiff)
		require.Equal(t, true, diffSize < len(fullStateSSZ)/2,
			"Diff should be smaller than full state: diff=%d, full=%d", diffSize, len(fullStateSSZ))
	})
}
// TestKMPSecurity tests the KMP algorithm for security issues
func TestKMPSecurity(t *testing.T) {
	t.Run("nil_pointer_handling", func(t *testing.T) {
		// Test with nil pointers in the pattern/text
		pattern := []*int{nil, nil, nil}
		text := []*int{nil, nil, nil, nil, nil}

		equals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		// Should not panic - result can be any integer
		result := kmpIndex(len(pattern), text, equals)
		_ = result // Any result is valid, just ensure no panic
	})

	t.Run("empty_pattern_edge_case", func(t *testing.T) {
		var pattern []*int
		text := []*int{new(int), new(int)}

		equals := func(a, b *int) bool { return a == b }

		result := kmpIndex(0, text, equals)
		require.Equal(t, 0, result, "Empty pattern should return 0")
		_ = pattern // Silence unused variable warning
	})

	t.Run("realistic_pattern_performance", func(t *testing.T) {
		// Test with realistic sizes to ensure good performance
		realisticSize := 100 // More realistic for validator arrays
		pattern := make([]*int, realisticSize)
		text := make([]*int, realisticSize*2)

		// Create realistic pattern
		for i := range pattern {
			val := i % 10 // More variation
			pattern[i] = &val
		}
		for i := range text {
			val := i % 10
			text[i] = &val
		}

		equals := func(a, b *int) bool {
			if a == nil && b == nil {
				return true
			}
			if a == nil || b == nil {
				return false
			}
			return *a == *b
		}

		start := time.Now()
		result := kmpIndex(len(pattern), text, equals)
		duration := time.Since(start)

		// Should complete quickly with realistic inputs
		require.Equal(t, true, duration < time.Second,
			"KMP took too long: %v", duration)
		_ = result // Any result is valid, just ensure performance is good
	})
}
// TestConcurrencySafety tests thread safety of the hdiff operations
func TestConcurrencySafety(t *testing.T) {
	t.Run("concurrent_diff_creation", func(t *testing.T) {
		source, _ := util.DeterministicGenesisStateElectra(t, 32)
		target := source.Copy()
		_ = target.SetSlot(source.Slot() + 1)

		const numGoroutines = 10
		const iterations = 100

		var wg sync.WaitGroup
		errors := make(chan error, numGoroutines*iterations)

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(workerID int) {
				defer wg.Done()

				for j := 0; j < iterations; j++ {
					_, err := Diff(source, target)
					if err != nil {
						errors <- fmt.Errorf("worker %d iteration %d: %v", workerID, j, err)
					}
				}
			}(i)
		}

		wg.Wait()
		close(errors)

		// Check for any errors
		for err := range errors {
			t.Error(err)
		}
	})

	t.Run("concurrent_diff_application", func(t *testing.T) {
		ctx := context.Background()
		source, _ := util.DeterministicGenesisStateElectra(t, 16)
		target := source.Copy()
		_ = target.SetSlot(source.Slot() + 5)

		diff, err := Diff(source, target)
		require.NoError(t, err)

		const numGoroutines = 10
		var wg sync.WaitGroup
		errors := make(chan error, numGoroutines)

		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(workerID int) {
				defer wg.Done()

				// Each goroutine needs its own copy of the source state
				localSource := source.Copy()
				_, err := ApplyDiff(ctx, localSource, diff)
				if err != nil {
					errors <- fmt.Errorf("worker %d: %v", workerID, err)
				}
			}(i)
		}

		wg.Wait()
		close(errors)

		// Check for any errors
		for err := range errors {
			t.Error(err)
		}
	})
}
consensus-types/hdiff/state_diff.go (new file, 2040 lines; diff suppressed because it is too large)
consensus-types/hdiff/state_diff_test.go (new file, 1195 lines; diff suppressed because it is too large)
consensus-types/helpers/BUILD.bazel (new file, 9 lines)
@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["comparisons.go"],
    importpath = "github.com/OffchainLabs/prysm/v6/consensus-types/helpers",
    visibility = ["//visibility:public"],
    deps = ["//proto/prysm/v1alpha1:go_default_library"],
)
consensus-types/helpers/comparisons.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package helpers

import (
	"bytes"

	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func ForksEqual(s, t *ethpb.Fork) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if s.Epoch != t.Epoch {
		return false
	}
	if !bytes.Equal(s.PreviousVersion, t.PreviousVersion) {
		return false
	}
	return bytes.Equal(s.CurrentVersion, t.CurrentVersion)
}

func BlockHeadersEqual(s, t *ethpb.BeaconBlockHeader) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if s.Slot != t.Slot {
		return false
	}
	if s.ProposerIndex != t.ProposerIndex {
		return false
	}
	if !bytes.Equal(s.ParentRoot, t.ParentRoot) {
		return false
	}
	if !bytes.Equal(s.StateRoot, t.StateRoot) {
		return false
	}
	return bytes.Equal(s.BodyRoot, t.BodyRoot)
}

func Eth1DataEqual(s, t *ethpb.Eth1Data) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if !bytes.Equal(s.DepositRoot, t.DepositRoot) {
		return false
	}
	if s.DepositCount != t.DepositCount {
		return false
	}
	return bytes.Equal(s.BlockHash, t.BlockHash)
}

func PendingDepositsEqual(s, t *ethpb.PendingDeposit) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if !bytes.Equal(s.PublicKey, t.PublicKey) {
		return false
	}
	if !bytes.Equal(s.WithdrawalCredentials, t.WithdrawalCredentials) {
		return false
	}
	if s.Amount != t.Amount {
		return false
	}
	if !bytes.Equal(s.Signature, t.Signature) {
		return false
	}
	return s.Slot == t.Slot
}

func PendingPartialWithdrawalsEqual(s, t *ethpb.PendingPartialWithdrawal) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	if s.Index != t.Index {
		return false
	}
	if s.Amount != t.Amount {
		return false
	}
	return s.WithdrawableEpoch == t.WithdrawableEpoch
}

func PendingConsolidationsEqual(s, t *ethpb.PendingConsolidation) bool {
	if s == nil && t == nil {
		return true
	}
	if s == nil || t == nil {
		return false
	}
	return s.SourceIndex == t.SourceIndex && s.TargetIndex == t.TargetIndex
}
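A small usage sketch of the nil-safe comparison helpers above (the values are illustrative):

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/consensus-types/helpers"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

func main() {
	a := &ethpb.Fork{PreviousVersion: []byte{0, 0, 0, 0}, CurrentVersion: []byte{1, 0, 0, 0}, Epoch: 10}
	b := &ethpb.Fork{PreviousVersion: []byte{0, 0, 0, 0}, CurrentVersion: []byte{1, 0, 0, 0}, Epoch: 10}

	fmt.Println(helpers.ForksEqual(a, b))     // true: every field matches
	fmt.Println(helpers.ForksEqual(a, nil))   // false: only one side is nil
	fmt.Println(helpers.ForksEqual(nil, nil)) // true by convention
}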
@@ -1,6 +1,7 @@
 package util

 import (
+	"encoding/binary"
 	"math/big"
 	"testing"

@@ -109,21 +110,14 @@ func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...F
 	block.Block.ParentRoot = generator.parent[:]
 	block.Block.ProposerIndex = generator.proposer

-	blobs := make([]kzg.Blob, 0, generator.blobCount)
-	commitments := make([][]byte, 0, generator.blobCount)
-
-	for i := range generator.blobCount {
-		blob := kzg.Blob{uint8(i)}
-
-		commitment, err := kzg.BlobToKZGCommitment(&blob)
-		require.NoError(t, err)
-
-		blobs = append(blobs, blob)
-		commitments = append(commitments, commitment[:])
+	block.Block.Body.BlobKzgCommitments = make([][]byte, blobCount)
+	for i := range blobCount {
+		var commitment [fieldparams.KzgCommitmentSize]byte
+		binary.LittleEndian.PutUint16(commitment[:16], uint16(i))
+		binary.LittleEndian.PutUint16(commitment[16:32], uint16(generator.slot))
+		block.Block.Body.BlobKzgCommitments[i] = commitment[:]
 	}

-	block.Block.Body.BlobKzgCommitments = commitments
-
 	body, err := blocks.NewBeaconBlockBody(block.Block.Body)
 	require.NoError(t, err)

@@ -155,30 +149,39 @@ func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...F
 	root, err := block.Block.HashTreeRoot()
 	require.NoError(t, err)

-	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(block)
+	sbb, err := blocks.NewSignedBeaconBlock(block)
 	require.NoError(t, err)

+	sh, err := sbb.Header()
+	require.NoError(t, err)
+
+	blobs := make([]kzg.Blob, blobCount)
+	for i, commitment := range block.Block.Body.BlobKzgCommitments {
+		roSidecars := GenerateTestDenebBlobSidecar(t, root, sh, i, commitment, inclusion[i])
+		blobs[i] = kzg.Blob(roSidecars.Blob)
+	}
+
 	cellsAndProofs := GenerateCellsAndProofs(t, blobs)

-	sidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
+	dataColumns, err := peerdas.DataColumnSidecars(sbb, cellsAndProofs)
 	require.NoError(t, err)

-	roSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
-	verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
-	for _, sidecar := range sidecars {
-		roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, root)
+	roSidecars := make([]blocks.RODataColumn, 0, len(dataColumns))
+	roVerifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumns))
+	for _, dataColumn := range dataColumns {
+		roSidecar, err := blocks.NewRODataColumnWithRoot(dataColumn, root)
 		require.NoError(t, err)

 		roVerifiedSidecar := blocks.NewVerifiedRODataColumn(roSidecar)

 		roSidecars = append(roSidecars, roSidecar)
-		verifiedRoSidecars = append(verifiedRoSidecars, roVerifiedSidecar)
+		roVerifiedSidecars = append(roVerifiedSidecars, roVerifiedSidecar)
 	}

-	roBlock, err := blocks.NewROBlockWithRoot(signedBeaconBlock, root)
+	rob, err := blocks.NewROBlockWithRoot(sbb, root)
 	require.NoError(t, err)

-	return roBlock, roSidecars, verifiedRoSidecars
+	return rob, roSidecars, roVerifiedSidecars
 }

 func GenerateCellsAndProofs(t testing.TB, blobs []kzg.Blob) []kzg.CellsAndProofs {