mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 21:38:05 -05:00

Compare commits: fix-fulu-e ... async-dyna

22 Commits:
53e33ef559
fe96d226ea
df86f57507
9b0a3e9632
5e079aa62c
5c68ec5c39
5410232bef
3f5c4df7e0
5c348dff59
8136ff7c3a
f690af81fa
029b896c79
e1117a7de2
39b2a02f66
4e8a710b64
7191a5bcdf
d335a52c49
c7401f5e75
0057cc57b5
b1dc5e485d
f035da6fc5
854f4bc9a3
@@ -2993,7 +2993,7 @@ There are two known issues with this release:

### Added

- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
  details.
- Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
- Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
@@ -2,7 +2,7 @@

Prysm is go project with many complicated dependencies, including some c++ based libraries. There
are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
"go build" project.
@@ -1,9 +1,8 @@

package httprest

import (
    "time"

    "net/http"
    "time"

    "github.com/OffchainLabs/prysm/v6/api/server/middleware"
)
@@ -29,3 +29,24 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
        }
    }()
}

// RunEveryDynamic runs the provided command periodically with a dynamic interval.
// The interval is determined by calling the intervalFunc before each execution.
// It runs in a goroutine, and can be cancelled by finishing the supplied context.
func RunEveryDynamic(ctx context.Context, intervalFunc func() time.Duration, f func()) {
    go func() {
        for {
            // Get the next interval duration
            interval := intervalFunc()
            timer := time.NewTimer(interval)

            select {
            case <-timer.C:
                f()
            case <-ctx.Done():
                timer.Stop()
                return
            }
        }
    }()
}
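For orientation, here is a minimal, hypothetical usage sketch of the new helper. The `github.com/OffchainLabs/prysm/v6/async` import path is assumed from the test file later in this compare; nothing below is part of the change itself.

```go
package main

import (
	"context"
	"time"

	"github.com/OffchainLabs/prysm/v6/async" // assumed package path for RunEveryDynamic
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	start := time.Now()
	// Poll frequently right after startup, then back off.
	async.RunEveryDynamic(ctx, func() time.Duration {
		if time.Since(start) < time.Minute {
			return 5 * time.Second
		}
		return 30 * time.Second
	}, func() {
		// Periodic work goes here.
	})

	// RunEveryDynamic returns immediately; keep the process alive to observe a few ticks.
	time.Sleep(2 * time.Minute)
}
```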
@@ -38,3 +38,51 @@ func TestEveryRuns(t *testing.T) {
        t.Error("Counter incremented after stop")
    }
}

func TestEveryDynamicRuns(t *testing.T) {
    ctx, cancel := context.WithCancel(t.Context())

    i := int32(0)
    intervalCount := int32(0)

    // Start with 50ms intervals, then increase to 100ms after 2 calls
    async.RunEveryDynamic(ctx, func() time.Duration {
        count := atomic.LoadInt32(&intervalCount)
        atomic.AddInt32(&intervalCount, 1)
        if count < 2 {
            return 50 * time.Millisecond
        }
        return 100 * time.Millisecond
    }, func() {
        atomic.AddInt32(&i, 1)
    })

    // After 150ms, should have run at least 2 times (at 50ms and 100ms)
    time.Sleep(150 * time.Millisecond)
    count1 := atomic.LoadInt32(&i)
    if count1 < 2 {
        t.Errorf("Expected at least 2 runs after 150ms, got %d", count1)
    }

    // After another 150ms (total 300ms), should have run at least 3 times
    // (50ms, 100ms, 150ms, 250ms)
    time.Sleep(150 * time.Millisecond)
    count2 := atomic.LoadInt32(&i)
    if count2 < 3 {
        t.Errorf("Expected at least 3 runs after 300ms, got %d", count2)
    }

    cancel()

    // Sleep for a bit to let the cancel take place.
    time.Sleep(100 * time.Millisecond)

    last := atomic.LoadInt32(&i)

    // Sleep for a bit and ensure the value has not increased.
    time.Sleep(200 * time.Millisecond)

    if atomic.LoadInt32(&i) != last {
        t.Error("Counter incremented after stop")
    }
}
@@ -366,7 +366,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
    randBlob := random.GetRandBlob(123)
    var blob Blob
    copy(blob[:], randBlob[:])

    // Create invalid commitment (wrong size)
    invalidCommitment := make([]byte, 32) // Should be 48 bytes
    cellProofs := make([][]byte, numberOfColumns)
@@ -456,10 +456,10 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
        copy(blob[:], randBlob[:])
        commitment, err := BlobToKZGCommitment(&blob)
        require.NoError(t, err)

        blobs[i] = blob[:]
        commitments[i] = commitment[:]

        // Add cell proofs - make some invalid in the second blob
        for j := uint64(0); j < numberOfColumns; j++ {
            if i == 1 && j == 64 {
beacon-chain/cache/sync_committee.go (vendored)
@@ -67,6 +67,30 @@ func (s *SyncCommitteeCache) Clear() {
    s.cache = cache.NewFIFO(keyFn)
}

// CurrentPeriodPositions returns current period positions of validator indices with respect with
// sync committee. If any input validator index has no assignment, an empty list will be returned
// for that validator. If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
// Manual checking of state for index position in state is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
    s.lock.RLock()
    defer s.lock.RUnlock()

    pos, err := s.positionsInCommittee(root, indices)
    if err != nil {
        return nil, err
    }
    result := make([][]primitives.CommitteeIndex, len(pos))
    for i, p := range pos {
        if p == nil {
            result[i] = []primitives.CommitteeIndex{}
        } else {
            result[i] = p.currentPeriod
        }
    }

    return result, nil
}

// CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
// sync committee. If the input validator index has no assignment, an empty list will be returned.
// If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.

@@ -104,11 +128,7 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
    return pos.nextPeriod, nil
}

// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of validator index to its index(s) position in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
    root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
    obj, exists, err := s.cache.GetByKey(key(root))
    if err != nil {
        return nil, err

@@ -121,13 +141,33 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
    if !ok {
        return nil, errNotSyncCommitteeIndexPosition
    }
    idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
    if !ok {
        SyncCommitteeCacheMiss.Inc()
    result := make([]*positionInCommittee, len(indices))
    for i, idx := range indices {
        idxInCommittee, ok := item.vIndexToPositionMap[idx]
        if ok {
            SyncCommitteeCacheHit.Inc()
            result[i] = idxInCommittee
        } else {
            SyncCommitteeCacheMiss.Inc()
            result[i] = nil
        }
    }
    return result, nil
}

// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of validator index to its index(s) position in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
    root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
    positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
    if err != nil {
        return nil, err
    }
    if len(positions) == 0 {
        return nil, nil
    }
    SyncCommitteeCacheHit.Inc()
    return idxInCommittee, nil
    return positions[0], nil
}

// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
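A hedged sketch of the intended caller contract for the new batched cache method. The cache import path is assumed; `helpers.CurrentPeriodPositions`, added later in this compare, is the real consumer that falls back to the beacon state on a miss.

```go
package example

import (
	"errors"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache" // assumed import path
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// lookupPositions is a hypothetical caller of the new batched method. A nil
// result signals that the root is not cached yet and positions should be
// recomputed from the beacon state, per the doc comment above.
func lookupPositions(c *cache.SyncCommitteeCache, root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
	positions, err := c.CurrentPeriodPositions(root, indices)
	if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
		return nil, nil // cache miss: fall back to reading the state
	}
	if err != nil {
		return nil, err
	}
	return positions, nil
}
```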
@@ -16,6 +16,11 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
    return &FakeSyncCommitteeCache{}
}

// CurrentPeriodPositions -- fake
func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
    return nil, nil
}

// CurrentEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
    return nil, nil
@@ -5,6 +5,7 @@ import (
    "sort"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/container/slice"

@@ -39,11 +40,11 @@ func ProcessAttesterSlashings(
    ctx context.Context,
    beaconState state.BeaconState,
    slashings []ethpb.AttSlashing,
    slashFunc slashValidatorFunc,
    exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
    var err error
    for _, slashing := range slashings {
        beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
        beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
        if err != nil {
            return nil, err
        }

@@ -56,7 +57,7 @@ func ProcessAttesterSlashing(
    ctx context.Context,
    beaconState state.BeaconState,
    slashing ethpb.AttSlashing,
    slashFunc slashValidatorFunc,
    exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
    if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
        return nil, errors.Wrap(err, "could not verify attester slashing")

@@ -75,10 +76,9 @@ func ProcessAttesterSlashing(
        return nil, err
    }
    if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
        beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
        beaconState, err = validators.SlashValidator(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), exitInfo)
        if err != nil {
            return nil, errors.Wrapf(err, "could not slash validator index %d",
                validatorIndex)
            return nil, errors.Wrapf(err, "could not slash validator index %d", validatorIndex)
        }
        slashedAny = true
    }
@@ -4,6 +4,7 @@ import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"

@@ -44,11 +45,10 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
            Target: &ethpb.Checkpoint{Epoch: 1}},
    })}}

    var registry []*ethpb.Validator
    currentSlot := primitives.Slot(0)

    beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Validators: registry,
        Validators: []*ethpb.Validator{{}},
        Slot:       currentSlot,
    })
    require.NoError(t, err)

@@ -62,16 +62,15 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
    for i, s := range b.Block.Body.AttesterSlashings {
        ss[i] = s
    }
    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
    assert.ErrorContains(t, "attestations are not slashable", err)
}

func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
    var registry []*ethpb.Validator
    currentSlot := primitives.Slot(0)

    beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Validators: registry,
        Validators: []*ethpb.Validator{{}},
        Slot:       currentSlot,
    })
    require.NoError(t, err)

@@ -101,7 +100,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
    for i, s := range b.Block.Body.AttesterSlashings {
        ss[i] = s
    }
    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
    assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}

@@ -243,7 +242,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
    currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
    require.NoError(t, tc.st.SetSlot(currentSlot))

    newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
    newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
    require.NoError(t, err)
    newRegistry := newState.Validators()

@@ -265,3 +264,83 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
        })
    }
}

func TestProcessAttesterSlashing_ExitEpochGetsUpdated(t *testing.T) {
    st, keys := util.DeterministicGenesisStateElectra(t, 8)
    bal, err := helpers.TotalActiveBalance(st)
    require.NoError(t, err)
    perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(bal))
    vals := st.Validators()

    // We set the total effective balance of slashed validators
    // higher than the churn limit for a single epoch.
    vals[0].EffectiveBalance = uint64(perEpochChurn / 3)
    vals[1].EffectiveBalance = uint64(perEpochChurn / 3)
    vals[2].EffectiveBalance = uint64(perEpochChurn / 3)
    vals[3].EffectiveBalance = uint64(perEpochChurn / 3)
    require.NoError(t, st.SetValidators(vals))

    sl1att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        Data: &ethpb.AttestationData{
            Source: &ethpb.Checkpoint{Epoch: 1},
        },
        AttestingIndices: []uint64{0, 1},
    })
    sl1att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        AttestingIndices: []uint64{0, 1},
    })
    slashing1 := &ethpb.AttesterSlashingElectra{
        Attestation_1: sl1att1,
        Attestation_2: sl1att2,
    }
    sl2att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        Data: &ethpb.AttestationData{
            Source: &ethpb.Checkpoint{Epoch: 1},
        },
        AttestingIndices: []uint64{2, 3},
    })
    sl2att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
        AttestingIndices: []uint64{2, 3},
    })
    slashing2 := &ethpb.AttesterSlashingElectra{
        Attestation_1: sl2att1,
        Attestation_2: sl2att2,
    }

    domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
    require.NoError(t, err)

    signingRoot, err := signing.ComputeSigningRoot(sl1att1.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 := keys[0].Sign(signingRoot[:])
    sig1 := keys[1].Sign(signingRoot[:])
    aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl1att1.Signature = aggregateSig.Marshal()

    signingRoot, err = signing.ComputeSigningRoot(sl1att2.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 = keys[0].Sign(signingRoot[:])
    sig1 = keys[1].Sign(signingRoot[:])
    aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl1att2.Signature = aggregateSig.Marshal()

    signingRoot, err = signing.ComputeSigningRoot(sl2att1.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 = keys[2].Sign(signingRoot[:])
    sig1 = keys[3].Sign(signingRoot[:])
    aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl2att1.Signature = aggregateSig.Marshal()

    signingRoot, err = signing.ComputeSigningRoot(sl2att2.GetData(), domain)
    assert.NoError(t, err, "Could not get signing root of beacon block header")
    sig0 = keys[2].Sign(signingRoot[:])
    sig1 = keys[3].Sign(signingRoot[:])
    aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
    sl2att2.Signature = aggregateSig.Marshal()

    exitInfo := v.ExitInformation(st)
    assert.Equal(t, primitives.Epoch(0), exitInfo.HighestExitEpoch)
    _, err = blocks.ProcessAttesterSlashings(t.Context(), st, []ethpb.AttSlashing{slashing1, slashing2}, exitInfo)
    require.NoError(t, err)
    assert.Equal(t, primitives.Epoch(6), exitInfo.HighestExitEpoch)
}
@@ -191,7 +191,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
    fuzzer.Fuzz(p)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
    r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.SlashValidator)
    r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, p)
    }

@@ -224,7 +224,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
    fuzzer.Fuzz(a)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
    r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
    r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
    }

@@ -334,7 +334,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
    fuzzer.Fuzz(e)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
    r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e})
    r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and exit: %v", r, err, state, e)
    }

@@ -351,7 +351,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
    fuzzer.Fuzz(e)
    s, err := state_native.InitializeFromProtoUnsafePhase0(state)
    require.NoError(t, err)
    r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e})
    r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
    if err != nil && r != nil {
        t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, e)
    }
@@ -94,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
    for i, s := range b.Block.Body.AttesterSlashings {
        ss[i] = s
    }
    newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
    newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
    require.NoError(t, err)
    newRegistry := newState.Validators()
    if !newRegistry[expectedSlashedVal].Slashed {
@@ -9,7 +9,6 @@ import (
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/time/slots"

@@ -50,13 +49,12 @@ func ProcessVoluntaryExits(
    ctx context.Context,
    beaconState state.BeaconState,
    exits []*ethpb.SignedVoluntaryExit,
    exitInfo *v.ExitInfo,
) (state.BeaconState, error) {
    // Avoid calculating the epoch churn if no exits exist.
    if len(exits) == 0 {
        return beaconState, nil
    }
    maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
    var exitEpoch primitives.Epoch
    for idx, exit := range exits {
        if exit == nil || exit.Exit == nil {
            return nil, errors.New("nil voluntary exit in block body")

@@ -68,15 +66,8 @@ func ProcessVoluntaryExits(
        if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
            return nil, errors.Wrapf(err, "could not verify exit %d", idx)
        }
        beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
        if err == nil {
            if exitEpoch > maxExitEpoch {
                maxExitEpoch = exitEpoch
                churn = 1
            } else if exitEpoch == maxExitEpoch {
                churn++
            }
        } else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
        beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, exitInfo)
        if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
            return nil, err
        }
    }
@@ -7,6 +7,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v6/config/params"

@@ -46,7 +47,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
    }

    want := "validator has not been active long enough to exit"
    _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
    _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
    assert.ErrorContains(t, want, err)
}

@@ -76,7 +77,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
    }

    want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
    _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
    _, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
    assert.ErrorContains(t, want, err)
}

@@ -124,7 +125,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
        },
    }

    newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
    newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
    require.NoError(t, err, "Could not process exits")
    newRegistry := newState.Validators()
    if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {
@@ -7,9 +7,9 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"

@@ -19,11 +19,6 @@ import (
// ErrCouldNotVerifyBlockHeader is returned when a block header's signature cannot be verified.
var ErrCouldNotVerifyBlockHeader = errors.New("could not verify beacon block header")

type slashValidatorFunc func(
    ctx context.Context,
    st state.BeaconState,
    vid primitives.ValidatorIndex) (state.BeaconState, error)

// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.

@@ -54,11 +49,11 @@ func ProcessProposerSlashings(
    ctx context.Context,
    beaconState state.BeaconState,
    slashings []*ethpb.ProposerSlashing,
    slashFunc slashValidatorFunc,
    exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
    var err error
    for _, slashing := range slashings {
        beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
        beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
        if err != nil {
            return nil, err
        }

@@ -71,7 +66,7 @@ func ProcessProposerSlashing(
    ctx context.Context,
    beaconState state.BeaconState,
    slashing *ethpb.ProposerSlashing,
    slashFunc slashValidatorFunc,
    exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
    var err error
    if slashing == nil {

@@ -80,7 +75,7 @@ func ProcessProposerSlashing(
    if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
        return nil, errors.Wrap(err, "could not verify proposer slashing")
    }
    beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
    beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
    if err != nil {
        return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
    }
@@ -50,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
        },
    }
    want := "mismatched header slots"
    _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
    _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    assert.ErrorContains(t, want, err)
}

@@ -83,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
        },
    }
    want := "expected slashing headers to differ"
    _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
    _, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    assert.ErrorContains(t, want, err)
}

@@ -133,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
        "validator with key %#x is not slashable",
        bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
    )
    _, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
    _, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    assert.ErrorContains(t, want, err)
}

@@ -172,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()

@@ -220,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()

@@ -268,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()

@@ -316,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
    block := util.NewBeaconBlock()
    block.Block.Body.ProposerSlashings = slashings

    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
    newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
    require.NoError(t, err)

    newStateVals := newState.Validators()
@@ -84,8 +84,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
    // Handle validator ejections.
    for _, idx := range eligibleForEjection {
        var err error
        // exitQueueEpoch and churn arguments are not used in electra.
        st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
        // exit info is not used in electra
        st, err = validators.InitiateValidatorExit(ctx, st, idx, &validators.ExitInfo{})
        if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
            return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
        }
@@ -4,6 +4,7 @@ import (
    "context"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"

@@ -46,18 +47,21 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

func ProcessOperations(
    ctx context.Context,
    st state.BeaconState,
    block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    var err error

    // 6110 validations are in VerifyOperationLengths
    bb := block.Body()
    // Electra extends the altair operations.
    st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
    exitInfo := v.ExitInformation(st)
    if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
        return nil, errors.Wrap(err, "could not update total active balance cache")
    }
    st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair proposer slashing")
    }
    st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
    st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attester slashing")
    }

@@ -68,7 +72,7 @@ func ProcessOperations(
    if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
        return nil, errors.Wrap(err, "could not process altair deposit")
    }
    st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
    st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
    }
@@ -147,9 +147,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
        if isFullExitRequest {
            // Only exit validator if it has no pending withdrawals in the queue
            if pendingBalanceToWithdraw == 0 {
                maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
                var err error
                st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
                st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
                if err != nil {
                    return nil, err
                }
@@ -99,8 +99,7 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
    for _, idx := range eligibleForEjection {
        // Here is fine to do a quadratic loop since this should
        // barely happen
        maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
        st, _, err = validators.InitiateValidatorExit(ctx, st, idx, maxExitEpoch, churn)
        st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
        if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
            return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
        }
@@ -16,10 +16,10 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
    if err := electra.ProcessEpoch(ctx, state); err != nil {
        return errors.Wrap(err, "could not process epoch in fulu transition")
    }
    return processProposerLookahead(ctx, state)
    return ProcessProposerLookahead(ctx, state)
}

func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
func ProcessProposerLookahead(ctx context.Context, state state.BeaconState) error {
    _, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
    defer span.End()
@@ -87,6 +87,11 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
    return total, nil
}

// UpdateTotalActiveBalanceCache updates the cache with the given total active balance.
func UpdateTotalActiveBalanceCache(s state.BeaconState, total uint64) error {
    return balanceCache.AddTotalEffectiveBalance(s, total)
}

// IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:
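A hedged sketch of how the new helper pairs with `validators.ExitInformation`, mirroring the `altairOperations` and electra `ProcessOperations` hunks elsewhere in this compare (import paths are taken from those hunks):

```go
package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
)

// primeBalanceCache is a hypothetical helper: it scans the registry once via
// ExitInformation and seeds the total-active-balance cache with the result so
// that later helpers.TotalActiveBalance calls can hit the cache.
func primeBalanceCache(st state.BeaconState) (*v.ExitInfo, error) {
	exitInfo := v.ExitInformation(st)
	if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
		return nil, err
	}
	return exitInfo, nil
}
```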
@@ -297,3 +297,30 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
        require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
    }
}

func TestUpdateTotalActiveBalanceCache(t *testing.T) {
    helpers.ClearCache()

    // Create a test state with some validators
    validators := []*ethpb.Validator{
        {EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
        {EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
        {EffectiveBalance: 31 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
    }
    state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
        Validators: validators,
        Slot:       0,
    })
    require.NoError(t, err)

    // Test updating cache with a specific total
    testTotal := uint64(95 * 1e9) // 32 + 32 + 31 = 95
    err = helpers.UpdateTotalActiveBalanceCache(state, testTotal)
    require.NoError(t, err)

    // Verify the cache was updated by retrieving the total active balance
    // which should now return the cached value
    cachedTotal, err := helpers.TotalActiveBalance(state)
    require.NoError(t, err)
    assert.Equal(t, testTotal, cachedTotal, "Cache should return the updated total")
}
@@ -21,6 +21,39 @@ var (
    syncCommitteeCache = cache.NewSyncCommittee()
)

// CurrentPeriodPositions returns committee indices of the current period sync committee for input validators.
func CurrentPeriodPositions(st state.BeaconState, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
    root, err := SyncPeriodBoundaryRoot(st)
    if err != nil {
        return nil, err
    }
    pos, err := syncCommitteeCache.CurrentPeriodPositions(root, indices)
    if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
        committee, err := st.CurrentSyncCommittee()
        if err != nil {
            return nil, err
        }

        // Fill in the cache on miss.
        go func() {
            if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
                log.WithError(err).Error("Could not fill sync committee cache on miss")
            }
        }()

        pos = make([][]primitives.CommitteeIndex, len(indices))
        for i, idx := range indices {
            pubkey := st.PubkeyAtIndex(idx)
            pos[i] = findSubCommitteeIndices(pubkey[:], committee.Pubkeys)
        }
        return pos, nil
    }
    if err != nil {
        return nil, err
    }
    return pos, nil
}

// IsCurrentPeriodSyncCommittee returns true if the input validator index belongs in the current period sync committee
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache
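A short, hypothetical caller-side sketch of the new batched helper (identifiers and import paths come from the hunk above):

```go
package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// syncSubcommitteePositions resolves current-period sync committee positions
// for a batch of validators in a single call instead of one lookup per index.
func syncSubcommitteePositions(st state.BeaconState, indices []primitives.ValidatorIndex) (map[primitives.ValidatorIndex][]primitives.CommitteeIndex, error) {
	positions, err := helpers.CurrentPeriodPositions(st, indices)
	if err != nil {
		return nil, err
	}
	out := make(map[primitives.ValidatorIndex][]primitives.CommitteeIndex, len(indices))
	for i, idx := range indices {
		// An empty slice means indices[i] has no assignment this period.
		out[idx] = positions[i]
	}
	return out, nil
}
```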
@@ -17,6 +17,38 @@ import (
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestCurrentPeriodPositions(t *testing.T) {
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
        Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
    }
    for i := 0; i < len(validators); i++ {
        k := make([]byte, 48)
        copy(k, strconv.Itoa(i))
        validators[i] = &ethpb.Validator{
            PublicKey: k,
        }
        syncCommittee.Pubkeys[i] = bytesutil.PadTo(k, 48)
    }
    state, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
        Validators: validators,
    })
    require.NoError(t, err)
    require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
    require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
    require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee([32]byte{}, state))

    positions, err := helpers.CurrentPeriodPositions(state, []primitives.ValidatorIndex{0, 1})
    require.NoError(t, err)
    require.Equal(t, 2, len(positions))
    require.Equal(t, 1, len(positions[0]))
    assert.Equal(t, primitives.CommitteeIndex(0), positions[0][0])
    require.Equal(t, 1, len(positions[1]))
    assert.Equal(t, primitives.CommitteeIndex(1), positions[1][0])
}

func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
    helpers.ClearCache()
@@ -8,6 +8,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
    b "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
    v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"

@@ -374,15 +375,18 @@ func ProcessBlockForStateRoot(
}

// This calls altair block operations.
func altairOperations(
    ctx context.Context,
    st state.BeaconState,
    beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    var err error

    exitInfo := v.ExitInformation(st)
    if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
        return nil, errors.Wrap(err, "could not update total active balance cache")
    }
    st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair proposer slashing")
    }
    st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
    st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process altair attester slashing")
    }

@@ -393,7 +397,7 @@ func altairOperations(
    if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
        return nil, errors.Wrap(err, "could not process altair deposit")
    }
    st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
    st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
    }

@@ -401,15 +405,18 @@ func altairOperations(
}

// This calls phase 0 block operations.
func phase0Operations(
    ctx context.Context,
    st state.BeaconState,
    beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    var err error

    exitInfo := v.ExitInformation(st)
    if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
        return nil, errors.Wrap(err, "could not update total active balance cache")
    }
    st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process block proposer slashings")
    }
    st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
    st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process block attester slashings")
    }

@@ -420,5 +427,9 @@ func phase0Operations(
    if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
        return nil, errors.Wrap(err, "could not process deposits")
    }
    return b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
    st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(err, "could not process voluntary exits")
    }
    return st, nil
}
@@ -13,34 +13,55 @@ import (
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/math"
    mathutil "github.com/OffchainLabs/prysm/v6/math"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/pkg/errors"
)

// ExitInfo provides information about validator exits in the state.
type ExitInfo struct {
    HighestExitEpoch   primitives.Epoch
    Churn              uint64
    TotalActiveBalance uint64
}

// ErrValidatorAlreadyExited is an error raised when trying to process an exit of
// an already exited validator
var ErrValidatorAlreadyExited = errors.New("validator already exited")

// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of them
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
// ExitInformation returns information about validator exits.
func ExitInformation(s state.BeaconState) *ExitInfo {
    exitInfo := &ExitInfo{}

    farFutureEpoch := params.BeaconConfig().FarFutureEpoch
    currentEpoch := slots.ToEpoch(s.Slot())
    totalActiveBalance := uint64(0)

    err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
        e := val.ExitEpoch()
        if e != farFutureEpoch {
            if e > maxExitEpoch {
                maxExitEpoch = e
                churn = 1
            } else if e == maxExitEpoch {
                churn++
            if e > exitInfo.HighestExitEpoch {
                exitInfo.HighestExitEpoch = e
                exitInfo.Churn = 1
            } else if e == exitInfo.HighestExitEpoch {
                exitInfo.Churn++
            }
        }

        // Calculate total active balance in the same loop
        if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
            totalActiveBalance += val.EffectiveBalance()
        }

        return nil
    })
    _ = err
    return

    // Apply minimum balance as per spec
    exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
    return exitInfo
}

// InitiateValidatorExit takes in validator index and updates
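To make the new flow concrete, a hypothetical sketch of how a caller is expected to thread a single *ExitInfo through multiple exits. `InitiateValidatorExit` with this signature is defined in the next hunk, and the block-processing hunks elsewhere in this compare follow the same pattern.

```go
package example

import (
	"context"
	"errors"

	v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// exitAll computes exit information once and reuses it for every exit, so the
// exit-queue epoch and churn accumulate across calls instead of being
// recomputed from the full registry for each validator.
func exitAll(ctx context.Context, st state.BeaconState, indices []primitives.ValidatorIndex) (state.BeaconState, error) {
	exitInfo := v.ExitInformation(st)
	for _, idx := range indices {
		var err error
		st, err = v.InitiateValidatorExit(ctx, st, idx, exitInfo)
		if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
			return nil, err
		}
	}
	return st, nil
}
```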
@@ -64,59 +85,117 @@ func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, c
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
func InitiateValidatorExit(
    ctx context.Context,
    s state.BeaconState,
    idx primitives.ValidatorIndex,
    exitInfo *ExitInfo,
) (state.BeaconState, error) {
    validator, err := s.ValidatorAtIndex(idx)
    if err != nil {
        return nil, 0, err
        return nil, err
    }
    if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
        return s, validator.ExitEpoch, ErrValidatorAlreadyExited
        return s, ErrValidatorAlreadyExited
    }

    // Compute exit queue epoch.
    if s.Version() < version.Electra {
        // Relevant spec code from phase0:
        //
        // exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
        // exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
        // exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
        // if exit_queue_churn >= get_validator_churn_limit(state):
        //   exit_queue_epoch += Epoch(1)
        exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
        if exitableEpoch > exitQueueEpoch {
            exitQueueEpoch = exitableEpoch
            churn = 0
        }
        activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
        if err != nil {
            return nil, 0, errors.Wrap(err, "could not get active validator count")
        }
        currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)

        if churn >= currentChurn {
            exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
            if err != nil {
                return nil, 0, err
            }
        if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
            return nil, err
        }
    } else {
        // [Modified in Electra:EIP7251]
        // exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
        var err error
        exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
        exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
        if err != nil {
            return nil, 0, err
            return nil, err
        }
    }
    validator.ExitEpoch = exitQueueEpoch
    validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
    validator.ExitEpoch = exitInfo.HighestExitEpoch
    validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
    if err != nil {
        return nil, 0, err
        return nil, err
    }
    if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
        return nil, 0, err
        return nil, err
    }
    return s, exitQueueEpoch, nil
    return s, nil
}

// InitiateValidatorExitForTotalBal has the same functionality as InitiateValidatorExit,
// the only difference being how total active balance is obtained. In InitiateValidatorExit
// it is calculated inside the function and in InitiateValidatorExitForTotalBal it's a
// function argument.
func InitiateValidatorExitForTotalBal(
    ctx context.Context,
    s state.BeaconState,
    idx primitives.ValidatorIndex,
    exitInfo *ExitInfo,
    totalActiveBalance primitives.Gwei,
) (state.BeaconState, error) {
    validator, err := s.ValidatorAtIndex(idx)
    if err != nil {
        return nil, err
    }
    if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
        return s, ErrValidatorAlreadyExited
    }

    // Compute exit queue epoch.
    if s.Version() < version.Electra {
        if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
            return nil, err
        }
    } else {
        // [Modified in Electra:EIP7251]
        // exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
        var err error
        exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance, primitives.Gwei(validator.EffectiveBalance))
        if err != nil {
            return nil, err
        }
    }
    validator.ExitEpoch = exitInfo.HighestExitEpoch
    validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
    if err != nil {
        return nil, err
    }
    if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
        return nil, err
    }
    return s, nil
}

func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, exitInfo *ExitInfo) error {
    // Relevant spec code from phase0:
    //
    // exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
    // exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
    // exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
    // if exit_queue_churn >= get_validator_churn_limit(state):
    //   exit_queue_epoch += Epoch(1)
    exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
    if exitableEpoch > exitInfo.HighestExitEpoch {
        exitInfo.HighestExitEpoch = exitableEpoch
        exitInfo.Churn = 0
    }
    activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
    if err != nil {
        return errors.Wrap(err, "could not get active validator count")
    }
    currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
    if exitInfo.Churn >= currentChurn {
        exitInfo.HighestExitEpoch, err = exitInfo.HighestExitEpoch.SafeAdd(1)
        if err != nil {
            return err
        }
        exitInfo.Churn = 1
    } else {
        exitInfo.Churn = exitInfo.Churn + 1
    }
    return nil
}

// SlashValidator slashes the malicious validator's balance and awards
@@ -152,9 +231,12 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
func SlashValidator(
    ctx context.Context,
    s state.BeaconState,
    slashedIdx primitives.ValidatorIndex) (state.BeaconState, error) {
    maxExitEpoch, churn := MaxExitEpochAndChurn(s)
    s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
    slashedIdx primitives.ValidatorIndex,
    exitInfo *ExitInfo,
) (state.BeaconState, error) {
    var err error

    s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
    if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
        return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
    }
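For the slashing path, a similar hypothetical sketch: callers that previously passed `v.SlashValidator` as a callback now compute `ExitInformation` once and pass the shared `*ExitInfo` to every `SlashValidator` call (already-exited validators are tolerated inside `SlashValidator`, as the hunk above shows).

```go
package example

import (
	"context"

	v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// slashMany slashes several validators while sharing one ExitInfo, so the
// exit-queue bookkeeping reflects all slashings applied in the same block.
func slashMany(ctx context.Context, st state.BeaconState, indices []primitives.ValidatorIndex) (state.BeaconState, error) {
	exitInfo := v.ExitInformation(st)
	for _, idx := range indices {
		var err error
		st, err = v.SlashValidator(ctx, st, idx, exitInfo)
		if err != nil {
			return nil, err
		}
	}
	return st, nil
}
```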
@@ -49,9 +49,11 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, 0, 199, 1)
    exitInfo := &validators.ExitInfo{HighestExitEpoch: 199, Churn: 1}
    newState, err := validators.InitiateValidatorExit(t.Context(), state, 0, exitInfo)
    require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited)
    require.Equal(t, exitEpoch, epoch)
    assert.Equal(t, primitives.Epoch(199), exitInfo.HighestExitEpoch)
    assert.Equal(t, uint64(1), exitInfo.Churn)
    v, err := newState.ValidatorAtIndex(0)
    require.NoError(t, err)
    assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")

@@ -68,9 +70,11 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 1)
    exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 1}
    newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
    require.NoError(t, err)
    require.Equal(t, exitedEpoch+2, epoch)
    assert.Equal(t, exitedEpoch+2, exitInfo.HighestExitEpoch)
    assert.Equal(t, uint64(2), exitInfo.Churn)
    v, err := newState.ValidatorAtIndex(idx)
    require.NoError(t, err)
    assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")

@@ -88,9 +92,11 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 4)
    exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 4}
    newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
    require.NoError(t, err)
    require.Equal(t, exitedEpoch+3, epoch)
    assert.Equal(t, exitedEpoch+3, exitInfo.HighestExitEpoch)
    assert.Equal(t, uint64(1), exitInfo.Churn)

    // Because of exit queue overflow,
    // validator who init exited has to wait one more epoch.

@@ -110,7 +116,8 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
    }}
    state, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)
    _, _, err = validators.InitiateValidatorExit(t.Context(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
    exitInfo := &validators.ExitInfo{HighestExitEpoch: params.BeaconConfig().FarFutureEpoch - 1, Churn: 1}
    _, err = validators.InitiateValidatorExit(t.Context(), state, 1, exitInfo)
    require.ErrorContains(t, "addition overflows", err)
}

@@ -146,12 +153,11 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, primitives.Gwei(0), ebtc)

    newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
    newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, &validators.ExitInfo{}) // exit info is not used in electra
    require.NoError(t, err)

    // Expect that the exit epoch is the next available epoch with max seed lookahead.
    want := helpers.ActivationExitEpoch(exitedEpoch + 1)
    require.Equal(t, want, epoch)
    v, err := newState.ValidatorAtIndex(idx)
    require.NoError(t, err)
    assert.Equal(t, want, v.ExitEpoch, "Exit epoch was not the highest")

@@ -190,7 +196,7 @@ func TestSlashValidator_OK(t *testing.T) {
    require.NoError(t, err, "Could not get proposer")
    proposerBal, err := state.BalanceAtIndex(proposer)
    require.NoError(t, err)
    slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
    slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
    require.NoError(t, err, "Could not slash validator")
|
||||
require.Equal(t, true, slashedState.Version() == version.Phase0)
|
||||
|
||||
@@ -244,7 +250,7 @@ func TestSlashValidator_Electra(t *testing.T) {
|
||||
require.NoError(t, err, "Could not get proposer")
|
||||
proposerBal, err := state.BalanceAtIndex(proposer)
|
||||
require.NoError(t, err)
|
||||
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
|
||||
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
|
||||
require.NoError(t, err, "Could not slash validator")
|
||||
require.Equal(t, true, slashedState.Version() == version.Electra)
|
||||
|
||||
@@ -505,8 +511,8 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
epoch, churn := validators.MaxExitEpochAndChurn(s)
|
||||
require.Equal(t, tt.wantedEpoch, epoch)
|
||||
require.Equal(t, tt.wantedChurn, churn)
|
||||
exitInfo := validators.ExitInformation(s)
|
||||
require.Equal(t, tt.wantedEpoch, exitInfo.HighestExitEpoch)
|
||||
require.Equal(t, tt.wantedChurn, exitInfo.Churn)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,19 +116,43 @@ func (l *periodicEpochLayout) pruneBefore(before primitives.Epoch) (*pruneSummar
	}
	// Roll up summaries and clean up per-epoch directories.
	rollup := &pruneSummary{}

	// Track which period directories might be empty after epoch removal
	periodsToCheck := make(map[string]struct{})

	for epoch, sum := range sums {
		rollup.blobsPruned += sum.blobsPruned
		rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
		rmdir := l.epochDir(epoch)
		periodDir := l.periodDir(epoch)

		if len(sum.failedRemovals) == 0 {
			if err := l.fs.Remove(rmdir); err != nil {
				log.WithField("dir", rmdir).WithError(err).Error("Failed to remove epoch directory while pruning")
			} else {
				periodsToCheck[periodDir] = struct{}{}
			}
		} else {
			log.WithField("dir", rmdir).WithField("numFailed", len(sum.failedRemovals)).WithError(err).Error("Unable to remove epoch directory due to pruning failures")
		}
	}

	// Clean up empty period directories
	for periodDir := range periodsToCheck {
		entries, err := afero.ReadDir(l.fs, periodDir)
		if err != nil {
			log.WithField("dir", periodDir).WithError(err).Debug("Failed to read period directory contents")
			continue
		}

		// Only attempt to remove if directory is empty
		if len(entries) == 0 {
			if err := l.fs.Remove(periodDir); err != nil {
				log.WithField("dir", periodDir).WithError(err).Error("Failed to remove empty period directory")
			}
		}
	}

	return rollup, nil
}

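The empty-period-directory cleanup above could equally be factored into a small helper; the following is only an illustrative sketch against the same afero filesystem, not code from the diff:

// removeIfEmpty deletes dir only when it contains no entries.
// Hypothetical helper; mirrors the inline cleanup in pruneBefore above.
func removeIfEmpty(fs afero.Fs, dir string) error {
	entries, err := afero.ReadDir(fs, dir)
	if err != nil {
		return err
	}
	if len(entries) == 0 {
		return fs.Remove(dir)
	}
	return nil
}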
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -195,3 +196,48 @@ func TestLayoutPruneBefore(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLayoutByEpochPruneBefore(t *testing.T) {
|
||||
roots := testRoots(10)
|
||||
cases := []struct {
|
||||
name string
|
||||
pruned []testIdent
|
||||
remain []testIdent
|
||||
err error
|
||||
sum pruneSummary
|
||||
}{
|
||||
{
|
||||
name: "single epoch period cleanup",
|
||||
pruned: []testIdent{
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 367076, index: 0}},
|
||||
},
|
||||
remain: []testIdent{
|
||||
{offset: 0, blobIdent: blobIdent{root: roots[1], epoch: 371176, index: 0}}, // Different period
|
||||
},
|
||||
sum: pruneSummary{blobsPruned: 1},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
|
||||
pruned := testSetupBlobIdentPaths(t, fs, bs, c.pruned)
|
||||
remain := testSetupBlobIdentPaths(t, fs, bs, c.remain)
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
for _, id := range pruned {
|
||||
_, err := fs.Stat(bs.layout.sszPath(id))
|
||||
require.Equal(t, true, os.IsNotExist(err))
|
||||
|
||||
dirs := bs.layout.blockParentDirs(id)
|
||||
for i := len(dirs) - 1; i > 0; i-- {
|
||||
_, err = fs.Stat(dirs[i])
|
||||
require.Equal(t, true, os.IsNotExist(err))
|
||||
}
|
||||
}
|
||||
for _, id := range remain {
|
||||
_, err := fs.Stat(bs.layout.sszPath(id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ var (
|
||||
"lodestar",
|
||||
"js-libp2p",
|
||||
"rust-libp2p",
|
||||
"erigon/caplin",
|
||||
}
|
||||
p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "p2p_peer_count",
|
||||
|
||||
@@ -50,6 +50,7 @@ const (

// TestP2P represents a p2p implementation that can be used for testing.
type TestP2P struct {
	mu      sync.Mutex
	t       *testing.T
	BHost   host.Host
	EnodeID enode.ID
@@ -243,6 +244,8 @@ func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler)

// JoinTopic will join PubSub topic, if not already joined.
func (p *TestP2P) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if _, ok := p.joinedTopics[topic]; !ok {
		joinedTopic, err := p.pubsub.Join(topic, opts...)
		if err != nil {

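The added mutex guards the lazily populated joinedTopics map so concurrent JoinTopic calls cannot race on the same topic. A stripped-down sketch of that pattern follows; the type is hypothetical and assumes the go-libp2p-pubsub import already used by TestP2P:

type topicCache struct {
	mu     sync.Mutex
	topics map[string]*pubsub.Topic
}

func (c *topicCache) join(ps *pubsub.PubSub, name string) (*pubsub.Topic, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if t, ok := c.topics[name]; ok {
		return t, nil // already joined; joining the same topic twice would error
	}
	t, err := ps.Join(name)
	if err != nil {
		return nil, err
	}
	c.topics[name] = t
	return t, nil
}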
@@ -1230,6 +1230,7 @@ func (s *Service) prysmBeaconEndpoints(
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
// Warning: no longer supported post Fulu fork
|
||||
template: "/prysm/v1/beacon/blobs",
|
||||
name: namespace + ".PublishBlobs",
|
||||
middleware: []middleware.Middleware{
|
||||
|
||||
@@ -334,26 +334,26 @@ func (s *Server) GetBlockAttestationsV2(w http.ResponseWriter, r *http.Request)
	consensusAtts := blk.Block().Body().Attestations()

	v := blk.Block().Version()
	var attStructs []interface{}
	attStructs := make([]interface{}, len(consensusAtts))
	if v >= version.Electra {
		for _, att := range consensusAtts {
		for index, att := range consensusAtts {
			a, ok := att.(*eth.AttestationElectra)
			if !ok {
				httputil.HandleError(w, fmt.Sprintf("unable to convert consensus attestations electra of type %T", att), http.StatusInternalServerError)
				return
			}
			attStruct := structs.AttElectraFromConsensus(a)
			attStructs = append(attStructs, attStruct)
			attStructs[index] = attStruct
		}
	} else {
		for _, att := range consensusAtts {
		for index, att := range consensusAtts {
			a, ok := att.(*eth.Attestation)
			if !ok {
				httputil.HandleError(w, fmt.Sprintf("unable to convert consensus attestation of type %T", att), http.StatusInternalServerError)
				return
			}
			attStruct := structs.AttFromConsensus(a)
			attStructs = append(attStructs, attStruct)
			attStructs[index] = attStruct
		}
	}

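Besides avoiding repeated append reallocations, preallocating attStructs with make also guarantees a non-nil slice, which appears to be why an empty attestation list now encodes as "[]" instead of null; the tests added later in this diff assert exactly that. A self-contained illustration of the underlying encoding/json behaviour:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []interface{}           // declared but never appended to
	emptySlice := make([]interface{}, 0) // what make(..., len(x)) yields when x is empty

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}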
@@ -17,19 +17,19 @@ func TestBlocks_NewSignedBeaconBlock_EquivocationFix(t *testing.T) {
|
||||
var block structs.SignedBeaconBlock
|
||||
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
// Convert to generic format
|
||||
genericBlock, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
// Test the FIX: pass genericBlock.Block instead of genericBlock
|
||||
// This is what our fix changed in handlers.go line 704 and 858
|
||||
_, err = blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
require.NoError(t, err, "NewSignedBeaconBlock should work with genericBlock.Block")
|
||||
|
||||
|
||||
// Test the BROKEN version: pass genericBlock directly (this should fail)
|
||||
_, err = blocks.NewSignedBeaconBlock(genericBlock)
|
||||
if err == nil {
|
||||
t.Errorf("NewSignedBeaconBlock should fail with whole genericBlock but succeeded")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -910,6 +910,100 @@ func TestGetBlockAttestations(t *testing.T) {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("empty-attestations", func(t *testing.T) {
|
||||
t.Run("v1", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Body.Attestations = []*eth.Attestation{} // Explicitly set empty attestations
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &chainMock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v1/beacon/blocks/{block_id}/attestations", nil)
|
||||
request.SetPathValue("block_id", "head")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBlockAttestations(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlockAttestationsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
|
||||
// Ensure data is empty array, not null
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, 0, len(resp.Data))
|
||||
})
|
||||
|
||||
t.Run("v2-pre-electra", func(t *testing.T) {
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Body.Attestations = []*eth.Attestation{} // Explicitly set empty attestations
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
mockChainService := &chainMock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}/attestations", nil)
|
||||
request.SetPathValue("block_id", "head")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBlockAttestationsV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlockAttestationsV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
// Ensure data is "[]", not null
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, string(json.RawMessage("[]")), string(resp.Data))
|
||||
})
|
||||
|
||||
t.Run("v2-electra", func(t *testing.T) {
|
||||
eb := util.NewBeaconBlockFulu()
|
||||
eb.Block.Body.Attestations = []*eth.AttestationElectra{} // Explicitly set empty attestations
|
||||
esb, err := blocks.NewSignedBeaconBlock(eb)
|
||||
require.NoError(t, err)
|
||||
|
||||
mockChainService := &chainMock.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
Blocker: &testutil.MockBlocker{BlockToReturn: esb},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}/attestations", nil)
|
||||
request.SetPathValue("block_id", "head")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBlockAttestationsV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBlockAttestationsV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
|
||||
// Ensure data is "[]", not null
|
||||
require.NotNil(t, resp.Data)
|
||||
assert.Equal(t, string(json.RawMessage("[]")), string(resp.Data))
|
||||
assert.Equal(t, "fulu", resp.Version)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetBlindedBlock(t *testing.T) {
|
||||
|
||||
@@ -68,7 +68,8 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
			Code: http.StatusInternalServerError,
		}
	}
	st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), validators.SlashValidator)
	exitInfo := validators.ExitInformation(st)
	st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), exitInfo)
	if err != nil {
		return nil, &httputil.DefaultJsonError{
			Message: "Could not get attester slashing rewards: " + err.Error(),
@@ -82,7 +83,7 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
			Code: http.StatusInternalServerError,
		}
	}
	st, err = coreblocks.ProcessProposerSlashings(ctx, st, blk.Body().ProposerSlashings(), validators.SlashValidator)
	st, err = coreblocks.ProcessProposerSlashings(ctx, st, blk.Body().ProposerSlashings(), exitInfo)
	if err != nil {
		return nil, &httputil.DefaultJsonError{
			Message: "Could not get proposer slashing rewards: " + err.Error(),

@@ -186,6 +186,7 @@ func (s *Server) GetChainHead(w http.ResponseWriter, r *http.Request) {
	httputil.WriteJson(w, response)
}

// Warning: no longer supported post Fulu blobs
func (s *Server) PublishBlobs(w http.ResponseWriter, r *http.Request) {
	ctx, span := trace.StartSpan(r.Context(), "beacon.PublishBlobs")
	defer span.End()
@@ -215,6 +216,15 @@ func (s *Server) PublishBlobs(w http.ResponseWriter, r *http.Request) {
		httputil.HandleError(w, "Could not decode blob sidecar: "+err.Error(), http.StatusBadRequest)
		return
	}
	scEpoch := slots.ToEpoch(sc.SignedBlockHeader.Header.Slot)
	if scEpoch < params.BeaconConfig().DenebForkEpoch {
		httputil.HandleError(w, "Blob sidecars not supported for pre deneb", http.StatusBadRequest)
		return
	}
	if scEpoch > params.BeaconConfig().FuluForkEpoch {
		httputil.HandleError(w, "Blob sidecars not supported for post fulu blobs", http.StatusBadRequest)
		return
	}

	readOnlySc, err := blocks.NewROBlobWithRoot(sc, bytesutil.ToBytes32(root))
	if err != nil {

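The new epoch gate accepts blob sidecars only between the Deneb and Fulu fork epochs. Expressed as a hypothetical standalone predicate (not in the diff) it reads:

// blobSidecarEpochSupported restates the gate above: blob sidecars are accepted
// only for epochs in [DenebForkEpoch, FuluForkEpoch].
func blobSidecarEpochSupported(slot primitives.Slot) bool {
	e := slots.ToEpoch(slot)
	cfg := params.BeaconConfig()
	return e >= cfg.DenebForkEpoch && e <= cfg.FuluForkEpoch
}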
@@ -1017,6 +1017,11 @@ func TestPublishBlobs_BadBlockRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPublishBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 0 // Set Deneb fork to epoch 0 so slot 0 is valid
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
BlobReceiver: &chainMock.ChainService{},
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
@@ -1032,3 +1037,94 @@ func TestPublishBlobs(t *testing.T) {
|
||||
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 1)
|
||||
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), true)
|
||||
}
|
||||
|
||||
func TestPublishBlobs_PreDeneb(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 10 // Set Deneb fork to epoch 10, so slot 0 is pre-Deneb
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
BlobReceiver: &chainMock.ChainService{},
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.PublishBlobsRequest)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlobs(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.StringContains(t, "Blob sidecars not supported for pre deneb", writer.Body.String())
|
||||
|
||||
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 0)
|
||||
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), false)
|
||||
}
|
||||
|
||||
func TestPublishBlobs_PostFulu(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 0
|
||||
cfg.FuluForkEpoch = 0 // Set Fulu fork to epoch 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
server := &Server{
|
||||
BlobReceiver: &chainMock.ChainService{},
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
// Create a custom request with a slot in epoch 1 (which is > FuluForkEpoch of 0)
|
||||
postFuluRequest := fmt.Sprintf(`{
|
||||
"block_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"blob_sidecars" : {
|
||||
"sidecars" : [
|
||||
{
|
||||
"blob" : "%s",
|
||||
"index" : "0",
|
||||
"kzg_commitment" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"kzg_commitment_inclusion_proof" : [
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
],
|
||||
"kzg_proof" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"signed_block_header" : {
|
||||
"message" : {
|
||||
"body_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"parent_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"proposer_index" : "0",
|
||||
"slot" : "33",
|
||||
"state_root" : "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
"signature" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}`, rpctesting.Blob)
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(postFuluRequest)))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
server.PublishBlobs(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
assert.StringContains(t, "Blob sidecars not supported for post fulu blobs", writer.Body.String())
|
||||
|
||||
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 0)
|
||||
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), false)
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestServer_GetBeaconConfig(t *testing.T) {
|
||||
conf := params.BeaconConfig()
|
||||
confType := reflect.TypeOf(conf).Elem()
|
||||
numFields := confType.NumField()
|
||||
|
||||
|
||||
// Count only exported fields, as unexported fields are not included in the config
|
||||
exportedFields := 0
|
||||
for i := 0; i < numFields; i++ {
|
||||
|
||||
@@ -1,8 +1,10 @@
package validator

import (
	"bytes"
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -15,6 +17,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-bitfield"
)

func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBeaconBlock) {
@@ -51,21 +54,28 @@ func (vs *Server) getSyncAggregate(ctx context.Context, slot primitives.Slot, ro
	if vs.SyncCommitteePool == nil {
		return nil, errors.New("sync committee pool is nil")
	}
	// Contributions have to match the input root
	contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)

	poolContributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
	if err != nil {
		return nil, err
	}
	proposerContributions := proposerSyncContributions(contributions).filterByBlockRoot(root)
	// Contributions have to match the input root
	proposerContributions := proposerSyncContributions(poolContributions).filterByBlockRoot(root)

	// Each sync subcommittee is 128 bits and the sync committee is 512 bits for mainnet.
	aggregatedContributions, err := vs.aggregatedSyncCommitteeMessages(ctx, slot, root, poolContributions)
	if err != nil {
		return nil, errors.Wrap(err, "could not get aggregated sync committee messages")
	}
	proposerContributions = append(proposerContributions, aggregatedContributions...)

	subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
	var bitsHolder [][]byte
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
	for i := uint64(0); i < subcommitteeCount; i++ {
		bitsHolder = append(bitsHolder, ethpb.NewSyncCommitteeAggregationBits())
	}
	sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount)
	sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/subcommitteeCount)

	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
	for i := uint64(0); i < subcommitteeCount; i++ {
		cs := proposerContributions.filterBySubIndex(i)
		aggregates, err := synccontribution.Aggregate(cs)
		if err != nil {
@@ -107,3 +117,107 @@ func (vs *Server) getSyncAggregate(ctx context.Context, slot primitives.Slot, ro
		SyncCommitteeSignature: syncSigBytes[:],
	}, nil
}

func (vs *Server) aggregatedSyncCommitteeMessages(
	ctx context.Context,
	slot primitives.Slot,
	root [32]byte,
	poolContributions []*ethpb.SyncCommitteeContribution,
) ([]*ethpb.SyncCommitteeContribution, error) {
	subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
	subcommitteeSize := params.BeaconConfig().SyncCommitteeSize / subcommitteeCount
	sigsPerSubcommittee := make([][][]byte, subcommitteeCount)
	bitsPerSubcommittee := make([]bitfield.Bitfield, subcommitteeCount)
	for i := uint64(0); i < subcommitteeCount; i++ {
		sigsPerSubcommittee[i] = make([][]byte, 0, subcommitteeSize)
		bitsPerSubcommittee[i] = ethpb.NewSyncCommitteeAggregationBits()
	}

	// Get committee position(s) for each message's validator index.
	scMessages, err := vs.SyncCommitteePool.SyncCommitteeMessages(slot)
	if err != nil {
		return nil, errors.Wrap(err, "could not get sync committee messages")
	}
	messageIndices := make([]primitives.ValidatorIndex, 0, len(scMessages))
	messageSigs := make([][]byte, 0, len(scMessages))
	for _, msg := range scMessages {
		if bytes.Equal(root[:], msg.BlockRoot) {
			messageIndices = append(messageIndices, msg.ValidatorIndex)
			messageSigs = append(messageSigs, msg.Signature)
		}
	}
	st, err := vs.HeadFetcher.HeadState(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "could not get head state")
	}
	positions, err := helpers.CurrentPeriodPositions(st, messageIndices)
	if err != nil {
		return nil, errors.Wrap(err, "could not get sync committee positions")
	}

	// Based on committee position(s), set the appropriate subcommittee bit and signature.
	for i, ci := range positions {
		for _, index := range ci {
			k := uint64(index)
			subnetIndex := k / subcommitteeSize
			indexMod := k % subcommitteeSize

			// Existing aggregated contributions from the pool intersecting with aggregates
			// created from single sync committee messages can result in bit intersections
			// that fail to produce the best possible final aggregate. Ignoring bits that are
			// already set in pool contributions makes intersections impossible.
			intersects := false
			for _, poolContrib := range poolContributions {
				if poolContrib.SubcommitteeIndex == subnetIndex && poolContrib.AggregationBits.BitAt(indexMod) {
					intersects = true
				}
			}
			if !intersects && !bitsPerSubcommittee[subnetIndex].BitAt(indexMod) {
				bitsPerSubcommittee[subnetIndex].SetBitAt(indexMod, true)
				sigsPerSubcommittee[subnetIndex] = append(sigsPerSubcommittee[subnetIndex], messageSigs[i])
			}
		}
	}

	// Aggregate.
	result := make([]*ethpb.SyncCommitteeContribution, 0, subcommitteeCount)
	for i := uint64(0); i < subcommitteeCount; i++ {
		aggregatedSig := make([]byte, 96)
		aggregatedSig[0] = 0xC0
		if len(sigsPerSubcommittee[i]) != 0 {
			contrib, err := aggregateSyncSubcommitteeMessages(slot, root, i, bitsPerSubcommittee[i], sigsPerSubcommittee[i])
			if err != nil {
				// Skip aggregating this subcommittee
				log.WithError(err).Errorf("Could not aggregate sync messages for subcommittee %d", i)
				continue
			}
			result = append(result, contrib)
		}
	}

	return result, nil
}

func aggregateSyncSubcommitteeMessages(
	slot primitives.Slot,
	root [32]byte,
	subcommitteeIndex uint64,
	bits bitfield.Bitfield,
	sigs [][]byte,
) (*ethpb.SyncCommitteeContribution, error) {
	var err error
	uncompressedSigs := make([]bls.Signature, len(sigs))
	for i, sig := range sigs {
		uncompressedSigs[i], err = bls.SignatureFromBytesNoValidation(sig)
		if err != nil {
			return nil, errors.Wrap(err, "could not create signature from bytes")
		}
	}
	return &ethpb.SyncCommitteeContribution{
		Slot:              slot,
		BlockRoot:         root[:],
		SubcommitteeIndex: subcommitteeIndex,
		AggregationBits:   bits.Bytes(),
		Signature:         bls.AggregateSignatures(uncompressedSigs).Marshal(),
	}, nil
}

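The index arithmetic above maps a sync committee position to a subcommittee and a bit within that subcommittee's aggregation bitfield. A tiny illustrative helper (not part of the diff):

// positionToSubnet mirrors the mapping used by aggregatedSyncCommitteeMessages:
// a sync committee position selects a subcommittee (subnet) and a bit inside it.
func positionToSubnet(position uint64) (subnetIndex, indexMod uint64) {
	subcommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
	return position / subcommitteeSize, position % subcommitteeSize
}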
@@ -3,13 +3,67 @@ package validator
|
||||
import (
|
||||
"testing"
|
||||
|
||||
chainmock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/synccommittee"
|
||||
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
)
|
||||
|
||||
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &chainmock.ChainService{State: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
r := params.BeaconConfig().ZeroHash
|
||||
conts := []*ethpb.SyncCommitteeContribution{
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
}
|
||||
|
||||
for _, cont := range conts {
|
||||
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
|
||||
}
|
||||
|
||||
aggregate, err := proposerServer.getSyncAggregate(t.Context(), 1, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.Bitvector32{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits)
|
||||
|
||||
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 2, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.Bitvector32{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits)
|
||||
|
||||
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 3, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.NewBitvector32(), aggregate.SyncCommitteeBits)
|
||||
}
|
||||
|
||||
func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockAltair())
|
||||
require.NoError(t, err)
|
||||
@@ -25,3 +79,123 @@ func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {
|
||||
}
|
||||
require.DeepEqual(t, want, agg)
|
||||
}
|
||||
|
||||
func TestProposer_GetSyncAggregate_IncludesSyncCommitteeMessages(t *testing.T) {
|
||||
// TEST SETUP
|
||||
// - validator 0 is selected twice in subcommittee 0 (indexes [0,1])
|
||||
// - validator 1 is selected once in subcommittee 0 (index 2)
|
||||
// - validator 2 is selected twice in subcommittee 1 (indexes [0,1])
|
||||
// - validator 3 is selected once in subcommittee 1 (index 2)
|
||||
// - sync committee aggregates in the pool have index 3 set for both subcommittees
|
||||
|
||||
subcommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
helpers.ClearCache()
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
vals := make([]*ethpb.Validator, 4)
|
||||
vals[0] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf0}, 48)}
|
||||
vals[1] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf1}, 48)}
|
||||
vals[2] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf2}, 48)}
|
||||
vals[3] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf3}, 48)}
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
sc := ðpb.SyncCommittee{
|
||||
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
}
|
||||
sc.Pubkeys[0] = vals[0].PublicKey
|
||||
sc.Pubkeys[1] = vals[0].PublicKey
|
||||
sc.Pubkeys[2] = vals[1].PublicKey
|
||||
sc.Pubkeys[subcommitteeSize] = vals[2].PublicKey
|
||||
sc.Pubkeys[subcommitteeSize+1] = vals[2].PublicKey
|
||||
sc.Pubkeys[subcommitteeSize+2] = vals[3].PublicKey
|
||||
require.NoError(t, st.SetCurrentSyncCommittee(sc))
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &chainmock.ChainService{State: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
r := params.BeaconConfig().ZeroHash
|
||||
msgs := []*ethpb.SyncCommitteeMessage{
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 0, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 1, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 2, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 3, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
}
|
||||
for _, msg := range msgs {
|
||||
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeMessage(msg))
|
||||
}
|
||||
subcommittee0AggBits := ethpb.NewSyncCommitteeAggregationBits()
|
||||
subcommittee0AggBits.SetBitAt(3, true)
|
||||
subcommittee1AggBits := ethpb.NewSyncCommitteeAggregationBits()
|
||||
subcommittee1AggBits.SetBitAt(3, true)
|
||||
conts := []*ethpb.SyncCommitteeContribution{
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: subcommittee0AggBits, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: subcommittee1AggBits, BlockRoot: r[:]},
|
||||
}
|
||||
for _, cont := range conts {
|
||||
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
|
||||
}
|
||||
|
||||
// The final sync aggregates must have indexes [0,1,2,3] set for both subcommittees
|
||||
sa, err := proposerServer.getSyncAggregate(t.Context(), 1, r)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(0))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(1))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(2))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(3))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize+1))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize+2))
|
||||
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize+3))
|
||||
}
|
||||
|
||||
func Test_aggregatedSyncCommitteeMessages_NoIntersectionWithPoolContributions(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
st, err := util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
vals := make([]*ethpb.Validator, 4)
|
||||
vals[0] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf0}, 48)}
|
||||
vals[1] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf1}, 48)}
|
||||
vals[2] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf2}, 48)}
|
||||
vals[3] = ðpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf3}, 48)}
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
sc := ðpb.SyncCommittee{
|
||||
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
}
|
||||
sc.Pubkeys[0] = vals[0].PublicKey
|
||||
sc.Pubkeys[1] = vals[1].PublicKey
|
||||
sc.Pubkeys[2] = vals[2].PublicKey
|
||||
sc.Pubkeys[3] = vals[3].PublicKey
|
||||
require.NoError(t, st.SetCurrentSyncCommittee(sc))
|
||||
proposerServer := &Server{
|
||||
HeadFetcher: &chainmock.ChainService{State: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
r := params.BeaconConfig().ZeroHash
|
||||
msgs := []*ethpb.SyncCommitteeMessage{
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 0, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 1, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 2, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 3, Signature: bls.NewAggregateSignature().Marshal()},
|
||||
}
|
||||
for _, msg := range msgs {
|
||||
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeMessage(msg))
|
||||
}
|
||||
subcommitteeAggBits := ethpb.NewSyncCommitteeAggregationBits()
|
||||
subcommitteeAggBits.SetBitAt(3, true)
|
||||
cont := ðpb.SyncCommitteeContribution{
|
||||
Slot: 1,
|
||||
SubcommitteeIndex: 0,
|
||||
Signature: bls.NewAggregateSignature().Marshal(),
|
||||
AggregationBits: subcommitteeAggBits,
|
||||
BlockRoot: r[:],
|
||||
}
|
||||
|
||||
aggregated, err := proposerServer.aggregatedSyncCommitteeMessages(t.Context(), 1, r, []*ethpb.SyncCommitteeContribution{cont})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(aggregated))
|
||||
assert.Equal(t, false, aggregated[0].AggregationBits.BitAt(3))
|
||||
}
|
||||
|
||||
@@ -4,16 +4,23 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*ethpb.ProposerSlashing, []ethpb.AttSlashing) {
|
||||
var err error
|
||||
|
||||
exitInfo := v.ExitInformation(head)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(head, exitInfo.TotalActiveBalance); err != nil {
|
||||
log.WithError(err).Warn("Could not update total active balance cache")
|
||||
}
|
||||
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
|
||||
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
|
||||
for _, slashing := range proposerSlashings {
|
||||
_, err := blocks.ProcessProposerSlashing(ctx, head, slashing, v.SlashValidator)
|
||||
_, err = blocks.ProcessProposerSlashing(ctx, head, slashing, exitInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Could not validate proposer slashing for block inclusion")
|
||||
continue
|
||||
@@ -23,7 +30,7 @@ func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*
|
||||
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
|
||||
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
|
||||
for _, slashing := range attSlashings {
|
||||
_, err := blocks.ProcessAttesterSlashing(ctx, head, slashing, v.SlashValidator)
|
||||
_, err = blocks.ProcessAttesterSlashing(ctx, head, slashing, exitInfo)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Could not validate attester slashing for block inclusion")
|
||||
continue
|
||||
|
||||
@@ -3096,49 +3096,6 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
|
||||
assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation")
|
||||
}
|
||||
|
||||
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
|
||||
proposerServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
r := params.BeaconConfig().ZeroHash
|
||||
conts := []*ethpb.SyncCommitteeContribution{
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
|
||||
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
{Slot: 2, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
|
||||
}
|
||||
|
||||
for _, cont := range conts {
|
||||
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
|
||||
}
|
||||
|
||||
aggregate, err := proposerServer.getSyncAggregate(t.Context(), 1, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.Bitvector32{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits)
|
||||
|
||||
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 2, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.Bitvector32{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits)
|
||||
|
||||
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 3, bytesutil.ToBytes32(conts[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, bitfield.NewBitvector32(), aggregate.SyncCommitteeBits)
|
||||
}
|
||||
|
||||
func TestProposer_PrepareBeaconProposer(t *testing.T) {
|
||||
type args struct {
|
||||
request *ethpb.PrepareBeaconProposerRequest
|
||||
|
||||
@@ -67,6 +67,13 @@ func WithNower(n Nower) ClockOpt {
	}
}

// WithTimeAsNow will create a Nower based on the given time.Time and set it as the Now() implementation.
func WithTimeAsNow(t time.Time) ClockOpt {
	return func(g *Clock) {
		g.now = func() time.Time { return t }
	}
}

// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.
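A hypothetical test usage of the new WithTimeAsNow option, assuming the constructor described in the NewClock comment above (the parameter names and types here are guesses, not from the diff):

// exampleFixedClock pins Now() to a fixed instant, here one mainnet slot after genesis,
// so code under test sees a deterministic current slot.
func exampleFixedClock(genesis time.Time, vr [32]byte) *Clock {
	fixed := genesis.Add(12 * time.Second)
	return NewClock(genesis, vr, WithTimeAsNow(fixed))
}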
@@ -265,6 +265,7 @@ type WriteOnlyEth1Data interface {
	AppendEth1DataVotes(val *ethpb.Eth1Data) error
	SetEth1DepositIndex(val uint64) error
	ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (primitives.Epoch, error)
	ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error)
}

// WriteOnlyValidators defines a struct which only has write access to validators methods.

@@ -44,11 +44,27 @@ func (b *BeaconState) ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (prim
		return 0, err
	}

	return b.exitEpochAndUpdateChurn(primitives.Gwei(activeBal), exitBalance)
}

// ExitEpochAndUpdateChurnForTotalBal has the same functionality as ExitEpochAndUpdateChurn,
// the only difference being how total active balance is obtained. In ExitEpochAndUpdateChurn
// it is calculated inside the function and in ExitEpochAndUpdateChurnForTotalBal it's a
// function argument.
func (b *BeaconState) ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error) {
	if b.version < version.Electra {
		return 0, errNotSupported("ExitEpochAndUpdateChurnForTotalBal", b.version)
	}

	return b.exitEpochAndUpdateChurn(totalActiveBalance, exitBalance)
}

func (b *BeaconState) exitEpochAndUpdateChurn(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	earliestExitEpoch := max(b.earliestExitEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(b.slot)))
	perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(activeBal)) // Guaranteed to be non-zero.
	perEpochChurn := helpers.ActivationExitChurnLimit(totalActiveBalance) // Guaranteed to be non-zero.

	// New epoch for exits
	var exitBalanceToConsume primitives.Gwei

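A hypothetical caller-side sketch of the new method: the caller supplies a total active balance it has already computed (for example the one carried in ExitInfo) together with the exiting validator's effective balance; nothing here is taken verbatim from the diff beyond the interface method shown above.

func exitEpochForValidator(st state.BeaconState, totalActiveBalance, effectiveBalance primitives.Gwei) (primitives.Epoch, error) {
	// Electra-style churn is balance weighted, so both the validator's effective balance
	// and the precomputed total active balance feed into the resulting exit epoch.
	return st.ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance, effectiveBalance)
}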
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"batch_verifier.go",
|
||||
"block_batcher.go",
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"custody.go",
|
||||
"data_column_sidecars.go",
|
||||
@@ -165,7 +164,6 @@ go_test(
|
||||
"batch_verifier_test.go",
|
||||
"blobs_test.go",
|
||||
"block_batcher_test.go",
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"custody_test.go",
|
||||
"data_column_sidecars_test.go",
|
||||
@@ -194,6 +192,7 @@ go_test(
|
||||
"slot_aware_cache_test.go",
|
||||
"subscriber_beacon_aggregate_proof_test.go",
|
||||
"subscriber_beacon_blocks_test.go",
|
||||
"subscriber_data_column_sidecar_test.go",
|
||||
"subscriber_test.go",
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_fuzz_test.go",
|
||||
@@ -266,6 +265,7 @@ go_test(
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/equality:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
)
|
||||
|
||||
const broadcastBLSChangesRateLimit = 128
|
||||
|
||||
// This routine broadcasts known BLS changes at the Capella fork.
|
||||
func (s *Service) broadcastBLSChanges(currSlot types.Slot) {
|
||||
capellaSlotStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
|
||||
if err != nil {
|
||||
// only possible error is an overflow, so we exit early from the method
|
||||
return
|
||||
}
|
||||
if currSlot != capellaSlotStart {
|
||||
return
|
||||
}
|
||||
changes, err := s.cfg.blsToExecPool.PendingBLSToExecChanges()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get BLS to execution changes")
|
||||
}
|
||||
if len(changes) == 0 {
|
||||
return
|
||||
}
|
||||
source := rand.NewGenerator()
|
||||
length := len(changes)
|
||||
broadcastChanges := make([]*ethpb.SignedBLSToExecutionChange, length)
|
||||
for i := 0; i < length; i++ {
|
||||
idx := source.Intn(len(changes))
|
||||
broadcastChanges[i] = changes[idx]
|
||||
changes = append(changes[:idx], changes[idx+1:]...)
|
||||
}
|
||||
|
||||
go s.rateBLSChanges(s.ctx, broadcastChanges)
|
||||
}
|
||||
|
||||
func (s *Service) broadcastBLSBatch(ctx context.Context, ptr *[]*ethpb.SignedBLSToExecutionChange) {
|
||||
limit := broadcastBLSChangesRateLimit
|
||||
if len(*ptr) < broadcastBLSChangesRateLimit {
|
||||
limit = len(*ptr)
|
||||
}
|
||||
st, err := s.cfg.chain.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head state")
|
||||
return
|
||||
}
|
||||
for _, ch := range (*ptr)[:limit] {
|
||||
if ch != nil {
|
||||
_, err := blocks.ValidateBLSToExecutionChange(st, ch)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not validate BLS to execution change")
|
||||
continue
|
||||
}
|
||||
if err := s.cfg.p2p.Broadcast(ctx, ch); err != nil {
|
||||
log.WithError(err).Error("could not broadcast BLS to execution changes.")
|
||||
}
|
||||
}
|
||||
}
|
||||
*ptr = (*ptr)[limit:]
|
||||
}
|
||||
|
||||
func (s *Service) rateBLSChanges(ctx context.Context, changes []*ethpb.SignedBLSToExecutionChange) {
|
||||
s.broadcastBLSBatch(ctx, &changes)
|
||||
if len(changes) == 0 {
|
||||
return
|
||||
}
|
||||
ticker := time.NewTicker(500 * time.Millisecond)
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.broadcastBLSBatch(ctx, &changes)
|
||||
if len(changes) == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,157 +0,0 @@
package sync

import (
	"testing"
	"time"

	mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
	testingdb "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
	doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/blstoexec"
	mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestBroadcastBLSChanges(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	c := params.BeaconConfig()
	c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
	params.OverrideBeaconConfig(c)
	chainService := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	s := NewService(t.Context(),
		WithP2P(mockp2p.NewTestP2P(t)),
		WithInitialSync(&mockSync.Sync{IsSyncing: false}),
		WithChainService(chainService),
		WithOperationNotifier(chainService.OperationNotifier()),
		WithBlsToExecPool(blstoexec.NewPool()),
	)
	var emptySig [96]byte
	s.cfg.blsToExecPool.InsertBLSToExecChange(&ethpb.SignedBLSToExecutionChange{
		Message: &ethpb.BLSToExecutionChange{
			ValidatorIndex:     10,
			FromBlsPubkey:      make([]byte, 48),
			ToExecutionAddress: make([]byte, 20),
		},
		Signature: emptySig[:],
	})

	capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	require.NoError(t, err)
	s.broadcastBLSChanges(capellaStart + 1)
}

func TestRateBLSChanges(t *testing.T) {
	logHook := logTest.NewGlobal()
	params.SetupTestConfigCleanup(t)
	c := params.BeaconConfig()
	c.CapellaForkEpoch = c.BellatrixForkEpoch.Add(2)
	params.OverrideBeaconConfig(c)
	chainService := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	p1 := mockp2p.NewTestP2P(t)
	s := NewService(t.Context(),
		WithP2P(p1),
		WithInitialSync(&mockSync.Sync{IsSyncing: false}),
		WithChainService(chainService),
		WithOperationNotifier(chainService.OperationNotifier()),
		WithBlsToExecPool(blstoexec.NewPool()),
	)
	beaconDB := testingdb.SetupDB(t)
	s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New())
	s.cfg.beaconDB = beaconDB
	s.initCaches()
	st, keys := util.DeterministicGenesisStateCapella(t, 256)
	s.cfg.chain = &mockChain.ChainService{
		ValidatorsRoot: [32]byte{'A'},
		Genesis:        time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)),
		State:          st,
	}

	for i := 0; i < 200; i++ {
		message := &ethpb.BLSToExecutionChange{
			ValidatorIndex:     primitives.ValidatorIndex(i),
			FromBlsPubkey:      keys[i+1].PublicKey().Marshal(),
			ToExecutionAddress: bytesutil.PadTo([]byte("address"), 20),
		}
		epoch := params.BeaconConfig().CapellaForkEpoch + 1
		domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBLSToExecutionChange, st.GenesisValidatorsRoot())
		assert.NoError(t, err)
		htr, err := signing.Data(message.HashTreeRoot, domain)
		assert.NoError(t, err)
		signed := &ethpb.SignedBLSToExecutionChange{
			Message:   message,
			Signature: keys[i+1].Sign(htr[:]).Marshal(),
		}

		s.cfg.blsToExecPool.InsertBLSToExecChange(signed)
	}

	require.Equal(t, false, p1.BroadcastCalled.Load())
	slot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
	require.NoError(t, err)
	s.broadcastBLSChanges(slot)
	time.Sleep(100 * time.Millisecond) // Need a sleep for the go routine to be ready
	require.Equal(t, true, p1.BroadcastCalled.Load())
	require.LogsDoNotContain(t, logHook, "could not")

	p1.BroadcastCalled.Store(false)
	time.Sleep(500 * time.Millisecond) // Need a sleep for the second batch to be broadcast
	require.Equal(t, true, p1.BroadcastCalled.Load())
	require.LogsDoNotContain(t, logHook, "could not")
}

func TestBroadcastBLSBatch_changes_slice(t *testing.T) {
	message := &ethpb.BLSToExecutionChange{
		FromBlsPubkey:      make([]byte, 48),
		ToExecutionAddress: make([]byte, 20),
	}
	signed := &ethpb.SignedBLSToExecutionChange{
		Message:   message,
		Signature: make([]byte, 96),
	}
	changes := make([]*ethpb.SignedBLSToExecutionChange, 200)
	for i := 0; i < len(changes); i++ {
		changes[i] = signed
	}
	p1 := mockp2p.NewTestP2P(t)
	chainService := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	s := NewService(t.Context(),
		WithP2P(p1),
		WithInitialSync(&mockSync.Sync{IsSyncing: false}),
		WithChainService(chainService),
		WithOperationNotifier(chainService.OperationNotifier()),
		WithBlsToExecPool(blstoexec.NewPool()),
	)
	beaconDB := testingdb.SetupDB(t)
	s.cfg.stateGen = stategen.New(beaconDB, doublylinkedtree.New())
	s.cfg.beaconDB = beaconDB
	s.initCaches()
	st, _ := util.DeterministicGenesisStateCapella(t, 32)
	s.cfg.chain = &mockChain.ChainService{
		ValidatorsRoot: [32]byte{'A'},
		Genesis:        time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)),
		State:          st,
	}

	s.broadcastBLSBatch(s.ctx, &changes)
	require.Equal(t, 200-128, len(changes))
}
@@ -106,6 +106,9 @@ func (s *Service) custodyGroupCount() (uint64, error) {
// validatorsCustodyRequirement computes the custody requirements based on the
// finalized state and the tracked validators.
func (s *Service) validatorsCustodyRequirement() (uint64, error) {
	if s.trackedValidatorsCache == nil {
		return 0, nil
	}
	// Get the indices of the tracked validators.
	indices := s.trackedValidatorsCache.Indices()

@@ -12,6 +12,7 @@ import (
// Is a background routine that observes for new incoming forks. Depending on the epoch
// it will be in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
	<-s.initialSyncComplete
	slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
	for {
		select {
@@ -28,9 +29,6 @@ func (s *Service) forkWatcher() {
				log.WithError(err).Error("Unable to check for fork in the previous epoch")
				continue
			}
			// Broadcast BLS changes at the Capella fork boundary
			s.broadcastBLSChanges(currSlot)

		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			slotTicker.Done()

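forkWatcher above blocks on initialSyncComplete and then reacts once per slot until its context is closed. A generic sketch of that shape, with a plain time.Ticker standing in for Prysm's slot ticker (illustrative names only, not the repo's API):

package main

import (
	"context"
	"fmt"
	"time"
)

// watch blocks until ready is closed, then runs check once per tick until ctx is done.
func watch(ctx context.Context, ready <-chan struct{}, interval time.Duration, check func()) {
	<-ready
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			check()
		case <-ctx.Done():
			fmt.Println("context closed, exiting watcher")
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	ready := make(chan struct{})
	close(ready) // pretend initial sync already finished
	watch(ctx, ready, 100*time.Millisecond, func() { fmt.Println("checking for fork boundary") })
}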
@@ -2,96 +2,84 @@ package sync

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/async/abool"
	mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/genesis"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func defaultClockWithTimeAtEpoch(epoch primitives.Epoch) *startup.Clock {
	now := genesis.Time().Add(params.EpochsDuration(epoch, params.BeaconConfig()))
	return startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithTimeAsNow(now))
}

func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
	closedChan := make(chan struct{})
	close(closedChan)
	peer2peer := p2ptest.NewTestP2P(t)
	chainService := &mockChain.ChainService{
		Genesis:        genesis.Time(),
		ValidatorsRoot: genesis.ValidatorsRoot(),
	}
	ctx, cancel := context.WithTimeout(t.Context(), 10*time.Millisecond)
	r := &Service{
		ctx:    ctx,
		cancel: cancel,
		cfg: &config{
			p2p:         peer2peer,
			chain:       chainService,
			clock:       defaultClockWithTimeAtEpoch(current),
			initialSync: &mockSync.Sync{IsSyncing: false},
		},
		chainStarted:        abool.New(),
		subHandler:          newSubTopicHandler(),
		initialSyncComplete: closedChan,
	}
	return r
}

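defaultClockWithTimeAtEpoch pins the service clock to a chosen epoch so the fork checks in these tests are deterministic. The general technique, a clock whose notion of "now" is injected rather than read from the wall, looks roughly like this outside of Prysm (illustrative types, not the startup package API; 12s slots and 32 slots per epoch are assumed mainnet-like values):

package main

import (
	"fmt"
	"time"
)

// fakeClock returns a fixed instant instead of the wall clock,
// so code under test sees a stable notion of "now".
type fakeClock struct {
	now time.Time
}

func (c fakeClock) Now() time.Time { return c.now }

// epochOf converts a time offset from genesis into an epoch number.
func epochOf(genesis, now time.Time) uint64 {
	const slot = 12 * time.Second
	const slotsPerEpoch = 32
	return uint64(now.Sub(genesis) / (slot * slotsPerEpoch))
}

func main() {
	genesis := time.Date(2020, 12, 1, 12, 0, 23, 0, time.UTC)
	// Pin "now" to the start of epoch 4, mirroring defaultClockWithTimeAtEpoch.
	c := fakeClock{now: genesis.Add(4 * 32 * 12 * time.Second)}
	fmt.Println("current epoch:", epochOf(genesis, c.Now())) // prints 4
}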
func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 1096*2
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
svcCreator func(t *testing.T) *Service
|
||||
currEpoch primitives.Epoch
|
||||
wantErr bool
|
||||
postSvcCheck func(t *testing.T, s *Service)
|
||||
name string
|
||||
svcCreator func(t *testing.T) *Service
|
||||
checkRegistration func(t *testing.T, s *Service)
|
||||
forkEpoch primitives.Epoch
|
||||
epochAtRegistration func(primitives.Epoch) primitives.Epoch
|
||||
nextForkEpoch primitives.Epoch
|
||||
}{
|
||||
{
|
||||
name: "no fork in the next epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
gt := time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second)
|
||||
vr := [32]byte{'A'}
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: gt,
|
||||
ValidatorsRoot: vr,
|
||||
}
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
return r
|
||||
},
|
||||
currEpoch: 10,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
|
||||
},
|
||||
name: "no fork in the next epoch",
|
||||
forkEpoch: params.BeaconConfig().AltairForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 2 },
|
||||
nextForkEpoch: params.BeaconConfig().BellatrixForkEpoch,
|
||||
checkRegistration: func(t *testing.T, s *Service) {},
|
||||
},
|
||||
{
|
||||
name: "altair fork in the next epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
gt := time.Now().Add(-4 * oneEpoch())
|
||||
vr := [32]byte{'A'}
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: gt,
|
||||
ValidatorsRoot: vr,
|
||||
}
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(5)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
name: "altair fork in the next epoch",
|
||||
forkEpoch: params.BeaconConfig().AltairForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
nextForkEpoch: params.BeaconConfig().BellatrixForkEpoch,
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
@@ -99,375 +87,132 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
expected := fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
// TODO: we should check subcommittee indices here but we need to work with the committee cache to do it properly
|
||||
/*
|
||||
subIndices := mapFromCount(params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.SyncCommitteeSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
*/
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork in the next epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: time.Now().Add(-4 * oneEpoch()),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
}
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.AltairForkEpoch = 3
|
||||
bCfg.BellatrixForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(5)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
name: "capella fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
|
||||
expected := fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().DenebForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
},
|
||||
{
|
||||
name: "deneb fork in the next epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
gt := time.Now().Add(-4 * oneEpoch())
|
||||
vr := [32]byte{'A'}
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: gt,
|
||||
ValidatorsRoot: vr,
|
||||
}
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.DenebForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(5)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().DenebForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
},
|
||||
{
|
||||
name: "electra fork in the next epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
gt := time.Now().Add(-4 * oneEpoch())
|
||||
vr := [32]byte{'A'}
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: gt,
|
||||
ValidatorsRoot: vr,
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
|
||||
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.ElectraForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(5)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().FuluForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
},
|
||||
{
|
||||
name: "fulu fork in the next epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
gt := time.Now().Add(-4 * oneEpoch())
|
||||
vr := [32]byte{'A'}
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: gt,
|
||||
ValidatorsRoot: vr,
|
||||
}
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: startup.NewClock(gt, vr),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
}
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(5)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV3+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().FuluForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().FuluForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcCreator(t)
|
||||
if err := s.registerForUpcomingFork(tt.currEpoch); (err != nil) != tt.wantErr {
|
||||
t.Errorf("registerForUpcomingFork() error = %v, wantErr %v", err, tt.wantErr)
|
||||
current := tt.epochAtRegistration(tt.forkEpoch)
|
||||
s := testForkWatcherService(t, current)
|
||||
wg := attachSpawner(s)
|
||||
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
tt.checkRegistration(t, s)
|
||||
|
||||
if current != tt.forkEpoch-1 {
|
||||
return
|
||||
}
|
||||
tt.postSvcCheck(t, s)
|
||||
|
||||
// Ensure the topics were registered for the upcoming fork
|
||||
digest := params.ForkDigest(tt.forkEpoch)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
|
||||
// After this point we are checking deregistration, which doesn't apply if there isn't a higher
|
||||
// nextForkEpoch.
|
||||
if tt.forkEpoch >= tt.nextForkEpoch {
|
||||
return
|
||||
}
|
||||
|
||||
nextDigest := params.ForkDigest(tt.nextForkEpoch)
|
||||
// Move the clock to just before the next fork epoch and ensure deregistration is correct
|
||||
wg = attachSpawner(s)
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
|
||||
require.NoError(t, s.registerForUpcomingFork(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
// deregister as if it is the epoch after the next fork epoch
|
||||
require.NoError(t, s.deregisterFromPastFork(tt.nextForkEpoch+1))
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_CheckForPreviousEpochFork(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
svcCreator func(t *testing.T) *Service
|
||||
currEpoch primitives.Epoch
|
||||
wantErr bool
|
||||
postSvcCheck func(t *testing.T, s *Service)
|
||||
}{
|
||||
{
|
||||
name: "no fork in the previous epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: time.Now().Add(-oneEpoch()),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
}
|
||||
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: clock,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
err := r.registerRPCHandlers()
|
||||
assert.NoError(t, err)
|
||||
return r
|
||||
},
|
||||
currEpoch: 10,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
ptcls := s.cfg.p2p.Host().Mux().Protocols()
|
||||
pMap := make(map[string]bool)
|
||||
for _, p := range ptcls {
|
||||
pMap[string(p)] = true
|
||||
}
|
||||
assert.Equal(t, true, pMap[p2p.RPCGoodByeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCStatusTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCPingTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCMetaDataTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCBlocksByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCBlocksByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "altair fork in the previous epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: time.Now().Add(-4 * oneEpoch()),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
}
|
||||
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.AltairForkEpoch = 3
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: clock,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
prevGenesis := chainService.Genesis
|
||||
// To allow registration of v1 handlers
|
||||
chainService.Genesis = time.Now().Add(-1 * oneEpoch())
|
||||
err := r.registerRPCHandlers()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainService.Genesis = prevGenesis
|
||||
previous, err := r.rpcHandlerByTopicFromFork(version.Phase0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
next, err := r.rpcHandlerByTopicFromFork(version.Altair)
|
||||
assert.NoError(t, err)
|
||||
|
||||
handlerByTopic := addedRPCHandlerByTopic(previous, next)
|
||||
|
||||
for topic, handler := range handlerByTopic {
|
||||
r.registerRPC(topic, handler)
|
||||
}
|
||||
|
||||
digest := params.ForkDigest(0)
|
||||
assert.NoError(t, err)
|
||||
r.registerSubscribers(0, digest)
|
||||
assert.Equal(t, true, r.subHandler.digestExists(digest))
|
||||
|
||||
digest = params.ForkDigest(3)
|
||||
r.registerSubscribers(3, digest)
|
||||
assert.Equal(t, true, r.subHandler.digestExists(digest))
|
||||
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(0)
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
digest = params.ForkDigest(3)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
|
||||
ptcls := s.cfg.p2p.Host().Mux().Protocols()
|
||||
pMap := make(map[string]bool)
|
||||
for _, p := range ptcls {
|
||||
pMap[string(p)] = true
|
||||
}
|
||||
assert.Equal(t, true, pMap[p2p.RPCGoodByeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCStatusTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCPingTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCMetaDataTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCBlocksByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
|
||||
assert.Equal(t, false, pMap[p2p.RPCMetaDataTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, false, pMap[p2p.RPCBlocksByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, false, pMap[p2p.RPCBlocksByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()])
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "bellatrix fork in the previous epoch",
|
||||
svcCreator: func(t *testing.T) *Service {
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: time.Now().Add(-4 * oneEpoch()),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
}
|
||||
clock := startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot)
|
||||
bCfg := params.BeaconConfig().Copy()
|
||||
bCfg.AltairForkEpoch = 1
|
||||
bCfg.BellatrixForkEpoch = 3
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: clock,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
digest := params.ForkDigest(1)
|
||||
r.registerSubscribers(1, digest)
|
||||
assert.Equal(t, true, r.subHandler.digestExists(digest))
|
||||
|
||||
digest = params.ForkDigest(3)
|
||||
r.registerSubscribers(3, digest)
|
||||
assert.Equal(t, true, r.subHandler.digestExists(digest))
|
||||
|
||||
return r
|
||||
},
|
||||
currEpoch: 4,
|
||||
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(1)
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
digest = params.ForkDigest(3)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcCreator(t)
|
||||
if err := s.deregisterFromPastFork(tt.currEpoch); (err != nil) != tt.wantErr {
|
||||
t.Errorf("registerForUpcomingFork() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
tt.postSvcCheck(t, s)
|
||||
})
|
||||
func attachSpawner(s *Service) *sync.WaitGroup {
	wg := new(sync.WaitGroup)
	s.subscriptionSpawner = func(f func()) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f()
		}()
	}
	return wg
}

// oneEpoch returns the duration of one epoch.

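attachSpawner shows the test side of goroutine injection: every task handed to the spawner is tracked by a WaitGroup so the test can wait for it to finish. A minimal, self-contained sketch of the same pattern outside of Prysm (hypothetical worker type and names, not the repo's API):

package main

import (
	"fmt"
	"sync"
)

// worker launches background tasks through an injectable spawner,
// mirroring the subscriptionSpawner hook above.
type worker struct {
	spawn func(func())
}

func (w *worker) run(f func()) {
	if w.spawn != nil {
		w.spawn(f)
		return
	}
	go f()
}

func main() {
	w := &worker{}
	wg := new(sync.WaitGroup)
	// Test-style injection: every spawned goroutine is tracked by the WaitGroup.
	w.spawn = func(f func()) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f()
		}()
	}
	w.run(func() { fmt.Println("task done") })
	wg.Wait() // deterministic: returns only after the task has finished
}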
@@ -450,7 +450,12 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
	for _, p := range peers {
		blocks, err := f.requestBlocks(ctx, req, p)
		if err != nil {
			log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
			log.WithFields(logrus.Fields{
				"peer":      p,
				"startSlot": req.StartSlot,
				"count":     req.Count,
				"step":      req.Step,
			}).WithError(err).Debug("Could not request blocks by range from peer")
			continue
		}
		f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)

@@ -92,22 +92,72 @@ func SendBeaconBlocksByRangeRequest(
		// The response MUST contain no more than `count` blocks, and no more than
		// MAX_REQUEST_BLOCKS blocks.
		currentEpoch := slots.ToEpoch(tor.CurrentSlot())
		if i >= req.Count || i >= params.MaxRequestBlock(currentEpoch) {
		maxBlocks := params.MaxRequestBlock(currentEpoch)
		if i >= req.Count {
			log.WithFields(logrus.Fields{
				"blockIndex":     i,
				"requestedCount": req.Count,
				"blockSlot":      blk.Block().Slot(),
				"peer":           pid,
				"reason":         "exceeded requested count",
			}).Debug("Peer returned invalid data: too many blocks")
			return nil, ErrInvalidFetchedData
		}
		if i >= maxBlocks {
			log.WithFields(logrus.Fields{
				"blockIndex":   i,
				"maxBlocks":    maxBlocks,
				"currentEpoch": currentEpoch,
				"blockSlot":    blk.Block().Slot(),
				"peer":         pid,
				"reason":       "exceeded MAX_REQUEST_BLOCKS",
			}).Debug("Peer returned invalid data: exceeded protocol limit")
			return nil, ErrInvalidFetchedData
		}
		// Returned blocks MUST be in the slot range [start_slot, start_slot + count * step).
		if blk.Block().Slot() < req.StartSlot || blk.Block().Slot() >= req.StartSlot.Add(req.Count*req.Step) {
		endSlot := req.StartSlot.Add(req.Count * req.Step)
		if blk.Block().Slot() < req.StartSlot {
			log.WithFields(logrus.Fields{
				"blockSlot":      blk.Block().Slot(),
				"requestedStart": req.StartSlot,
				"peer":           pid,
				"reason":         "block slot before requested start",
			}).Debug("Peer returned invalid data: block too early")
			return nil, ErrInvalidFetchedData
		}
		if blk.Block().Slot() >= endSlot {
			log.WithFields(logrus.Fields{
				"blockSlot":      blk.Block().Slot(),
				"requestedStart": req.StartSlot,
				"requestedEnd":   endSlot,
				"requestedCount": req.Count,
				"requestedStep":  req.Step,
				"peer":           pid,
				"reason":         "block slot >= start + count*step",
			}).Debug("Peer returned invalid data: block beyond range")
			return nil, ErrInvalidFetchedData
		}
		// Returned blocks, where they exist, MUST be sent in a consecutive order.
		// Consecutive blocks MUST have values in `step` increments (slots may be skipped in between).
		isSlotOutOfOrder := false
		outOfOrderReason := ""
		if prevSlot >= blk.Block().Slot() {
			isSlotOutOfOrder = true
			outOfOrderReason = "slot not increasing"
		} else if req.Step != 0 && blk.Block().Slot().SubSlot(prevSlot).Mod(req.Step) != 0 {
			isSlotOutOfOrder = true
			slotDiff := blk.Block().Slot().SubSlot(prevSlot)
			outOfOrderReason = fmt.Sprintf("slot diff %d not multiple of step %d", slotDiff, req.Step)
		}
		if !isFirstChunk && isSlotOutOfOrder {
			log.WithFields(logrus.Fields{
				"blockSlot":     blk.Block().Slot(),
				"prevSlot":      prevSlot,
				"requestedStep": req.Step,
				"blockIndex":    i,
				"peer":          pid,
				"reason":        outOfOrderReason,
			}).Debug("Peer returned invalid data: blocks out of order")
			return nil, ErrInvalidFetchedData
		}
		prevSlot = blk.Block().Slot()

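The checks above enforce the BlocksByRange response contract: at most count blocks, capped by MAX_REQUEST_BLOCKS, every slot inside [start_slot, start_slot + count*step), and strictly increasing slots in step increments. A compact, standalone restatement of those rules over plain integers (hypothetical types and names, not Prysm's primitives; a step of 0 is treated as 1 here) could be:

package main

import (
	"errors"
	"fmt"
)

var errInvalidFetched = errors.New("peer returned invalid data")

// validateRangeResponse re-states the ByRange response rules over plain uint64 slots.
// start/count/step describe the request; slots are the returned block slots in order.
func validateRangeResponse(start, count, step, maxBlocks uint64, slots []uint64) error {
	if step == 0 {
		step = 1 // simplification for this sketch
	}
	end := start + count*step
	var prev uint64
	for i, s := range slots {
		if uint64(i) >= count || uint64(i) >= maxBlocks {
			return fmt.Errorf("%w: too many blocks", errInvalidFetched)
		}
		if s < start || s >= end {
			return fmt.Errorf("%w: slot %d outside [%d, %d)", errInvalidFetched, s, start, end)
		}
		if i > 0 && (s <= prev || (s-prev)%step != 0) {
			return fmt.Errorf("%w: slots out of order", errInvalidFetched)
		}
		prev = s
	}
	return nil
}

func main() {
	// Valid: 3 blocks in [100, 104) with increasing slots.
	fmt.Println(validateRangeResponse(100, 4, 1, 1024, []uint64{100, 101, 103}))
	// Invalid: slot 104 falls outside the requested range.
	fmt.Println(validateRangeResponse(100, 4, 1, 1024, []uint64{100, 104}))
}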
@@ -316,11 +316,14 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
			clock:         startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
			beaconDB:      db,
			stateNotifier: chain.StateNotifier(),
			initialSync:   &mockSync.Sync{IsSyncing: false},
		},
		rateLimiter:  newRateLimiter(p1),
		clockWaiter:  cw,
		chainStarted: abool.New(),
		subHandler:   newSubTopicHandler(),
	}
	markInitSyncComplete(t, r)
	clock := startup.NewClockSynchronizer()
	require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
	r.verifierWaiter = verification.NewInitializerWaiter(clock, chain.ForkChoiceStore, r.cfg.stateGen)
@@ -337,9 +340,12 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
			clock:         startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
			p2p:           p2,
			stateNotifier: chain.StateNotifier(),
			initialSync:   &mockSync.Sync{IsSyncing: false},
		},
		rateLimiter: newRateLimiter(p2),
		subHandler:  newSubTopicHandler(),
	}
	markInitSyncComplete(t, r2)
	clock = startup.NewClockSynchronizer()
	require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
	r2.verifierWaiter = verification.NewInitializerWaiter(clock, chain2.ForkChoiceStore, r2.cfg.stateGen)
@@ -909,13 +915,16 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
			p2p:           p1,
			chain:         chain,
			stateNotifier: chain.StateNotifier(),
			initialSync:   &mockSync.Sync{IsSyncing: false},
		},

		ctx:          ctx,
		rateLimiter:  newRateLimiter(p1),
		clockWaiter:  cw,
		chainStarted: abool.New(),
		subHandler:   newSubTopicHandler(),
	}
	markInitSyncComplete(t, r)
	clock := startup.NewClockSynchronizer()
	require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
	r.verifierWaiter = verification.NewInitializerWaiter(clock, chain.ForkChoiceStore, r.cfg.stateGen)

@@ -178,6 +178,7 @@ type Service struct {
	lcStore                 *lightClient.Store
	dataColumnLogCh         chan dataColumnLogEntry
	registeredNetworkEntry  params.NetworkScheduleEntry
	subscriptionSpawner     func(func()) // see Service.spawn for details
}

// NewService initializes new regular sync service.
@@ -254,7 +255,7 @@ func (s *Service) Start() {
	s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

	go s.verifierRoutine()
	go s.startTasksPostInitialSync()
	go s.startDiscoveryAndSubscriptions()
	go s.processDataColumnLogs()

	s.cfg.p2p.AddConnectionHandler(s.reValidatePeer, s.sendGoodbye)
@@ -384,32 +385,31 @@ func (s *Service) waitForChainStart() {
	s.markForChainStart()
}

func (s *Service) startTasksPostInitialSync() {
func (s *Service) startDiscoveryAndSubscriptions() {
	// Wait for the chain to start.
	s.waitForChainStart()

	select {
	case <-s.initialSyncComplete:
		// Compute the current epoch.
		currentSlot := slots.CurrentSlot(s.cfg.clock.GenesisTime())
		currentEpoch := slots.ToEpoch(currentSlot)

		// Compute the current fork forkDigest.
		forkDigest, err := s.currentForkDigest()
		if err != nil {
			log.WithError(err).Error("Could not retrieve current fork digest")
			return
		}

		// Register respective pubsub handlers at state synced event.
		s.registerSubscribers(currentEpoch, forkDigest)

		// Start the fork watcher.
		go s.forkWatcher()

	case <-s.ctx.Done():
		log.Debug("Context closed, exiting goroutine")
	if s.ctx.Err() != nil {
		log.Debug("Context closed, exiting StartDiscoveryAndSubscription")
		return
	}

	// Compute the current epoch.
	currentSlot := slots.CurrentSlot(s.cfg.clock.GenesisTime())
	currentEpoch := slots.ToEpoch(currentSlot)

	// Compute the current fork forkDigest.
	forkDigest, err := s.currentForkDigest()
	if err != nil {
		log.WithError(err).Error("Could not retrieve current fork digest")
		return
	}

	// Register respective pubsub handlers at state synced event.
	s.registerSubscribers(currentEpoch, forkDigest)

	// Start the fork watcher.
	go s.forkWatcher()
}

func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {

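The rewrite above drops the select on initialSyncComplete in favor of a plain context check, since initial sync is awaited elsewhere. The underlying pattern, block until a signal channel is closed unless the context is cancelled first, is easy to get subtly wrong, so here is a minimal generic sketch (illustrative names, not Prysm APIs):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForSignal blocks until done is closed, or returns false if ctx is cancelled first.
func waitForSignal(ctx context.Context, done <-chan struct{}) bool {
	select {
	case <-done:
		return true
	case <-ctx.Done():
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	initialSyncComplete := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // simulated initial sync
		close(initialSyncComplete)
	}()

	if !waitForSignal(ctx, initialSyncComplete) {
		fmt.Println("context closed before sync completed")
		return
	}
	fmt.Println("initial sync complete, registering subscribers")
}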
@@ -69,7 +69,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
	}

	topic := "/eth2/%x/beacon_block"
	go r.startTasksPostInitialSync()
	go r.startDiscoveryAndSubscriptions()
	time.Sleep(100 * time.Millisecond)

	var vr [32]byte
@@ -150,7 +150,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {

	syncCompleteCh := make(chan bool)
	go func() {
		r.startTasksPostInitialSync()
		r.startDiscoveryAndSubscriptions()
		syncCompleteCh <- true
	}()

@@ -206,8 +206,9 @@ func TestSyncService_StopCleanly(t *testing.T) {
		clockWaiter:         gs,
		initialSyncComplete: make(chan struct{}),
	}
	markInitSyncComplete(t, &r)

	go r.startTasksPostInitialSync()
	go r.startDiscoveryAndSubscriptions()
	var vr [32]byte
	require.NoError(t, gs.SetClock(startup.NewClock(time.Now(), vr)))
	r.waitForChainStart()
@@ -220,9 +221,6 @@ func TestSyncService_StopCleanly(t *testing.T) {
	time.Sleep(2 * time.Second)
	require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")

	close(r.initialSyncComplete)
	time.Sleep(1 * time.Second)

	require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
	require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))

@@ -6,6 +6,7 @@ import (
	"reflect"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
@@ -35,40 +36,114 @@ import (

const pubsubMessageTimeout = 30 * time.Second

type (
	// wrappedVal represents a gossip validator which also returns an error along with the result.
	wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)

	// subHandler represents handler for a given subscription.
	subHandler func(context.Context, proto.Message) error

	// parameters used for the `subscribeWithParameters` function.
	subscribeParameters struct {
		topicFormat string
		validate    wrappedVal
		handle      subHandler
		digest      [4]byte

		// getSubnetsToJoin is a function that returns all subnets the node should join.
		getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool

		// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
		// but for which no subscriptions are needed.
		getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
	}

	subscribeToSubnetsParameters struct {
		subscriptionBySubnet map[uint64]*pubsub.Subscription
		topicFormat          string
		digest               [4]byte
		validate             wrappedVal
		handle               subHandler
		getSubnetsToJoin     func(currentSlot primitives.Slot) map[uint64]bool
	}
)

var errInvalidDigest = errors.New("invalid digest")

// wrappedVal represents a gossip validator which also returns an error along with the result.
type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)

// subHandler represents handler for a given subscription.
type subHandler func(context.Context, proto.Message) error

// subscribeParameters holds the parameters that are needed to construct a set of subscriptions topics for a given
// set of gossipsub subnets.
type subscribeParameters struct {
	topicFormat string
	validate    wrappedVal
	handle      subHandler
	digest      [4]byte
	// getSubnetsToJoin is a function that returns all subnets the node should join.
	getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
	// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
	// but for which no subscriptions are needed.
	getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
}

// shortTopic is a less verbose version of topic strings used for logging.
func (p subscribeParameters) shortTopic() string {
	short := p.topicFormat
	fmtLen := len(short)
	if fmtLen >= 3 && short[fmtLen-3:] == "_%d" {
		short = short[:fmtLen-3]
	}
	return fmt.Sprintf(short, p.digest)
}

func (p subscribeParameters) logFields() logrus.Fields {
	return logrus.Fields{
		"topic": p.shortTopic(),
	}
}

// fullTopic is the fully qualified topic string, given to gossipsub.
func (p subscribeParameters) fullTopic(subnet uint64, suffix string) string {
	return fmt.Sprintf(p.topicFormat, p.digest, subnet) + suffix
}

// subnetTracker keeps track of which subnets we are subscribed to, out of the set of
// possible subnets described by a `subscribeParameters`.
type subnetTracker struct {
	subscribeParameters
	mu            sync.RWMutex
	subscriptions map[uint64]*pubsub.Subscription
}

func newSubnetTracker(p subscribeParameters) *subnetTracker {
	return &subnetTracker{
		subscribeParameters: p,
		subscriptions:       make(map[uint64]*pubsub.Subscription),
	}
}

// unwanted takes a list of wanted subnets and returns a list of currently subscribed subnets that are not included.
func (t *subnetTracker) unwanted(wanted map[uint64]bool) []uint64 {
	t.mu.RLock()
	defer t.mu.RUnlock()
	unwanted := make([]uint64, 0, len(t.subscriptions))
	for subnet := range t.subscriptions {
		if wanted == nil || !wanted[subnet] {
			unwanted = append(unwanted, subnet)
		}
	}
	return unwanted
}

// missing takes a list of wanted subnets and returns a list of wanted subnets that are not currently tracked.
func (t *subnetTracker) missing(wanted map[uint64]bool) []uint64 {
	t.mu.RLock()
	defer t.mu.RUnlock()
	missing := make([]uint64, 0, len(wanted))
	for subnet := range wanted {
		if _, ok := t.subscriptions[subnet]; !ok {
			missing = append(missing, subnet)
		}
	}
	return missing
}

// cancelSubscription cancels and removes the subscription for a given subnet.
func (t *subnetTracker) cancelSubscription(subnet uint64) {
	t.mu.Lock()
	defer t.mu.Unlock()
	defer delete(t.subscriptions, subnet)

	sub := t.subscriptions[subnet]
	if sub == nil {
		return
	}
	sub.Cancel()
}

// track asks subscriptionTracker to hold on to the subscription for a given subnet so
// that we can remember that it is tracked and cancel its context when it's time to unsubscribe.
func (t *subnetTracker) track(subnet uint64, sub *pubsub.Subscription) {
	if sub == nil {
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	t.subscriptions[subnet] = sub
}

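The tracker above supports a reconcile loop: each slot, compute the wanted subnets, drop the unwanted ones, and join the missing ones. A simplified stand-in that only remembers joined subnet IDs (no real pubsub subscriptions or libp2p types) shows the reconciliation shape:

package main

import (
	"fmt"
	"sync"
)

// tracker is a simplified stand-in for subnetTracker: it only remembers which
// subnets are joined, without holding real pubsub subscriptions.
type tracker struct {
	mu     sync.RWMutex
	joined map[uint64]bool
}

func (t *tracker) missing(wanted map[uint64]bool) []uint64 {
	t.mu.RLock()
	defer t.mu.RUnlock()
	out := make([]uint64, 0, len(wanted))
	for s := range wanted {
		if !t.joined[s] {
			out = append(out, s)
		}
	}
	return out
}

func (t *tracker) unwanted(wanted map[uint64]bool) []uint64 {
	t.mu.RLock()
	defer t.mu.RUnlock()
	out := make([]uint64, 0, len(t.joined))
	for s := range t.joined {
		if !wanted[s] {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	t := &tracker{joined: map[uint64]bool{1: true, 2: true}}
	wanted := map[uint64]bool{2: true, 3: true}
	// Reconcile: leave subnets that are no longer wanted, join the ones that are missing.
	for _, s := range t.unwanted(wanted) {
		delete(t.joined, s)
		fmt.Println("unsubscribed from subnet", s)
	}
	for _, s := range t.missing(wanted) {
		t.joined[s] = true
		fmt.Println("subscribed to subnet", s)
	}
}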
// noopValidator is a no-op that only decodes the message, but does not check its contents.
func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
	m, err := s.decodePubsubMessage(msg)
@@ -112,131 +187,146 @@ func (s *Service) activeSyncSubnetIndices(currentSlot primitives.Slot) map[uint6
	return mapFromSlice(subscriptions)
}

// spawn allows the Service to use a custom function for launching goroutines.
// This is useful in tests where we can set spawner to a sync.WaitGroup and
// wait for the spawned goroutines to finish.
func (s *Service) spawn(f func()) {
	if s.subscriptionSpawner != nil {
		s.subscriptionSpawner(f)
	} else {
		go f()
	}
}

// Register PubSub subscribers
func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
s.subscribe(
|
||||
p2p.BlockSubnetTopicFormat,
|
||||
s.validateBeaconBlockPubSub,
|
||||
s.beaconBlockSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.subscribe(
|
||||
p2p.AggregateAndProofSubnetTopicFormat,
|
||||
s.validateAggregateAndProof,
|
||||
s.beaconAggregateProofSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.subscribe(
|
||||
p2p.ExitSubnetTopicFormat,
|
||||
s.validateVoluntaryExit,
|
||||
s.voluntaryExitSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.subscribe(
|
||||
p2p.ProposerSlashingSubnetTopicFormat,
|
||||
s.validateProposerSlashing,
|
||||
s.proposerSlashingSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.subscribe(
|
||||
p2p.AttesterSlashingSubnetTopicFormat,
|
||||
s.validateAttesterSlashing,
|
||||
s.attesterSlashingSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.AttestationSubnetTopicFormat,
|
||||
validate: s.validateCommitteeIndexBeaconAttestation,
|
||||
handle: s.committeeIndexBeaconAttestationSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
|
||||
getSubnetsRequiringPeers: attesterSubnetIndices,
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, digest)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, digest)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, digest)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, digest)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, digest)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.AttestationSubnetTopicFormat,
|
||||
validate: s.validateCommitteeIndexBeaconAttestation,
|
||||
handle: s.committeeIndexBeaconAttestationSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
|
||||
getSubnetsRequiringPeers: attesterSubnetIndices,
|
||||
})
|
||||
})
|
||||
|
||||
// New gossip topic in Altair
|
||||
if params.BeaconConfig().AltairForkEpoch <= epoch {
|
||||
s.subscribe(
|
||||
p2p.SyncContributionAndProofSubnetTopicFormat,
|
||||
s.validateSyncContributionAndProof,
|
||||
s.syncContributionAndProofSubscriber,
|
||||
digest,
|
||||
)
|
||||
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
validate: s.validateSyncCommitteeMessage,
|
||||
handle: s.syncCommitteeMessageSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.activeSyncSubnetIndices,
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.SyncContributionAndProofSubnetTopicFormat,
|
||||
s.validateSyncContributionAndProof,
|
||||
s.syncContributionAndProofSubscriber,
|
||||
digest,
|
||||
)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
validate: s.validateSyncCommitteeMessage,
|
||||
handle: s.syncCommitteeMessageSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.activeSyncSubnetIndices,
|
||||
})
|
||||
})
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
s.subscribe(
|
||||
p2p.LightClientOptimisticUpdateTopicFormat,
|
||||
s.validateLightClientOptimisticUpdate,
|
||||
s.lightClientOptimisticUpdateSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.subscribe(
|
||||
p2p.LightClientFinalityUpdateTopicFormat,
|
||||
s.validateLightClientFinalityUpdate,
|
||||
s.lightClientFinalityUpdateSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.LightClientOptimisticUpdateTopicFormat,
|
||||
s.validateLightClientOptimisticUpdate,
|
||||
s.lightClientOptimisticUpdateSubscriber,
|
||||
digest,
|
||||
)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.LightClientFinalityUpdateTopicFormat,
|
||||
s.validateLightClientFinalityUpdate,
|
||||
s.lightClientFinalityUpdateSubscriber,
|
||||
digest,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// New gossip topic in Capella
|
||||
if params.BeaconConfig().CapellaForkEpoch <= epoch {
|
||||
s.subscribe(
|
||||
p2p.BlsToExecutionChangeSubnetTopicFormat,
|
||||
s.validateBlsToExecutionChange,
|
||||
s.blsToExecutionChangeSubscriber,
|
||||
digest,
|
||||
)
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.BlsToExecutionChangeSubnetTopicFormat,
|
||||
s.validateBlsToExecutionChange,
|
||||
s.blsToExecutionChangeSubscriber,
|
||||
digest,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Deneb, removed in Electra
|
||||
if params.BeaconConfig().DenebForkEpoch <= epoch && epoch < params.BeaconConfig().ElectraForkEpoch {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
},
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Electra, removed in Fulu
|
||||
if params.BeaconConfig().ElectraForkEpoch <= epoch && epoch < params.BeaconConfig().FuluForkEpoch {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
},
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Fulu.
|
||||
if params.BeaconConfig().FuluForkEpoch <= epoch {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.DataColumnSubnetTopicFormat,
|
||||
validate: s.validateDataColumn,
|
||||
handle: s.dataColumnSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.dataColumnSubnetIndices,
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.DataColumnSubnetTopicFormat,
|
||||
validate: s.validateDataColumn,
|
||||
handle: s.dataColumnSubscriber,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: s.dataColumnSubnetIndices,
|
||||
getSubnetsRequiringPeers: s.allDataColumnSubnets,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// subscribe to a given topic with a given validator and subscription handler.
|
||||
// The base protobuf message is used to initialize new messages for decoding.
|
||||
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) *pubsub.Subscription {
|
||||
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, digest [4]byte) {
|
||||
<-s.initialSyncComplete
|
||||
_, e, err := params.ForkDataFromDigest(digest)
|
||||
if err != nil {
|
||||
// Impossible condition as it would mean digest does not exist.
|
||||
@@ -247,7 +337,7 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
|
||||
// Impossible condition as it would mean topic does not exist.
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
|
||||
}
|
||||
return s.subscribeWithBase(s.addDigestToTopic(topic, digest), validator, handle)
|
||||
s.subscribeWithBase(s.addDigestToTopic(topic, digest), validator, handle)
|
||||
}
|
||||
|
||||
func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
|
||||
@@ -412,61 +502,38 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
|
||||
// pruneSubscriptions unsubscribes from topics we are currently subscribed to but that are
|
||||
// not in the list of wanted subnets.
|
||||
// This function mutates the `subscriptionBySubnet` map, which is used to keep track of the current subscriptions.
|
||||
func (s *Service) pruneSubscriptions(
|
||||
subscriptionBySubnet map[uint64]*pubsub.Subscription,
|
||||
wantedSubnets map[uint64]bool,
|
||||
topicFormat string,
|
||||
digest [4]byte,
|
||||
) {
|
||||
for subnet, subscription := range subscriptionBySubnet {
|
||||
if subscription == nil {
|
||||
// Should not happen, but just in case.
|
||||
delete(subscriptionBySubnet, subnet)
|
||||
continue
|
||||
}
|
||||
|
||||
if wantedSubnets[subnet] {
|
||||
// Nothing to prune.
|
||||
continue
|
||||
}
|
||||
|
||||
// We are subscribed to a subnet that is no longer wanted.
|
||||
subscription.Cancel()
|
||||
fullTopic := fmt.Sprintf(topicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
s.unSubscribeFromTopic(fullTopic)
|
||||
delete(subscriptionBySubnet, subnet)
|
||||
func (s *Service) pruneSubscriptions(t *subnetTracker, wantedSubnets map[uint64]bool) {
|
||||
for _, subnet := range t.unwanted(wantedSubnets) {
|
||||
t.cancelSubscription(subnet)
|
||||
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
|
||||
}
|
||||
}
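
The leaner pruneSubscriptions above delegates to a subnetTracker whose definition is not part of this hunk. The following is only a rough sketch of what the referenced helpers (unwanted, missing, track, cancelSubscription, fullTopic) might look like; the struct layout and the embedding of subscribeParameters are assumptions for illustration, not the actual implementation:

package sync

import (
	"fmt"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// subnetTracker (hypothetical sketch): bundles the static subscribe parameters
// with the live subscriptions, keyed by subnet index.
type subnetTracker struct {
	subscribeParameters // assumed to carry topicFormat, digest, validate, handle, ...

	subscriptionBySubnet map[uint64]*pubsub.Subscription
}

// unwanted returns subnets we are subscribed to but that are no longer wanted.
func (t *subnetTracker) unwanted(wanted map[uint64]bool) []uint64 {
	out := make([]uint64, 0, len(t.subscriptionBySubnet))
	for subnet := range t.subscriptionBySubnet {
		if !wanted[subnet] {
			out = append(out, subnet)
		}
	}
	return out
}

// missing returns wanted subnets we are not yet subscribed to.
func (t *subnetTracker) missing(wanted map[uint64]bool) []uint64 {
	out := make([]uint64, 0, len(wanted))
	for subnet := range wanted {
		if _, ok := t.subscriptionBySubnet[subnet]; !ok {
			out = append(out, subnet)
		}
	}
	return out
}

// track records a new subscription for the given subnet.
func (t *subnetTracker) track(subnet uint64, sub *pubsub.Subscription) {
	t.subscriptionBySubnet[subnet] = sub
}

// cancelSubscription cancels and forgets the subscription for the given subnet.
func (t *subnetTracker) cancelSubscription(subnet uint64) {
	if sub := t.subscriptionBySubnet[subnet]; sub != nil {
		sub.Cancel()
	}
	delete(t.subscriptionBySubnet, subnet)
}

// fullTopic renders the digest and subnet into the topic format and appends the suffix.
func (t *subnetTracker) fullTopic(subnet uint64, suffix string) string {
	return fmt.Sprintf(t.topicFormat, t.digest, subnet) + suffix
}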
|
||||
|
||||
// subscribeToSubnets subscribes to needed subnets and unsubscribes from unneeded ones.
|
||||
// This function mutates the `subscriptionBySubnet` map, which is used to keep track of the current subscriptions.
|
||||
func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
|
||||
func (s *Service) subscribeToSubnets(t *subnetTracker) error {
|
||||
// Do not subscribe if not synced.
|
||||
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
|
||||
return nil
|
||||
}
|
||||
|
||||
valid, err := isDigestValid(p.digest, s.cfg.clock)
|
||||
valid, err := isDigestValid(t.digest, s.cfg.clock)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "is digest valid")
|
||||
}
|
||||
|
||||
// Unsubscribe from all subnets if digest is not valid. It's likely to be the case after a hard fork.
|
||||
if !valid {
|
||||
wantedSubnets := map[uint64]bool{}
|
||||
s.pruneSubscriptions(p.subscriptionBySubnet, wantedSubnets, p.topicFormat, p.digest)
|
||||
s.pruneSubscriptions(t, nil)
|
||||
return errInvalidDigest
|
||||
}
|
||||
|
||||
subnetsToJoin := p.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
|
||||
s.pruneSubscriptions(p.subscriptionBySubnet, subnetsToJoin, p.topicFormat, p.digest)
|
||||
for subnet := range subnetsToJoin {
|
||||
subnetTopic := fmt.Sprintf(p.topicFormat, p.digest, subnet)
|
||||
|
||||
if _, exists := p.subscriptionBySubnet[subnet]; !exists {
|
||||
subscription := s.subscribeWithBase(subnetTopic, p.validate, p.handle)
|
||||
p.subscriptionBySubnet[subnet] = subscription
|
||||
}
|
||||
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
|
||||
s.pruneSubscriptions(t, subnetsToJoin)
|
||||
for _, subnet := range t.missing(subnetsToJoin) {
|
||||
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
|
||||
topic := t.fullTopic(subnet, "")
|
||||
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -474,110 +541,81 @@ func (s *Service) subscribeToSubnets(p subscribeToSubnetsParameters) error {
|
||||
|
||||
// subscribeWithParameters subscribes to a list of subnets.
|
||||
func (s *Service) subscribeWithParameters(p subscribeParameters) {
|
||||
shortTopicFormat := p.topicFormat
|
||||
shortTopicFormatLen := len(shortTopicFormat)
|
||||
if shortTopicFormatLen >= 3 && shortTopicFormat[shortTopicFormatLen-3:] == "_%d" {
|
||||
shortTopicFormat = shortTopicFormat[:shortTopicFormatLen-3]
|
||||
}
|
||||
shortTopic := fmt.Sprintf(shortTopicFormat, p.digest)
|
||||
tracker := newSubnetTracker(p)
|
||||
// Try once immediately so we don't have to wait until the next slot.
|
||||
s.ensureSubnetPeersAndSubscribe(tracker)
|
||||
|
||||
parameters := subscribeToSubnetsParameters{
|
||||
subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
|
||||
topicFormat: p.topicFormat,
|
||||
digest: p.digest,
|
||||
validate: p.validate,
|
||||
handle: p.handle,
|
||||
getSubnetsToJoin: p.getSubnetsToJoin,
|
||||
go s.logMinimumPeersPerSubnet(p)
|
||||
|
||||
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
s.ensureSubnetPeersAndSubscribe(tracker)
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
err := s.subscribeToSubnets(parameters)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not subscribe to subnets")
|
||||
}
|
||||
|
||||
func (s *Service) ensureSubnetPeersAndSubscribe(tracker *subnetTracker) {
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
minPeers := flags.Get().MinimumPeersPerSubnet
|
||||
logFields := tracker.logFields()
|
||||
neededSubnets := computeAllNeededSubnets(s.cfg.clock.CurrentSlot(), tracker.getSubnetsToJoin, tracker.getSubnetsRequiringPeers)
|
||||
|
||||
if err := s.subscribeToSubnets(tracker); err != nil {
|
||||
if errors.Is(err, errInvalidDigest) {
|
||||
log.WithFields(logFields).Debug("Digest is invalid, stopping subscription")
|
||||
return
|
||||
}
|
||||
log.WithFields(logFields).WithError(err).Error("Could not subscribe to subnets")
|
||||
return
|
||||
}
|
||||
|
||||
slotDuration := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
ctx, cancel := context.WithTimeout(s.ctx, timeout)
|
||||
defer cancel()
|
||||
if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, tracker.topicFormat, tracker.digest, minPeers, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
log.WithFields(logFields).WithError(err).Debug("Could not find peers with subnets")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) logMinimumPeersPerSubnet(p subscribeParameters) {
|
||||
logFields := p.logFields()
|
||||
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
|
||||
// Subscribe to expected subnets and search for peers if needed at every slot.
|
||||
go func() {
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
|
||||
func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
|
||||
defer cancel()
|
||||
|
||||
if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
log.WithError(err).Debug("Could not find peers with subnets")
|
||||
}
|
||||
}()
|
||||
|
||||
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
neededSubnets := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
|
||||
|
||||
if err := s.subscribeToSubnets(parameters); err != nil {
|
||||
if errors.Is(err, errInvalidDigest) {
|
||||
log.WithField("topics", shortTopic).Debug("Digest is invalid, stopping subscription")
|
||||
return
|
||||
}
|
||||
log.WithError(err).Error("Could not subscribe to subnets")
|
||||
continue
|
||||
}
|
||||
|
||||
func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDuration)
|
||||
defer cancel()
|
||||
|
||||
if err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, p.topicFormat, p.digest, minimumPeersPerSubnet, neededSubnets); err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
log.WithError(err).Debug("Could not find peers with subnets")
|
||||
}
|
||||
}()
|
||||
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Warn the user if we are not subscribed to enough peers in the subnets.
|
||||
go func() {
|
||||
log := log.WithField("minimum", minimumPeersPerSubnet)
|
||||
logTicker := time.NewTicker(5 * time.Minute)
|
||||
defer logTicker.Stop()
|
||||
log := log.WithField("minimum", minimumPeersPerSubnet)
|
||||
logTicker := time.NewTicker(5 * time.Minute)
|
||||
defer logTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-logTicker.C:
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
subnetsToFindPeersIndex := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
|
||||
for {
|
||||
select {
|
||||
case <-logTicker.C:
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
subnetsToFindPeersIndex := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
|
||||
|
||||
isSubnetWithMissingPeers := false
|
||||
// Find new peers for wanted subnets if needed.
|
||||
for index := range subnetsToFindPeersIndex {
|
||||
topic := fmt.Sprintf(p.topicFormat, p.digest, index)
|
||||
isSubnetWithMissingPeers := false
|
||||
// Find new peers for wanted subnets if needed.
|
||||
for index := range subnetsToFindPeersIndex {
|
||||
topic := fmt.Sprintf(p.topicFormat, p.digest, index)
|
||||
|
||||
// Check if we have enough peers in the subnet. Skip if we do.
|
||||
if count := s.connectedPeersCount(topic); count < minimumPeersPerSubnet {
|
||||
isSubnetWithMissingPeers = true
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"actual": count,
|
||||
}).Warning("Not enough connected peers")
|
||||
}
|
||||
// Check if we have enough peers in the subnet. Skip if we do.
|
||||
if count := s.connectedPeersCount(topic); count < minimumPeersPerSubnet {
|
||||
isSubnetWithMissingPeers = true
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"actual": count,
|
||||
}).Warning("Not enough connected peers")
|
||||
}
|
||||
|
||||
if !isSubnetWithMissingPeers {
|
||||
log.WithField("topic", shortTopic).Info("All subnets have enough connected peers")
|
||||
}
|
||||
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
if !isSubnetWithMissingPeers {
|
||||
log.WithFields(logFields).Debug("All subnets have enough connected peers")
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) unSubscribeFromTopic(topic string) {
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
opfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
@@ -52,3 +54,31 @@ func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.V
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// allDataColumnSubnets returns the data column subnets for which we need to find peers
// but don't need to subscribe to. This is used to ensure we have peers available in all subnets
// when we are serving validators. When a validator proposes a block, they need to publish data
// column sidecars on all subnets. This method returns a nil map when there is no validator custody
// requirement.
func (s *Service) allDataColumnSubnets(_ primitives.Slot) map[uint64]bool {
	validatorsCustodyRequirement, err := s.validatorsCustodyRequirement()
	if err != nil {
		log.WithError(err).Error("Could not retrieve validators custody requirement")
		return nil
	}

	// If no validators are tracked, return early.
	if validatorsCustodyRequirement == 0 {
		return nil
	}

	// When we have validators with custody requirements, we need peers in all subnets,
	// because validators need to be able to publish data columns to all subnets when proposing.
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
	allSubnets := make(map[uint64]bool, dataColumnSidecarSubnetCount)
	for i := range dataColumnSidecarSubnetCount {
		allSubnets[i] = true
	}

	return allSubnets
}
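
For reference, this method is wired into the Fulu data column subscription earlier in this diff as the peer-requirement callback, alongside dataColumnSubnetIndices for the subnets to actually join:

s.spawn(func() {
	s.subscribeWithParameters(subscribeParameters{
		topicFormat:              p2p.DataColumnSubnetTopicFormat,
		validate:                 s.validateDataColumn,
		handle:                   s.dataColumnSubscriber,
		digest:                   digest,
		getSubnetsToJoin:         s.dataColumnSubnetIndices,
		getSubnetsRequiringPeers: s.allDataColumnSubnets,
	})
})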
beacon-chain/sync/subscriber_data_column_sidecar_test.go (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
)
|
||||
|
||||
func TestAllDataColumnSubnets(t *testing.T) {
|
||||
t.Run("returns nil when no validators tracked", func(t *testing.T) {
|
||||
// Service with no tracked validators
|
||||
svc := &Service{
|
||||
ctx: t.Context(),
|
||||
trackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
}
|
||||
|
||||
result := svc.allDataColumnSubnets(primitives.Slot(0))
|
||||
assert.Equal(t, true, len(result) == 0, "Expected nil or empty map when no validators are tracked")
|
||||
})
|
||||
|
||||
t.Run("returns all subnets logic test", func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ctx := t.Context()
|
||||
|
||||
db := dbtest.SetupDB(t)
|
||||
|
||||
// Create and save genesis state
|
||||
genesisState, _ := util.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, db.SaveGenesisData(ctx, genesisState))
|
||||
|
||||
// Create stategen and initialize with genesis state
|
||||
stateGen := stategen.New(db, doublylinkedtree.New())
|
||||
_, err := stateGen.Resume(ctx, genesisState)
|
||||
require.NoError(t, err)
|
||||
|
||||
// At least one tracked validator.
|
||||
tvc := cache.NewTrackedValidatorsCache()
|
||||
tvc.Set(cache.TrackedValidator{Active: true, Index: 1})
|
||||
|
||||
svc := &Service{
|
||||
ctx: ctx,
|
||||
trackedValidatorsCache: tvc,
|
||||
cfg: &config{
|
||||
stateGen: stateGen,
|
||||
beaconDB: db,
|
||||
},
|
||||
}
|
||||
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
result := svc.allDataColumnSubnets(0)
|
||||
assert.Equal(t, dataColumnSidecarSubnetCount, uint64(len(result)))
|
||||
|
||||
for i := range dataColumnSidecarSubnetCount {
|
||||
assert.Equal(t, true, result[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -58,6 +58,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
|
||||
subHandler: newSubTopicHandler(),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
var err error
|
||||
p2pService.Digest, err = r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
@@ -83,6 +84,11 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func markInitSyncComplete(_ *testing.T, s *Service) {
|
||||
s.initialSyncComplete = make(chan struct{})
|
||||
close(s.initialSyncComplete)
|
||||
}
|
||||
|
||||
func TestSubscribe_UnsubscribeTopic(t *testing.T) {
|
||||
p2pService := p2ptest.NewTestP2P(t)
|
||||
gt := time.Now()
|
||||
@@ -101,6 +107,7 @@ func TestSubscribe_UnsubscribeTopic(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
var err error
|
||||
p2pService.Digest, err = r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
@@ -152,6 +159,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := "/eth2/%x/attester_slashing"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@@ -205,6 +213,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := "/eth2/%x/proposer_slashing"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@@ -253,6 +262,8 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
|
||||
subHandler: newSubTopicHandler(),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
|
||||
var err error
|
||||
p.Digest, err = r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
@@ -292,25 +303,33 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
|
||||
}
|
||||
digest, err := r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
|
||||
defaultTopic := "/eth2/testing/%#x/committee%d"
|
||||
params := subscribeParameters{
|
||||
topicFormat: "/eth2/testing/%#x/committee%d",
|
||||
digest: digest,
|
||||
}
|
||||
tracker := newSubnetTracker(params)
|
||||
|
||||
// committee index 1
|
||||
fullTopic := fmt.Sprintf(defaultTopic, digest, 1) + r.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
c1 := uint64(1)
|
||||
fullTopic := params.fullTopic(c1, r.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
_, topVal := r.wrapAndReportValidation(fullTopic, r.noopValidator)
|
||||
require.NoError(t, r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal))
|
||||
subscriptions[1], err = r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
sub1, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
require.NoError(t, err)
|
||||
tracker.track(c1, sub1)
|
||||
|
||||
// committee index 2
|
||||
fullTopic = fmt.Sprintf(defaultTopic, digest, 2) + r.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
c2 := uint64(2)
|
||||
fullTopic = params.fullTopic(c2, r.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
_, topVal = r.wrapAndReportValidation(fullTopic, r.noopValidator)
|
||||
err = r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal)
|
||||
require.NoError(t, err)
|
||||
subscriptions[2], err = r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
sub2, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
require.NoError(t, err)
|
||||
tracker.track(c2, sub2)
|
||||
|
||||
r.pruneSubscriptions(subscriptions, map[uint64]bool{2: true}, defaultTopic, digest)
|
||||
r.pruneSubscriptions(tracker, map[uint64]bool{c2: true})
|
||||
require.LogsDoNotContain(t, hook, "Could not unregister topic validator")
|
||||
}
|
||||
|
||||
@@ -539,7 +558,7 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second)
|
||||
digest, err := r.currentForkDigest()
|
||||
assert.NoError(t, err)
|
||||
r.subscribeWithParameters(subscribeParameters{
|
||||
go r.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
@@ -580,6 +599,7 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
// Empty cache at the end of the test.
|
||||
defer cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), 0, []uint64{0, 1}, 10*time.Second)
|
||||
@@ -589,12 +609,11 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), version)
|
||||
require.Equal(t, params.BeaconConfig().DenebForkEpoch, e)
|
||||
|
||||
sp := subscribeToSubnetsParameters{
|
||||
subscriptionBySubnet: make(map[uint64]*pubsub.Subscription),
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
}
|
||||
sp := newSubnetTracker(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
digest: digest,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
})
|
||||
require.NoError(t, r.subscribeToSubnets(sp))
|
||||
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
topicMap := map[string]bool{}
|
||||
@@ -697,6 +716,7 @@ func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) {
|
||||
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := p2p.LightClientOptimisticUpdateTopicFormat
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@@ -764,6 +784,7 @@ func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) {
|
||||
lcStore: lightClient.NewLightClientStore(d, &p2ptest.FakeP2P{}, new(event.Feed)),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := p2p.LightClientFinalityUpdateTopicFormat
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
@@ -427,6 +427,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
|
||||
cw := startup.NewClockSynchronizer()
|
||||
opts := []Option{WithClockWaiter(cw)}
|
||||
svc := NewService(ctx, append(opts, tt.svcopts...)...)
|
||||
markInitSyncComplete(t, svc)
|
||||
svc, tt.args.topic = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
|
||||
go svc.Start()
|
||||
if tt.clock == nil {
|
||||
|
||||
@@ -409,6 +409,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
|
||||
svc := NewService(ctx, append(opts, tt.svcopts...)...)
|
||||
var clock *startup.Clock
|
||||
svc, tt.args.topic, clock = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
|
||||
markInitSyncComplete(t, svc)
|
||||
go svc.Start()
|
||||
require.NoError(t, cw.SetClock(clock))
|
||||
svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)
|
||||
|
||||
@@ -856,7 +856,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) {
|
||||
svc, clock = tt.setupSvc(svc, tt.args.msg)
|
||||
require.NoError(t, cw.SetClock(clock))
|
||||
svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)
|
||||
|
||||
markInitSyncComplete(t, svc)
|
||||
go svc.Start()
|
||||
marshalledObj, err := tt.args.msg.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
|
||||
changelog/2025-08-25-docs-links.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Changed
- Updated outdated documentation links for Web3Signer and Why Bazel.

changelog/Galoretka_fix-leakybucket-test-duplicate.md (new file, 2 lines)
@@ -0,0 +1,2 @@
## Ignored
- Remove duplicate test case in `container/leaky-bucket/collector_test.go` to reduce redundancy. (#15672)

changelog/james-prysm_deprecate-publish-blos.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Deprecated and added error to /prysm/v1/beacon/blobs endpoint for post Fulu fork.

changelog/james-prysm_fulu-web3signer.md (new file, 7 lines)
@@ -0,0 +1,7 @@
### Added

- Adding Fulu types for web3signer.

### Changed

- changed validatorpb.SignRequest_AggregateAttestationAndProof signing type to use AggregateAttestationAndProofV2 on web3signer.

changelog/james-prysm_post-block-ssz.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Switching default of validator client rest call for submit block from JSON to SSZ. Fallback json will be attempted.

changelog/kasey_start-discovery-immediately.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Fixed
- Start topic-based peer discovery before initial sync completes so that we have coverage of needed columns when range syncing.

changelog/kasey_unwedge-ethspecify.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Ignored
- Unwedge ethspecify.

changelog/muzry_fix_get_block_attestation_v2.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fix getBlockAttestationsV2 to return [] instead of null when data is empty

changelog/muzry_fix_unremove_empty_dir.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fixed the issue of empty dirs not being deleted when using --blob-storage-layout=by-epoch

changelog/potuz_remove_bls_broadcast.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Remove broadcast of BLS changes at the Capella fork.

changelog/pvl-async-dynamic-run-every.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- async: Added a method for periodic execution with dynamic intervals. This is useful for a future progressive slot schedule.

changelog/pvl-debug-log.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Added more metadata for debug logs when initial sync requests fail for "invalid data returned from peer" errors

changelog/pvl-erigon-agent.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Added erigon/caplin to known p2p agent strings.

changelog/pvl-peerdas-peer-fanout.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Configured the beacon node to seek peers when we have validator custody requirements. If one or more validators are connected to the beacon node, then the beacon node should seek a diverse set of peers such that broadcasting to all data column subnets for a block proposal is more efficient.

changelog/pvl-spectest-size.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- CI: Changed test size for //testing/spectest/mainnet:go_default_test

changelog/radek_agg-sc-messages.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Aggregate and pack sync committee messages into blocks.

changelog/radek_fix-max-epoch-calculation-once.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Pre-calculate exit epoch, churn and active balance before processing slashings to reduce CPU load.

changelog/syjn99_ssz-ql-list.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Support `List` type for SSZ-QL.

changelog/syjn99_ssz-ql-tag-parser.md (new file, 4 lines)
@@ -0,0 +1,4 @@
### Added

- SSZ-QL: Add element information for `Vector` type.
- SSZ-QL: Support multi-dimensional tag parsing.

changelog/ttsao_add-fulu-fork-transition-tests.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add Fulu fork transition tests for mainnet and minimal configurations

changelog/ttsao_add-fulu-proposer-lookahead-tests.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Fulu proposer lookahead epoch processing tests for mainnet and minimal configurations
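
The pvl-async-dynamic-run-every entry above adds a helper for periodic execution with a dynamic interval. A minimal usage sketch, assuming the helper lives in the github.com/OffchainLabs/prysm/v6/async package with the signature RunEveryDynamic(ctx, intervalFunc, f):

package main

import (
	"context"
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v6/async"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Back off progressively: 1s, 2s, 4s, ... capped at 12s between runs.
	interval := time.Second
	async.RunEveryDynamic(ctx, func() time.Duration {
		d := interval
		if interval < 12*time.Second {
			interval *= 2
		}
		return d
	}, func() {
		log.Println("periodic work")
	})

	time.Sleep(time.Minute) // let a few iterations fire before exiting
}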
@@ -8,6 +8,7 @@ import (
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -404,6 +405,8 @@ func (b *BeaconChainConfig) InitializeForkSchedule() {
|
||||
|
||||
func LogDigests(b *BeaconChainConfig) {
|
||||
schedule := b.networkSchedule
|
||||
schedule.mu.RLock()
|
||||
defer schedule.mu.RUnlock()
|
||||
for _, e := range schedule.entries {
|
||||
log.WithFields(e.LogFields()).Debug("Network schedule entry initialized")
|
||||
digests := make([]string, 0, len(schedule.byDigest))
|
||||
@@ -415,6 +418,7 @@ func LogDigests(b *BeaconChainConfig) {
|
||||
}
|
||||
|
||||
type NetworkSchedule struct {
|
||||
mu sync.RWMutex
|
||||
entries []NetworkScheduleEntry
|
||||
byEpoch map[primitives.Epoch]*NetworkScheduleEntry
|
||||
byVersion map[[fieldparams.VersionLength]byte]*NetworkScheduleEntry
|
||||
@@ -482,6 +486,11 @@ func (ns *NetworkSchedule) ForEpoch(epoch primitives.Epoch) NetworkScheduleEntry
|
||||
}
|
||||
|
||||
func (ns *NetworkSchedule) activatedAt(epoch primitives.Epoch) (*NetworkScheduleEntry, bool) {
|
||||
ns.mu.RLock()
|
||||
defer ns.mu.RUnlock()
|
||||
if ns.byEpoch == nil {
|
||||
return nil, false
|
||||
}
|
||||
entry, ok := ns.byEpoch[epoch]
|
||||
return entry, ok
|
||||
}
|
||||
@@ -503,6 +512,8 @@ func (ns *NetworkSchedule) merge(other *NetworkSchedule) *NetworkSchedule {
|
||||
}
|
||||
|
||||
func (ns *NetworkSchedule) index(e NetworkScheduleEntry) {
|
||||
ns.mu.Lock()
|
||||
defer ns.mu.Unlock()
|
||||
if _, ok := ns.byDigest[e.ForkDigest]; !ok {
|
||||
ns.byDigest[e.ForkDigest] = &e
|
||||
}
|
||||
@@ -763,3 +774,23 @@ func WithinDAPeriod(block, current primitives.Epoch) bool {
|
||||
|
||||
return block+BeaconConfig().MinEpochsForBlobsSidecarsRequest >= current
|
||||
}
|
||||
|
||||
// EpochsDuration returns the time duration of the given number of epochs.
|
||||
func EpochsDuration(count primitives.Epoch, b *BeaconChainConfig) time.Duration {
|
||||
return SlotsDuration(SlotsForEpochs(count, b), b)
|
||||
}
|
||||
|
||||
// SlotsForEpochs returns the number of slots in the given number of epochs.
|
||||
func SlotsForEpochs(count primitives.Epoch, b *BeaconChainConfig) primitives.Slot {
|
||||
return primitives.Slot(count) * b.SlotsPerEpoch
|
||||
}
|
||||
|
||||
// SlotsDuration returns the time duration of the given number of slots.
|
||||
func SlotsDuration(count primitives.Slot, b *BeaconChainConfig) time.Duration {
|
||||
return time.Duration(count) * SecondsPerSlot(b)
|
||||
}
|
||||
|
||||
// SecondsPerSlot returns the time duration of a single slot.
|
||||
func SecondsPerSlot(b *BeaconChainConfig) time.Duration {
|
||||
return time.Duration(b.SecondsPerSlot) * time.Second
|
||||
}
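
A quick usage sketch of the duration helpers added above, from a caller's perspective; the concrete numbers in the comments assume the default mainnet configuration (12-second slots, 32 slots per epoch):

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/config/params"
)

func main() {
	cfg := params.BeaconConfig() // mainnet defaults assumed here

	fmt.Println(params.SecondsPerSlot(cfg))    // 12s
	fmt.Println(params.SlotsDuration(64, cfg)) // 64 * 12s = 12m48s
	fmt.Println(params.EpochsDuration(2, cfg)) // 2 * 32 * 12s = 12m48s
}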
|
||||
|
||||
@@ -57,8 +57,11 @@ func ForkFromConfig(cfg *BeaconChainConfig, epoch primitives.Epoch) *ethpb.Fork
|
||||
// ForkDataFromDigest performs the inverse, where it tries to determine the fork version
|
||||
// and epoch from a provided digest by looping through our current fork schedule.
|
||||
func ForkDataFromDigest(digest [4]byte) ([fieldparams.VersionLength]byte, primitives.Epoch, error) {
|
||||
cfg := BeaconConfig()
|
||||
entry, ok := cfg.networkSchedule.byDigest[digest]
|
||||
ns := BeaconConfig().networkSchedule
|
||||
ns.mu.RLock()
|
||||
defer ns.mu.RUnlock()
|
||||
// Look up the digest in our map of digests to fork versions and epochs.
|
||||
entry, ok := ns.byDigest[digest]
|
||||
if !ok {
|
||||
return [fieldparams.VersionLength]byte{}, 0, errors.Errorf("no fork exists for a digest of %#x", digest)
|
||||
}
|
||||
|
||||
@@ -141,7 +141,6 @@ func TestCollector(t *testing.T) {
|
||||
setElapsed(0)
|
||||
|
||||
tests := []testSet{
|
||||
collectorSimple,
|
||||
collectorSimple,
|
||||
collectorVaried,
|
||||
}
|
||||
|
||||
@@ -5,10 +5,13 @@ go_library(
|
||||
srcs = [
|
||||
"analyzer.go",
|
||||
"container.go",
|
||||
"list.go",
|
||||
"path.go",
|
||||
"query.go",
|
||||
"ssz_info.go",
|
||||
"ssz_type.go",
|
||||
"tag_parser.go",
|
||||
"vector.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/encoding/ssz/query",
|
||||
visibility = ["//visibility:public"],
|
||||
@@ -20,6 +23,7 @@ go_test(
|
||||
"analyzer_test.go",
|
||||
"path_test.go",
|
||||
"query_test.go",
|
||||
"tag_parser_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
|
||||
@@ -1,22 +1,13 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
offsetBytes = 4
|
||||
|
||||
// sszMaxTag specifies the maximum capacity of a variable-sized collection, like an SSZ List.
|
||||
sszMaxTag = "ssz-max"
|
||||
|
||||
// sszSizeTag specifies the length of a fixed-sized collection, like an SSZ Vector.
|
||||
// A wildcard ('?') indicates that the dimension is variable-sized (a List).
|
||||
sszSizeTag = "ssz-size"
|
||||
)
|
||||
const offsetBytes = 4
|
||||
|
||||
// AnalyzeObject analyzes given object and returns its SSZ information.
|
||||
func AnalyzeObject(obj any) (*sszInfo, error) {
|
||||
@@ -30,6 +21,89 @@ func AnalyzeObject(obj any) (*sszInfo, error) {
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// PopulateVariableLengthInfo populates runtime information for SSZ fields of variable-sized types.
|
||||
// This function updates the sszInfo structure with actual lengths and offsets that can only
|
||||
// be determined at runtime for variable-sized items like Lists and variable-sized Container fields.
|
||||
func PopulateVariableLengthInfo(sszInfo *sszInfo, value any) error {
|
||||
if sszInfo == nil {
|
||||
return errors.New("sszInfo is nil")
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
return errors.New("value is nil")
|
||||
}
|
||||
|
||||
// Short circuit: If the type is fixed-sized, we don't need to fill in the info.
|
||||
if !sszInfo.isVariable {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch sszInfo.sszType {
|
||||
// In List case, we have to set the actual length of the list.
|
||||
case List:
|
||||
listInfo, err := sszInfo.ListInfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get list info: %w", err)
|
||||
}
|
||||
|
||||
if listInfo == nil {
|
||||
return errors.New("listInfo is nil")
|
||||
}
|
||||
|
||||
val := reflect.ValueOf(value)
|
||||
if val.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("expected slice for List type, got %v", val.Kind())
|
||||
}
|
||||
|
||||
length := uint64(val.Len())
|
||||
if err := listInfo.SetLength(length); err != nil {
|
||||
return fmt.Errorf("could not set list length: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
// In Container case, we need to recursively populate variable-sized fields.
|
||||
case Container:
|
||||
containerInfo, err := sszInfo.ContainerInfo()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get container info: %w", err)
|
||||
}
|
||||
|
||||
// Dereference first in case value is a pointer.
|
||||
derefValue := dereferencePointer(value)
|
||||
|
||||
// Start with the fixed size of this Container.
|
||||
currentOffset := sszInfo.FixedSize()
|
||||
|
||||
for _, fieldName := range containerInfo.order {
|
||||
fieldInfo := containerInfo.fields[fieldName]
|
||||
childSszInfo := fieldInfo.sszInfo
|
||||
if childSszInfo == nil {
|
||||
return fmt.Errorf("sszInfo is nil for field %s", fieldName)
|
||||
}
|
||||
|
||||
// Skip fixed-size fields.
|
||||
if !childSszInfo.isVariable {
|
||||
continue
|
||||
}
|
||||
|
||||
// Set the actual offset for variable-sized fields.
|
||||
fieldInfo.offset = currentOffset
|
||||
|
||||
// Recursively populate variable-sized fields.
|
||||
fieldValue := derefValue.FieldByName(fieldInfo.goFieldName)
|
||||
if err := PopulateVariableLengthInfo(childSszInfo, fieldValue.Interface()); err != nil {
|
||||
return fmt.Errorf("could not populate from value for field %s: %w", fieldName, err)
|
||||
}
|
||||
|
||||
currentOffset += childSszInfo.Size()
|
||||
}
|
||||
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unsupported SSZ type (%s) for variable size info", sszInfo.sszType)
|
||||
}
|
||||
}
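
A typical call sequence, mirroring the tests later in this diff (imports assumed to match those tests): analyze the static layout once, populate runtime lengths from a concrete instance, then compute offsets for a path.

// Hypothetical helper for illustration only; name and error handling are not from the source.
func exampleOffsets(obj *sszquerypb.VariableTestContainer) error {
	info, err := query.AnalyzeObject(&sszquerypb.VariableTestContainer{})
	if err != nil {
		return err
	}
	// Fill in runtime list lengths and variable-field offsets from the concrete value.
	if err := query.PopulateVariableLengthInfo(info, obj); err != nil {
		return err
	}

	path, err := query.ParsePath(".field_list_uint64")
	if err != nil {
		return err
	}
	_, offset, length, err := query.CalculateOffsetAndLength(info, path)
	if err != nil {
		return err
	}
	fmt.Println(offset, length)
	return nil
}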
|
||||
|
||||
// analyzeType is an entry point that inspects a reflect.Type and computes its SSZ layout information.
|
||||
func analyzeType(typ reflect.Type, tag *reflect.StructTag) (*sszInfo, error) {
|
||||
switch typ.Kind() {
|
||||
@@ -92,50 +166,50 @@ func analyzeHomogeneousColType(typ reflect.Type, tag *reflect.StructTag) (*sszIn
|
||||
return nil, fmt.Errorf("can only analyze slice types, got %v", typ.Kind())
|
||||
}
|
||||
|
||||
if tag == nil {
|
||||
return nil, fmt.Errorf("tag is required for slice types")
|
||||
// Parse the first dimension from the tag and get remaining tag for element
|
||||
sszDimension, remainingTag, err := ParseSSZTag(tag)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse SSZ tag: %w", err)
|
||||
}
|
||||
if sszDimension == nil {
|
||||
return nil, errors.New("ssz tag is required for slice types")
|
||||
}
|
||||
|
||||
elementInfo, err := analyzeType(typ.Elem(), nil)
|
||||
// Analyze element type with remaining dimensions
|
||||
elementInfo, err := analyzeType(typ.Elem(), remainingTag)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not analyze element type for homogeneous collection: %w", err)
|
||||
}
|
||||
|
||||
// 1. Check if the type is List/Bitlist by checking `ssz-max` tag.
|
||||
sszMax := tag.Get(sszMaxTag)
|
||||
if sszMax != "" {
|
||||
dims := strings.Split(sszMax, ",")
|
||||
if len(dims) > 1 {
|
||||
return nil, fmt.Errorf("multi-dimensional lists are not supported, got %d dimensions", len(dims))
|
||||
}
|
||||
|
||||
limit, err := strconv.ParseUint(dims[0], 10, 64)
|
||||
// 1. Handle List/Bitlist type
|
||||
if sszDimension.IsList() {
|
||||
limit, err := sszDimension.GetListLimit()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid ssz-max tag (%s): %w", sszMax, err)
|
||||
return nil, fmt.Errorf("could not get list limit: %w", err)
|
||||
}
|
||||
|
||||
return analyzeListType(typ, elementInfo, limit)
|
||||
}
|
||||
|
||||
// 2. Handle Vector/Bitvector type.
|
||||
sszSize := tag.Get(sszSizeTag)
|
||||
dims := strings.Split(sszSize, ",")
|
||||
if len(dims) > 1 {
|
||||
return nil, fmt.Errorf("multi-dimensional vectors are not supported, got %d dimensions", len(dims))
|
||||
// 2. Handle Vector/Bitvector type
|
||||
if sszDimension.IsVector() {
|
||||
length, err := sszDimension.GetVectorLength()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get vector length: %w", err)
|
||||
}
|
||||
|
||||
return analyzeVectorType(typ, elementInfo, length)
|
||||
}
|
||||
|
||||
length, err := strconv.ParseUint(dims[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid ssz-size tag (%s): %w", sszSize, err)
|
||||
}
|
||||
|
||||
return analyzeVectorType(typ, elementInfo, length)
|
||||
// Parsing ssz tag doesn't provide enough information to determine the collection type,
|
||||
// return an error.
|
||||
return nil, errors.New("could not determine collection type from tags")
|
||||
}
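
To make the dimension handling above concrete: the first dimension parsed from the tag describes the outer collection, and the remaining tag is passed down for the element type. A hedged illustration of the tag conventions this parser assumes (the field names here are invented for the example):

type TagExamples struct {
	// Vector of 5 elements, each itself a fixed 32-byte vector: the outer
	// dimension "5" is consumed first, "32" is left for the element,
	// giving a fixed size of 5 * 32 = 160 bytes.
	TwoDimensionBytes [][]byte `ssz-size:"5,32"`

	// List of up to 16 uint64 values: ssz-max marks the variable dimension,
	// so the field contributes a 4-byte offset to the container's fixed part.
	ListUint64 []uint64 `ssz-max:"16"`

	// Outer List (wildcard '?') of up to 8 elements, each a fixed 32-byte vector.
	ListOfBytes32 [][]byte `ssz-max:"8" ssz-size:"?,32"`
}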
|
||||
|
||||
// analyzeListType analyzes SSZ List type and returns its SSZ info.
|
||||
func analyzeListType(typ reflect.Type, elementInfo *sszInfo, limit uint64) (*sszInfo, error) {
|
||||
if elementInfo == nil {
|
||||
return nil, fmt.Errorf("element info is required for List")
|
||||
return nil, errors.New("element info is required for List")
|
||||
}
|
||||
|
||||
return &sszInfo{
|
||||
@@ -144,13 +218,24 @@ func analyzeListType(typ reflect.Type, elementInfo *sszInfo, limit uint64) (*ssz
|
||||
|
||||
fixedSize: offsetBytes,
|
||||
isVariable: true,
|
||||
|
||||
listInfo: &listInfo{
|
||||
limit: limit,
|
||||
element: elementInfo,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// analyzeVectorType analyzes SSZ Vector type and returns its SSZ info.
|
||||
func analyzeVectorType(typ reflect.Type, elementInfo *sszInfo, length uint64) (*sszInfo, error) {
|
||||
if elementInfo == nil {
|
||||
return nil, fmt.Errorf("element info is required for Vector")
|
||||
return nil, errors.New("element info is required for Vector")
|
||||
}
|
||||
|
||||
// Validate the given length.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#illegal-types
|
||||
if length == 0 {
|
||||
return nil, fmt.Errorf("vector length must be greater than 0, got %d", length)
|
||||
}
|
||||
|
||||
return &sszInfo{
|
||||
@@ -159,6 +244,11 @@ func analyzeVectorType(typ reflect.Type, elementInfo *sszInfo, length uint64) (*
|
||||
|
||||
fixedSize: length * elementInfo.Size(),
|
||||
isVariable: false,
|
||||
|
||||
vectorInfo: &vectorInfo{
|
||||
length: length,
|
||||
element: elementInfo,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -168,11 +258,12 @@ func analyzeContainerType(typ reflect.Type) (*sszInfo, error) {
|
||||
return nil, fmt.Errorf("can only analyze struct types, got %v", typ.Kind())
|
||||
}
|
||||
|
||||
fields := make(map[string]*fieldInfo)
|
||||
order := make([]string, 0, typ.NumField())
|
||||
|
||||
sszInfo := &sszInfo{
|
||||
sszType: Container,
|
||||
typ: typ,
|
||||
|
||||
containerInfo: make(map[string]*fieldInfo),
|
||||
}
|
||||
var currentOffset uint64
|
||||
|
||||
@@ -204,23 +295,31 @@ func analyzeContainerType(typ reflect.Type) (*sszInfo, error) {
|
||||
return nil, fmt.Errorf("could not analyze type for field %s: %w", fieldName, err)
|
||||
}
|
||||
|
||||
// If one of the fields is variable-sized,
|
||||
// the entire struct is considered variable-sized.
|
||||
if info.isVariable {
|
||||
sszInfo.isVariable = true
|
||||
}
|
||||
|
||||
// Store nested struct info.
|
||||
sszInfo.containerInfo[fieldName] = &fieldInfo{
|
||||
sszInfo: info,
|
||||
offset: currentOffset,
|
||||
fields[fieldName] = &fieldInfo{
|
||||
sszInfo: info,
|
||||
offset: currentOffset,
|
||||
goFieldName: field.Name,
|
||||
}
|
||||
// Persist order
|
||||
order = append(order, fieldName)
|
||||
|
||||
// Update the current offset based on the field's fixed size.
|
||||
currentOffset += info.fixedSize
|
||||
// Update the current offset depending on whether the field is variable-sized.
|
||||
if info.isVariable {
|
||||
// If one of the fields is variable-sized,
|
||||
// the entire struct is considered variable-sized.
|
||||
sszInfo.isVariable = true
|
||||
currentOffset += offsetBytes
|
||||
} else {
|
||||
currentOffset += info.fixedSize
|
||||
}
|
||||
}
|
||||
|
||||
sszInfo.fixedSize = currentOffset
|
||||
sszInfo.containerInfo = &containerInfo{
|
||||
fields: fields,
|
||||
order: order,
|
||||
}
|
||||
|
||||
return sszInfo, nil
|
||||
}
|
||||
|
||||
@@ -13,5 +13,5 @@ func TestAnalyzeSSZInfo(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotNil(t, info, "Expected non-nil SSZ info")
|
||||
require.Equal(t, uint64(333), info.FixedSize(), "Expected fixed size to be 333")
|
||||
require.Equal(t, uint64(493), info.FixedSize(), "Expected fixed size to be 493")
|
||||
}
|
||||
|
||||
@@ -1,11 +1,18 @@
|
||||
package query
|
||||
|
||||
// containerInfo maps a field's JSON name to its sszInfo for nested Containers.
|
||||
type containerInfo = map[string]*fieldInfo
|
||||
// containerInfo has
|
||||
// 1. fields: a field map that maps a field's JSON name to its sszInfo for nested Containers
|
||||
// 2. order: a list of field names in the order they should be serialized
|
||||
type containerInfo struct {
|
||||
fields map[string]*fieldInfo
|
||||
order []string
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
// sszInfo contains the SSZ information of the field.
|
||||
sszInfo *sszInfo
|
||||
// offset is the offset of the field within the parent struct.
|
||||
offset uint64
|
||||
// goFieldName is the name of the field in Go struct.
|
||||
goFieldName string
|
||||
}
|
||||
|
||||
encoding/ssz/query/list.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package query

import (
	"errors"
	"fmt"
)

// listInfo holds information about a SSZ List type.
//
// length is initialized with zero,
// and can be set using SetLength while populating the actual SSZ List.
type listInfo struct {
	// limit is the maximum number of elements in the list.
	limit uint64
	// element is the SSZ info of the list's element type.
	element *sszInfo
	// length is the actual number of elements at runtime (0 if not set).
	length uint64
}

func (l *listInfo) Limit() uint64 {
	if l == nil {
		return 0
	}
	return l.limit
}

func (l *listInfo) Element() (*sszInfo, error) {
	if l == nil {
		return nil, errors.New("listInfo is nil")
	}
	return l.element, nil
}

func (l *listInfo) Length() uint64 {
	if l == nil {
		return 0
	}
	return l.length
}

func (l *listInfo) SetLength(length uint64) error {
	if l == nil {
		return errors.New("listInfo is nil")
	}

	if length > l.limit {
		return fmt.Errorf("length %d exceeds limit %d", length, l.limit)
	}

	l.length = length
	return nil
}
|
||||
@@ -1,37 +1,38 @@
|
||||
package query
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// CalculateOffsetAndLength calculates the offset and length of a given path within the SSZ object.
|
||||
// By walking the given path, it accumulates the offsets based on sszInfo.
|
||||
func CalculateOffsetAndLength(sszInfo *sszInfo, path []PathElement) (*sszInfo, uint64, uint64, error) {
|
||||
if sszInfo == nil {
|
||||
return nil, 0, 0, fmt.Errorf("sszInfo is nil")
|
||||
return nil, 0, 0, errors.New("sszInfo is nil")
|
||||
}
|
||||
|
||||
if len(path) == 0 {
|
||||
return nil, 0, 0, fmt.Errorf("path is empty")
|
||||
return nil, 0, 0, errors.New("path is empty")
|
||||
}
|
||||
|
||||
walk := sszInfo
|
||||
currentOffset := uint64(0)
|
||||
offset := uint64(0)
|
||||
|
||||
for _, elem := range path {
|
||||
fieldInfos, err := walk.ContainerInfo()
|
||||
containerInfo, err := walk.ContainerInfo()
|
||||
if err != nil {
|
||||
return nil, 0, 0, fmt.Errorf("could not get field infos: %w", err)
|
||||
}
|
||||
|
||||
fieldInfo, exists := fieldInfos[elem.Name]
|
||||
fieldInfo, exists := containerInfo.fields[elem.Name]
|
||||
if !exists {
|
||||
return nil, 0, 0, fmt.Errorf("field %s not found in fieldInfos", elem.Name)
|
||||
return nil, 0, 0, fmt.Errorf("field %s not found in containerInfo", elem.Name)
|
||||
}
|
||||
|
||||
currentOffset += fieldInfo.offset
|
||||
offset += fieldInfo.offset
|
||||
walk = fieldInfo.sszInfo
|
||||
}
|
||||
|
||||
if walk.isVariable {
|
||||
return nil, 0, 0, fmt.Errorf("cannot calculate length for variable-sized type")
|
||||
}
|
||||
|
||||
return walk, currentOffset, walk.Size(), nil
|
||||
return walk, offset, walk.Size(), nil
|
||||
}
|
||||
|
||||
@@ -6,100 +6,192 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz/query/testutil"
|
||||
"github.com/OffchainLabs/prysm/v6/proto/ssz_query"
|
||||
sszquerypb "github.com/OffchainLabs/prysm/v6/proto/ssz_query"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestCalculateOffsetAndLength(t *testing.T) {
|
||||
tests := []struct {
|
||||
type testCase struct {
|
||||
name string
|
||||
path string
|
||||
expectedOffset uint64
|
||||
expectedLength uint64
|
||||
}{
|
||||
// Basic integer types
|
||||
{
|
||||
name: "field_uint32",
|
||||
path: ".field_uint32",
|
||||
expectedOffset: 0,
|
||||
expectedLength: 4,
|
||||
},
|
||||
{
|
||||
name: "field_uint64",
|
||||
path: ".field_uint64",
|
||||
expectedOffset: 4,
|
||||
expectedLength: 8,
|
||||
},
|
||||
// Boolean type
|
||||
{
|
||||
name: "field_bool",
|
||||
path: ".field_bool",
|
||||
expectedOffset: 12,
|
||||
expectedLength: 1,
|
||||
},
|
||||
// Fixed-size bytes
|
||||
{
|
||||
name: "field_bytes32",
|
||||
path: ".field_bytes32",
|
||||
expectedOffset: 13,
|
||||
expectedLength: 32,
|
||||
},
|
||||
// Nested container
|
||||
{
|
||||
name: "nested container",
|
||||
path: ".nested",
|
||||
expectedOffset: 45,
|
||||
expectedLength: 40,
|
||||
},
|
||||
{
|
||||
name: "nested value1",
|
||||
path: ".nested.value1",
|
||||
expectedOffset: 45,
|
||||
expectedLength: 8,
|
||||
},
|
||||
{
|
||||
name: "nested value2",
|
||||
path: ".nested.value2",
|
||||
expectedOffset: 53,
|
||||
expectedLength: 32,
|
||||
},
|
||||
// Vector field
|
||||
{
|
||||
name: "vector field",
|
||||
path: ".vector_field",
|
||||
expectedOffset: 85,
|
||||
expectedLength: 192, // 24 * 8 bytes
|
||||
},
|
||||
// Trailing field
|
||||
{
|
||||
name: "trailing_field",
|
||||
path: ".trailing_field",
|
||||
expectedOffset: 277,
|
||||
expectedLength: 56,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
path, err := query.ParsePath(tt.path)
|
||||
require.NoError(t, err)
|
||||
t.Run("FixedTestContainer", func(t *testing.T) {
|
||||
tests := []testCase{
|
||||
// Basic integer types
|
||||
{
|
||||
name: "field_uint32",
|
||||
path: ".field_uint32",
|
||||
expectedOffset: 0,
|
||||
expectedLength: 4,
|
||||
},
|
||||
{
|
||||
name: "field_uint64",
|
||||
path: ".field_uint64",
|
||||
expectedOffset: 4,
|
||||
expectedLength: 8,
|
||||
},
|
||||
// Boolean type
|
||||
{
|
||||
name: "field_bool",
|
||||
path: ".field_bool",
|
||||
expectedOffset: 12,
|
||||
expectedLength: 1,
|
||||
},
|
||||
// Fixed-size bytes
|
||||
{
|
||||
name: "field_bytes32",
|
||||
path: ".field_bytes32",
|
||||
expectedOffset: 13,
|
||||
expectedLength: 32,
|
||||
},
|
||||
// Nested container
|
||||
{
|
||||
name: "nested container",
|
||||
path: ".nested",
|
||||
expectedOffset: 45,
|
||||
expectedLength: 40,
|
||||
},
|
||||
{
|
||||
name: "nested value1",
|
||||
path: ".nested.value1",
|
||||
expectedOffset: 45,
|
||||
expectedLength: 8,
|
||||
},
|
||||
{
|
||||
name: "nested value2",
|
||||
path: ".nested.value2",
|
||||
expectedOffset: 53,
|
||||
expectedLength: 32,
|
||||
},
|
||||
// Vector field
|
||||
{
|
||||
name: "vector field",
|
||||
path: ".vector_field",
|
||||
expectedOffset: 85,
|
||||
expectedLength: 192, // 24 * 8 bytes
|
||||
},
|
||||
// 2D bytes field
|
||||
{
|
||||
name: "two_dimension_bytes_field",
|
||||
path: ".two_dimension_bytes_field",
|
||||
expectedOffset: 277,
|
||||
expectedLength: 160, // 5 * 32 bytes
|
||||
},
|
||||
// Trailing field
|
||||
{
|
||||
name: "trailing_field",
|
||||
path: ".trailing_field",
|
||||
expectedOffset: 437,
|
||||
expectedLength: 56,
|
||||
},
|
||||
}
|
||||
|
||||
info, err := query.AnalyzeObject(&sszquerypb.FixedTestContainer{})
|
||||
require.NoError(t, err)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
path, err := query.ParsePath(tt.path)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
|
||||
require.NoError(t, err)
|
||||
info, err := query.AnalyzeObject(&sszquerypb.FixedTestContainer{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tt.expectedOffset, offset, "Expected offset to be %d", tt.expectedOffset)
|
||||
require.Equal(t, tt.expectedLength, length, "Expected length to be %d", tt.expectedLength)
|
||||
})
|
||||
}
|
||||
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tt.expectedOffset, offset, "Expected offset to be %d", tt.expectedOffset)
|
||||
require.Equal(t, tt.expectedLength, length, "Expected length to be %d", tt.expectedLength)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("VariableTestContainer", func(t *testing.T) {
|
||||
tests := []testCase{
|
||||
// Fixed leading field
|
||||
{
|
||||
name: "leading_field",
|
||||
path: ".leading_field",
|
||||
expectedOffset: 0,
|
||||
expectedLength: 32,
|
||||
},
|
||||
// Variable-size list fields
|
||||
{
|
||||
name: "field_list_uint64",
|
||||
path: ".field_list_uint64",
|
||||
expectedOffset: 104, // First part of variable-sized type.
|
||||
expectedLength: 40, // 5 elements * uint64 (8 bytes each)
|
||||
},
|
||||
{
|
||||
name: "field_list_container",
|
||||
path: ".field_list_container",
|
||||
expectedOffset: 144, // Second part of variable-sized type.
|
||||
expectedLength: 120, // 3 elements * FixedNestedContainer (40 bytes each)
|
||||
},
|
||||
{
|
||||
name: "field_list_bytes32",
|
||||
path: ".field_list_bytes32",
|
||||
expectedOffset: 264,
|
||||
expectedLength: 96, // 3 elements * 32 bytes each
|
||||
},
|
||||
// Nested paths
|
||||
{
|
||||
name: "nested",
|
||||
path: ".nested",
|
||||
expectedOffset: 360,
|
||||
// Calculated with:
|
||||
// - Value1: 8 bytes
|
||||
// - field_list_uint64 offset: 4 bytes
|
||||
// - field_list_uint64 length: 40 bytes
|
||||
expectedLength: 52,
|
||||
},
|
||||
{
|
||||
name: "nested.value1",
|
||||
path: ".nested.value1",
|
||||
expectedOffset: 360,
|
||||
expectedLength: 8,
|
||||
},
|
||||
{
|
||||
name: "nested.field_list_uint64",
|
||||
path: ".nested.field_list_uint64",
|
||||
expectedOffset: 372,
|
||||
expectedLength: 40,
|
||||
},
|
||||
// Fixed trailing field
|
||||
{
|
||||
name: "trailing_field",
|
||||
path: ".trailing_field",
|
||||
expectedOffset: 48, // After leading_field + 4 offset pointers
|
||||
expectedLength: 56,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
path, err := query.ParsePath(tt.path)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := query.AnalyzeObject(&sszquerypb.VariableTestContainer{})
|
||||
require.NoError(t, err)
|
||||
|
||||
testContainer := createVariableTestContainer()
|
||||
err = query.PopulateVariableLengthInfo(info, testContainer)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tt.expectedOffset, offset, "Expected offset to be %d", tt.expectedOffset)
|
||||
require.Equal(t, tt.expectedLength, length, "Expected length to be %d", tt.expectedLength)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestRoundTripSszInfo(t *testing.T) {
|
||||
specs := []testutil.TestSpec{
|
||||
getFixedTestContainerSpec(),
|
||||
getVariableTestContainerSpec(),
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
@@ -107,7 +199,7 @@ func TestRoundTripSszInfo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func createFixedTestContainer() any {
|
||||
func createFixedTestContainer() *sszquerypb.FixedTestContainer {
|
||||
fieldBytes32 := make([]byte, 32)
|
||||
for i := range fieldBytes32 {
|
||||
fieldBytes32[i] = byte(i + 24)
|
||||
@@ -123,7 +215,7 @@ func createFixedTestContainer() any {
|
||||
trailingField[i] = byte(i + 88)
|
||||
}
|
||||
|
||||
return &ssz_query.FixedTestContainer{
|
||||
return &sszquerypb.FixedTestContainer{
|
||||
// Basic types
|
||||
FieldUint32: math.MaxUint32,
|
||||
FieldUint64: math.MaxUint64,
|
||||
@@ -141,13 +233,22 @@ func createFixedTestContainer() any {
|
||||
// Vector field
|
||||
VectorField: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
|
||||
|
||||
// 2D bytes field
|
||||
TwoDimensionBytesField: [][]byte{
|
||||
make([]byte, 32),
|
||||
make([]byte, 32),
|
||||
make([]byte, 32),
|
||||
make([]byte, 32),
|
||||
make([]byte, 32),
|
||||
},
|
||||
|
||||
// Trailing field
|
||||
TrailingField: trailingField,
|
||||
}
|
||||
}
|
||||
|
||||
func getFixedTestContainerSpec() testutil.TestSpec {
|
||||
testContainer := createFixedTestContainer().(*sszquerypb.FixedTestContainer)
|
||||
testContainer := createFixedTestContainer()
|
||||
|
||||
return testutil.TestSpec{
|
||||
Name: "FixedTestContainer",
|
||||
@@ -190,6 +291,11 @@ func getFixedTestContainerSpec() testutil.TestSpec {
|
||||
Path: ".vector_field",
|
||||
Expected: testContainer.VectorField,
|
||||
},
|
||||
// 2D bytes field
|
||||
{
|
||||
Path: ".two_dimension_bytes_field",
|
||||
Expected: testContainer.TwoDimensionBytesField,
|
||||
},
|
||||
// Trailing field
|
||||
{
|
||||
Path: ".trailing_field",
|
||||
@@ -198,3 +304,100 @@ func getFixedTestContainerSpec() testutil.TestSpec {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createVariableTestContainer() *sszquerypb.VariableTestContainer {
leadingField := make([]byte, 32)
for i := range leadingField {
leadingField[i] = byte(i + 100)
}

trailingField := make([]byte, 56)
for i := range trailingField {
trailingField[i] = byte(i + 150)
}

nestedContainers := make([]*sszquerypb.FixedNestedContainer, 3)
for i := range nestedContainers {
value2 := make([]byte, 32)
for j := range value2 {
value2[j] = byte(j + i*32)
}
nestedContainers[i] = &sszquerypb.FixedNestedContainer{
Value1: uint64(1000 + i),
Value2: value2,
}
}

return &sszquerypb.VariableTestContainer{
// Fixed leading field
LeadingField: leadingField,

// Variable-size lists
FieldListUint64: []uint64{100, 200, 300, 400, 500},
FieldListContainer: nestedContainers,
FieldListBytes32: [][]byte{
make([]byte, 32),
make([]byte, 32),
make([]byte, 32),
},

// Variable nested container
Nested: &sszquerypb.VariableNestedContainer{
Value1: 42,
FieldListUint64: []uint64{1, 2, 3, 4, 5},
},

// Fixed trailing field
TrailingField: trailingField,
}
}

func getVariableTestContainerSpec() testutil.TestSpec {
testContainer := createVariableTestContainer()

return testutil.TestSpec{
Name: "VariableTestContainer",
Type: sszquerypb.VariableTestContainer{},
Instance: testContainer,
PathTests: []testutil.PathTest{
// Fixed leading field
{
Path: ".leading_field",
Expected: testContainer.LeadingField,
},
// Variable-size list of uint64
{
Path: ".field_list_uint64",
Expected: testContainer.FieldListUint64,
},
// Variable-size list of (fixed-size) containers
{
Path: ".field_list_container",
Expected: testContainer.FieldListContainer,
},
// Variable-size list of bytes32
{
Path: ".field_list_bytes32",
Expected: testContainer.FieldListBytes32,
},
// Variable nested container with every path
{
Path: ".nested",
Expected: testContainer.Nested,
},
{
Path: ".nested.value1",
Expected: testContainer.Nested.Value1,
},
{
Path: ".nested.field_list_uint64",
Expected: testContainer.Nested.FieldListUint64,
},
// Fixed trailing field
{
Path: ".trailing_field",
Expected: testContainer.TrailingField,
},
},
}
}
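Assuming the standard SSZ offset scheme, and that the 32- and 56-byte fields above are fixed-size vectors, the fixed part of VariableTestContainer can be worked out by hand; a rough sketch (illustrative arithmetic, not code from this change):

package main

import "fmt"

func main() {
	// Each variable-size field (the three lists and the nested container)
	// contributes a 4-byte offset to the fixed part; the byte vectors are stored inline.
	leading := 32    // LeadingField: 32-byte vector
	offsets := 4 * 4 // FieldListUint64, FieldListContainer, FieldListBytes32, Nested
	trailing := 56   // TrailingField: 56-byte vector
	fmt.Println(leading + offsets + trailing) // 104: byte position where the variable data begins
}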

@@ -1,9 +1,9 @@
package query

import (
"errors"
"fmt"
"reflect"
"sort"
"strings"
)

@@ -20,7 +20,13 @@ type sszInfo struct {
fixedSize uint64

// For Container types.
containerInfo containerInfo
containerInfo *containerInfo

// For List types.
listInfo *listInfo

// For Vector types.
vectorInfo *vectorInfo
}

func (info *sszInfo) FixedSize() uint64 {
@@ -40,13 +46,33 @@ func (info *sszInfo) Size() uint64 {
return info.fixedSize
}

// NOTE: Handle variable-sized types.
return 0
switch info.sszType {
case List:
length := info.listInfo.length
elementSize := info.listInfo.element.Size()

return length * elementSize

case Container:
size := info.fixedSize
for _, fieldInfo := range info.containerInfo.fields {
if !fieldInfo.sszInfo.isVariable {
continue
}

size += fieldInfo.sszInfo.Size()
}
return size

default:
// NOTE: Handle other variable-sized types.
return 0
}
}

func (info *sszInfo) ContainerInfo() (containerInfo, error) {
func (info *sszInfo) ContainerInfo() (*containerInfo, error) {
if info == nil {
return nil, fmt.Errorf("sszInfo is nil")
return nil, errors.New("sszInfo is nil")
}

if info.sszType != Container {
@@ -54,12 +80,59 @@ func (info *sszInfo) ContainerInfo() (containerInfo, error) {
}

if info.containerInfo == nil {
return nil, fmt.Errorf("sszInfo.containerInfo is nil")
return nil, errors.New("sszInfo.containerInfo is nil")
}

return info.containerInfo, nil
}

func (info *sszInfo) ListInfo() (*listInfo, error) {
if info == nil {
return nil, errors.New("sszInfo is nil")
}

if info.sszType != List {
return nil, fmt.Errorf("sszInfo is not a List type, got %s", info.sszType)
}

return info.listInfo, nil
}

func (info *sszInfo) VectorInfo() (*vectorInfo, error) {
if info == nil {
return nil, errors.New("sszInfo is nil")
}

if info.sszType != Vector {
return nil, fmt.Errorf("sszInfo is not a Vector type, got %s", info.sszType)
}

return info.vectorInfo, nil
}

// String implements the Stringer interface for sszInfo.
// This follows the notation used in the consensus specs.
func (info *sszInfo) String() string {
if info == nil {
return "<nil>"
}

switch info.sszType {
case List:
return fmt.Sprintf("List[%s, %d]", info.listInfo.element, info.listInfo.limit)
case Vector:
if info.vectorInfo.element.String() == "uint8" {
// Handle byte vectors as BytesN
// See Aliases section in SSZ spec:
// https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#aliases
return fmt.Sprintf("Bytes%d", info.vectorInfo.length)
}
return fmt.Sprintf("Vector[%s, %d]", info.vectorInfo.element, info.vectorInfo.length)
default:
return info.typ.Name()
}
}

// Print returns a string representation of the sszInfo, which is useful for debugging.
func (info *sszInfo) Print() string {
if info == nil {
@@ -80,31 +153,29 @@ func printRecursive(info *sszInfo, builder *strings.Builder, prefix string) {

switch info.sszType {
case Container:
builder.WriteString(fmt.Sprintf("%s: %s (%s / fixed size: %d, total size: %d)\n", info.sszType, info.typ.Name(), sizeDesc, info.FixedSize(), info.Size()))
builder.WriteString(fmt.Sprintf("%s (%s / fixed size: %d, total size: %d)\n", info, sizeDesc, info.FixedSize(), info.Size()))

for i, key := range info.containerInfo.order {
connector := "├─"
nextPrefix := prefix + "│ "
if i == len(info.containerInfo.order)-1 {
connector = "└─"
nextPrefix = prefix + " "
}

builder.WriteString(fmt.Sprintf("%s%s %s (offset: %d) ", prefix, connector, key, info.containerInfo.fields[key].offset))

if nestedInfo := info.containerInfo.fields[key].sszInfo; nestedInfo != nil {
printRecursive(nestedInfo, builder, nextPrefix)
} else {
builder.WriteString("\n")
}
}

case List:
builder.WriteString(fmt.Sprintf("%s (%s / length: %d, size: %d)\n", info, sizeDesc, info.listInfo.length, info.Size()))

default:
builder.WriteString(fmt.Sprintf("%s (%s / size: %d)\n", info.sszType, sizeDesc, info.Size()))
}

keys := make([]string, 0, len(info.containerInfo))
for k := range info.containerInfo {
keys = append(keys, k)
}
sort.Strings(keys)

for i, key := range keys {
connector := "├─"
nextPrefix := prefix + "│ "
if i == len(keys)-1 {
connector = "└─"
nextPrefix = prefix + " "
}

builder.WriteString(fmt.Sprintf("%s%s %s (offset: %d) ", prefix, connector, key, info.containerInfo[key].offset))

if nestedInfo := info.containerInfo[key].sszInfo; nestedInfo != nil {
printRecursive(nestedInfo, builder, nextPrefix)
} else {
builder.WriteString("\n")
}
builder.WriteString(fmt.Sprintf("%s (%s / size: %d)\n", info, sizeDesc, info.Size()))
}
}
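The new Size() branches above compose sizes the usual SSZ way: a list is its populated length times the element size, and a container is its fixed part plus the size of each variable field. A small hand-worked sketch with made-up numbers (illustrative only, not code from this change):

package main

import "fmt"

func main() {
	// Hypothetical container: one uint64 plus one List[uint64, 100] holding 5 elements.
	fixedPart := uint64(8 + 4) // 8 for the uint64, 4 for the list's offset (what FixedSize() would report)
	listSize := uint64(5) * 8  // List branch: length * element size
	total := fixedPart + listSize
	fmt.Println(total) // 52, mirroring the Container branch: fixed part + variable field sizes
}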
130
encoding/ssz/query/tag_parser.go
Normal file
@@ -0,0 +1,130 @@
package query

import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)

const (
// sszMaxTag specifies the maximum capacity of a variable-sized collection, like an SSZ List.
sszMaxTag = "ssz-max"

// sszSizeTag specifies the length of a fixed-sized collection, like an SSZ Vector.
// A wildcard ('?') indicates that the dimension is variable-sized (a List).
sszSizeTag = "ssz-size"
)

// SSZDimension holds parsed SSZ tag information for the current dimension.
// Mutually exclusive fields indicate whether the dimension is a vector or a list.
type SSZDimension struct {
vectorLength *uint64
listLimit *uint64
}

// ParseSSZTag parses SSZ-specific tags (like `ssz-max` and `ssz-size`)
// and returns the first dimension and the remaining SSZ tags.
// This function validates the tags and returns an error if they are malformed.
func ParseSSZTag(tag *reflect.StructTag) (*SSZDimension, *reflect.StructTag, error) {
if tag == nil {
return nil, nil, errors.New("nil struct tag")
}

var newTagParts []string
var sizeStr, maxStr string

// Parse ssz-size tag
if sszSize := tag.Get(sszSizeTag); sszSize != "" {
dims := strings.Split(sszSize, ",")
if len(dims) > 0 {
sizeStr = dims[0]

if len(dims) > 1 {
remainingSize := strings.Join(dims[1:], ",")
newTagParts = append(newTagParts, fmt.Sprintf(`%s:"%s"`, sszSizeTag, remainingSize))
}
}
}

// Parse ssz-max tag
if sszMax := tag.Get(sszMaxTag); sszMax != "" {
dims := strings.Split(sszMax, ",")
if len(dims) > 0 {
maxStr = dims[0]

if len(dims) > 1 {
remainingMax := strings.Join(dims[1:], ",")
newTagParts = append(newTagParts, fmt.Sprintf(`%s:"%s"`, sszMaxTag, remainingMax))
}
}
}

// Create new tag with remaining dimensions only.
// We don't have to preserve other tags like json or protobuf.
var newTag *reflect.StructTag
if len(newTagParts) > 0 {
newTagStr := strings.Join(newTagParts, " ")
t := reflect.StructTag(newTagStr)
newTag = &t
}

// Parse the first dimension based on ssz-size and ssz-max rules.
// 1. If ssz-size is not specified (wildcard or empty), it must be a list.
if sizeStr == "?" || sizeStr == "" {
if maxStr == "?" {
return nil, nil, errors.New("ssz-size and ssz-max cannot both be '?'")
}
if maxStr == "" {
return nil, nil, errors.New("list requires ssz-max value")
}

limit, err := strconv.ParseUint(maxStr, 10, 64)
if err != nil {
return nil, nil, fmt.Errorf("invalid ssz-max value: %w", err)
}
if limit == 0 {
return nil, nil, errors.New("ssz-max must be greater than 0")
}

return &SSZDimension{listLimit: &limit}, newTag, nil
}

// 2. If ssz-size is specified, it must be a vector.
length, err := strconv.ParseUint(sizeStr, 10, 64)
if err != nil {
return nil, nil, fmt.Errorf("invalid ssz-size value: %w", err)
}
if length == 0 {
return nil, nil, errors.New("ssz-size must be greater than 0")
}

return &SSZDimension{vectorLength: &length}, newTag, nil
}

// IsVector returns true if this dimension represents a vector.
func (d *SSZDimension) IsVector() bool {
return d.vectorLength != nil
}

// IsList returns true if this dimension represents a list.
func (d *SSZDimension) IsList() bool {
return d.listLimit != nil
}

// GetVectorLength returns the length for a vector in the current dimension.
func (d *SSZDimension) GetVectorLength() (uint64, error) {
if !d.IsVector() {
return 0, errors.New("not a vector dimension")
}
return *d.vectorLength, nil
}

// GetListLimit returns the limit for a list in the current dimension.
func (d *SSZDimension) GetListLimit() (uint64, error) {
if !d.IsList() {
return 0, errors.New("not a list dimension")
}
return *d.listLimit, nil
}
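As a usage sketch for the parser above (an illustration built only on the exported API shown in this file, not part of the change): a multi-dimensional tag can be walked by feeding the remaining tag back into ParseSSZTag until it returns nil. For a `ssz-size:"?,32" ssz-max:"100"` field this yields a list dimension (limit 100) followed by a vector dimension (length 32).

package main

import (
	"fmt"
	"reflect"

	"github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
)

func main() {
	// List[Vector[byte, 32], 100]-style field tag.
	tag := reflect.StructTag(`ssz-size:"?,32" ssz-max:"100"`)
	for t := &tag; t != nil; {
		dim, rest, err := query.ParseSSZTag(t)
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		switch {
		case dim.IsList():
			limit, _ := dim.GetListLimit()
			fmt.Println("list, limit:", limit) // first dimension: list, limit 100
		case dim.IsVector():
			length, _ := dim.GetVectorLength()
			fmt.Println("vector, length:", length) // second dimension: vector, length 32
		}
		t = rest // nil once every dimension has been consumed
	}
}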
187
encoding/ssz/query/tag_parser_test.go
Normal file
@@ -0,0 +1,187 @@
package query_test

import (
"reflect"
"testing"

"github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestParseSSZTag(t *testing.T) {
tests := []struct {
wantErr bool
wantIsList bool
wantIsVector bool
wantListLimit uint64
wantVectorLength uint64
wantRemainingTag string
tag string
name string
}{
// Vector tests
{
name: "single dimension vector",
tag: `ssz-size:"32"`,
wantIsVector: true,
wantVectorLength: 32,
},
{
name: "multi-dimensional vector",
tag: `ssz-size:"5,32"`,
wantIsVector: true,
wantVectorLength: 5,
wantRemainingTag: `ssz-size:"32"`,
},
{
name: "three-dimensional vector",
tag: `ssz-size:"5,10,32"`,
wantIsVector: true,
wantVectorLength: 5,
wantRemainingTag: `ssz-size:"10,32"`,
},
{
name: "large vector",
tag: `ssz-size:"1048576"`,
wantIsVector: true,
wantVectorLength: 1048576,
},

// List tests
{
name: "single dimension list",
tag: `ssz-max:"100"`,
wantIsList: true,
wantListLimit: 100,
},
{
name: "multi-dimensional list",
tag: `ssz-max:"100,200"`,
wantIsList: true,
wantListLimit: 100,
wantRemainingTag: `ssz-max:"200"`,
},
{
name: "large list",
tag: `ssz-max:"1048576"`,
wantIsList: true,
wantListLimit: 1048576,
},
{
name: "wildcard size becomes list",
tag: `ssz-size:"?" ssz-max:"100"`,
wantIsList: true,
wantListLimit: 100,
},
{
name: "wildcard with remaining dimensions",
tag: `ssz-size:"?,32" ssz-max:"100"`,
wantIsList: true,
wantListLimit: 100,
wantRemainingTag: `ssz-size:"32"`,
},
{
name: "empty size becomes list",
tag: `ssz-size:"" ssz-max:"100"`,
wantIsList: true,
wantListLimit: 100,
},
{
name: "list of vectors",
tag: `ssz-size:"?,32" ssz-max:"100"`,
wantIsList: true,
wantListLimit: 100,
wantRemainingTag: `ssz-size:"32"`,
},

// Error cases
{
name: "empty tag",
tag: "",
wantErr: true,
},
{
name: "zero vector length",
tag: `ssz-size:"0"`,
wantErr: true,
},
{
name: "zero list limit",
tag: `ssz-max:"0"`,
wantErr: true,
},
{
name: "invalid vector length",
tag: `ssz-size:"abc"`,
wantErr: true,
},
{
name: "invalid list limit",
tag: `ssz-max:"xyz"`,
wantErr: true,
},
{
name: "both wildcard",
tag: `ssz-size:"?" ssz-max:"?"`,
wantErr: true,
},
{
name: "list without max",
tag: `ssz-size:"?"`,
wantErr: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var tag *reflect.StructTag
if tt.tag != "" {
structTag := reflect.StructTag(tt.tag)
tag = &structTag
}

dim, remainingTag, err := query.ParseSSZTag(tag)
if tt.wantErr {
require.NotNil(t, err)
return
}

require.NoError(t, err)
require.NotNil(t, dim)

// Check dimension type
require.Equal(t, tt.wantIsVector, dim.IsVector())
require.Equal(t, tt.wantIsList, dim.IsList())

// Verify vector length if it's a vector
if tt.wantIsVector {
length, err := dim.GetVectorLength()
require.NoError(t, err)
require.Equal(t, tt.wantVectorLength, length)

// Trying to get list limit should error
_, err = dim.GetListLimit()
require.NotNil(t, err)
}

// Verify list limit if it's a list
if tt.wantIsList {
limit, err := dim.GetListLimit()
require.NoError(t, err)
require.Equal(t, tt.wantListLimit, limit)

// Trying to get vector length should error
_, err = dim.GetVectorLength()
require.NotNil(t, err)
}

// Check remaining tag
if tt.wantRemainingTag == "" {
require.Equal(t, remainingTag == nil, true)
} else {
require.NotNil(t, remainingTag)
require.Equal(t, tt.wantRemainingTag, string(*remainingTag))
}
})
}
}

@@ -14,6 +14,9 @@ func RunStructTest(t *testing.T, spec TestSpec) {
require.NoError(t, err)

testInstance := spec.Instance
err = query.PopulateVariableLengthInfo(info, testInstance)
require.NoError(t, err)

marshaller, ok := testInstance.(ssz.Marshaler)
require.Equal(t, true, ok, "Test instance must implement ssz.Marshaler, got %T", testInstance)

Some files were not shown because too many files have changed in this diff.