mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 14:28:09 -05:00)

Compare commits: hdiff_star ... state-desi (1 commit: c1edf3fe55)
@@ -193,7 +193,6 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
@@ -323,17 +323,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
@@ -341,7 +338,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch && !fuluAndNextEpoch {
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {
@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,13 +30,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
if err != nil {
return nil
}
@@ -45,7 +43,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch <= headEpoch {
if c.Epoch == headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
@@ -170,13 +170,12 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -198,13 +197,12 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -229,7 +227,6 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -238,9 +235,8 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {

service.head = &head{
root: [32]byte{'T'},
block: headBlock,
slot: 64,
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -267,7 +263,6 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -275,8 +270,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'U'},
block: headBlock,
root: [32]byte{'T'},
state: s,
slot: 64,
}
@@ -293,13 +287,12 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -295,6 +295,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
@@ -820,6 +820,24 @@ func TestOnBlock_NilBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
@@ -290,3 +290,52 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}

func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}

state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))

latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)

verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}
@@ -122,6 +122,24 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return nil
}

// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex primitives.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}

// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureBatch(
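Note (editor's sketch, not part of the diff): BlockSignatureBatch mirrors RandaoSignatureBatch in that it returns an unverified *bls.SignatureBatch rather than checking the signature on the spot; callers join several batches and pay the pairing cost once, as the ProcessBlockNoVerifyAnySig hunk further down does. A minimal illustration using only calls that appear in these hunks (error handling abbreviated, surrounding function assumed):

    // Sketch only: build the individual batches, merge them, verify once.
    bSet, err := blocks.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
    if err != nil {
        return errors.Wrap(err, "block signature batch")
    }
    rSet, err := blocks.RandaoSignatureBatch(ctx, st, randaoReveal[:])
    if err != nil {
        return errors.Wrap(err, "randao signature batch")
    }
    set := bls.NewSet()
    set.Join(bSet).Join(rSet) // attestation batches can be joined here as well
    if verified, err := set.Verify(); err != nil || !verified {
        return errors.New("batched signature verification failed")
    }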
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
bal, err := st.PendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
if bal > 0 {
continue
}
@@ -152,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
}

if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
log.WithError(err).Error("Could not update committee cache")
return nil, errors.Wrap(err, "could not update committee cache")
}

return indices, nil
@@ -5,20 +5,10 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
)

cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "cells_and_proofs_from_structured_computation_milliseconds",
Help: "Captures the time taken to compute cells and proofs from structured computation.",
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
},
)
var dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
)
@@ -3,7 +3,6 @@ package peerdas
import (
"sort"
"sync"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -297,42 +296,32 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return nil, nil, ErrBlobsCellsProofsMismatch
}

var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, blobCount)
proofsPerBlob := make([][]kzg.Proof, blobCount)

cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
for i, blob := range blobs {
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}
proofs = append(proofs, kzgProof)
}

proofs := make([]kzg.Proof, 0, numberOfColumns)
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = proofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, proofs)
}

return cellsPerBlob, proofsPerBlob, nil
@@ -340,55 +329,40 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
}()

var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))

for i, blobAndProof := range blobsAndProofs {
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
}

wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return errors.New("wrong KZG proof size - should never happen")
}
kzgProofs = append(kzgProofs, kzgProof)
}

var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs = append(kzgProofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, kzgProofs)
}

return cellsPerBlob, proofsPerBlob, nil
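Note (editor's sketch, not part of the diff): one side of the two hunks above fans the per-blob work out with golang.org/x/sync/errgroup while the other builds the result slices sequentially. The generic sketch below isolates the concurrent variant; the function and variable names are placeholders, not Prysm code. The key detail is that results are written into pre-sized slices by index instead of appended, so no extra locking is needed.

    // Run one goroutine per input and collect results by index with errgroup.
    func computeAll(inputs [][]byte, work func([]byte) ([]byte, error)) ([][]byte, error) {
        var wg errgroup.Group                  // from golang.org/x/sync/errgroup
        results := make([][]byte, len(inputs)) // pre-sized: each goroutine writes only its own slot
        for i, in := range inputs {
            i, in := i, in // explicit copies keep this correct on Go versions before 1.22
            wg.Go(func() error {
                out, err := work(in)
                if err != nil {
                    return err
                }
                results[i] = out // index assignment, not append: appending from goroutines would race
                return nil
            })
        }
        if err := wg.Wait(); err != nil { // first non-nil error from any worker
            return nil, err
        }
        return results, nil
    }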
@@ -182,6 +182,12 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}

sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -195,7 +201,7 @@ func ProcessBlockNoVerifyAnySig(

// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(rSet).Join(aSet)
set.Join(bSet).Join(rSet).Join(aSet)

if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()
@@ -157,8 +157,9 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
}

func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl

// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)

if oldestSlotFlagPtr != nil {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
@@ -128,9 +128,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))

currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }
@@ -144,7 +141,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",
@@ -178,13 +174,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: &denebSlot,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
@@ -214,9 +210,6 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot
@@ -239,27 +232,16 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)

// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())

// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)
@@ -515,11 +515,6 @@ func (dcs *DataColumnStorage) Clear() error {

// prune clean the cache, the filesystem and mutexes.
func (dcs *DataColumnStorage) prune() {
startTime := time.Now()
defer func() {
dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
}()

highestStoredEpoch := dcs.cache.HighestEpoch()

// Check if we need to prune.
@@ -627,9 +622,6 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
// Create the SSZ encoded data column sidecars.
var sszEncodedDataColumnSidecars []byte

// Initialize the count of the saved SSZ encoded data column sidecar.
storedCount := uint8(0)

for {
dataColumnSidecars := pullChan(inputDataColumnSidecars)
if len(dataColumnSidecars) == 0 {
@@ -676,9 +668,6 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errors.Wrap(err, "set index")
}

// Increment the count of the saved SSZ encoded data column sidecar.
storedCount++

// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}
@@ -703,12 +692,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}
@@ -822,14 +808,10 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}

dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}
@@ -36,15 +36,16 @@ var (
})

// Data columns
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_save_latency",
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
Buckets: dataColumnBuckets,
})
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_get_latency",
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
Buckets: []float64{3, 5, 7, 9, 11, 13},
Buckets: dataColumnBuckets,
})
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_pruned",
@@ -58,16 +59,4 @@ var (
Name: "data_column_disk_count",
Help: "Approximate number of data columns in storage",
})
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_file_sync_latency",
Help: "Latency of sync operations when saving data columns in milliseconds",
})
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_batch_store_count",
Help: "Number of data columns stored in a batch",
})
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
)
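Note (editor's sketch, not part of the diff): the metrics hunk above replaces two hard-coded Buckets literals with one shared dataColumnBuckets slice so the save and get histograms stay directly comparable. A self-contained version of that pattern, with made-up metric names:

    package metrics

    import (
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // Define the bucket boundaries once and reuse them across related histograms.
    var (
        exampleLatencyBuckets = []float64{3, 5, 7, 9, 11, 13} // milliseconds

        exampleSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
            Name:    "example_storage_save_latency",
            Help:    "Latency of save operations in milliseconds",
            Buckets: exampleLatencyBuckets,
        })
        exampleGetLatency = promauto.NewHistogram(prometheus.HistogramOpts{
            Name:    "example_storage_get_latency",
            Help:    "Latency of get operations in milliseconds",
            Buckets: exampleLatencyBuckets,
        })
    )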
@@ -89,7 +89,6 @@ type NoHeadAccessDatabase interface {
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
SlotByBlockRoot(context.Context, [32]byte) (primitives.Slot, error)
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
@@ -97,7 +96,6 @@ type NoHeadAccessDatabase interface {
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
SlotInDiffTree(primitives.Slot) (uint64, int, error)
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
@@ -32,7 +32,6 @@ go_library(
"state_diff_helpers.go",
"state_summary.go",
"state_summary_cache.go",
"testing_helpers.go",
"utils.go",
"validated_checkpoint.go",
"wss.go",
@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
dbIface "github.com/OffchainLabs/prysm/v7/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/encoding/ssz/detect"
"github.com/OffchainLabs/prysm/v7/genesis"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -43,15 +42,6 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}

// Initialize state-diff if enabled and not yet initialized.
if features.Get().EnableStateDiff && s.stateDiffCache == nil {
if err := s.initializeStateDiff(0, genesisState); err != nil {
return errors.Wrap(err, "failed to initialize state diff for genesis")
}
log.Info("Initialized state-diff with genesis state")
}

return nil
}
@@ -204,31 +204,11 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
}

if features.Get().EnableStateDiff {
// Check if offset already exists (existing state-diff database).
hasOffset, err := kv.hasStateDiffOffset()
sdCache, err := newStateDiffCache(kv)
if err != nil {
return nil, err
}

if hasOffset {
// Existing state-diff database - restarts not yet supported.
return nil, errors.New("restarting with existing state-diff database not yet supported")
}

// Check if this is a new database (no head block).
headBlock, err := kv.HeadBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not get head block")
}

if headBlock == nil {
// New database - will be initialized later during checkpoint/genesis sync.
// stateDiffCache stays nil until SaveOrigin or SaveGenesisData initializes it.
log.Info("State-diff enabled: will be initialized during checkpoint or genesis sync")
} else {
// Existing database without state-diff - warn and disable feature.
log.Warn("State-diff feature ignored: database was created without state-diff support")
}
kv.stateDiffCache = sdCache
}

return kv, nil
@@ -23,16 +23,6 @@ const (
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
*/

// SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
// It it is, it also returns the offset and level in the tree.
func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
offset := s.getOffset()
if uint64(slot) < offset {
return 0, -1, ErrSlotBeforeOffset
}
return offset, computeLevel(offset, slot), nil
}

// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
@@ -43,10 +33,13 @@ func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconStat
}

slot := st.Slot()
offset, lvl, err := s.SlotInDiffTree(slot)
if err != nil {
return errors.Wrap(err, "could not determine if slot is in diff tree")
offset := s.getOffset()
if uint64(slot) < offset {
return ErrSlotBeforeOffset
}

// Find the level to save the state.
lvl := computeLevel(offset, slot)
if lvl == -1 {
return nil
}
@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/math"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
"go.etcd.io/bbolt"
)

@@ -120,54 +119,6 @@ func (s *Store) getOffset() uint64 {
return s.stateDiffCache.getOffset()
}

// hasStateDiffOffset checks if the state-diff offset has been set in the database.
// This is used to detect if an existing database has state-diff enabled.
func (s *Store) hasStateDiffOffset() (bool, error) {
var hasOffset bool
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return nil
}
hasOffset = bucket.Get(offsetKey) != nil
return nil
})
return hasOffset, err
}

// initializeStateDiff sets up the state-diff schema for a new database.
// This should be called during checkpoint sync or genesis sync.
func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.ReadOnlyBeaconState) error {
// Write offset directly to the database (without using cache which doesn't exist yet).
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}

offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
return bucket.Put(offsetKey, offsetBytes)
})
if err != nil {
return pkgerrors.Wrap(err, "failed to set offset")
}

// Create the state diff cache (this will read the offset from the database).
sdCache, err := newStateDiffCache(s)
if err != nil {
return pkgerrors.Wrap(err, "failed to create state diff cache")
}
s.stateDiffCache = sdCache

// Save the initial state as a full snapshot.
if err := s.saveFullSnapshot(initialState); err != nil {
return pkgerrors.Wrap(err, "failed to save initial snapshot")
}

return nil
}

func keyForSnapshot(v int) ([]byte, error) {
switch v {
case version.Fulu:
@@ -1,35 +0,0 @@
package kv

import (
"encoding/binary"

"go.etcd.io/bbolt"
)

// InitStateDiffCacheForTesting initializes the state diff cache with the given offset.
// This is intended for testing purposes when setting up state diff after database creation.
// This file is only compiled when the "testing" build tag is set.
func (s *Store) InitStateDiffCacheForTesting(offset uint64) error {
// First, set the offset in the database.
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}

offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, offset)
return bucket.Put([]byte("offset"), offsetBytes)
})
if err != nil {
return err
}

// Then create the state diff cache.
sdCache, err := newStateDiffCache(s)
if err != nil {
return err
}
s.stateDiffCache = sdCache
return nil
}
@@ -4,7 +4,6 @@ import (
"context"
"fmt"

"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/ssz/detect"
@@ -112,13 +111,5 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
return errors.Wrap(err, "save finalized checkpoint")
}

// Initialize state-diff if enabled and not yet initialized.
if features.Get().EnableStateDiff && s.stateDiffCache == nil {
if err := s.initializeStateDiff(state.Slot(), state); err != nil {
return errors.Wrap(err, "failed to initialize state diff")
}
log.WithField("slot", state.Slot()).Info("Initialized state-diff with checkpoint state")
}

return nil
}
@@ -532,19 +532,12 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()

start := time.Now()

if !s.capabilityCache.has(GetBlobsV2) {
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
}

result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)

if len(result) != 0 {
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
}

return result, handleRPCError(err)
}
@@ -27,13 +27,6 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV2Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_blobs_v2_latency_milliseconds",
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",
@@ -204,9 +204,6 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
}

// Reset our light client finality update map.
@@ -226,8 +223,5 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
}
}
@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
@@ -958,13 +957,6 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
@@ -130,10 +130,6 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

versionHeader := r.Header.Get(api.VersionHeader)
if versionHeader == "" {
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
@@ -242,14 +238,22 @@ func (s *Server) handleAttestationsElectra(
},
})

// Broadcast first using CommitteeId directly (fast path)
// This matches gRPC behavior and avoids blocking on state fetching
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
att := singleAtt.ToAttestationElectra(committee)

wantedEpoch := slots.ToEpoch(att.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get head validator indices")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
Index: i,
@@ -260,35 +264,17 @@ func (s *Server) handleAttestationsElectra(
}
continue
}
}

// Save to pool after broadcast (slow path - requires state fetching)
// Run in goroutine to avoid blocking the HTTP response
go func() {
for _, singleAtt := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
if err != nil {
log.WithError(err).Error("Could not get target state for attestation")
continue
if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
log.WithError(err).Error("Could not get committee for attestation")
continue
}
att := singleAtt.ToAttestationElectra(committee)

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}
}()
}

if len(failedBroadcasts) > 0 {
log.WithFields(logrus.Fields{
@@ -484,10 +470,6 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

var req structs.SubmitSyncCommitteeSignaturesRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {
@@ -729,7 +711,6 @@ func (s *Server) SubmitAttesterSlashingsV2(w http.ResponseWriter, r *http.Reques
versionHeader := r.Header.Get(api.VersionHeader)
if versionHeader == "" {
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
return
}
v, err := version.FromString(versionHeader)
if err != nil {
@@ -26,7 +26,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -623,8 +622,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
OperationNotifier: &blockchainmock.MockOperationNotifier{},
AttestationStateFetcher: chainService,
}
@@ -657,7 +654,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
@@ -677,7 +673,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("phase0 att post electra", func(t *testing.T) {
@@ -798,7 +793,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
@@ -818,7 +812,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("no body", func(t *testing.T) {
@@ -868,27 +861,6 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
})
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestationsV2(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListVoluntaryExits(t *testing.T) {
@@ -1085,19 +1057,14 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {

t.Run("single", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: chainService,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
},
}

@@ -1122,19 +1089,14 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: chainService,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
},
}

@@ -1158,18 +1120,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("invalid", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: chainService,
HeadFetcher: &blockchainmock.ChainService{
State: st,
},
},
}

@@ -1192,13 +1149,7 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
})
t.Run("empty", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
s := &Server{}

var body bytes.Buffer
_, err := body.WriteString("[]")
@@ -1215,13 +1166,7 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("no body", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
s := &Server{}

request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
@@ -1234,26 +1179,6 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleSyncCommitteeMsg)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitSyncCommitteeSignatures(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListBLSToExecutionChanges(t *testing.T) {
@@ -2187,33 +2112,6 @@ func TestSubmitAttesterSlashingsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid attester slashing", e.Message)
|
||||
})
|
||||
|
||||
t.Run("missing-version-header", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidAttesterSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
// Intentionally do not set api.VersionHeader to verify missing header handling.
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, api.VersionHeader+" header is required", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
|
||||
@@ -654,10 +654,6 @@ func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (s
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func (m *futureSyncMockFetcher) StateByEpoch(context.Context, primitives.Epoch) (state.BeaconState, error) {
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func TestGetSyncCommittees_Future(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -40,7 +40,6 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
|
||||
Data: data,
|
||||
})
|
||||
return
|
||||
}
|
||||
previous := schedule[0]
|
||||
for _, entry := range schedule {
|
||||
|
||||
@@ -116,7 +116,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateSlot := update.AttestedHeader().Beacon().Slot
|
||||
@@ -132,15 +131,12 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
chunkLength = ssz.MarshalUint64(chunkLength, uint64(len(updateSSZ)+4))
|
||||
if _, err := w.Write(chunkLength); err != nil {
|
||||
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
|
||||
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateSSZ); err != nil {
|
||||
httputil.HandleError(w, "Could not write update SSZ: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -149,7 +145,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateJson, err := structs.LightClientUpdateFromConsensus(update)
|
||||
|
||||
@@ -132,7 +132,6 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.SyncChecker.Synced() && !optimistic {
|
||||
return
|
||||
|
||||
@@ -228,7 +228,7 @@ func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.
|
||||
}
|
||||
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get state for epoch's starting slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return nil, false
|
||||
}
|
||||
return st, true
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
@@ -77,7 +78,6 @@ go_test(
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
rpchelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
@@ -897,15 +898,20 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// For next epoch requests, we use the current epoch's state since committee
|
||||
// assignments for next epoch can be computed from current epoch's state.
|
||||
epochForState := requestedEpoch
|
||||
var startSlot primitives.Slot
|
||||
if requestedEpoch == nextEpoch {
|
||||
epochForState = currentEpoch
|
||||
startSlot, err = slots.EpochStart(currentEpoch)
|
||||
} else {
|
||||
startSlot, err = slots.EpochStart(requestedEpoch)
|
||||
}
|
||||
st, err := s.Stater.StateByEpoch(ctx, epochForState)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot from epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1014,11 +1020,39 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
nextEpochLookahead = true
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, requestedEpoch)
|
||||
epochStartSlot, err := slots.EpochStart(requestedEpoch)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot of epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var st state.BeaconState
|
||||
// if the requested epoch is new, use the head state and the next slot cache
|
||||
if requestedEpoch < currentEpoch {
|
||||
st, err = s.Stater.StateBySlot(ctx, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get state for slot %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
st, err = s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
|
||||
if st.Slot() < epochStartSlot {
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not process slots up to %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var assignments map[primitives.ValidatorIndex][]primitives.Slot
|
||||
if nextEpochLookahead {
|
||||
@@ -1069,8 +1103,7 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err = sortProposerDuties(duties); err != nil {
|
||||
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
|
||||
if !sortProposerDuties(w, duties) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1141,10 +1174,14 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
startingEpoch := min(requestedEpoch, currentEpoch)
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, startingEpoch)
|
||||
slot, err := slots.EpochStart(startingEpoch)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get sync committee slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err := s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1290,7 +1327,7 @@ func (s *Server) GetLiveness(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
st, err = s.Stater.StateBySlot(ctx, epochEnd)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get slot for requested epoch: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
participation, err = st.CurrentEpochParticipation()
|
||||
@@ -1410,20 +1447,22 @@ func syncCommitteeDutiesAndVals(
|
||||
return duties, vals, nil
|
||||
}
|
||||
|
||||
func sortProposerDuties(duties []*structs.ProposerDuty) error {
|
||||
var err error
|
||||
func sortProposerDuties(w http.ResponseWriter, duties []*structs.ProposerDuty) bool {
|
||||
ok := true
|
||||
sort.Slice(duties, func(i, j int) bool {
|
||||
si, parseErr := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
si, err := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
return false
|
||||
}
|
||||
sj, parseErr := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
sj, err := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
return false
|
||||
}
|
||||
return si < sj
|
||||
})
|
||||
return err
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
p2pmock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -2007,7 +2006,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -2186,7 +2184,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
}
|
||||
@@ -2227,62 +2224,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProposerDuties(t *testing.T) {
|
||||
@@ -2486,60 +2427,6 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
@@ -2570,7 +2457,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
|
||||
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, State: st}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2761,7 +2648,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
return newSyncPeriodSt
|
||||
}
|
||||
}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot, State: newSyncPeriodSt}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2842,7 +2729,8 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
slot, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
st2 := st.Copy()
|
||||
st2, err := util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st2.SetSlot(slot))
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
@@ -2856,7 +2744,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
State: st2,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st2},
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -2901,62 +2789,6 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
@@ -11,7 +11,6 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -99,7 +98,6 @@ type Stater interface {
|
||||
State(ctx context.Context, id []byte) (state.BeaconState, error)
|
||||
StateRoot(ctx context.Context, id []byte) ([]byte, error)
|
||||
StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
|
||||
StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
// BeaconDbStater is an implementation of Stater. It retrieves states from the beacon chain database.
|
||||
@@ -269,46 +267,6 @@ func (p *BeaconDbStater) StateBySlot(ctx context.Context, target primitives.Slot
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// StateByEpoch returns the state for the start of the requested epoch.
|
||||
// For current or next epoch, it uses the head state and next slot cache for efficiency.
|
||||
// For past epochs, it replays blocks from the most recent canonical state.
|
||||
func (p *BeaconDbStater) StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "statefetcher.StateByEpoch")
|
||||
defer span.End()
|
||||
|
||||
targetSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get epoch start slot")
|
||||
}
|
||||
|
||||
currentSlot := p.GenesisTimeFetcher.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// For past epochs, use the replay mechanism
|
||||
if epoch < currentEpoch {
|
||||
return p.StateBySlot(ctx, targetSlot)
|
||||
}
|
||||
|
||||
// For current or next epoch, use head state + next slot cache (much faster)
|
||||
headState, err := p.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
|
||||
// If head state is already at or past the target slot, return it
|
||||
if headState.Slot() >= targetSlot {
|
||||
return headState, nil
|
||||
}
|
||||
|
||||
// Process slots using the next slot cache
|
||||
headRoot := p.ChainInfoFetcher.CachedHeadRoot()
|
||||
st, err := transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot[:], targetSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", targetSlot)
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (p *BeaconDbStater) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -444,111 +444,3 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(101), st.Slot())
|
||||
}
|
||||
|
||||
func TestStateByEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
t.Run("current epoch uses head state", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 0
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
// Should return head state since it's already past epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("current epoch processes slots to epoch start", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 1
|
||||
// Current slot is 32 (epoch 1), so epoch 1 is current epoch
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := slotsPerEpoch // slot 32, epoch 1
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
// In real usage, the transition package handles this properly
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
// that can process slots (missing committees, etc.)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("past epoch uses replay", func(t *testing.T) {
|
||||
// Head is at epoch 2, requesting epoch 0 (past)
|
||||
headSlot := slotsPerEpoch * 2 // slot 64, epoch 2
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
pastEpochSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 0})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
mockReplayer := mockstategen.NewReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(pastEpochSt, 0)
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), st.Slot())
|
||||
})
|
||||
|
||||
t.Run("next epoch uses head state path", func(t *testing.T) {
|
||||
// Head is at slot 30 (epoch 0), requesting epoch 1 (next)
|
||||
// Current slot is 30 (epoch 0), so epoch 1 is next epoch
|
||||
headSlot := primitives.Slot(30)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("head state already at target slot returns immediately", func(t *testing.T) {
|
||||
// Head is at slot 32 (epoch 1 start), requesting epoch 1
|
||||
headSlot := slotsPerEpoch // slot 32
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("head state past target slot returns head state", func(t *testing.T) {
|
||||
// Head is at slot 40, requesting epoch 1 (starts at slot 32)
|
||||
headSlot := primitives.Slot(40)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
// Returns head state since it's already >= epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -52,27 +52,24 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
attCopy := att.Copy()
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
@@ -85,10 +82,6 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -105,17 +98,18 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
|
||||
singleAttCopy := singleAtt.Copy()
|
||||
att := singleAttCopy.ToAttestationElectra(committee)
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}()
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -38,7 +38,6 @@ func TestProposeAttestation(t *testing.T) {
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
TimeFetcher: chainService,
|
||||
AttestationStateFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
head := util.NewBeaconBlock()
|
||||
head.Block.Slot = 999
|
||||
@@ -142,7 +141,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
@@ -151,37 +149,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
func TestProposeAttestation_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
_, err := attesterServer.ProposeAttestation(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestProposeAttestationElectra_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := ðpb.SingleAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestGetAttestationData_OK(t *testing.T) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
|
||||
|
||||
@@ -26,6 +26,5 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// MockStater is a fake implementation of lookup.Stater.
|
||||
@@ -15,7 +14,6 @@ type MockStater struct {
|
||||
StateProviderFunc func(ctx context.Context, stateId []byte) (state.BeaconState, error)
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByEpoch map[primitives.Epoch]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
@@ -45,22 +43,3 @@ func (m *MockStater) StateRoot(context.Context, []byte) ([]byte, error) {
|
||||
func (m *MockStater) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
|
||||
return m.StatesBySlot[s], nil
|
||||
}
|
||||
|
||||
// StateByEpoch --
|
||||
func (m *MockStater) StateByEpoch(_ context.Context, e primitives.Epoch) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StatesByEpoch != nil {
|
||||
return m.StatesByEpoch[e], nil
|
||||
}
|
||||
// Fall back to StatesBySlot if StatesByEpoch is not set
|
||||
slot, err := slots.EpochStart(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.StatesBySlot != nil {
|
||||
return m.StatesBySlot[slot], nil
|
||||
}
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
beacon-chain/state/state-design.md (new file, 438 lines)
@@ -0,0 +1,438 @@
# Beacon Chain State Design Document

## Package Structure

### `/beacon-chain/state`

This is the top-level state package that defines interfaces only. This package provides a clean API boundary for state access without implementation details. The package follows the interface segregation principle, breaking down access patterns into granular read-only and write-only interfaces (e.g., `ReadOnlyValidators`, `WriteOnlyValidators`, `ReadOnlyBalances`, `WriteOnlyBalances`).
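
A minimal sketch of how these granular interfaces compose, assuming trimmed method sets and a stand-in index type (the real definitions use `primitives.ValidatorIndex` and many more methods):

```go
// Sketch of the interface-segregation idea; method sets are illustrative,
// not the package's exact definitions.
package state

type ValidatorIndex = uint64 // stand-in for primitives.ValidatorIndex

type ReadOnlyBalances interface {
	Balances() []uint64
	BalanceAtIndex(idx ValidatorIndex) (uint64, error)
}

type WriteOnlyBalances interface {
	SetBalances(val []uint64) error
	UpdateBalancesAtIndex(idx ValidatorIndex, val uint64) error
}

// A consumer that only reads balances can depend on ReadOnlyBalances alone,
// while the full BeaconState interface composes the granular pieces.
type BeaconState interface {
	ReadOnlyBalances
	WriteOnlyBalances
	// ...ReadOnlyValidators, WriteOnlyValidators, and so on
}
```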

### `/beacon-chain/state/state-native`

This package contains the actual `BeaconState` struct and all concrete implementations of the state interfaces. It also contains thread-safe getters and setters.

### `/beacon-chain/state/fieldtrie`

A dedicated package for field-level merkle trie functionality. The main component of the package is the `FieldTrie` struct, which represents a particular field's merkle trie. The package contains several field trie operations such as recomputing tries, copying and transferring them.

### `/beacon-chain/state/stateutil`

Utility package whose main components are various state merkleization functions. Other things contained in this package include field trie helpers, the implementation of shared references, and validator tracking.

## Beacon State Architecture

### `BeaconState` Structure

The `BeaconState` is a single structure that supports all consensus versions. The value of the `version` field determines the active version of the state. It is used extensively throughout the codebase to determine which code path will be executed.

```go
type BeaconState struct {
	version int
	id      uint64 // Used for tracking states in multi-value slices

	// Common fields (all versions)
	genesisTime           uint64
	genesisValidatorsRoot [32]byte
	slot                  primitives.Slot
	// ...

	// Phase0+ fields
	eth1Data                  *ethpb.Eth1Data
	eth1DataVotes             []*ethpb.Eth1Data
	eth1DepositIndex          uint64
	slashings                 []uint64
	previousEpochAttestations []*ethpb.PendingAttestation
	currentEpochAttestations  []*ethpb.PendingAttestation
	// ...

	// Altair+ fields
	previousEpochParticipation []byte
	currentEpochParticipation  []byte
	currentSyncCommittee       *ethpb.SyncCommittee
	nextSyncCommittee          *ethpb.SyncCommittee
	// ...

	// Fields for other forks...

	// Internal state management
	lock                  sync.RWMutex
	dirtyFields           map[types.FieldIndex]bool
	dirtyIndices          map[types.FieldIndex][]uint64
	stateFieldLeaves      map[types.FieldIndex]*fieldtrie.FieldTrie
	rebuildTrie           map[types.FieldIndex]bool
	valMapHandler         *stateutil.ValidatorMapHandler
	merkleLayers          [][][]byte
	sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
}
```

### Multi-Value Slices

Several large array fields of the state are implemented as multi-value slices. This is a specialized data structure that enables efficient sharing and modification of slices between states. A multi-value slice is preferred over a regular slice in scenarios where only a small fraction of the array is updated at a time. In such cases, using a multi-value slice results in fewer memory allocations because many values of the slice will be shared between states, whereas with a regular slice changing even a single item results in copying the full slice. Examples include `MultiValueBlockRoots` and `MultiValueBalances`.
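
As a rough mental model (not Prysm's actual implementation), a multi-value slice can be pictured as a shared base slice plus per-state overrides. The sketch below uses that simplified shape to show why a single-index update does not copy the whole array:

```go
// Conceptual sketch only: a shared base slice plus per-state overrides.
// The real multi-value slice differs internally, but the sharing idea is the same.
package main

import "fmt"

type stateID uint64

type multiValueUint64 struct {
	shared    []uint64                   // values common to all states
	overrides map[stateID]map[int]uint64 // per-state individual values
}

func newMultiValueUint64(vals []uint64) *multiValueUint64 {
	return &multiValueUint64{shared: vals, overrides: map[stateID]map[int]uint64{}}
}

// UpdateAt records an override for one state without copying the shared slice.
func (m *multiValueUint64) UpdateAt(id stateID, idx int, val uint64) {
	if m.overrides[id] == nil {
		m.overrides[id] = map[int]uint64{}
	}
	m.overrides[id][idx] = val
}

// At returns the state-specific value if present, otherwise the shared one.
func (m *multiValueUint64) At(id stateID, idx int) uint64 {
	if v, ok := m.overrides[id][idx]; ok {
		return v
	}
	return m.shared[idx]
}

func main() {
	balances := newMultiValueUint64([]uint64{32_000_000_000, 32_000_000_000})
	s1, s2 := stateID(1), stateID(2)
	balances.UpdateAt(s2, 0, 31_000_000_000)
	fmt.Println(balances.At(s1, 0)) // 32000000000
	fmt.Println(balances.At(s2, 0)) // 31000000000
}
```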

**Example**
```go
// Create new multi-value slice
mvBalances := NewMultiValueBalances([]uint64{32000000000, 32000000000, ...})

// Share across states
state1.balancesMultiValue = mvBalances
state2 := state1.Copy() // state2 shares the same mvBalances

// Modify in state2
state2.UpdateBalancesAtIndex(0, 31000000000) // This doesn't create a new multi-value slice.

state1.BalanceAtIndex(0) // this returns 32000000000
state2.BalanceAtIndex(0) // this returns 31000000000
```

### Getters/Setters

All beacon state getters and setters follow a consistent pattern. All exported methods are protected from concurrent modification using read locks (for getters) or write locks (for setters).

**Getters**

Values are never returned directly from getters. A copy of the value is returned instead.

```go
func (b *BeaconState) Fork() *ethpb.Fork {
	if b.fork == nil {
		return nil
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	return b.forkVal()
}

func (b *BeaconState) forkVal() *ethpb.Fork {
	if b.fork == nil {
		return nil
	}

	prevVersion := make([]byte, len(b.fork.PreviousVersion))
	copy(prevVersion, b.fork.PreviousVersion)
	currVersion := make([]byte, len(b.fork.CurrentVersion))
	copy(currVersion, b.fork.CurrentVersion)
	return &ethpb.Fork{
		PreviousVersion: prevVersion,
		CurrentVersion:  currVersion,
		Epoch:           b.fork.Epoch,
	}
}
```

```go
func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
	if b.version != version.Phase0 {
		return nil, errNotSupported("CurrentEpochAttestations", b.version)
	}

	if b.currentEpochAttestations == nil {
		return nil, nil
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	return b.currentEpochAttestationsVal(), nil
}

func (b *BeaconState) currentEpochAttestationsVal() []*ethpb.PendingAttestation {
	if b.currentEpochAttestations == nil {
		return nil
	}

	res := make([]*ethpb.PendingAttestation, len(b.currentEpochAttestations))
	for i := range res {
		res[i] = b.currentEpochAttestations[i].Copy()
	}
	return res
}
```

In the case of fields backed by multi-value slices, the appropriate methods of the multi-value slice are invoked.

```go
func (b *BeaconState) StateRoots() [][]byte {
	b.lock.RLock()
	defer b.lock.RUnlock()

	roots := b.stateRootsVal()
	if roots == nil {
		return nil
	}
	return roots.Slice()
}

func (b *BeaconState) stateRootsVal() customtypes.StateRoots {
	if b.stateRootsMultiValue == nil {
		return nil
	}
	return b.stateRootsMultiValue.Value(b)
}

func (b *BeaconState) StateRootAtIndex(idx uint64) ([]byte, error) {
	b.lock.RLock()
	defer b.lock.RUnlock()

	if b.stateRootsMultiValue == nil {
		return nil, nil
	}
	r, err := b.stateRootsMultiValue.At(b, idx)
	if err != nil {
		return nil, err
	}
	return r[:], nil
}
```

**Setters**

Whenever a beacon state field is set, it is marked as dirty. This is needed for hash tree root computation so that the cached merkle branch with the old value of the modified field is recomputed using the new value.

```go
func (b *BeaconState) SetSlot(val primitives.Slot) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	b.slot = val
	b.markFieldAsDirty(types.Slot)
	return nil
}
```

Several fields of the state are shared between states through a `Reference` mechanism. These references are stored in `b.sharedFieldReferences`. Whenever a state is copied, the reference counter for each of these fields is incremented. When a new value for any of these fields is set, the counter for the existing reference is decremented and a new reference is created for that field.

```go
type Reference struct {
	refs uint // Reference counter
	lock sync.RWMutex
}
```

```go
func (b *BeaconState) SetCurrentParticipationBits(val []byte) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.version == version.Phase0 {
		return errNotSupported("SetCurrentParticipationBits", b.version)
	}

	b.sharedFieldReferences[types.CurrentEpochParticipationBits].MinusRef()
	b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)

	b.currentEpochParticipation = val
	b.markFieldAsDirty(types.CurrentEpochParticipationBits)
	return nil
}
```

Updating a single value of an array field requires updating `b.dirtyIndices` to ensure the field trie for that particular field is properly recomputed.

```go
func (b *BeaconState) UpdateBalancesAtIndex(idx primitives.ValidatorIndex, val uint64) error {
	if err := b.balancesMultiValue.UpdateAt(b, uint64(idx), val); err != nil {
		return errors.Wrap(err, "could not update balances")
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	b.markFieldAsDirty(types.Balances)
	b.addDirtyIndices(types.Balances, []uint64{uint64(idx)})
	return nil
}
```

As is the case with getters, for fields backed by multi-value slices the appropriate methods of the multi-value slice are invoked when updating field values. The multi-value slice keeps track of states internally, which means the `Reference` construct is unnecessary.

```go
func (b *BeaconState) SetStateRoots(val [][]byte) error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.stateRootsMultiValue != nil {
		b.stateRootsMultiValue.Detach(b)
	}
	b.stateRootsMultiValue = NewMultiValueStateRoots(val)

	b.markFieldAsDirty(types.StateRoots)
	b.rebuildTrie[types.StateRoots] = true
	return nil
}

func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error {
	if err := b.stateRootsMultiValue.UpdateAt(b, idx, stateRoot); err != nil {
		return errors.Wrap(err, "could not update state roots")
	}

	b.lock.Lock()
	defer b.lock.Unlock()

	b.markFieldAsDirty(types.StateRoots)
	b.addDirtyIndices(types.StateRoots, []uint64{idx})
	return nil
}
```

### Read-Only Validator

There are two ways in which validators can be accessed through getters. One approach is to use the methods `Validators` and `ValidatorAtIndex`, which return a copy of, respectively, the whole validator set or a particular validator. The other approach is to use the `ReadOnlyValidator` construct via the `ValidatorsReadOnly` and `ValidatorAtIndexReadOnly` methods. The `ReadOnlyValidator` structure is a wrapper around the protobuf validator. The advantage of using the read-only version is that no copy of the underlying validator is made, which helps with performance, especially when accessing a large number of validators (e.g. looping through the whole validator registry). Because the read-only wrapper exposes only getters, each of which returns a copy of the validator's fields, it prevents accidental mutation of the underlying validator.

```go
type ReadOnlyValidator struct {
	validator *ethpb.Validator
}

// Only getter methods, no setters
func (v *ReadOnlyValidator) PublicKey() [48]byte
func (v *ReadOnlyValidator) EffectiveBalance() uint64
func (v *ReadOnlyValidator) Slashed() bool
// ... etc
```

**Usage**
```go
// Returns ReadOnlyValidator
validator, err := state.ValidatorAtIndexReadOnly(idx)

// Can read but not modify
pubkey := validator.PublicKey()
balance := validator.EffectiveBalance()

// To modify, must get mutable copy
validatorCopy := validator.Copy() // Returns *ethpb.Validator
validatorCopy.EffectiveBalance = newBalance
state.UpdateValidatorAtIndex(idx, validatorCopy)
```

## Field Trie System

For a few large arrays, such as block roots or the validator registry, hashing the state field at every slot would be very expensive. To avoid such re-hashing, the underlying merkle trie of the field is maintained and only the branch(es) corresponding to the changed index(es) are recomputed, instead of the whole trie.

Each state version has a list of active fields defined (e.g. `phase0Fields`, `altairFields`), which serves as the basis for field trie creation.

```go
type FieldTrie struct {
	*sync.RWMutex
	reference     *stateutil.Reference // The number of states this field trie is shared between
	fieldLayers   [][]*[32]byte        // Merkle trie layers
	field         types.FieldIndex     // Which field this field trie represents
	dataType      types.DataType       // Type of field's array
	length        uint64               // Maximum number of elements
	numOfElems    int                  // Number of elements
	isTransferred bool                 // Whether trie was transferred
}
```

The `DataType` enum indicates the type of the field's array. The possible values are:
- `BasicArray`: Fixed-size arrays (e.g. `blockRoots`)
- `CompositeArray`: Variable-size arrays (e.g. `validators`)
- `CompressedArray`: Variable-size arrays that pack multiple elements per trie leaf (e.g. `balances` with 4 elements per leaf); see the packing sketch below
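
For intuition on the `CompressedArray` case, here is a hand-rolled sketch of packing four `uint64` balances into each 32-byte leaf (little-endian, zero-padded), mirroring the packing rule described above; it is illustrative and not the `fieldtrie` package's code:

```go
// Illustration of CompressedArray packing: four uint64 values per 32-byte
// leaf. Hand-rolled sketch, not the fieldtrie implementation.
package main

import (
	"encoding/binary"
	"fmt"
)

// packBalances packs balances into 32-byte chunks, 4 per leaf.
func packBalances(balances []uint64) [][32]byte {
	numLeaves := (len(balances) + 3) / 4
	leaves := make([][32]byte, numLeaves)
	for i, b := range balances {
		leaf := i / 4
		offset := (i % 4) * 8
		binary.LittleEndian.PutUint64(leaves[leaf][offset:offset+8], b)
	}
	return leaves
}

func main() {
	leaves := packBalances([]uint64{1, 2, 3, 4, 5})
	fmt.Println(len(leaves)) // 2: the second leaf holds balance 5 plus zero padding
}
```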

### Recomputing a Trie

To avoid recomputing the state root every time any value of the state changes, branches of field tries are not recomputed until the state root is actually needed. When it is necessary to recompute a trie, the `RecomputeTrie` function rebuilds the affected branches in the trie according to the provided changed indices. The changed indices of each field are tracked in the `dirtyIndices` field of the beacon state, which is a `map[types.FieldIndex][]uint64`. The recomputation algorithms for fixed-size and variable-size fields are different.
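
A minimal sketch of the branch-recomputation idea, assuming a complete binary trie stored as layers of 32-byte nodes with a power-of-two number of leaves; the real `RecomputeTrie` additionally handles the different data types and variable-length fields described above:

```go
// Sketch: recompute only the branches above changed leaf indices, instead of
// rehashing the whole trie. Assumes layers[0] holds the leaves and the last
// layer holds the root.
package main

import (
	"crypto/sha256"
	"fmt"
)

func hashNodes(left, right [32]byte) [32]byte {
	var buf [64]byte
	copy(buf[:32], left[:])
	copy(buf[32:], right[:])
	return sha256.Sum256(buf[:])
}

// recomputeBranches walks up the trie, rehashing only parents of changed indices.
func recomputeBranches(layers [][][32]byte, changed []uint64) [32]byte {
	for depth := 0; depth < len(layers)-1; depth++ {
		parents := map[uint64]struct{}{}
		for _, idx := range changed {
			parent := idx / 2
			layers[depth+1][parent] = hashNodes(layers[depth][parent*2], layers[depth][parent*2+1])
			parents[parent] = struct{}{}
		}
		next := make([]uint64, 0, len(parents))
		for p := range parents {
			next = append(next, p)
		}
		changed = next
	}
	return layers[len(layers)-1][0]
}

func main() {
	// Build a toy trie with 4 leaves, change leaf 2, and recompute its branch.
	leaves := [][32]byte{{1}, {2}, {3}, {4}}
	layers := [][][32]byte{
		leaves,
		{hashNodes(leaves[0], leaves[1]), hashNodes(leaves[2], leaves[3])},
	}
	layers = append(layers, [][32]byte{hashNodes(layers[1][0], layers[1][1])})
	layers[0][2] = [32]byte{9}
	fmt.Printf("%x\n", recomputeBranches(layers, []uint64{2}))
}
```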

### Transferring a Trie

When it is expected that an older state won't need its trie for recomputation, its trie layers can be transferred directly to a new trie instead of copying them:

```go
func (f *FieldTrie) TransferTrie() *FieldTrie {
	f.isTransferred = true
	nTrie := &FieldTrie{
		fieldLayers: f.fieldLayers, // Direct transfer, no copy
		field:       f.field,
		dataType:    f.dataType,
		reference:   stateutil.NewRef(1),
		// ...
	}
	f.fieldLayers = nil // Zero out original
	return nTrie
}
```

The downside of this approach is that if it becomes necessary to access the older state's trie later on, the whole trie would have to be recreated, since it is now empty. This is especially costly for the validator registry, which is why that trie is always copied instead.

```go
if fTrie.FieldReference().Refs() > 1 {
	var newTrie *fieldtrie.FieldTrie
	// We choose to only copy the validator
	// trie as it is pretty expensive to regenerate.
	if index == types.Validators {
		newTrie = fTrie.CopyTrie()
	} else {
		newTrie = fTrie.TransferTrie()
	}
	fTrie.FieldReference().MinusRef()
	b.stateFieldLeaves[index] = newTrie
	fTrie = newTrie
}
```

## Hash Tree Root Computation

The `BeaconState` structure is not a protobuf object, so there are no SSZ-generated methods for its fields. For each state field, there exists a custom method that returns its root. One advantage of this approach is that each method can be implemented as efficiently as possible. For example, when computing the root of the validator registry, validators are hashed in parallel and vectorized sha256 computation is used to speed up the computation.

```go
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
	// Exit early if no validators are provided.
	if len(validators) == 0 {
		return [][32]byte{}, nil
	}
	wg := sync.WaitGroup{}
	n := runtime.GOMAXPROCS(0)
	rootsSize := len(validators) * validatorFieldRoots
	groupSize := len(validators) / n
	roots := make([][32]byte, rootsSize)
	wg.Add(n - 1)
	for j := 0; j < n-1; j++ {
		go hashValidatorHelper(validators, roots, j, groupSize, &wg)
	}
	for i := (n - 1) * groupSize; i < len(validators); i++ {
		fRoots, err := ValidatorFieldRoots(validators[i])
		if err != nil {
			return [][32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
		}
		for k, root := range fRoots {
			roots[i*validatorFieldRoots+k] = root
		}
	}
	wg.Wait()

	// A validator's tree can be represented with a depth of 3, since log2(8) = 3.
	// Using this property we can lay out all the individual fields of a
	// validator and hash them in a single level using our vectorized routine.
	for range validatorTreeDepth {
		// Overwrite input lists as we are hashing by level
		// and only need the highest level to proceed.
		roots = htr.VectorizedSha256(roots)
	}
	return roots, nil
}
```

Calculating the full state root is very expensive; therefore, it is done in a lazy fashion. Previously generated merkle layers are cached in the state and only merkle trie branches corresponding to dirty indices are regenerated before returning the final root.

```go
func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) {
	ctx, span := trace.StartSpan(ctx, "beaconState.HashTreeRoot")
	defer span.End()

	b.lock.Lock()
	defer b.lock.Unlock()

	// When len(b.merkleLayers) > 0, this function returns immediately
	if err := b.initializeMerkleLayers(ctx); err != nil {
		return [32]byte{}, err
	}
	// Dirty fields are tracked in the beacon state through b.dirtyFields
	if err := b.recomputeDirtyFields(ctx); err != nil {
		return [32]byte{}, err
	}
	return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
}
```

@@ -29,7 +29,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/sync/backfill/coverage:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
@@ -69,14 +68,11 @@ go_test(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/testing:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/blocks/testing:go_default_library",
|
||||
|
||||
@@ -5,13 +5,10 @@ import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -28,10 +25,6 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
|
||||
s.migrationLock.Lock()
|
||||
defer s.migrationLock.Unlock()
|
||||
|
||||
if features.Get().EnableStateDiff {
|
||||
return s.migrateToColdHdiff(ctx, fRoot)
|
||||
}
|
||||
|
||||
s.finalizedInfo.lock.RLock()
|
||||
oldFSlot := s.finalizedInfo.slot
|
||||
s.finalizedInfo.lock.RUnlock()
|
||||
@@ -97,8 +90,21 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s.beaconDB.HasState(ctx, aRoot) {
|
||||
s.migrateHotToCold(aRoot)
|
||||
// If you are migrating a state and its already part of the hot state cache saved to the db,
|
||||
// you can just remove it from the hot state cache as it becomes redundant.
|
||||
s.saveHotStateDB.lock.Lock()
|
||||
roots := s.saveHotStateDB.blockRootsOfSavedStates
|
||||
for i := range roots {
|
||||
if aRoot == roots[i] {
|
||||
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
|
||||
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
|
||||
// Break here is ok.
|
||||
break
|
||||
}
|
||||
}
|
||||
s.saveHotStateDB.lock.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -123,103 +129,3 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// migrateToColdHdiff saves the state-diffs for slots that are in the state diff tree after finalization
|
||||
func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
|
||||
s.finalizedInfo.lock.RLock()
|
||||
oldFSlot := s.finalizedInfo.slot
|
||||
s.finalizedInfo.lock.RUnlock()
|
||||
fSlot, err := s.beaconDB.SlotByBlockRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get slot by block root")
|
||||
}
|
||||
for slot := oldFSlot; slot < fSlot; slot++ {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
_, lvl, err := s.beaconDB.SlotInDiffTree(slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("could not determine if slot %d is in diff tree", slot)
|
||||
continue
|
||||
}
|
||||
if lvl == -1 {
|
||||
continue
|
||||
}
|
||||
// The state needs to be saved.
|
||||
// Try the epoch boundary cache first.
|
||||
cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("could not get epoch boundary state for slot %d", slot)
|
||||
cached = nil
|
||||
exists = false
|
||||
}
|
||||
var aRoot [32]byte
|
||||
var aState state.BeaconState
|
||||
if exists {
|
||||
aRoot = cached.root
|
||||
aState = cached.state
|
||||
} else {
|
||||
_, roots, err := s.beaconDB.HighestRootsBelowSlot(ctx, slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Given the block has been finalized, the db should not have more than one block in a given slot.
|
||||
// We should error out when this happens.
|
||||
if len(roots) != 1 {
|
||||
return errUnknownBlock
|
||||
}
|
||||
aRoot = roots[0]
|
||||
// Different than the legacy MigrateToCold, we need to always get the state even if
|
||||
// the state exists in DB as part of the hot state db, because we need to process slots
|
||||
// to the state diff tree slots.
|
||||
aState, err = s.StateByRoot(ctx, aRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if s.beaconDB.HasState(ctx, aRoot) {
|
||||
s.migrateHotToCold(aRoot)
|
||||
continue
|
||||
}
|
||||
// advance slots to the target slot
|
||||
if aState.Slot() < slot {
|
||||
aState, err = transition.ProcessSlots(ctx, aState, slot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not process slots to slot %d", slot)
|
||||
}
|
||||
}
|
||||
if err := s.beaconDB.SaveState(ctx, aState, aRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(
|
||||
logrus.Fields{
|
||||
"slot": aState.Slot(),
|
||||
"root": fmt.Sprintf("%#x", aRoot),
|
||||
}).Info("Saved state in DB")
|
||||
}
|
||||
// Update finalized info in memory.
|
||||
fInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
s.SaveFinalizedState(fSlot, fRoot, fInfo.state)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *State) migrateHotToCold(aRoot [32]byte) {
|
||||
// If you are migrating a state and its already part of the hot state cache saved to the db,
|
||||
// you can just remove it from the hot state cache as it becomes redundant.
|
||||
s.saveHotStateDB.lock.Lock()
|
||||
roots := s.saveHotStateDB.blockRootsOfSavedStates
|
||||
for i := range roots {
|
||||
if aRoot == roots[i] {
|
||||
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
|
||||
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
|
||||
// Break here is ok.
|
||||
break
|
||||
}
|
||||
}
|
||||
s.saveHotStateDB.lock.Unlock()
|
||||
}
|
||||
|
||||
@@ -4,11 +4,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -227,170 +224,3 @@ func TestMigrateToCold_ParallelCalls(t *testing.T) {
|
||||
assert.DeepEqual(t, [][32]byte{r7}, service.saveHotStateDB.blockRootsOfSavedStates, "Did not remove all saved hot state roots")
|
||||
require.LogsContain(t, hook, "Saved state in DB")
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Tests for migrateToColdHdiff (state diff migration)
|
||||
// =========================================================================
|
||||
|
||||
// setStateDiffExponents sets state diff exponents for testing.
|
||||
// Uses exponents [6, 5] which means:
|
||||
// - Level 0: Every 2^6 = 64 slots (full snapshot)
|
||||
// - Level 1: Every 2^5 = 32 slots (diff)
|
||||
func setStateDiffExponents() {
|
||||
globalFlags := flags.GlobalFlags{
|
||||
StateDiffExponents: []int{6, 5},
|
||||
}
|
||||
flags.Init(&globalFlags)
|
||||
}
|
||||
|
||||
// TestMigrateToColdHdiff_CanUpdateFinalizedInfo verifies that the migration
|
||||
// correctly updates finalized info when migrating to slots not in the diff tree.
|
||||
func TestMigrateToColdHdiff_CanUpdateFinalizedInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// Set exponents and create DB first (without EnableStateDiff flag).
|
||||
setStateDiffExponents()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
// Initialize the state diff cache via the method on *kv.Store (not in interface).
|
||||
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(0))
|
||||
// Now enable the feature flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
|
||||
defer resetCfg()
|
||||
service := New(beaconDB, doublylinkedtree.New())
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
// Put genesis state in epoch boundary cache so migrateToColdHdiff doesn't need to retrieve from DB.
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(gRoot, beaconState))
|
||||
|
||||
// Set initial finalized info at genesis.
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 0,
|
||||
root: gRoot,
|
||||
state: beaconState,
|
||||
}
|
||||
|
||||
// Create finalized block at slot 10 (not in diff tree, so no intermediate states saved).
|
||||
finalizedState := beaconState.Copy()
|
||||
require.NoError(t, finalizedState.SetSlot(10))
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
fRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b)
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, finalizedState))
|
||||
|
||||
require.NoError(t, service.MigrateToCold(ctx, fRoot))
|
||||
|
||||
// Verify finalized info is updated.
|
||||
assert.Equal(t, primitives.Slot(10), service.finalizedInfo.slot)
|
||||
assert.DeepEqual(t, fRoot, service.finalizedInfo.root)
|
||||
expectedHTR, err := finalizedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
actualHTR, err := service.finalizedInfo.state.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedHTR, actualHTR)
|
||||
}
|
||||
|
||||
// TestMigrateToColdHdiff_SkipsSlotsNotInDiffTree verifies that the migration
|
||||
// skips slots that are not in the diff tree.
|
||||
func TestMigrateToColdHdiff_SkipsSlotsNotInDiffTree(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := t.Context()
|
||||
// Set exponents and create DB first (without EnableStateDiff flag).
|
||||
setStateDiffExponents()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
// Initialize the state diff cache via the method on *kv.Store (not in interface).
|
||||
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(0))
|
||||
// Now enable the feature flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
|
||||
defer resetCfg()
|
||||
service := New(beaconDB, doublylinkedtree.New())
|
||||
|
||||
beaconState, pks := util.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
// Start from slot 1 to avoid slot 0 which is in the diff tree.
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 1,
|
||||
root: gRoot,
|
||||
state: beaconState,
|
||||
}
|
||||
|
||||
// Reset the log hook to ignore setup logs.
|
||||
hook.Reset()
|
||||
|
||||
// Create a block at slot 20 (NOT in diff tree with exponents [6,5]).
|
||||
b20, err := util.GenerateFullBlock(beaconState, pks, util.DefaultBlockGenConfig(), 20)
|
||||
require.NoError(t, err)
|
||||
r20, err := b20.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b20)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: 20, Root: r20[:]}))
|
||||
|
||||
// Put finalized state in cache.
|
||||
finalizedState := beaconState.Copy()
|
||||
require.NoError(t, finalizedState.SetSlot(20))
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(r20, finalizedState))
|
||||
|
||||
require.NoError(t, service.MigrateToCold(ctx, r20))
|
||||
|
||||
// Verify NO states were saved during migration (slots 1-19 are not in diff tree).
|
||||
assert.LogsDoNotContain(t, hook, "Saved state in DB")
|
||||
}
|
||||
|
||||
// TestMigrateToColdHdiff_NoOpWhenFinalizedSlotNotAdvanced verifies that
|
||||
// migration is a no-op when the finalized slot has not advanced.
|
||||
func TestMigrateToColdHdiff_NoOpWhenFinalizedSlotNotAdvanced(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// Set exponents and create DB first (without EnableStateDiff flag).
|
||||
setStateDiffExponents()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
// Initialize the state diff cache via the method on *kv.Store (not in interface).
|
||||
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(0))
|
||||
// Now enable the feature flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
|
||||
defer resetCfg()
|
||||
service := New(beaconDB, doublylinkedtree.New())
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
// Set finalized info already at slot 50.
|
||||
finalizedState := beaconState.Copy()
|
||||
require.NoError(t, finalizedState.SetSlot(50))
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 50,
|
||||
root: gRoot,
|
||||
state: finalizedState,
|
||||
}
|
||||
|
||||
// Create block at same slot 50.
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 50
|
||||
fRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b)
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, finalizedState))
|
||||
|
||||
// Migration should be a no-op (finalized slot not advancing).
|
||||
require.NoError(t, service.MigrateToCold(ctx, fRoot))
|
||||
}
|
||||
|
||||
@@ -161,17 +161,13 @@ func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error, 1)
|
||||
resChan := make(chan error)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
s.kzgChan <- verificationSet
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case s.kzgChan <- verificationSet:
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||
|
||||
@@ -3,7 +3,6 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -244,10 +243,8 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
var lastRoot [fieldparams.RootLength]byte
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
lastRoot = root
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
@@ -262,7 +259,6 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingRootCount := len(missingIndicesByRoot)
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
@@ -305,19 +301,11 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingRootCount": initialMissingRootCount,
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingRootCount": len(missingIndicesByRoot),
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
})
|
||||
|
||||
if initialMissingRootCount == 1 {
|
||||
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
|
||||
}
|
||||
|
||||
log.Debug("Requested direct data column sidecars from peers")
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
@@ -269,71 +268,6 @@ func TestKzgBatchVerifierFallback(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SecondsPerSlot = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier blocked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
|
||||
cancelledCtx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-service.kzgChan:
|
||||
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
|
||||
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
|
||||
if len(roSidecars) >= count {
|
||||
|
||||
@@ -204,13 +204,6 @@ var (
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnsRecoveredFromELAttempts = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "data_columns_recovered_from_el_attempts",
|
||||
Help: "Count the number of data columns recovery attempts from the execution layer.",
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnsRecoveredFromELTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "data_columns_recovered_from_el_total",
|
||||
@@ -249,13 +242,6 @@ var (
|
||||
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
|
||||
},
|
||||
)
|
||||
|
||||
dataColumnSidecarsObtainedViaELCount = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "data_column_obtained_via_el_count",
|
||||
Help: "Count the number of data column sidecars obtained via the execution layer.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
@@ -3,9 +3,9 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
@@ -21,23 +21,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const pendingAttsLimit = 32768
|
||||
|
||||
// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
|
||||
type aggregatorIndexFilter int
|
||||
|
||||
const (
|
||||
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
|
||||
ignoreAggregatorIndex aggregatorIndexFilter = iota
|
||||
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
|
||||
includeAggregatorIndex
|
||||
)
|
||||
var pendingAttsLimit = 32768
|
||||
|
||||
// This method processes pending attestations as a "known" block as arrived. With validations,
|
||||
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
|
||||
@@ -60,7 +50,16 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
attestations := s.blkRootToPendingAtts[bRoot]
|
||||
s.pendingAttsLock.RUnlock()
|
||||
|
||||
s.processAttestations(ctx, attestations)
|
||||
if len(attestations) > 0 {
|
||||
start := time.Now()
|
||||
s.processAttestations(ctx, attestations)
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
"duration": duration,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
randGen := rand.NewGenerator()
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
@@ -80,71 +79,26 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
// processAttestations processes a list of attestations.
|
||||
// It assumes (for logging purposes only) that all attestations pertain to the same block.
|
||||
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
|
||||
if len(attestations) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
firstAttestation := attestations[0]
|
||||
var blockRoot []byte
|
||||
switch v := firstAttestation.(type) {
|
||||
case ethpb.Att:
|
||||
blockRoot = v.GetData().BeaconBlockRoot
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping processing", v)
|
||||
return
|
||||
}
|
||||
|
||||
validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
|
||||
startAggregate := time.Now()
|
||||
atts := make([]ethpb.Att, 0, len(attestations))
|
||||
aggregateAttAndProofCount := 0
|
||||
for _, att := range attestations {
|
||||
switch v := att.(type) {
|
||||
case ethpb.Att:
|
||||
atts = append(atts, v)
|
||||
case ethpb.SignedAggregateAttAndProof:
|
||||
aggregateAttAndProofCount++
|
||||
// Avoid processing multiple aggregates only differing by aggregator index.
|
||||
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
|
||||
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
|
||||
}) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.processAggregate(ctx, v); err != nil {
|
||||
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
|
||||
continue
|
||||
}
|
||||
|
||||
validAggregates = append(validAggregates, v)
|
||||
s.processAggregate(ctx, v)
|
||||
default:
|
||||
log.Warnf("Unexpected attestation type %T, skipping", v)
|
||||
}
|
||||
}
|
||||
durationAggregateAttAndProof := time.Since(startAggregate)
|
||||
|
||||
startAtts := time.Now()
|
||||
for _, bucket := range bucketAttestationsByData(atts) {
|
||||
s.processAttestationBucket(ctx, bucket)
|
||||
}
|
||||
|
||||
durationAtts := time.Since(startAtts)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
"totalCount": len(attestations),
|
||||
"aggregateAttAndProofCount": aggregateAttAndProofCount,
|
||||
"uniqueAggregateAttAndProofCount": len(validAggregates),
|
||||
"attCount": len(atts),
|
||||
"durationTotal": durationAggregateAttAndProof + durationAtts,
|
||||
"durationAggregateAttAndProof": durationAggregateAttAndProof,
|
||||
"durationAtts": durationAtts,
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
// attestationBucket groups attestations with the same AttestationData for batch processing.
|
||||
@@ -349,20 +303,21 @@ func (s *Service) processVerifiedAttestation(
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
|
||||
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
|
||||
res, err := s.validateAggregatedAtt(ctx, aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Pending aggregated attestation failed validation")
|
||||
return errors.Wrap(err, "validate aggregated att")
|
||||
return
|
||||
}
|
||||
|
||||
if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
|
||||
return errors.New("Pending aggregated attestation failed validation")
|
||||
log.Debug("Pending aggregated attestation failed validation")
|
||||
return
|
||||
}
|
||||
|
||||
att := aggregate.AggregateAttestationAndProof().AggregateVal()
|
||||
if err := s.saveAttestation(att); err != nil {
|
||||
return errors.Wrap(err, "save attestation")
|
||||
log.WithError(err).Debug("Could not save aggregated attestation")
|
||||
return
|
||||
}
|
||||
|
||||
_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
|
||||
@@ -370,8 +325,6 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
|
||||
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast aggregated attestation")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This defines how pending aggregates are saved in the map. The key is the
|
||||
@@ -383,7 +336,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
|
||||
|
||||
s.savePending(root, agg, func(other any) bool {
|
||||
a, ok := other.(ethpb.SignedAggregateAttAndProof)
|
||||
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
|
||||
return ok && pendingAggregatesAreEqual(agg, a)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -438,19 +391,13 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
|
||||
}
|
||||
|
||||
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
|
||||
// The filter parameter controls whether aggregator index is considered in the equality check.
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
|
||||
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
|
||||
if a.Version() != b.Version() {
|
||||
return false
|
||||
}
|
||||
|
||||
if filter == includeAggregatorIndex {
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
}
|
||||
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
|
||||
return false
|
||||
}
|
||||
|
||||
aAtt := a.AggregateAttestationAndProof().AggregateVal()
|
||||
bAtt := b.AggregateAttestationAndProof().AggregateVal()
|
||||
if aAtt.GetData().Slot != bAtt.GetData().Slot {
|
||||
|
||||
@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
// Process block A (which exists and has no pending attestations)
|
||||
// This should skip processing attestations for A and request blocks B and C
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
|
||||
require.LogsContain(t, hook, "Requesting blocks by root")
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
}
|
||||
|
||||
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
|
||||
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different version", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProofElectra{Message: ðpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different slot", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different committee index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
t.Run("different aggregation bits", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -984,30 +984,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1000},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 1,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -43,13 +44,11 @@ func (s *Service) processPendingBlocksQueue() {
|
||||
if !s.chainIsStarted() {
|
||||
return
|
||||
}
|
||||
|
||||
locker.Lock()
|
||||
defer locker.Unlock()
|
||||
|
||||
if err := s.processPendingBlocks(s.ctx); err != nil {
|
||||
log.WithError(err).Debug("Could not process pending blocks")
|
||||
}
|
||||
locker.Unlock()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -74,10 +73,8 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
randGen := rand.NewGenerator()
|
||||
var parentRoots [][32]byte
|
||||
|
||||
blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
|
||||
|
||||
// Iterate through sorted slots.
|
||||
for i, slot := range sortedSlots {
|
||||
for _, slot := range sortedSlots {
|
||||
// Skip processing if slot is in the future.
|
||||
if slot > s.cfg.clock.CurrentSlot() {
|
||||
continue
|
||||
@@ -94,9 +91,6 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
|
||||
// Process each block in the queue.
|
||||
for _, b := range blocksInCache {
|
||||
start := time.Now()
|
||||
totalDuration := time.Duration(0)
|
||||
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -153,34 +147,19 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
}
|
||||
cancelFunction()
|
||||
|
||||
blkRoots = append(blkRoots, blkRoot)
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
|
||||
// Remove the processed block from the queue.
|
||||
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
duration := time.Since(start)
|
||||
totalDuration += duration
|
||||
log.WithFields(logrus.Fields{
|
||||
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", blkRoot),
|
||||
"duration": duration,
|
||||
"totalDuration": totalDuration,
|
||||
}).Debug("Processed pending block and cleared it in cache")
|
||||
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
|
||||
}
|
||||
|
||||
span.End()
|
||||
}
|
||||
|
||||
for _, blkRoot := range blkRoots {
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
}
|
||||
|
||||
return s.sendBatchRootRequest(ctx, parentRoots, randGen)
|
||||
}
|
||||
|
||||
@@ -400,19 +379,6 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
|
||||
req = roots[:maxReqBlock]
|
||||
}
|
||||
|
||||
if logrus.GetLevel() >= logrus.DebugLevel {
|
||||
rootsStr := make([]string, 0, len(roots))
|
||||
for _, req := range roots {
|
||||
rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"count": len(req),
|
||||
"roots": rootsStr,
|
||||
}).Debug("Requesting blocks by root")
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -472,6 +438,8 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
|
||||
roots = append(roots[:i], roots[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
|
||||
}
|
||||
return roots
|
||||
}
|
||||
|
||||
@@ -189,30 +189,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
|
||||
defer cancel()
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"type": source.Type(),
|
||||
})
|
||||
|
||||
var constructedSidecarCount uint64
|
||||
for iteration := uint64(0); ; /*no stop condition*/ iteration++ {
|
||||
log = log.WithField("iteration", iteration)
|
||||
|
||||
// Exit early if all sidecars to sample have been seen.
|
||||
if s.haveAllSidecarsBeenSeen(source.Slot(), source.ProposerIndex(), columnIndicesToSample) {
|
||||
if iteration > 0 && constructedSidecarCount == 0 {
|
||||
log.Debug("No data column sidecars constructed from the execution client")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if iteration == 0 {
|
||||
dataColumnsRecoveredFromELAttempts.Inc()
|
||||
}
|
||||
|
||||
// Try to reconstruct data column constructedSidecars from the execution client.
|
||||
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
|
||||
if err != nil {
|
||||
@@ -220,8 +202,8 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
}
|
||||
|
||||
// No sidecars are retrieved from the EL, retry later
|
||||
constructedSidecarCount = uint64(len(constructedSidecars))
|
||||
if constructedSidecarCount == 0 {
|
||||
sidecarCount := uint64(len(constructedSidecars))
|
||||
if sidecarCount == 0 {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
@@ -230,11 +212,9 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
continue
|
||||
}
|
||||
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
|
||||
// Boundary check.
|
||||
if constructedSidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
|
||||
if sidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, fieldparams.NumberOfColumns)
|
||||
}
|
||||
|
||||
unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, source.Slot(), source.ProposerIndex(), columnIndicesToSample, constructedSidecars)
|
||||
@@ -242,12 +222,19 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
if len(unseenIndices) > 0 {
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
|
||||
dataColumnSidecarsObtainedViaELCount.Observe(float64(len(unseenIndices)))
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"iteration": iteration,
|
||||
"type": source.Type(),
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -51,12 +51,14 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
// Decode the message, reject if it fails.
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to decode message")
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Reject messages that are not of the expected type.
|
||||
dcsc, ok := m.(*eth.DataColumnSidecar)
|
||||
if !ok {
|
||||
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
|
||||
@@ -54,13 +54,11 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
cfg.FuluForkEpoch = 6
|
||||
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
|
||||
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
|
||||
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
|
||||
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
|
||||
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
|
||||
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
|
||||
@@ -103,10 +101,7 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
for v := range version.All() {
|
||||
if v == version.Phase0 {
|
||||
continue
|
||||
}
|
||||
for v := 1; v < 6; v++ {
|
||||
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
@@ -185,13 +180,11 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
|
||||
cfg.CapellaForkEpoch = 3
|
||||
cfg.DenebForkEpoch = 4
|
||||
cfg.ElectraForkEpoch = 5
|
||||
cfg.FuluForkEpoch = 6
|
||||
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
|
||||
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
|
||||
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
|
||||
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
|
||||
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
|
||||
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
|
||||
@@ -234,10 +227,7 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
for v := range version.All() {
|
||||
if v == version.Phase0 {
|
||||
continue
|
||||
}
|
||||
for v := 1; v < 6; v++ {
|
||||
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
|
||||
@@ -361,7 +361,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
|
||||
}
|
||||
|
||||
if !dv.fc.HasNode(parentRoot) {
|
||||
return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
|
||||
return columnErrBuilder(errSidecarParentNotSeen)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
## Fixed
|
||||
|
||||
- Fix missing return after version header check in SubmitAttesterSlashingsV2.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Fix the missing fork version object mapping for Fulu in light client p2p.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Fix deadlock in data column gossip KZG batch verification when a caller times out preventing result delivery.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed

- The `/eth/v2/beacon/pool/attestations` and `/eth/v1/beacon/pool/sync_committees` endpoints now return a 503 error while the node is still syncing; the REST API also broadcasts immediately now, matching the gRPC behavior.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed

- Fixed a state replay issue in the REST API caused by the attester and sync committee duties endpoints.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed

- The e2e sync committee evaluator now skips the first slot after startup. The fork epoch is already skipped for these checks; this additional skip applies only at startup, because Altair is always active from epoch 0 and validators need time to warm up.
|
||||
@@ -1,2 +0,0 @@
|
||||
#### Fixed
|
||||
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- Pending aggregates: when multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them.
|
||||
@@ -1,2 +0,0 @@
|
||||
### Changed
|
||||
- `validateDataColumn`: Remove error logs.
|
||||
@@ -1,7 +0,0 @@
|
||||
### Added
|
||||
- prometheus histogram `cells_and_proofs_from_structured_computation_milliseconds` to track computation time for cells and proofs from structured blobs.
|
||||
- prometheus histogram `get_blobs_v2_latency_milliseconds` to track RPC latency for `getBlobsV2` calls to the execution layer.
|
||||
|
||||
### Changed
|
||||
- Run `ComputeCellsAndProofsFromFlat` in parallel to improve performance when computing cells and proofs.
|
||||
- Run `ComputeCellsAndProofsFromStructured` in parallel to improve performance when computing cells and proofs.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- Migrate to cold with the hdiff feature.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Do not process slots and copy states for next epoch proposers after Fulu
|
||||
@@ -1,2 +0,0 @@
|
||||
### Ignored
- Do not send FCU on block batches.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- Do not check block signature on state transition.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- Do not error when committee has been computed correctly but updating the cache failed.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed
|
||||
|
||||
- Extend `httperror` analyzer to more functions.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Added
|
||||
|
||||
- Static analyzer that ensures each `httputil.HandleError` call is followed by a `return` statement.
|
||||
3
changelog/radek_state-design-doc.md
Normal file
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Add state design doc.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Ignored
|
||||
|
||||
- Use `WriteStateFetchError` in API handlers whenever possible.
|
||||
@@ -1,3 +0,0 @@
|
||||
### Fixed
|
||||
|
||||
- avoid panic when fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175)
|
||||
@@ -1,3 +0,0 @@
|
||||
### Changed

- Performance improvement in ProcessConsolidationRequests: use the faster HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw, since there is no need to calculate the full total pending balance.
|
||||
@@ -156,7 +156,6 @@ var appFlags = []cli.Flag{
|
||||
dasFlags.BackfillOldestSlot,
|
||||
dasFlags.BlobRetentionEpochFlag,
|
||||
flags.BatchVerifierLimit,
|
||||
flags.StateDiffExponents,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -74,7 +74,6 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.RPCHost,
|
||||
flags.RPCPort,
|
||||
flags.BatchVerifierLimit,
|
||||
flags.StateDiffExponents,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -270,7 +270,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
|
||||
DisableQUIC,
|
||||
EnableDiscoveryReboot,
|
||||
enableExperimentalAttestationPool,
|
||||
EnableStateDiff,
|
||||
forceHeadFlag,
|
||||
blacklistRoots,
|
||||
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
|
||||
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"basis_points.go",
|
||||
"builder_index.go",
|
||||
"committee_bits_mainnet.go",
|
||||
"committee_bits_minimal.go", # keep
|
||||
"committee_index.go",
|
||||
@@ -32,7 +31,6 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"builder_index_test.go",
|
||||
"committee_index_test.go",
|
||||
"domain_test.go",
|
||||
"epoch_test.go",
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package primitives
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
fssz "github.com/prysmaticlabs/fastssz"
|
||||
)
|
||||
|
||||
var _ fssz.HashRoot = (BuilderIndex)(0)
|
||||
var _ fssz.Marshaler = (*BuilderIndex)(nil)
|
||||
var _ fssz.Unmarshaler = (*BuilderIndex)(nil)
|
||||
|
||||
// BuilderIndex is an index into the builder registry.
|
||||
type BuilderIndex uint64
|
||||
|
||||
// HashTreeRoot returns the SSZ hash tree root of the index.
|
||||
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
|
||||
return fssz.HashWithDefaultHasher(b)
|
||||
}
|
||||
|
||||
// HashTreeRootWith appends the SSZ uint64 representation of the index to the given hasher.
|
||||
func (b BuilderIndex) HashTreeRootWith(hh *fssz.Hasher) error {
|
||||
hh.PutUint64(uint64(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalSSZ decodes the SSZ-encoded uint64 index from buf.
|
||||
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
|
||||
if len(buf) != b.SizeSSZ() {
|
||||
return fmt.Errorf("expected buffer of length %d received %d", b.SizeSSZ(), len(buf))
|
||||
}
|
||||
*b = BuilderIndex(fssz.UnmarshallUint64(buf))
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalSSZTo appends the SSZ-encoded index to dst and returns the extended buffer.
|
||||
func (b *BuilderIndex) MarshalSSZTo(dst []byte) ([]byte, error) {
|
||||
marshalled, err := b.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(dst, marshalled...), nil
|
||||
}
|
||||
|
||||
// MarshalSSZ encodes the index as an SSZ uint64.
|
||||
func (b *BuilderIndex) MarshalSSZ() ([]byte, error) {
|
||||
marshalled := fssz.MarshalUint64([]byte{}, uint64(*b))
|
||||
return marshalled, nil
|
||||
}
|
||||
|
||||
// SizeSSZ returns the size of the SSZ-encoded index in bytes.
|
||||
func (b *BuilderIndex) SizeSSZ() int {
|
||||
return 8
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
package primitives_test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"slices"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestBuilderIndex_SSZRoundTripAndHashRoot(t *testing.T) {
|
||||
cases := []uint64{
|
||||
0,
|
||||
1,
|
||||
42,
|
||||
(1 << 32) - 1,
|
||||
1 << 32,
|
||||
^uint64(0),
|
||||
}
|
||||
|
||||
for _, v := range cases {
|
||||
t.Run("v="+u64name(v), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
val := primitives.BuilderIndex(v)
|
||||
require.Equal(t, 8, (&val).SizeSSZ())
|
||||
|
||||
enc, err := (&val).MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 8, len(enc))
|
||||
|
||||
wantEnc := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(wantEnc, v)
|
||||
require.DeepEqual(t, wantEnc, enc)
|
||||
|
||||
dstPrefix := []byte("prefix:")
|
||||
dst, err := (&val).MarshalSSZTo(slices.Clone(dstPrefix))
|
||||
require.NoError(t, err)
|
||||
wantDst := append(dstPrefix, wantEnc...)
|
||||
require.DeepEqual(t, wantDst, dst)
|
||||
|
||||
var decoded primitives.BuilderIndex
|
||||
require.NoError(t, (&decoded).UnmarshalSSZ(enc))
|
||||
require.Equal(t, val, decoded)
|
||||
|
||||
root, err := val.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
var wantRoot [32]byte
|
||||
binary.LittleEndian.PutUint64(wantRoot[:8], v)
|
||||
require.Equal(t, wantRoot, root)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilderIndex_UnmarshalSSZRejectsWrongSize(t *testing.T) {
|
||||
for _, size := range []int{7, 9} {
|
||||
t.Run("size="+strconv.Itoa(size), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var v primitives.BuilderIndex
|
||||
err := (&v).UnmarshalSSZ(make([]byte, size))
|
||||
require.ErrorContains(t, "expected buffer of length 8", err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func u64name(v uint64) string {
|
||||
switch v {
|
||||
case 0:
|
||||
return "0"
|
||||
case 1:
|
||||
return "1"
|
||||
case 42:
|
||||
return "42"
|
||||
case (1 << 32) - 1:
|
||||
return "2^32-1"
|
||||
case 1 << 32:
|
||||
return "2^32"
|
||||
case ^uint64(0):
|
||||
return "max"
|
||||
default:
|
||||
return "custom"
|
||||
}
|
||||
}
|
||||
@@ -43,7 +43,6 @@ func WriteJson(w http.ResponseWriter, v any) {
|
||||
func WriteSsz(w http.ResponseWriter, respSsz []byte) {
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
|
||||
w.Header().Set("Content-Type", api.OctetStreamMediaType)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
|
||||
log.WithError(err).Error("Could not write response message")
|
||||
}
|
||||
|
||||
@@ -95,7 +95,6 @@ go_test(
|
||||
"endtoend_setup_test.go",
|
||||
"endtoend_test.go",
|
||||
"minimal_e2e_test.go",
|
||||
"minimal_hdiff_e2e_test.go",
|
||||
"minimal_slashing_e2e_test.go",
|
||||
"slasher_simulator_e2e_test.go",
|
||||
],
|
||||
|
||||
@@ -262,11 +262,6 @@ func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.Clie
|
||||
// Skip fork slot.
|
||||
continue
|
||||
}
|
||||
// Skip slot 1 at genesis - validators need time to ramp up after chain start.
|
||||
// This is a startup timing issue, not a fork transition issue.
|
||||
if b.Block().Slot() == 1 {
|
||||
continue
|
||||
}
|
||||
expectedParticipation := expectedSyncParticipation
|
||||
switch slots.ToEpoch(b.Block().Slot()) {
|
||||
case params.BeaconConfig().AltairForkEpoch:
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
package endtoend
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
|
||||
)
|
||||
|
||||
func TestEndToEnd_MinimalConfig_WithStateDiff(t *testing.T) {
|
||||
r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()),
|
||||
types.WithStateDiff(),
|
||||
)
|
||||
r.run()
|
||||
}
|
||||
@@ -67,15 +67,6 @@ func WithSSZOnly() E2EConfigOpt {
|
||||
}
|
||||
}
|
||||
|
||||
func WithStateDiff() E2EConfigOpt {
|
||||
return func(cfg *E2EConfig) {
|
||||
cfg.BeaconFlags = append(cfg.BeaconFlags,
|
||||
"--enable-state-diff",
|
||||
"--state-diff-exponents=6,5", // Small exponents for quick testing
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// E2EConfig defines the struct for all configurations needed for E2E testing.
|
||||
type E2EConfig struct {
|
||||
TestCheckpointSync bool
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["analyzer.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httpwriter",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@org_golang_x_tools//go/analysis:go_default_library",
|
||||
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
|
||||
"@org_golang_x_tools//go/ast/inspector:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,165 +0,0 @@
package httpwriter

import (
	"go/ast"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/passes/inspect"
	"golang.org/x/tools/go/ast/inspector"
)

var Analyzer = &analysis.Analyzer{
	Name: "httpwriter",
	Doc:  "Ensures that httputil functions which make use of the writer are immediately followed by a return statement.",
	Requires: []*analysis.Analyzer{
		inspect.Analyzer,
	},
}

func Run(pass *analysis.Pass) (any, error) {
	ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

	ins.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node) {
		fn := n.(*ast.FuncDecl)
		if fn.Body == nil {
			return
		}
		// top-level: there is no next statement after the function body
		checkBlock(pass, fn, fn.Body, nil)
	})

	return nil, nil
}

func init() {
	Analyzer.Run = Run
}

// checkBlock inspects all statements inside block.
// nextAfter is the statement that comes immediately *after* the statement that contains this block,
// at the enclosing lexical level (or nil if none).
// Example: for `if ... { ... } ; return`, when checking the if's body the nextAfter will be the top-level return stmt.
func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nextAfter ast.Stmt) {
	stmts := block.List
	for i, stmt := range stmts {
		// compute the next statement after this statement at the same lexical (ancestor) level:
		// if there is a sibling, use it; otherwise propagate the nextAfter we were given.
		var nextForThisStmt ast.Stmt
		if i+1 < len(stmts) {
			nextForThisStmt = stmts[i+1]
		} else {
			nextForThisStmt = nextAfter
		}

		// Recurse into nested blocks BEFORE checking this stmt's own expr,
		// but we must pass nextForThisStmt to nested blocks so nested HandleError
		// will see the correct "next statement after this statement".
		switch s := stmt.(type) {
		case *ast.IfStmt:
			// pass what's next after the whole if-statement down into its bodies
			if s.Init != nil {
				// init is a statement (rare), treat it as contained in s; it should use next being the if's body first stmt,
				// but for our purposes we don't need to inspect s.Init specially beyond nested calls.
				// We'll just check it with nextForThisStmt as well.
				checkBlock(pass, fn, &ast.BlockStmt{List: []ast.Stmt{s.Init}}, nextForThisStmt)
			}
			if s.Body != nil {
				checkBlock(pass, fn, s.Body, nextForThisStmt)
			}
			if s.Else != nil {
				switch els := s.Else.(type) {
				case *ast.BlockStmt:
					checkBlock(pass, fn, els, nextForThisStmt)
				case *ast.IfStmt:
					// else-if: its body will receive the same nextForThisStmt
					checkBlock(pass, fn, els.Body, nextForThisStmt)
				}
			}

		case *ast.ForStmt:
			if s.Init != nil {
				checkBlock(pass, fn, &ast.BlockStmt{List: []ast.Stmt{s.Init}}, nextForThisStmt)
			}
			if s.Body != nil {
				checkBlock(pass, fn, s.Body, nextForThisStmt)
			}
			if s.Post != nil {
				checkBlock(pass, fn, &ast.BlockStmt{List: []ast.Stmt{s.Post}}, nextForThisStmt)
			}

		case *ast.RangeStmt:
			if s.Body != nil {
				checkBlock(pass, fn, s.Body, nextForThisStmt)
			}

		case *ast.BlockStmt:
			// nested block (e.g. anonymous block) — propagate nextForThisStmt
			checkBlock(pass, fn, s, nextForThisStmt)
		}

		// Now check the current statement itself: is it (or does it contain) a direct call to httputil.HandleError?
		// We only consider ExprStmt that are direct CallExpr to httputil.HandleError.
		call, name := findHandleErrorCall(stmt)
		if call == nil {
			continue
		}

		// Determine the actual "next statement after this call" in lexical function order:
		// - If there is a sibling in the same block after this stmt, that's next.
		// - Otherwise, next is nextForThisStmt (propagated from ancestor).
		var nextStmt ast.Stmt
		if i+1 < len(stmts) {
			nextStmt = stmts[i+1]
		} else {
			nextStmt = nextAfter
		}

		// If there is a next statement and it's a return -> OK
		if nextStmt != nil {
			if _, ok := nextStmt.(*ast.ReturnStmt); ok {
				// immediately followed (in lexical order) by a return at some nesting level -> OK
				continue
			}
			// otherwise it's not a return (even if it's an if/for etc) -> violation
			pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
			continue
		}

		// If nextStmt == nil, this call is lexically the last statement in the function.
		// That is allowed only if the function has no result values.
		if fn.Type.Results == nil || len(fn.Type.Results.List) == 0 {
			// void function: allowed
			continue
		}

		// Non-void function and it's the last statement → violation
		pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
	}
}

// findHandleErrorCall returns the call expression if stmt is a direct call to httputil.HandleError(...),
// otherwise nil. We only match direct ExprStmt -> CallExpr -> SelectorExpr where selector is httputil.HandleError.
func findHandleErrorCall(stmt ast.Stmt) (*ast.CallExpr, string) {
	es, ok := stmt.(*ast.ExprStmt)
	if !ok {
		return nil, ""
	}
	call, ok := es.X.(*ast.CallExpr)
	if !ok {
		return nil, ""
	}
	sel, ok := call.Fun.(*ast.SelectorExpr)
	if !ok {
		return nil, ""
	}
	pkgIdent, ok := sel.X.(*ast.Ident)
	if !ok {
		return nil, ""
	}
	selectorName := sel.Sel.Name
	if pkgIdent.Name == "httputil" &&
		(selectorName == "HandleError" || selectorName == "WriteError" || selectorName == "WriteJson" || selectorName == "WriteSSZ") {
		return call, selectorName
	}
	return nil, ""
}
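
To clarify the rule this (now removed) analyzer enforced, a hedged example of code it would accept versus flag. The handler names and the httputil import path are illustrative assumptions; only the requirement that a writer-using httputil call be immediately followed by a return comes from the analyzer itself.

package example

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v7/network/httputil" // import path assumed for illustration
)

// okHandler passes the check: the error write is immediately followed by a
// return, so nothing else can touch the ResponseWriter afterwards.
func okHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		httputil.HandleError(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	httputil.WriteJson(w, map[string]string{"status": "ok"})
}

// flaggedHandler is reported: HandleError is not followed by a return, so the
// handler falls through and writes a second, conflicting response.
func flaggedHandler(w http.ResponseWriter, _ *http.Request) {
	httputil.HandleError(w, "bad request", http.StatusBadRequest)
	httputil.WriteJson(w, map[string]string{"status": "ok"})
}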