Mirror of https://github.com/OffchainLabs/prysm.git

Compare commits: remove-dir...revert-161 (16 commits)
| SHA1 |
|---|
| ffe3cdfead |
| d4ea8fafd6 |
| 07d1d6bdf9 |
| f938da99d9 |
| 9deec69cc7 |
| 2767f08f4d |
| d46c620783 |
| dd05e44ef3 |
| 9da36a5de6 |
| 7950a24926 |
| ea51253be9 |
| 2ac30f5ce6 |
| 7418c00ad6 |
| 66342655fd |
| 18eca953c1 |
| 8191bb5711 |
@@ -193,7 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
if e == stateEpoch {
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {

@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch {
if e > stateEpoch && !fuluAndNextEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {
@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.

@@ -30,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}

@@ -43,7 +45,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
@@ -170,12 +170,13 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))

@@ -197,12 +198,13 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

@@ -227,6 +229,7 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)

@@ -235,8 +238,9 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {

service.head = &head{
root: [32]byte{'T'},
state: s,
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}

@@ -263,6 +267,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)

@@ -270,7 +275,8 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'T'},
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}

@@ -287,12 +293,13 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -295,14 +295,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
@@ -820,24 +820,6 @@ func TestOnBlock_NilBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}

func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}

state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))

latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)

verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return nil
}

// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex primitives.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}

// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureBatch(
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if uint64(curEpoch) < e {
continue
}
bal, err := st.PendingBalanceToWithdraw(srcIdx)
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if bal > 0 {
if hasBal {
continue
}
@@ -5,10 +5,20 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
var (
dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
)

cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "cells_and_proofs_from_structured_computation_milliseconds",
Help: "Captures the time taken to compute cells and proofs from structured computation.",
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
},
)
)
@@ -3,6 +3,7 @@ package peerdas
import (
"sort"
"sync"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -296,32 +297,42 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return nil, nil, ErrBlobsCellsProofsMismatch
}

cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, blobCount)
proofsPerBlob := make([][]kzg.Proof, blobCount)

for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

proofs = append(proofs, kzgProof)
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, proofs)
proofs := make([]kzg.Proof, 0, numberOfColumns)
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = proofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
@@ -329,40 +340,55 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
}()

var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))

for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
}

var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

kzgProofs = append(kzgProofs, kzgProof)
}
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return errors.New("wrong KZG proof size - should never happen")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, kzgProofs)
var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs = append(kzgProofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}

sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {

@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(

// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)
set.Join(rSet).Join(aSet)

if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()
@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
}

func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl

// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)

if oldestSlotFlagPtr != nil {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).
@@ -128,6 +128,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))

currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }

@@ -141,6 +144,7 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",

@@ -174,13 +178,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
oldestSlotFlagPtr: &denebSlot,
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",

@@ -210,6 +214,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot

@@ -232,16 +239,27 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)

// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())

// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)
@@ -515,6 +515,11 @@ func (dcs *DataColumnStorage) Clear() error {

// prune clean the cache, the filesystem and mutexes.
func (dcs *DataColumnStorage) prune() {
startTime := time.Now()
defer func() {
dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
}()

highestStoredEpoch := dcs.cache.HighestEpoch()

// Check if we need to prune.

@@ -622,6 +627,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
// Create the SSZ encoded data column sidecars.
var sszEncodedDataColumnSidecars []byte

// Initialize the count of the saved SSZ encoded data column sidecar.
storedCount := uint8(0)

for {
dataColumnSidecars := pullChan(inputDataColumnSidecars)
if len(dataColumnSidecars) == 0 {

@@ -668,6 +676,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errors.Wrap(err, "set index")
}

// Increment the count of the saved SSZ encoded data column sidecar.
storedCount++

// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}

@@ -692,9 +703,12 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}

@@ -808,10 +822,14 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}

dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}
@@ -36,16 +36,15 @@ var (
})

// Data columns
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_save_latency",
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
Buckets: dataColumnBuckets,
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
})
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_get_latency",
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
Buckets: dataColumnBuckets,
Buckets: []float64{3, 5, 7, 9, 11, 13},
})
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_pruned",

@@ -59,4 +58,16 @@ var (
Name: "data_column_disk_count",
Help: "Approximate number of data columns in storage",
})
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_file_sync_latency",
Help: "Latency of sync operations when saving data columns in milliseconds",
})
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_batch_store_count",
Help: "Number of data columns stored in a batch",
})
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
)
@@ -532,12 +532,19 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()

start := time.Now()

if !s.capabilityCache.has(GetBlobsV2) {
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
}

result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)

if len(result) != 0 {
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
}

return result, handleRPCError(err)
}
@@ -27,6 +27,13 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV2Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_blobs_v2_latency_milliseconds",
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",
@@ -204,6 +204,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
}

// Reset our light client finality update map.

@@ -223,5 +226,8 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
}
}
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"

@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
@@ -130,6 +130,10 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

versionHeader := r.Header.Get(api.VersionHeader)
if versionHeader == "" {
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)

@@ -238,22 +242,14 @@ func (s *Server) handleAttestationsElectra(
},
})

targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
att := singleAtt.ToAttestationElectra(committee)

wantedEpoch := slots.ToEpoch(att.Data.Slot)
// Broadcast first using CommitteeId directly (fast path)
// This matches gRPC behavior and avoids blocking on state fetching
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get head validator indices")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
Index: i,

@@ -264,17 +260,35 @@ func (s *Server) handleAttestationsElectra(
}
continue
}
}

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
// Save to pool after broadcast (slow path - requires state fetching)
// Run in goroutine to avoid blocking the HTTP response
go func() {
for _, singleAtt := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
if err != nil {
log.WithError(err).Error("Could not get target state for attestation")
continue
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
log.WithError(err).Error("Could not get committee for attestation")
continue
}
att := singleAtt.ToAttestationElectra(committee)

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}
}
}
}()

if len(failedBroadcasts) > 0 {
log.WithFields(logrus.Fields{

@@ -470,6 +484,10 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

var req structs.SubmitSyncCommitteeSignaturesRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {
@@ -26,6 +26,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

@@ -622,6 +623,8 @@ func TestSubmitAttestationsV2(t *testing.T) {
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
OperationNotifier: &blockchainmock.MockOperationNotifier{},
AttestationStateFetcher: chainService,
}

@@ -654,6 +657,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {

@@ -673,6 +677,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("phase0 att post electra", func(t *testing.T) {

@@ -793,6 +798,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {

@@ -812,6 +818,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("no body", func(t *testing.T) {

@@ -861,6 +868,27 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
})
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestationsV2(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListVoluntaryExits(t *testing.T) {

@@ -1057,14 +1085,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {

t.Run("single", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
HeadFetcher: chainService,
},
}

@@ -1089,14 +1122,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
HeadFetcher: chainService,
},
}

@@ -1120,13 +1158,18 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("invalid", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
},
HeadFetcher: chainService,
},
}

@@ -1149,7 +1192,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
})
t.Run("empty", func(t *testing.T) {
s := &Server{}
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

var body bytes.Buffer
_, err := body.WriteString("[]")

@@ -1166,7 +1215,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("no body", func(t *testing.T) {
s := &Server{}
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()

@@ -1179,6 +1234,26 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleSyncCommitteeMsg)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitSyncCommitteeSignatures(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListBLSToExecutionChanges(t *testing.T) {
@@ -40,6 +40,7 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {
@@ -52,24 +52,27 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
defer span.End()

if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}

if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
go func() {
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
}
}()

return resp, nil
}

@@ -82,6 +85,10 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()

if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
if err != nil {
return nil, err

@@ -98,18 +105,17 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp

singleAttCopy := singleAtt.Copy()
att := singleAttCopy.ToAttestationElectra(committee)
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
go func() {
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
}
}()

return resp, nil
}
@@ -38,6 +38,7 @@ func TestProposeAttestation(t *testing.T) {
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
TimeFetcher: chainService,
AttestationStateFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
head := util.NewBeaconBlock()
head.Block.Slot = 999

@@ -141,6 +142,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

req := util.HydrateAttestation(&ethpb.Attestation{})

@@ -149,6 +151,37 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
assert.ErrorContains(t, wanted, err)
}

func TestProposeAttestation_Syncing(t *testing.T) {
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

req := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attesterServer.ProposeAttestation(t.Context(), req)
assert.ErrorContains(t, "Syncing to latest head", err)
s, ok := status.FromError(err)
require.Equal(t, true, ok)
assert.Equal(t, codes.Unavailable, s.Code())
}

func TestProposeAttestationElectra_Syncing(t *testing.T) {
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

req := &ethpb.SingleAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
}
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
assert.ErrorContains(t, "Syncing to latest head", err)
s, ok := status.FromError(err)
require.Equal(t, true, ok)
assert.Equal(t, codes.Unavailable, s.Code())
}

func TestGetAttestationData_OK(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
@@ -3,6 +3,7 @@ package sync
import (
"bytes"
"context"
"fmt"
"maps"
"slices"
"sync"

@@ -243,8 +244,10 @@ func requestDirectSidecarsFromPeers(
}

// Compute missing indices by root, excluding those already in storage.
var lastRoot [fieldparams.RootLength]byte
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
for root := range incompleteRoots {
lastRoot = root
storedIndices := storedIndicesByRoot[root]

missingIndices := make(map[uint64]bool, len(requestedIndices))

@@ -259,6 +262,7 @@ func requestDirectSidecarsFromPeers(
}
}

initialMissingRootCount := len(missingIndicesByRoot)
initialMissingCount := computeTotalCount(missingIndicesByRoot)

indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)

@@ -301,11 +305,19 @@ func requestDirectSidecarsFromPeers(
}
}

log.WithFields(logrus.Fields{
"duration": time.Since(start),
"initialMissingCount": initialMissingCount,
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
}).Debug("Requested direct data column sidecars from peers")
log := log.WithFields(logrus.Fields{
"duration": time.Since(start),
"initialMissingRootCount": initialMissingRootCount,
"initialMissingCount": initialMissingCount,
"finalMissingRootCount": len(missingIndicesByRoot),
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
})

if initialMissingRootCount == 1 {
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
}

log.Debug("Requested direct data column sidecars from peers")

return verifiedColumnsByRoot, nil
}
@@ -204,6 +204,13 @@ var (
},
)

dataColumnsRecoveredFromELAttempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "data_columns_recovered_from_el_attempts",
Help: "Count the number of data columns recovery attempts from the execution layer.",
},
)

dataColumnsRecoveredFromELTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "data_columns_recovered_from_el_total",

@@ -242,6 +249,13 @@ var (
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
},
)

dataColumnSidecarsObtainedViaELCount = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "data_column_obtained_via_el_count",
Help: "Count the number of data column sidecars obtained via the execution layer.",
},
)
)

func (s *Service) updateMetrics() {
@@ -3,9 +3,9 @@ package sync
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"slices"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
@@ -21,13 +21,23 @@ import (
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

var pendingAttsLimit = 32768
const pendingAttsLimit = 32768

// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
type aggregatorIndexFilter int

const (
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
ignoreAggregatorIndex aggregatorIndexFilter = iota
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
includeAggregatorIndex
)

// This method processes pending attestations as a "known" block as arrived. With validations,
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
@@ -50,16 +60,7 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()

if len(attestations) > 0 {
start := time.Now()
s.processAttestations(ctx, attestations)
duration := time.Since(start)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
"duration": duration,
}).Debug("Verified and saved pending attestations to pool")
}
s.processAttestations(ctx, attestations)

randGen := rand.NewGenerator()
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
@@ -79,26 +80,71 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}

// processAttestations processes a list of attestations.
// It assumes (for logging purposes only) that all attestations pertain to the same block.
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
if len(attestations) == 0 {
return
}

firstAttestation := attestations[0]
var blockRoot []byte
switch v := firstAttestation.(type) {
case ethpb.Att:
blockRoot = v.GetData().BeaconBlockRoot
case ethpb.SignedAggregateAttAndProof:
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
default:
log.Warnf("Unexpected attestation type %T, skipping processing", v)
return
}

validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
startAggregate := time.Now()
atts := make([]ethpb.Att, 0, len(attestations))
aggregateAttAndProofCount := 0
for _, att := range attestations {
switch v := att.(type) {
case ethpb.Att:
atts = append(atts, v)
case ethpb.SignedAggregateAttAndProof:
s.processAggregate(ctx, v)
aggregateAttAndProofCount++
// Avoid processing multiple aggregates only differing by aggregator index.
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
}) {
continue
}

if err := s.processAggregate(ctx, v); err != nil {
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
continue
}

validAggregates = append(validAggregates, v)
default:
log.Warnf("Unexpected attestation type %T, skipping", v)
}
}
durationAggregateAttAndProof := time.Since(startAggregate)

startAtts := time.Now()
for _, bucket := range bucketAttestationsByData(atts) {
s.processAttestationBucket(ctx, bucket)
}

durationAtts := time.Since(startAtts)

log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", blockRoot),
"totalCount": len(attestations),
"aggregateAttAndProofCount": aggregateAttAndProofCount,
"uniqueAggregateAttAndProofCount": len(validAggregates),
"attCount": len(atts),
"durationTotal": durationAggregateAttAndProof + durationAtts,
"durationAggregateAttAndProof": durationAggregateAttAndProof,
"durationAtts": durationAtts,
}).Debug("Verified and saved pending attestations to pool")
}

// attestationBucket groups attestations with the same AttestationData for batch processing.
@@ -303,21 +349,20 @@ func (s *Service) processVerifiedAttestation(
})
}

func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
res, err := s.validateAggregatedAtt(ctx, aggregate)
if err != nil {
log.WithError(err).Debug("Pending aggregated attestation failed validation")
return
return errors.Wrap(err, "validate aggregated att")
}

if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
log.Debug("Pending aggregated attestation failed validation")
return
return errors.New("Pending aggregated attestation failed validation")
}

att := aggregate.AggregateAttestationAndProof().AggregateVal()
if err := s.saveAttestation(att); err != nil {
log.WithError(err).Debug("Could not save aggregated attestation")
return
return errors.Wrap(err, "save attestation")
}

_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
@@ -325,6 +370,8 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast aggregated attestation")
}

return nil
}

// This defines how pending aggregates are saved in the map. The key is the
@@ -336,7 +383,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {

s.savePending(root, agg, func(other any) bool {
a, ok := other.(ethpb.SignedAggregateAttAndProof)
return ok && pendingAggregatesAreEqual(agg, a)
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
})
}

@@ -391,13 +438,19 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
}

func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
// The filter parameter controls whether aggregator index is considered in the equality check.
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
if a.Version() != b.Version() {
return false
}
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false

if filter == includeAggregatorIndex {
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false
}
}

aAtt := a.AggregateAttestationAndProof().AggregateVal()
bAtt := b.AggregateAttestationAndProof().AggregateVal()
if aAtt.GetData().Slot != bAtt.GetData().Slot {

@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
// Process block A (which exists and has no pending attestations)
// This should skip processing attestations for A and request blocks B and C
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
require.LogsContain(t, hook, "Requesting block by root")
require.LogsContain(t, hook, "Requesting blocks by root")
}

func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different version", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProofElectra{Message: &ethpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different slot", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different committee index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregation bits", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -984,7 +984,30 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1000},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 1,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
})
}

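Editor's note: the loop above skips aggregates that differ only by the aggregator index before running the expensive validation. A minimal standalone sketch of that dedup step follows; the aggregate struct and field names are hypothetical stand-ins for illustration, and only the slices.ContainsFunc pattern mirrors the diff.

package example

import "slices"

// aggregate is a hypothetical stand-in for a signed aggregate attestation.
type aggregate struct {
    AggregatorIndex uint64
    Slot            uint64
    CommitteeIndex  uint64
}

// equalIgnoringAggregator treats two aggregates as equal when everything except the
// aggregator index matches, mirroring the ignoreAggregatorIndex mode above.
func equalIgnoringAggregator(a, b aggregate) bool {
    return a.Slot == b.Slot && a.CommitteeIndex == b.CommitteeIndex
}

// dedupAggregates keeps only the first aggregate of each equivalence class, so
// validation runs once per distinct aggregate rather than once per aggregator.
func dedupAggregates(in []aggregate) []aggregate {
    out := make([]aggregate, 0, len(in))
    for _, agg := range in {
        if slices.ContainsFunc(out, func(other aggregate) bool {
            return equalIgnoringAggregator(agg, other)
        }) {
            continue
        }
        out = append(out, agg)
    }
    return out
}
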
@@ -2,7 +2,6 @@ package sync

import (
"context"
"encoding/hex"
"fmt"
"slices"
"sync"
@@ -44,11 +43,13 @@ func (s *Service) processPendingBlocksQueue() {
if !s.chainIsStarted() {
return
}

locker.Lock()
defer locker.Unlock()

if err := s.processPendingBlocks(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending blocks")
}
locker.Unlock()
})
}

@@ -73,8 +74,10 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
randGen := rand.NewGenerator()
var parentRoots [][32]byte

blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)

// Iterate through sorted slots.
for _, slot := range sortedSlots {
for i, slot := range sortedSlots {
// Skip processing if slot is in the future.
if slot > s.cfg.clock.CurrentSlot() {
continue
@@ -91,6 +94,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {

// Process each block in the queue.
for _, b := range blocksInCache {
start := time.Now()
totalDuration := time.Duration(0)

if err := blocks.BeaconBlockIsNil(b); err != nil {
continue
}
@@ -147,19 +153,34 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
}
cancelFunction()

// Process pending attestations for this block.
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
log.WithError(err).Debug("Failed to process pending attestations for block")
}
blkRoots = append(blkRoots, blkRoot)

// Remove the processed block from the queue.
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
return err
}
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")

duration := time.Since(start)
totalDuration += duration
log.WithFields(logrus.Fields{
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
"slot": slot,
"root": fmt.Sprintf("%#x", blkRoot),
"duration": duration,
"totalDuration": totalDuration,
}).Debug("Processed pending block and cleared it in cache")
}

span.End()
}

for _, blkRoot := range blkRoots {
// Process pending attestations for this block.
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
log.WithError(err).Debug("Failed to process pending attestations for block")
}
}

return s.sendBatchRootRequest(ctx, parentRoots, randGen)
}

@@ -379,6 +400,19 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
req = roots[:maxReqBlock]
}

if logrus.GetLevel() >= logrus.DebugLevel {
rootsStr := make([]string, 0, len(roots))
for _, req := range roots {
rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
}

log.WithFields(logrus.Fields{
"peer": pid,
"count": len(req),
"roots": rootsStr,
}).Debug("Requesting blocks by root")
}

// Send the request to the peer.
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
@@ -438,8 +472,6 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
roots = append(roots[:i], roots[i+1:]...)
continue
}

log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
}
return roots
}

@@ -189,12 +189,30 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()

log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
"proposerIndex": source.ProposerIndex(),
"type": source.Type(),
})

var constructedSidecarCount uint64
for iteration := uint64(0); ; /*no stop condition*/ iteration++ {
log = log.WithField("iteration", iteration)

// Exit early if all sidecars to sample have been seen.
if s.haveAllSidecarsBeenSeen(source.Slot(), source.ProposerIndex(), columnIndicesToSample) {
if iteration > 0 && constructedSidecarCount == 0 {
log.Debug("No data column sidecars constructed from the execution client")
}

return nil, nil
}

if iteration == 0 {
dataColumnsRecoveredFromELAttempts.Inc()
}

// Try to reconstruct data column constructedSidecars from the execution client.
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
if err != nil {
@@ -202,8 +220,8 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
}

// No sidecars are retrieved from the EL, retry later
sidecarCount := uint64(len(constructedSidecars))
if sidecarCount == 0 {
constructedSidecarCount = uint64(len(constructedSidecars))
if constructedSidecarCount == 0 {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -212,9 +230,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
continue
}

dataColumnsRecoveredFromELTotal.Inc()

// Boundary check.
if sidecarCount != fieldparams.NumberOfColumns {
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, fieldparams.NumberOfColumns)
if constructedSidecarCount != fieldparams.NumberOfColumns {
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
}

unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, source.Slot(), source.ProposerIndex(), columnIndicesToSample, constructedSidecars)
@@ -222,19 +242,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
}

if len(unseenIndices) > 0 {
dataColumnsRecoveredFromELTotal.Inc()
log.WithFields(logrus.Fields{
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).Debug("Constructed data column sidecars from the execution client")

log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
"proposerIndex": source.ProposerIndex(),
"iteration": iteration,
"type": source.Type(),
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).Debug("Constructed data column sidecars from the execution client")
}
dataColumnSidecarsObtainedViaELCount.Observe(float64(len(unseenIndices)))

return nil, nil
}

@@ -51,14 +51,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
return pubsub.ValidationReject, err
}

// Reject messages that are not of the expected type.
dcsc, ok := m.(*eth.DataColumnSidecar)
if !ok {
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
return pubsub.ValidationReject, errWrongMessage
}

@@ -54,11 +54,13 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
cfg.FuluForkEpoch = 6
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -101,7 +103,10 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
}

for _, test := range tests {
for v := 1; v < 6; v++ {
for v := range version.All() {
if v == version.Phase0 {
continue
}
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
@@ -180,11 +185,13 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
cfg.FuluForkEpoch = 6
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -227,7 +234,10 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
}

for _, test := range tests {
for v := 1; v < 6; v++ {
for v := range version.All() {
if v == version.Phase0 {
continue
}
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
ctx := t.Context()
p := p2ptest.NewTestP2P(t)

@@ -361,7 +361,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
}

if !dv.fc.HasNode(parentRoot) {
return columnErrBuilder(errSidecarParentNotSeen)
return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
}
}

3
changelog/bastin_fix-lcp2p-bug.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.

3
changelog/builder-index.md
Normal file
@@ -0,0 +1,3 @@
### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.

3
changelog/james-prysm_align-atter-pool-apis.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API also broadcasts immediately now, matching the gRPC behavior.

2
changelog/kasey_fix-backfill-flag.md
Normal file
@@ -0,0 +1,2 @@
#### Fixed
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.

3
changelog/manu-agg.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Pending aggregates: when multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them.

2
changelog/manu-remove-error-logs.md
Normal file
@@ -0,0 +1,2 @@
### Changed
- `validateDataColumn`: Remove error logs.

3
changelog/manu-revert-16149.md
Normal file
@@ -0,0 +1,3 @@
### Ignored

- Revert #16149

7
changelog/manu_reconstruct-metrics.md
Normal file
@@ -0,0 +1,7 @@
### Added
- Prometheus histogram `cells_and_proofs_from_structured_computation_milliseconds` to track computation time for cells and proofs from structured blobs.
- Prometheus histogram `get_blobs_v2_latency_milliseconds` to track RPC latency for `getBlobsV2` calls to the execution layer.

### Changed
- Run `ComputeCellsAndProofsFromFlat` in parallel to improve performance when computing cells and proofs.
- Run `ComputeCellsAndProofsFromStructured` in parallel to improve performance when computing cells and proofs.
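
Editor's note: the changelog entry above names two new Prometheus histograms. A minimal sketch of how such a histogram is typically registered and observed with promauto follows; the Help text and bucket boundaries here are illustrative assumptions, not the values used in the actual commit.

package metrics

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// getBlobsV2Latency reuses the metric name from the changelog entry above.
// The Help string and buckets are assumed for illustration only.
var getBlobsV2Latency = promauto.NewHistogram(prometheus.HistogramOpts{
    Name:    "get_blobs_v2_latency_milliseconds",
    Help:    "RPC latency for getBlobsV2 calls to the execution layer, in milliseconds.",
    Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000},
})

// observeGetBlobsV2 records one call's duration in milliseconds.
func observeGetBlobsV2(start time.Time) {
    getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
}
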
3
changelog/potuz_next_epoch_attributes.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Do not process slots and copy states for next epoch proposers after Fulu.

2
changelog/potuz_no_fcu_on_batches.md
Normal file
@@ -0,0 +1,2 @@
### Ignored
- Do not send FCU on block batches.

3
changelog/potuz_remove_signature_check.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Do not check block signature on state transition.

3
changelog/potuz_use_head_previous_epoch.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint.

3
changelog/radek_extend-http-analyzer.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Extend the `httperror` analyzer (renamed to `httpwriter`) to more functions.

3
changelog/sashass1315_fix-panic.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Avoid panic when the fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175)

3
changelog/satushh-consolidation.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Performance improvement in ProcessConsolidationRequests: use the more performant HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw, since there is no need to compute the full total pending balance.
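
Editor's note: the consolidation entry above reasons that an existence check can return as soon as it finds a match, while computing the full pending balance must walk the whole list. A standalone sketch of that difference follows; the types and helper names are hypothetical illustrations, not Prysm's actual state API.

package example

// pendingWithdrawal is a hypothetical stand-in for a pending balance entry.
type pendingWithdrawal struct {
    ValidatorIndex uint64
    Amount         uint64
}

// totalPendingBalance walks the entire list even when only a yes/no answer is needed.
func totalPendingBalance(pending []pendingWithdrawal, idx uint64) uint64 {
    var total uint64
    for _, p := range pending {
        if p.ValidatorIndex == idx {
            total += p.Amount
        }
    }
    return total
}

// hasPendingBalance returns early on the first match, which is all the consolidation
// check needs and is why the changelog calls the existence check more performant.
func hasPendingBalance(pending []pendingWithdrawal, idx uint64) bool {
    for _, p := range pending {
        if p.ValidatorIndex == idx {
            return true
        }
    }
    return false
}
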
@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"basis_points.go",
"builder_index.go",
"committee_bits_mainnet.go",
"committee_bits_minimal.go", # keep
"committee_index.go",
@@ -31,6 +32,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"builder_index_test.go",
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",

54
consensus-types/primitives/builder_index.go
Normal file
@@ -0,0 +1,54 @@
package primitives

import (
"fmt"

fssz "github.com/prysmaticlabs/fastssz"
)

var _ fssz.HashRoot = (BuilderIndex)(0)
var _ fssz.Marshaler = (*BuilderIndex)(nil)
var _ fssz.Unmarshaler = (*BuilderIndex)(nil)

// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64

// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)
}

// HashTreeRootWith appends the SSZ uint64 representation of the index to the given hasher.
func (b BuilderIndex) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint64(uint64(b))
return nil
}

// UnmarshalSSZ decodes the SSZ-encoded uint64 index from buf.
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
if len(buf) != b.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", b.SizeSSZ(), len(buf))
}
*b = BuilderIndex(fssz.UnmarshallUint64(buf))
return nil
}

// MarshalSSZTo appends the SSZ-encoded index to dst and returns the extended buffer.
func (b *BuilderIndex) MarshalSSZTo(dst []byte) ([]byte, error) {
marshalled, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalled...), nil
}

// MarshalSSZ encodes the index as an SSZ uint64.
func (b *BuilderIndex) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint64([]byte{}, uint64(*b))
return marshalled, nil
}

// SizeSSZ returns the size of the SSZ-encoded index in bytes.
func (b *BuilderIndex) SizeSSZ() int {
return 8
}

86
consensus-types/primitives/builder_index_test.go
Normal file
@@ -0,0 +1,86 @@
package primitives_test

import (
"encoding/binary"
"slices"
"strconv"
"testing"

"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderIndex_SSZRoundTripAndHashRoot(t *testing.T) {
cases := []uint64{
0,
1,
42,
(1 << 32) - 1,
1 << 32,
^uint64(0),
}

for _, v := range cases {
t.Run("v="+u64name(v), func(t *testing.T) {
t.Parallel()

val := primitives.BuilderIndex(v)
require.Equal(t, 8, (&val).SizeSSZ())

enc, err := (&val).MarshalSSZ()
require.NoError(t, err)
require.Equal(t, 8, len(enc))

wantEnc := make([]byte, 8)
binary.LittleEndian.PutUint64(wantEnc, v)
require.DeepEqual(t, wantEnc, enc)

dstPrefix := []byte("prefix:")
dst, err := (&val).MarshalSSZTo(slices.Clone(dstPrefix))
require.NoError(t, err)
wantDst := append(dstPrefix, wantEnc...)
require.DeepEqual(t, wantDst, dst)

var decoded primitives.BuilderIndex
require.NoError(t, (&decoded).UnmarshalSSZ(enc))
require.Equal(t, val, decoded)

root, err := val.HashTreeRoot()
require.NoError(t, err)

var wantRoot [32]byte
binary.LittleEndian.PutUint64(wantRoot[:8], v)
require.Equal(t, wantRoot, root)
})
}
}

func TestBuilderIndex_UnmarshalSSZRejectsWrongSize(t *testing.T) {
for _, size := range []int{7, 9} {
t.Run("size="+strconv.Itoa(size), func(t *testing.T) {
t.Parallel()
var v primitives.BuilderIndex
err := (&v).UnmarshalSSZ(make([]byte, size))
require.ErrorContains(t, "expected buffer of length 8", err)
})
}
}

func u64name(v uint64) string {
switch v {
case 0:
return "0"
case 1:
return "1"
case 42:
return "42"
case (1 << 32) - 1:
return "2^32-1"
case 1 << 32:
return "2^32"
case ^uint64(0):
return "max"
default:
return "custom"
}
}

@@ -43,6 +43,7 @@ func WriteJson(w http.ResponseWriter, v any) {
func WriteSsz(w http.ResponseWriter, respSsz []byte) {
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
w.WriteHeader(http.StatusOK)
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
log.WithError(err).Error("Could not write response message")
}

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["analyzer.go"],
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httperror",
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httpwriter",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_default_library",
@@ -1,4 +1,4 @@
package httperror
package httpwriter

import (
"go/ast"
@@ -9,8 +9,8 @@ import (
)

var Analyzer = &analysis.Analyzer{
Name: "httperror",
Doc: "Ensures calls to httputil.HandleError are immediately followed by a return statement.",
Name: "httpwriter",
Doc: "Ensures that httputil functions which make use of the writer are immediately followed by a return statement.",
Requires: []*analysis.Analyzer{
inspect.Analyzer,
},
@@ -99,7 +99,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex

// Now check the current statement itself: is it (or does it contain) a direct call to httputil.HandleError?
// We only consider ExprStmt that are direct CallExpr to httputil.HandleError.
call := findHandleErrorCall(stmt)
call, name := findHandleErrorCall(stmt)
if call == nil {
continue
}
@@ -121,7 +121,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
continue
}
// otherwise it's not a return (even if it's an if/for etc) -> violation
pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be immediately followed by a return statement")
pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
continue
}

@@ -133,31 +133,33 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
}

// Non-void function and it's the last statement → violation
pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be followed by return because function has return values")
pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
}
}

// findHandleErrorCall returns the call expression if stmt is a direct call to httputil.HandleError(...),
// otherwise nil. We only match direct ExprStmt -> CallExpr -> SelectorExpr where selector is httputil.HandleError.
func findHandleErrorCall(stmt ast.Stmt) *ast.CallExpr {
func findHandleErrorCall(stmt ast.Stmt) (*ast.CallExpr, string) {
es, ok := stmt.(*ast.ExprStmt)
if !ok {
return nil
return nil, ""
}
call, ok := es.X.(*ast.CallExpr)
if !ok {
return nil
return nil, ""
}
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return nil
return nil, ""
}
pkgIdent, ok := sel.X.(*ast.Ident)
if !ok {
return nil
return nil, ""
}
if pkgIdent.Name == "httputil" && sel.Sel.Name == "HandleError" {
return call
selectorName := sel.Sel.Name
if pkgIdent.Name == "httputil" &&
(selectorName == "HandleError" || selectorName == "WriteError" || selectorName == "WriteJson" || selectorName == "WriteSSZ") {
return call, selectorName
}
return nil
return nil, ""
}
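
Editor's note: to make the rule concrete, the pattern the renamed httpwriter analyzer enforces is that any httputil call that writes to the ResponseWriter is immediately followed by a return, so nothing else can write to the same response. Below is a self-contained sketch of a conforming handler; writeJSON and writeError are local stand-ins mirroring the WriteJson signature shown above, since the exact httputil import path is not part of this diff.

package example

import (
    "encoding/json"
    "net/http"
)

// writeJSON is a local stand-in with the same shape as httputil.WriteJson(w http.ResponseWriter, v any).
func writeJSON(w http.ResponseWriter, v any) {
    w.Header().Set("Content-Type", "application/json")
    _ = json.NewEncoder(w).Encode(v)
}

// writeError is a hypothetical stand-in for an httputil error helper that writes to w.
func writeError(w http.ResponseWriter, msg string, code int) {
    http.Error(w, msg, code)
}

// getStatus sketches the shape the analyzer enforces: once a helper has written the
// response, the very next statement is a return, so nothing else writes to w.
func getStatus(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        writeError(w, "method not allowed", http.StatusMethodNotAllowed)
        return // with httputil.HandleError/WriteError, omitting this return is reported
    }

    writeJSON(w, map[string]string{"status": "ok"})
    // Last statement of a void handler: nothing further writes to w, so this is accepted.
}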